From c80d640d35566ef4cd3f11bdadf34891a95d88d9 Mon Sep 17 00:00:00 2001 From: Lex Christopherson Date: Thu, 12 Mar 2026 21:55:17 -0600 Subject: [PATCH] feat: vendor Pi source into workspace monorepo Vendor all 4 Pi packages (tui, ai, agent-core, coding-agent) from pi-mono v0.57.1 as @gsd/* workspace packages under packages/. This replaces the compiled npm dependency (@mariozechner/pi-coding-agent) and patch-package workflow, giving direct source access for modifications. - Copy Pi source from pi-mono v0.57.1 into packages/ - Create workspace package.json + tsconfig.json for each package - Rename ~240 imports from @mariozechner/pi-* to @gsd/pi-* - Apply existing patches as source edits (setModel persist, VT input) - Remove @mariozechner/pi-coding-agent dep and patch-package - Update build pipeline to build packages in dependency order - Add pi-upstream git remote for future selective syncing Co-Authored-By: Claude Opus 4.6 --- .gitignore | 5 + package-lock.json | 1402 +- package.json | 16 +- packages/pi-agent-core/package.json | 14 + packages/pi-agent-core/src/agent-loop.ts | 417 + packages/pi-agent-core/src/agent.ts | 568 + packages/pi-agent-core/src/index.ts | 8 + packages/pi-agent-core/src/proxy.ts | 340 + packages/pi-agent-core/src/types.ts | 194 + packages/pi-agent-core/tsconfig.json | 27 + packages/pi-ai/bedrock-provider.d.ts | 1 + packages/pi-ai/bedrock-provider.js | 1 + packages/pi-ai/package.json | 40 + packages/pi-ai/src/api-registry.ts | 98 + packages/pi-ai/src/bedrock-provider.ts | 6 + packages/pi-ai/src/cli.ts | 133 + packages/pi-ai/src/env-api-keys.ts | 129 + packages/pi-ai/src/index.ts | 32 + packages/pi-ai/src/models.generated.ts | 13370 ++++++++++++++++ packages/pi-ai/src/models.ts | 77 + packages/pi-ai/src/oauth.ts | 1 + .../pi-ai/src/providers/amazon-bedrock.ts | 751 + packages/pi-ai/src/providers/anthropic.ts | 883 + .../src/providers/azure-openai-responses.ts | 259 + .../src/providers/github-copilot-headers.ts | 37 + 
.../pi-ai/src/providers/google-gemini-cli.ts | 967 ++ packages/pi-ai/src/providers/google-shared.ts | 313 + packages/pi-ai/src/providers/google-vertex.ts | 485 + packages/pi-ai/src/providers/google.ts | 455 + packages/pi-ai/src/providers/mistral.ts | 582 + .../src/providers/openai-codex-responses.ts | 875 + .../pi-ai/src/providers/openai-completions.ts | 820 + .../src/providers/openai-responses-shared.ts | 496 + .../pi-ai/src/providers/openai-responses.ts | 262 + .../pi-ai/src/providers/register-builtins.ts | 186 + .../pi-ai/src/providers/simple-options.ts | 46 + .../pi-ai/src/providers/transform-messages.ts | 172 + packages/pi-ai/src/stream.ts | 59 + packages/pi-ai/src/types.ts | 321 + packages/pi-ai/src/utils/event-stream.ts | 87 + packages/pi-ai/src/utils/hash.ts | 13 + packages/pi-ai/src/utils/json-parse.ts | 28 + packages/pi-ai/src/utils/oauth/anthropic.ts | 138 + .../pi-ai/src/utils/oauth/github-copilot.ts | 381 + .../src/utils/oauth/google-antigravity.ts | 457 + .../src/utils/oauth/google-gemini-cli.ts | 599 + packages/pi-ai/src/utils/oauth/index.ts | 162 + .../pi-ai/src/utils/oauth/openai-codex.ts | 455 + packages/pi-ai/src/utils/oauth/pkce.ts | 34 + packages/pi-ai/src/utils/oauth/types.ts | 59 + packages/pi-ai/src/utils/overflow.ts | 123 + packages/pi-ai/src/utils/sanitize-unicode.ts | 25 + packages/pi-ai/src/utils/typebox-helpers.ts | 24 + packages/pi-ai/src/utils/validation.ts | 84 + packages/pi-ai/tsconfig.json | 27 + packages/pi-coding-agent/package.json | 55 + packages/pi-coding-agent/src/cli.ts | 18 + packages/pi-coding-agent/src/cli/args.ts | 316 + .../src/cli/config-selector.ts | 52 + .../pi-coding-agent/src/cli/file-processor.ts | 96 + .../pi-coding-agent/src/cli/list-models.ts | 104 + .../pi-coding-agent/src/cli/session-picker.ts | 51 + packages/pi-coding-agent/src/config.ts | 241 + .../pi-coding-agent/src/core/agent-session.ts | 3050 ++++ .../pi-coding-agent/src/core/auth-storage.ts | 489 + .../pi-coding-agent/src/core/bash-executor.ts | 278 + 
.../core/compaction/branch-summarization.ts | 352 + .../src/core/compaction/compaction.ts | 813 + .../src/core/compaction/index.ts | 7 + .../src/core/compaction/utils.ts | 170 + packages/pi-coding-agent/src/core/defaults.ts | 3 + .../pi-coding-agent/src/core/diagnostics.ts | 15 + .../pi-coding-agent/src/core/event-bus.ts | 33 + packages/pi-coding-agent/src/core/exec.ts | 104 + .../src/core/export-html/ansi-to-html.ts | 258 + .../src/core/export-html/index.ts | 306 + .../src/core/export-html/template.css | 971 ++ .../src/core/export-html/template.html | 54 + .../src/core/export-html/template.js | 1583 ++ .../src/core/export-html/tool-renderer.ts | 114 + .../src/core/extensions/index.ts | 171 + .../src/core/extensions/loader.ts | 545 + .../src/core/extensions/runner.ts | 884 + .../src/core/extensions/types.ts | 1411 ++ .../src/core/extensions/wrapper.ts | 118 + .../src/core/footer-data-provider.ts | 144 + packages/pi-coding-agent/src/core/index.ts | 61 + .../pi-coding-agent/src/core/keybindings.ts | 211 + packages/pi-coding-agent/src/core/messages.ts | 195 + .../src/core/model-registry.ts | 694 + .../src/core/model-resolver.ts | 594 + .../src/core/package-manager.ts | 1794 +++ .../src/core/prompt-templates.ts | 299 + .../src/core/resolve-config-value.ts | 64 + .../src/core/resource-loader.ts | 868 + packages/pi-coding-agent/src/core/sdk.ts | 373 + .../src/core/session-manager.ts | 1410 ++ .../src/core/settings-manager.ts | 942 ++ packages/pi-coding-agent/src/core/skills.ts | 459 + .../src/core/slash-commands.ts | 38 + .../pi-coding-agent/src/core/system-prompt.ts | 218 + packages/pi-coding-agent/src/core/timings.ts | 25 + .../pi-coding-agent/src/core/tools/bash.ts | 347 + .../src/core/tools/edit-diff.ts | 308 + .../pi-coding-agent/src/core/tools/edit.ts | 227 + .../pi-coding-agent/src/core/tools/find.ts | 273 + .../pi-coding-agent/src/core/tools/grep.ts | 346 + .../pi-coding-agent/src/core/tools/index.ts | 139 + packages/pi-coding-agent/src/core/tools/ls.ts | 170 + 
.../src/core/tools/path-utils.ts | 94 + .../pi-coding-agent/src/core/tools/read.ts | 222 + .../src/core/tools/truncate.ts | 265 + .../pi-coding-agent/src/core/tools/write.ts | 118 + packages/pi-coding-agent/src/index.ts | 333 + packages/pi-coding-agent/src/main.ts | 821 + packages/pi-coding-agent/src/migrations.ts | 295 + packages/pi-coding-agent/src/modes/index.ts | 9 + .../src/modes/interactive/components/armin.ts | 382 + .../components/assistant-message.ts | 115 + .../interactive/components/bash-execution.ts | 210 + .../interactive/components/bordered-loader.ts | 66 + .../components/branch-summary-message.ts | 58 + .../components/compaction-summary-message.ts | 59 + .../interactive/components/config-selector.ts | 592 + .../interactive/components/countdown-timer.ts | 38 + .../interactive/components/custom-editor.ts | 80 + .../interactive/components/custom-message.ts | 99 + .../modes/interactive/components/daxnuts.ts | 164 + .../src/modes/interactive/components/diff.ts | 147 + .../interactive/components/dynamic-border.ts | 25 + .../components/extension-editor.ts | 147 + .../interactive/components/extension-input.ts | 85 + .../components/extension-selector.ts | 107 + .../modes/interactive/components/footer.ts | 216 + .../src/modes/interactive/components/index.ts | 32 + .../components/keybinding-hints.ts | 66 + .../interactive/components/login-dialog.ts | 174 + .../interactive/components/model-selector.ts | 329 + .../interactive/components/oauth-selector.ts | 121 + .../components/scoped-models-selector.ts | 346 + .../components/session-selector-search.ts | 194 + .../components/session-selector.ts | 1019 ++ .../components/settings-selector.ts | 421 + .../components/show-images-selector.ts | 45 + .../components/skill-invocation-message.ts | 55 + .../interactive/components/theme-selector.ts | 62 + .../components/thinking-selector.ts | 64 + .../interactive/components/tool-execution.ts | 916 ++ .../interactive/components/tree-selector.ts | 1184 ++ 
.../components/user-message-selector.ts | 143 + .../interactive/components/user-message.ts | 32 + .../interactive/components/visual-truncate.ts | 50 + .../src/modes/interactive/interactive-mode.ts | 4464 ++++++ .../src/modes/interactive/theme/dark.json | 85 + .../src/modes/interactive/theme/light.json | 84 + .../modes/interactive/theme/theme-schema.json | 335 + .../src/modes/interactive/theme/theme.ts | 1105 ++ .../pi-coding-agent/src/modes/print-mode.ts | 124 + .../pi-coding-agent/src/modes/rpc/jsonl.ts | 58 + .../src/modes/rpc/rpc-client.ts | 505 + .../pi-coding-agent/src/modes/rpc/rpc-mode.ts | 638 + .../src/modes/rpc/rpc-types.ts | 263 + .../pi-coding-agent/src/utils/changelog.ts | 99 + .../src/utils/clipboard-image.ts | 207 + .../src/utils/clipboard-native.ts | 21 + .../pi-coding-agent/src/utils/clipboard.ts | 62 + .../pi-coding-agent/src/utils/frontmatter.ts | 39 + packages/pi-coding-agent/src/utils/git.ts | 192 + .../src/utils/image-convert.ts | 38 + .../pi-coding-agent/src/utils/image-resize.ts | 231 + packages/pi-coding-agent/src/utils/mime.ts | 30 + packages/pi-coding-agent/src/utils/photon.ts | 139 + packages/pi-coding-agent/src/utils/shell.ts | 202 + packages/pi-coding-agent/src/utils/sleep.ts | 18 + .../src/utils/tools-manager.ts | 286 + packages/pi-coding-agent/tsconfig.json | 27 + packages/pi-tui/package.json | 21 + packages/pi-tui/src/autocomplete.ts | 736 + packages/pi-tui/src/components/box.ts | 137 + .../src/components/cancellable-loader.ts | 40 + packages/pi-tui/src/components/editor.ts | 2035 +++ packages/pi-tui/src/components/image.ts | 104 + packages/pi-tui/src/components/input.ts | 521 + packages/pi-tui/src/components/loader.ts | 55 + packages/pi-tui/src/components/markdown.ts | 806 + packages/pi-tui/src/components/select-list.ts | 188 + .../pi-tui/src/components/settings-list.ts | 250 + packages/pi-tui/src/components/spacer.ts | 28 + packages/pi-tui/src/components/text.ts | 106 + .../pi-tui/src/components/truncated-text.ts | 65 + 
packages/pi-tui/src/editor-component.ts | 74 + packages/pi-tui/src/fuzzy.ts | 133 + packages/pi-tui/src/index.ts | 93 + packages/pi-tui/src/keybindings.ts | 189 + packages/pi-tui/src/keys.ts | 1255 ++ packages/pi-tui/src/kill-ring.ts | 46 + packages/pi-tui/src/stdin-buffer.ts | 386 + packages/pi-tui/src/terminal-image.ts | 381 + packages/pi-tui/src/terminal.ts | 349 + packages/pi-tui/src/tui.ts | 1212 ++ packages/pi-tui/src/undo-stack.ts | 28 + packages/pi-tui/src/utils.ts | 905 ++ packages/pi-tui/tsconfig.json | 27 + ...@mariozechner+pi-coding-agent+0.57.1.patch | 108 - patches/@mariozechner+pi-tui+0.57.1.patch | 47 - scripts/postinstall.js | 17 +- scripts/sync-pkg-version.cjs | 2 +- src/cli.ts | 2 +- src/onboarding.ts | 2 +- src/pi-migration.ts | 2 +- src/resource-loader.ts | 2 +- .../extensions/ask-user-questions.ts | 4 +- src/resources/extensions/bg-shell/index.ts | 8 +- .../extensions/browser-tools/index.ts | 6 +- src/resources/extensions/context7/index.ts | 6 +- .../extensions/get-secrets-from-user.ts | 4 +- .../extensions/google-search/index.ts | 6 +- src/resources/extensions/gsd/activity-log.ts | 2 +- src/resources/extensions/gsd/auto.ts | 4 +- src/resources/extensions/gsd/commands.ts | 4 +- .../extensions/gsd/dashboard-overlay.ts | 4 +- src/resources/extensions/gsd/guided-flow.ts | 2 +- src/resources/extensions/gsd/index.ts | 8 +- src/resources/extensions/gsd/metrics.ts | 2 +- .../extensions/gsd/migrate/command.ts | 2 +- src/resources/extensions/gsd/preferences.ts | 2 +- .../extensions/gsd/skill-discovery.ts | 2 +- .../extensions/gsd/worktree-command.ts | 2 +- src/resources/extensions/mac-tools/index.ts | 4 +- src/resources/extensions/mcporter/index.ts | 6 +- .../remote-questions/remote-command.ts | 6 +- .../search-the-web/command-search-provider.ts | 4 +- .../extensions/search-the-web/index.ts | 2 +- .../extensions/search-the-web/provider.ts | 2 +- .../search-the-web/tool-fetch-page.ts | 6 +- .../search-the-web/tool-llm-context.ts | 8 +- 
.../extensions/search-the-web/tool-search.ts | 8 +- src/resources/extensions/shared/confirm-ui.ts | 6 +- .../extensions/shared/interview-ui.ts | 6 +- .../extensions/shared/next-action-ui.ts | 6 +- .../extensions/shared/progress-widget.ts | 4 +- .../extensions/shared/thinking-widget.ts | 6 +- src/resources/extensions/shared/ui.ts | 8 +- src/resources/extensions/shared/wizard-ui.ts | 8 +- .../extensions/slash-commands/audit.ts | 2 +- .../extensions/slash-commands/clear.ts | 2 +- .../slash-commands/create-extension.ts | 4 +- .../slash-commands/create-slash-command.ts | 4 +- .../extensions/slash-commands/index.ts | 2 +- src/resources/extensions/subagent/agents.ts | 2 +- src/resources/extensions/subagent/index.ts | 10 +- src/resources/extensions/voice/index.ts | 6 +- src/tests/app-smoke.test.ts | 4 +- src/wizard.ts | 2 +- 254 files changed, 78899 insertions(+), 1306 deletions(-) create mode 100644 packages/pi-agent-core/package.json create mode 100644 packages/pi-agent-core/src/agent-loop.ts create mode 100644 packages/pi-agent-core/src/agent.ts create mode 100644 packages/pi-agent-core/src/index.ts create mode 100644 packages/pi-agent-core/src/proxy.ts create mode 100644 packages/pi-agent-core/src/types.ts create mode 100644 packages/pi-agent-core/tsconfig.json create mode 100644 packages/pi-ai/bedrock-provider.d.ts create mode 100644 packages/pi-ai/bedrock-provider.js create mode 100644 packages/pi-ai/package.json create mode 100644 packages/pi-ai/src/api-registry.ts create mode 100644 packages/pi-ai/src/bedrock-provider.ts create mode 100644 packages/pi-ai/src/cli.ts create mode 100644 packages/pi-ai/src/env-api-keys.ts create mode 100644 packages/pi-ai/src/index.ts create mode 100644 packages/pi-ai/src/models.generated.ts create mode 100644 packages/pi-ai/src/models.ts create mode 100644 packages/pi-ai/src/oauth.ts create mode 100644 packages/pi-ai/src/providers/amazon-bedrock.ts create mode 100644 packages/pi-ai/src/providers/anthropic.ts create mode 100644 
packages/pi-ai/src/providers/azure-openai-responses.ts create mode 100644 packages/pi-ai/src/providers/github-copilot-headers.ts create mode 100644 packages/pi-ai/src/providers/google-gemini-cli.ts create mode 100644 packages/pi-ai/src/providers/google-shared.ts create mode 100644 packages/pi-ai/src/providers/google-vertex.ts create mode 100644 packages/pi-ai/src/providers/google.ts create mode 100644 packages/pi-ai/src/providers/mistral.ts create mode 100644 packages/pi-ai/src/providers/openai-codex-responses.ts create mode 100644 packages/pi-ai/src/providers/openai-completions.ts create mode 100644 packages/pi-ai/src/providers/openai-responses-shared.ts create mode 100644 packages/pi-ai/src/providers/openai-responses.ts create mode 100644 packages/pi-ai/src/providers/register-builtins.ts create mode 100644 packages/pi-ai/src/providers/simple-options.ts create mode 100644 packages/pi-ai/src/providers/transform-messages.ts create mode 100644 packages/pi-ai/src/stream.ts create mode 100644 packages/pi-ai/src/types.ts create mode 100644 packages/pi-ai/src/utils/event-stream.ts create mode 100644 packages/pi-ai/src/utils/hash.ts create mode 100644 packages/pi-ai/src/utils/json-parse.ts create mode 100644 packages/pi-ai/src/utils/oauth/anthropic.ts create mode 100644 packages/pi-ai/src/utils/oauth/github-copilot.ts create mode 100644 packages/pi-ai/src/utils/oauth/google-antigravity.ts create mode 100644 packages/pi-ai/src/utils/oauth/google-gemini-cli.ts create mode 100644 packages/pi-ai/src/utils/oauth/index.ts create mode 100644 packages/pi-ai/src/utils/oauth/openai-codex.ts create mode 100644 packages/pi-ai/src/utils/oauth/pkce.ts create mode 100644 packages/pi-ai/src/utils/oauth/types.ts create mode 100644 packages/pi-ai/src/utils/overflow.ts create mode 100644 packages/pi-ai/src/utils/sanitize-unicode.ts create mode 100644 packages/pi-ai/src/utils/typebox-helpers.ts create mode 100644 packages/pi-ai/src/utils/validation.ts create mode 100644 
packages/pi-ai/tsconfig.json create mode 100644 packages/pi-coding-agent/package.json create mode 100644 packages/pi-coding-agent/src/cli.ts create mode 100644 packages/pi-coding-agent/src/cli/args.ts create mode 100644 packages/pi-coding-agent/src/cli/config-selector.ts create mode 100644 packages/pi-coding-agent/src/cli/file-processor.ts create mode 100644 packages/pi-coding-agent/src/cli/list-models.ts create mode 100644 packages/pi-coding-agent/src/cli/session-picker.ts create mode 100644 packages/pi-coding-agent/src/config.ts create mode 100644 packages/pi-coding-agent/src/core/agent-session.ts create mode 100644 packages/pi-coding-agent/src/core/auth-storage.ts create mode 100644 packages/pi-coding-agent/src/core/bash-executor.ts create mode 100644 packages/pi-coding-agent/src/core/compaction/branch-summarization.ts create mode 100644 packages/pi-coding-agent/src/core/compaction/compaction.ts create mode 100644 packages/pi-coding-agent/src/core/compaction/index.ts create mode 100644 packages/pi-coding-agent/src/core/compaction/utils.ts create mode 100644 packages/pi-coding-agent/src/core/defaults.ts create mode 100644 packages/pi-coding-agent/src/core/diagnostics.ts create mode 100644 packages/pi-coding-agent/src/core/event-bus.ts create mode 100644 packages/pi-coding-agent/src/core/exec.ts create mode 100644 packages/pi-coding-agent/src/core/export-html/ansi-to-html.ts create mode 100644 packages/pi-coding-agent/src/core/export-html/index.ts create mode 100644 packages/pi-coding-agent/src/core/export-html/template.css create mode 100644 packages/pi-coding-agent/src/core/export-html/template.html create mode 100644 packages/pi-coding-agent/src/core/export-html/template.js create mode 100644 packages/pi-coding-agent/src/core/export-html/tool-renderer.ts create mode 100644 packages/pi-coding-agent/src/core/extensions/index.ts create mode 100644 packages/pi-coding-agent/src/core/extensions/loader.ts create mode 100644 
packages/pi-coding-agent/src/core/extensions/runner.ts create mode 100644 packages/pi-coding-agent/src/core/extensions/types.ts create mode 100644 packages/pi-coding-agent/src/core/extensions/wrapper.ts create mode 100644 packages/pi-coding-agent/src/core/footer-data-provider.ts create mode 100644 packages/pi-coding-agent/src/core/index.ts create mode 100644 packages/pi-coding-agent/src/core/keybindings.ts create mode 100644 packages/pi-coding-agent/src/core/messages.ts create mode 100644 packages/pi-coding-agent/src/core/model-registry.ts create mode 100644 packages/pi-coding-agent/src/core/model-resolver.ts create mode 100644 packages/pi-coding-agent/src/core/package-manager.ts create mode 100644 packages/pi-coding-agent/src/core/prompt-templates.ts create mode 100644 packages/pi-coding-agent/src/core/resolve-config-value.ts create mode 100644 packages/pi-coding-agent/src/core/resource-loader.ts create mode 100644 packages/pi-coding-agent/src/core/sdk.ts create mode 100644 packages/pi-coding-agent/src/core/session-manager.ts create mode 100644 packages/pi-coding-agent/src/core/settings-manager.ts create mode 100644 packages/pi-coding-agent/src/core/skills.ts create mode 100644 packages/pi-coding-agent/src/core/slash-commands.ts create mode 100644 packages/pi-coding-agent/src/core/system-prompt.ts create mode 100644 packages/pi-coding-agent/src/core/timings.ts create mode 100644 packages/pi-coding-agent/src/core/tools/bash.ts create mode 100644 packages/pi-coding-agent/src/core/tools/edit-diff.ts create mode 100644 packages/pi-coding-agent/src/core/tools/edit.ts create mode 100644 packages/pi-coding-agent/src/core/tools/find.ts create mode 100644 packages/pi-coding-agent/src/core/tools/grep.ts create mode 100644 packages/pi-coding-agent/src/core/tools/index.ts create mode 100644 packages/pi-coding-agent/src/core/tools/ls.ts create mode 100644 packages/pi-coding-agent/src/core/tools/path-utils.ts create mode 100644 packages/pi-coding-agent/src/core/tools/read.ts 
create mode 100644 packages/pi-coding-agent/src/core/tools/truncate.ts create mode 100644 packages/pi-coding-agent/src/core/tools/write.ts create mode 100644 packages/pi-coding-agent/src/index.ts create mode 100644 packages/pi-coding-agent/src/main.ts create mode 100644 packages/pi-coding-agent/src/migrations.ts create mode 100644 packages/pi-coding-agent/src/modes/index.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/armin.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/assistant-message.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/bash-execution.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/bordered-loader.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/branch-summary-message.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/compaction-summary-message.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/config-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/countdown-timer.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/custom-editor.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/custom-message.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/daxnuts.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/diff.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/dynamic-border.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/extension-editor.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/extension-input.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/extension-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/footer.ts create 
mode 100644 packages/pi-coding-agent/src/modes/interactive/components/index.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/keybinding-hints.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/login-dialog.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/model-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/oauth-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/scoped-models-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/session-selector-search.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/session-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/settings-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/show-images-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/skill-invocation-message.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/theme-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/thinking-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/tool-execution.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/tree-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/user-message-selector.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/user-message.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/components/visual-truncate.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/interactive-mode.ts create mode 100644 packages/pi-coding-agent/src/modes/interactive/theme/dark.json create mode 100644 packages/pi-coding-agent/src/modes/interactive/theme/light.json create 
mode 100644 packages/pi-coding-agent/src/modes/interactive/theme/theme-schema.json create mode 100644 packages/pi-coding-agent/src/modes/interactive/theme/theme.ts create mode 100644 packages/pi-coding-agent/src/modes/print-mode.ts create mode 100644 packages/pi-coding-agent/src/modes/rpc/jsonl.ts create mode 100644 packages/pi-coding-agent/src/modes/rpc/rpc-client.ts create mode 100644 packages/pi-coding-agent/src/modes/rpc/rpc-mode.ts create mode 100644 packages/pi-coding-agent/src/modes/rpc/rpc-types.ts create mode 100644 packages/pi-coding-agent/src/utils/changelog.ts create mode 100644 packages/pi-coding-agent/src/utils/clipboard-image.ts create mode 100644 packages/pi-coding-agent/src/utils/clipboard-native.ts create mode 100644 packages/pi-coding-agent/src/utils/clipboard.ts create mode 100644 packages/pi-coding-agent/src/utils/frontmatter.ts create mode 100644 packages/pi-coding-agent/src/utils/git.ts create mode 100644 packages/pi-coding-agent/src/utils/image-convert.ts create mode 100644 packages/pi-coding-agent/src/utils/image-resize.ts create mode 100644 packages/pi-coding-agent/src/utils/mime.ts create mode 100644 packages/pi-coding-agent/src/utils/photon.ts create mode 100644 packages/pi-coding-agent/src/utils/shell.ts create mode 100644 packages/pi-coding-agent/src/utils/sleep.ts create mode 100644 packages/pi-coding-agent/src/utils/tools-manager.ts create mode 100644 packages/pi-coding-agent/tsconfig.json create mode 100644 packages/pi-tui/package.json create mode 100644 packages/pi-tui/src/autocomplete.ts create mode 100644 packages/pi-tui/src/components/box.ts create mode 100644 packages/pi-tui/src/components/cancellable-loader.ts create mode 100644 packages/pi-tui/src/components/editor.ts create mode 100644 packages/pi-tui/src/components/image.ts create mode 100644 packages/pi-tui/src/components/input.ts create mode 100644 packages/pi-tui/src/components/loader.ts create mode 100644 packages/pi-tui/src/components/markdown.ts create mode 100644 
packages/pi-tui/src/components/select-list.ts create mode 100644 packages/pi-tui/src/components/settings-list.ts create mode 100644 packages/pi-tui/src/components/spacer.ts create mode 100644 packages/pi-tui/src/components/text.ts create mode 100644 packages/pi-tui/src/components/truncated-text.ts create mode 100644 packages/pi-tui/src/editor-component.ts create mode 100644 packages/pi-tui/src/fuzzy.ts create mode 100644 packages/pi-tui/src/index.ts create mode 100644 packages/pi-tui/src/keybindings.ts create mode 100644 packages/pi-tui/src/keys.ts create mode 100644 packages/pi-tui/src/kill-ring.ts create mode 100644 packages/pi-tui/src/stdin-buffer.ts create mode 100644 packages/pi-tui/src/terminal-image.ts create mode 100644 packages/pi-tui/src/terminal.ts create mode 100644 packages/pi-tui/src/tui.ts create mode 100644 packages/pi-tui/src/undo-stack.ts create mode 100644 packages/pi-tui/src/utils.ts create mode 100644 packages/pi-tui/tsconfig.json delete mode 100644 patches/@mariozechner+pi-coding-agent+0.57.1.patch delete mode 100644 patches/@mariozechner+pi-tui+0.57.1.patch diff --git a/.gitignore b/.gitignore index ac4782171..7cc9d94f4 100644 --- a/.gitignore +++ b/.gitignore @@ -28,6 +28,10 @@ coverage/ .cache/ tmp/ +# ── Workspace packages ── +packages/*/dist/ +packages/*/node_modules/ + # ── GSD baseline (auto-generated) ── dist/ .bg_shell @@ -36,6 +40,7 @@ dist/ AGENTS.md .bg-shell/ TODOS.md +.planning/ # ── GSD baseline (auto-generated) ── .gsd/ diff --git a/package-lock.json b/package-lock.json index ac8d4a64e..58b33ad53 100644 --- a/package-lock.json +++ b/package-lock.json @@ -9,9 +9,11 @@ "version": "2.6.0", "hasInstallScript": true, "license": "MIT", + "workspaces": [ + "packages/*" + ], "dependencies": { "@clack/prompts": "^1.1.0", - "@mariozechner/pi-coding-agent": "^0.57.1", "picocolors": "^1.1.1", "playwright": "^1.58.2" }, @@ -21,7 +23,6 @@ }, "devDependencies": { "@types/node": "^22.0.0", - "patch-package": "^8.0.1", "typescript": "^5.4.0" }, 
"engines": { @@ -188,15 +189,15 @@ } }, "node_modules/@aws-sdk/client-bedrock-runtime": { - "version": "3.1006.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/client-bedrock-runtime/-/client-bedrock-runtime-3.1006.0.tgz", - "integrity": "sha512-xoReIImKWGEgI5+44ZqADIfjSQTx367d3wkH1kX8ZZNe70mUQxXDzLp1iWBk4FLjQyTnv0J0vMIvhSHVfvFxXA==", + "version": "3.1008.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/client-bedrock-runtime/-/client-bedrock-runtime-3.1008.0.tgz", + "integrity": "sha512-155H8HBuN4PLbhwOk7lA7RJ3wD4EWjminnNQoUS9PK2wQ0oGdTad0IHz1aCzNZNmI3fxsJqyty6YBSkbCZ5Lew==", "license": "Apache-2.0", "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "^3.973.19", - "@aws-sdk/credential-provider-node": "^3.972.19", + "@aws-sdk/credential-provider-node": "^3.972.20", "@aws-sdk/eventstream-handler-node": "^3.972.10", "@aws-sdk/middleware-eventstream": "^3.972.7", "@aws-sdk/middleware-host-header": "^3.972.7", @@ -205,11 +206,11 @@ "@aws-sdk/middleware-user-agent": "^3.972.20", "@aws-sdk/middleware-websocket": "^3.972.12", "@aws-sdk/region-config-resolver": "^3.972.7", - "@aws-sdk/token-providers": "3.1006.0", + "@aws-sdk/token-providers": "3.1008.0", "@aws-sdk/types": "^3.973.5", "@aws-sdk/util-endpoints": "^3.996.4", "@aws-sdk/util-user-agent-browser": "^3.972.7", - "@aws-sdk/util-user-agent-node": "^3.973.5", + "@aws-sdk/util-user-agent-node": "^3.973.6", "@smithy/config-resolver": "^4.4.10", "@smithy/core": "^3.23.9", "@smithy/eventstream-serde-browser": "^4.2.11", @@ -307,19 +308,19 @@ } }, "node_modules/@aws-sdk/credential-provider-ini": { - "version": "3.972.18", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.972.18.tgz", - "integrity": "sha512-vthIAXJISZnj2576HeyLBj4WTeX+I7PwWeRkbOa0mVX39K13SCGxCgOFuKj2ytm9qTlLOmXe4cdEnroteFtJfw==", + "version": "3.972.19", + "resolved": 
"https://registry.npmjs.org/@aws-sdk/credential-provider-ini/-/credential-provider-ini-3.972.19.tgz", + "integrity": "sha512-pVJVjWqVrPqjpFq7o0mCmeZu1Y0c94OCHSYgivdCD2wfmYVtBbwQErakruhgOD8pcMcx9SCqRw1pzHKR7OGBcA==", "license": "Apache-2.0", "dependencies": { "@aws-sdk/core": "^3.973.19", "@aws-sdk/credential-provider-env": "^3.972.17", "@aws-sdk/credential-provider-http": "^3.972.19", - "@aws-sdk/credential-provider-login": "^3.972.18", + "@aws-sdk/credential-provider-login": "^3.972.19", "@aws-sdk/credential-provider-process": "^3.972.17", - "@aws-sdk/credential-provider-sso": "^3.972.18", - "@aws-sdk/credential-provider-web-identity": "^3.972.18", - "@aws-sdk/nested-clients": "^3.996.8", + "@aws-sdk/credential-provider-sso": "^3.972.19", + "@aws-sdk/credential-provider-web-identity": "^3.972.19", + "@aws-sdk/nested-clients": "^3.996.9", "@aws-sdk/types": "^3.973.5", "@smithy/credential-provider-imds": "^4.2.11", "@smithy/property-provider": "^4.2.11", @@ -332,13 +333,13 @@ } }, "node_modules/@aws-sdk/credential-provider-login": { - "version": "3.972.18", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-login/-/credential-provider-login-3.972.18.tgz", - "integrity": "sha512-kINzc5BBxdYBkPZ0/i1AMPMOk5b5QaFNbYMElVw5QTX13AKj6jcxnv/YNl9oW9mg+Y08ti19hh01HhyEAxsSJQ==", + "version": "3.972.19", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-login/-/credential-provider-login-3.972.19.tgz", + "integrity": "sha512-jOXdZ1o+CywQKr6gyxgxuUmnGwTTnY2Kxs1PM7fI6AYtDWDnmW/yKXayNqkF8KjP1unflqMWKVbVt5VgmE3L0g==", "license": "Apache-2.0", "dependencies": { "@aws-sdk/core": "^3.973.19", - "@aws-sdk/nested-clients": "^3.996.8", + "@aws-sdk/nested-clients": "^3.996.9", "@aws-sdk/types": "^3.973.5", "@smithy/property-provider": "^4.2.11", "@smithy/protocol-http": "^5.3.11", @@ -351,17 +352,17 @@ } }, "node_modules/@aws-sdk/credential-provider-node": { - "version": "3.972.19", - "resolved": 
"https://registry.npmjs.org/@aws-sdk/credential-provider-node/-/credential-provider-node-3.972.19.tgz", - "integrity": "sha512-yDWQ9dFTr+IMxwanFe7+tbN5++q8psZBjlUwOiCXn1EzANoBgtqBwcpYcHaMGtn0Wlfj4NuXdf2JaEx1lz5RaQ==", + "version": "3.972.20", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-node/-/credential-provider-node-3.972.20.tgz", + "integrity": "sha512-0xHca2BnPY0kzjDYPH7vk8YbfdBPpWVS67rtqQMalYDQUCBYS37cZ55K6TuFxCoIyNZgSCFrVKr9PXC5BVvQQw==", "license": "Apache-2.0", "dependencies": { "@aws-sdk/credential-provider-env": "^3.972.17", "@aws-sdk/credential-provider-http": "^3.972.19", - "@aws-sdk/credential-provider-ini": "^3.972.18", + "@aws-sdk/credential-provider-ini": "^3.972.19", "@aws-sdk/credential-provider-process": "^3.972.17", - "@aws-sdk/credential-provider-sso": "^3.972.18", - "@aws-sdk/credential-provider-web-identity": "^3.972.18", + "@aws-sdk/credential-provider-sso": "^3.972.19", + "@aws-sdk/credential-provider-web-identity": "^3.972.19", "@aws-sdk/types": "^3.973.5", "@smithy/credential-provider-imds": "^4.2.11", "@smithy/property-provider": "^4.2.11", @@ -391,32 +392,14 @@ } }, "node_modules/@aws-sdk/credential-provider-sso": { - "version": "3.972.18", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.972.18.tgz", - "integrity": "sha512-YHYEfj5S2aqInRt5ub8nDOX8vAxgMvd84wm2Y3WVNfFa/53vOv9T7WOAqXI25qjj3uEcV46xxfqdDQk04h5XQA==", + "version": "3.972.19", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-sso/-/credential-provider-sso-3.972.19.tgz", + "integrity": "sha512-kVjQsEU3b///q7EZGrUzol9wzwJFKbEzqJKSq82A9ShrUTEO7FNylTtby3sPV19ndADZh1H3FB3+5ZrvKtEEeg==", "license": "Apache-2.0", "dependencies": { "@aws-sdk/core": "^3.973.19", - "@aws-sdk/nested-clients": "^3.996.8", - "@aws-sdk/token-providers": "3.1005.0", - "@aws-sdk/types": "^3.973.5", - "@smithy/property-provider": "^4.2.11", - "@smithy/shared-ini-file-loader": "^4.4.6", - "@smithy/types": 
"^4.13.0", - "tslib": "^2.6.2" - }, - "engines": { - "node": ">=20.0.0" - } - }, - "node_modules/@aws-sdk/credential-provider-sso/node_modules/@aws-sdk/token-providers": { - "version": "3.1005.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/token-providers/-/token-providers-3.1005.0.tgz", - "integrity": "sha512-vMxd+ivKqSxU9bHx5vmAlFKDAkjGotFU56IOkDa5DaTu1WWwbcse0yFHEm9I537oVvodaiwMl3VBwgHfzQ2rvw==", - "license": "Apache-2.0", - "dependencies": { - "@aws-sdk/core": "^3.973.19", - "@aws-sdk/nested-clients": "^3.996.8", + "@aws-sdk/nested-clients": "^3.996.9", + "@aws-sdk/token-providers": "3.1008.0", "@aws-sdk/types": "^3.973.5", "@smithy/property-provider": "^4.2.11", "@smithy/shared-ini-file-loader": "^4.4.6", @@ -428,13 +411,13 @@ } }, "node_modules/@aws-sdk/credential-provider-web-identity": { - "version": "3.972.18", - "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.972.18.tgz", - "integrity": "sha512-OqlEQpJ+J3T5B96qtC1zLLwkBloechP+fezKbCH0sbd2cCc0Ra55XpxWpk/hRj69xAOYtHvoC4orx6eTa4zU7g==", + "version": "3.972.19", + "resolved": "https://registry.npmjs.org/@aws-sdk/credential-provider-web-identity/-/credential-provider-web-identity-3.972.19.tgz", + "integrity": "sha512-BV1BlTFdG4w4tAihxN7iXDBoNcNewXD4q8uZlNQiUrnqxwGWUhKHODIQVSPlQGxXClEj+63m+cqZskw+ESmeZg==", "license": "Apache-2.0", "dependencies": { "@aws-sdk/core": "^3.973.19", - "@aws-sdk/nested-clients": "^3.996.8", + "@aws-sdk/nested-clients": "^3.996.9", "@aws-sdk/types": "^3.973.5", "@smithy/property-provider": "^4.2.11", "@smithy/shared-ini-file-loader": "^4.4.6", @@ -563,9 +546,9 @@ } }, "node_modules/@aws-sdk/nested-clients": { - "version": "3.996.8", - "resolved": "https://registry.npmjs.org/@aws-sdk/nested-clients/-/nested-clients-3.996.8.tgz", - "integrity": "sha512-6HlLm8ciMW8VzfB80kfIx16PBA9lOa9Dl+dmCBi78JDhvGlx3I7Rorwi5PpVRkL31RprXnYna3yBf6UKkD/PqA==", + "version": "3.996.9", + "resolved": 
"https://registry.npmjs.org/@aws-sdk/nested-clients/-/nested-clients-3.996.9.tgz", + "integrity": "sha512-+RpVtpmQbbtzFOKhMlsRcXM/3f1Z49qTOHaA8gEpHOYruERmog6f2AUtf/oTRLCWjR9H2b3roqryV/hI7QMW8w==", "license": "Apache-2.0", "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", @@ -579,7 +562,7 @@ "@aws-sdk/types": "^3.973.5", "@aws-sdk/util-endpoints": "^3.996.4", "@aws-sdk/util-user-agent-browser": "^3.972.7", - "@aws-sdk/util-user-agent-node": "^3.973.5", + "@aws-sdk/util-user-agent-node": "^3.973.6", "@smithy/config-resolver": "^4.4.10", "@smithy/core": "^3.23.9", "@smithy/fetch-http-handler": "^5.3.13", @@ -628,13 +611,13 @@ } }, "node_modules/@aws-sdk/token-providers": { - "version": "3.1006.0", - "resolved": "https://registry.npmjs.org/@aws-sdk/token-providers/-/token-providers-3.1006.0.tgz", - "integrity": "sha512-eCBaQI1w5PcliOdh8Y0YONOim2zNSTEK4E7gXYC4vIqiT/lzVODIFxmpc8oOBLPSANzcr9daIPPtjQ2C75dLFg==", + "version": "3.1008.0", + "resolved": "https://registry.npmjs.org/@aws-sdk/token-providers/-/token-providers-3.1008.0.tgz", + "integrity": "sha512-TulwlHQBWcJs668kNUDMZHN51DeLrDsYT59Ux4a/nbvr025gM6HjKJJ3LvnZccam7OS/ZKUVkWomCneRQKJbBg==", "license": "Apache-2.0", "dependencies": { "@aws-sdk/core": "^3.973.19", - "@aws-sdk/nested-clients": "^3.996.8", + "@aws-sdk/nested-clients": "^3.996.9", "@aws-sdk/types": "^3.973.5", "@smithy/property-provider": "^4.2.11", "@smithy/shared-ini-file-loader": "^4.4.6", @@ -714,15 +697,16 @@ } }, "node_modules/@aws-sdk/util-user-agent-node": { - "version": "3.973.5", - "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.973.5.tgz", - "integrity": "sha512-Dyy38O4GeMk7UQ48RupfHif//gqnOPbq/zlvRssc11E2mClT+aUfc3VS2yD8oLtzqO3RsqQ9I3gOBB4/+HjPOw==", + "version": "3.973.6", + "resolved": "https://registry.npmjs.org/@aws-sdk/util-user-agent-node/-/util-user-agent-node-3.973.6.tgz", + "integrity": 
"sha512-iF7G0prk7AvmOK64FcLvc/fW+Ty1H+vttajL7PvJFReU8urMxfYmynTTuFKDTA76Wgpq3FzTPKwabMQIXQHiXQ==", "license": "Apache-2.0", "dependencies": { "@aws-sdk/middleware-user-agent": "^3.972.20", "@aws-sdk/types": "^3.973.5", "@smithy/node-config-provider": "^4.3.11", "@smithy/types": "^4.13.0", + "@smithy/util-config-provider": "^4.2.2", "tslib": "^2.6.2" }, "engines": { @@ -752,9 +736,9 @@ } }, "node_modules/@aws/lambda-invoke-store": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/@aws/lambda-invoke-store/-/lambda-invoke-store-0.2.3.tgz", - "integrity": "sha512-oLvsaPMTBejkkmHhjf09xTgk71mOqyr/409NKhRIL08If7AhVfUsJhVsx386uJaqNd42v9kWamQ9lFbkoC2dYw==", + "version": "0.2.4", + "resolved": "https://registry.npmjs.org/@aws/lambda-invoke-store/-/lambda-invoke-store-0.2.4.tgz", + "integrity": "sha512-iY8yvjE0y651BixKNPgmv1WrQc+GZ142sb0z4gYnChDDY2YqI4P/jsSopBWrKfAt7LOJAkOXt7rC/hms+WclQQ==", "license": "Apache-2.0", "engines": { "node": ">=18.0.0" @@ -770,9 +754,9 @@ } }, "node_modules/@borewit/text-codec": { - "version": "0.2.1", - "resolved": "https://registry.npmjs.org/@borewit/text-codec/-/text-codec-0.2.1.tgz", - "integrity": "sha512-k7vvKPbf7J2fZ5klGRD9AeKfUvojuZIQ3BT5u7Jfv+puwXkUBUT5PVyMDfJZpy30CBDXGMgw7fguK/lpOMBvgw==", + "version": "0.2.2", + "resolved": "https://registry.npmjs.org/@borewit/text-codec/-/text-codec-0.2.2.tgz", + "integrity": "sha512-DDaRehssg1aNrH4+2hnj1B7vnUGEjU6OIlyRdkMd0aUdIUvKXrJfXsy8LVtXAy7DRvYVluWbMspsRhz2lcW0mQ==", "license": "MIT", "funding": { "type": "github", @@ -799,9 +783,9 @@ } }, "node_modules/@google/genai": { - "version": "1.44.0", - "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.44.0.tgz", - "integrity": "sha512-kRt9ZtuXmz+tLlcNntN/VV4LRdpl6ZOu5B1KbfNgfR65db15O6sUQcwnwLka8sT/V6qysD93fWrgJHF2L7dA9A==", + "version": "1.45.0", + "resolved": "https://registry.npmjs.org/@google/genai/-/genai-1.45.0.tgz", + "integrity": 
"sha512-+sNRWhKiRibVgc4OKi7aBJJ0A7RcoVD8tGG+eFkqxAWRjASDW+ktS9lLwTDnAxZICzCVoeAdu8dYLJVTX60N9w==", "license": "Apache-2.0", "dependencies": { "google-auth-library": "^10.3.0", @@ -821,6 +805,22 @@ } } }, + "node_modules/@gsd/pi-agent-core": { + "resolved": "packages/pi-agent-core", + "link": true + }, + "node_modules/@gsd/pi-ai": { + "resolved": "packages/pi-ai", + "link": true + }, + "node_modules/@gsd/pi-coding-agent": { + "resolved": "packages/pi-coding-agent", + "link": true + }, + "node_modules/@gsd/pi-tui": { + "resolved": "packages/pi-tui", + "link": true + }, "node_modules/@mariozechner/clipboard": { "version": "0.3.2", "resolved": "https://registry.npmjs.org/@mariozechner/clipboard/-/clipboard-0.3.2.tgz", @@ -1013,100 +1013,6 @@ "jiti": "lib/jiti-cli.mjs" } }, - "node_modules/@mariozechner/pi-agent-core": { - "version": "0.57.1", - "resolved": "https://registry.npmjs.org/@mariozechner/pi-agent-core/-/pi-agent-core-0.57.1.tgz", - "integrity": "sha512-WXsBbkNWOObFGHkhixaT8GXJpHDd3+fn8QntYF+4R8Sa9WB90ENXWidO6b7vcKX+JX0jjO5dIsQxmzosARJKlg==", - "license": "MIT", - "dependencies": { - "@mariozechner/pi-ai": "^0.57.1" - }, - "engines": { - "node": ">=20.0.0" - } - }, - "node_modules/@mariozechner/pi-ai": { - "version": "0.57.1", - "resolved": "https://registry.npmjs.org/@mariozechner/pi-ai/-/pi-ai-0.57.1.tgz", - "integrity": "sha512-Bd/J4a3YpdzJVyHLih0vDSdB0QPL4ti0XsAwtHOK/8eVhB0fHM1CpcgIrcBFJ23TMcKXMi0qamz18ERfp8tmgg==", - "license": "MIT", - "dependencies": { - "@anthropic-ai/sdk": "^0.73.0", - "@aws-sdk/client-bedrock-runtime": "^3.983.0", - "@google/genai": "^1.40.0", - "@mistralai/mistralai": "1.14.1", - "@sinclair/typebox": "^0.34.41", - "ajv": "^8.17.1", - "ajv-formats": "^3.0.1", - "chalk": "^5.6.2", - "openai": "6.26.0", - "partial-json": "^0.1.7", - "proxy-agent": "^6.5.0", - "undici": "^7.19.1", - "zod-to-json-schema": "^3.24.6" - }, - "bin": { - "pi-ai": "dist/cli.js" - }, - "engines": { - "node": ">=20.0.0" - } - }, - 
"node_modules/@mariozechner/pi-coding-agent": { - "version": "0.57.1", - "resolved": "https://registry.npmjs.org/@mariozechner/pi-coding-agent/-/pi-coding-agent-0.57.1.tgz", - "integrity": "sha512-u5MQEduj68rwVIsRsqrWkJYiJCyPph/a6bMoJAQKo1sb+Pc17Y/ojwa+wGssnUMjEB38AQKofWTVe8NFEpSWNw==", - "license": "MIT", - "dependencies": { - "@mariozechner/jiti": "^2.6.2", - "@mariozechner/pi-agent-core": "^0.57.1", - "@mariozechner/pi-ai": "^0.57.1", - "@mariozechner/pi-tui": "^0.57.1", - "@silvia-odwyer/photon-node": "^0.3.4", - "chalk": "^5.5.0", - "cli-highlight": "^2.1.11", - "diff": "^8.0.2", - "extract-zip": "^2.0.1", - "file-type": "^21.1.1", - "glob": "^13.0.1", - "hosted-git-info": "^9.0.2", - "ignore": "^7.0.5", - "marked": "^15.0.12", - "minimatch": "^10.2.3", - "proper-lockfile": "^4.1.2", - "strip-ansi": "^7.1.0", - "undici": "^7.19.1", - "yaml": "^2.8.2" - }, - "bin": { - "pi": "dist/cli.js" - }, - "engines": { - "node": ">=20.6.0" - }, - "optionalDependencies": { - "@mariozechner/clipboard": "^0.3.2" - } - }, - "node_modules/@mariozechner/pi-tui": { - "version": "0.57.1", - "resolved": "https://registry.npmjs.org/@mariozechner/pi-tui/-/pi-tui-0.57.1.tgz", - "integrity": "sha512-cjoRghLbeAHV0tTJeHgZXaryUi5zzBZofeZ7uJun1gztnckLLRjoVeaPTujNlc5BIfyKvFqhh1QWCZng/MXlpg==", - "license": "MIT", - "dependencies": { - "@types/mime-types": "^2.1.4", - "chalk": "^5.5.0", - "get-east-asian-width": "^1.3.0", - "marked": "^15.0.12", - "mime-types": "^3.0.1" - }, - "engines": { - "node": ">=20.0.0" - }, - "optionalDependencies": { - "koffi": "^2.9.0" - } - }, "node_modules/@mistralai/mistralai": { "version": "1.14.1", "resolved": "https://registry.npmjs.org/@mistralai/mistralai/-/mistralai-1.14.1.tgz", @@ -1194,12 +1100,12 @@ "license": "MIT" }, "node_modules/@smithy/abort-controller": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/abort-controller/-/abort-controller-4.2.11.tgz", - "integrity": 
"sha512-Hj4WoYWMJnSpM6/kchsm4bUNTL9XiSyhvoMb2KIq4VJzyDt7JpGHUZHkVNPZVC7YE1tf8tPeVauxpFBKGW4/KQ==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/abort-controller/-/abort-controller-4.2.12.tgz", + "integrity": "sha512-xolrFw6b+2iYGl6EcOL7IJY71vvyZ0DJ3mcKtpykqPe2uscwtzDZJa1uVQXyP7w9Dd+kGwYnPbMsJrGISKiY/Q==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1207,16 +1113,16 @@ } }, "node_modules/@smithy/config-resolver": { - "version": "4.4.10", - "resolved": "https://registry.npmjs.org/@smithy/config-resolver/-/config-resolver-4.4.10.tgz", - "integrity": "sha512-IRTkd6ps0ru+lTWnfnsbXzW80A8Od8p3pYiZnW98K2Hb20rqfsX7VTlfUwhrcOeSSy68Gn9WBofwPuw3e5CCsg==", + "version": "4.4.11", + "resolved": "https://registry.npmjs.org/@smithy/config-resolver/-/config-resolver-4.4.11.tgz", + "integrity": "sha512-YxFiiG4YDAtX7WMN7RuhHZLeTmRRAOyCbr+zB8e3AQzHPnUhS8zXjB1+cniPVQI3xbWsQPM0X2aaIkO/ME0ymw==", "license": "Apache-2.0", "dependencies": { - "@smithy/node-config-provider": "^4.3.11", - "@smithy/types": "^4.13.0", + "@smithy/node-config-provider": "^4.3.12", + "@smithy/types": "^4.13.1", "@smithy/util-config-provider": "^4.2.2", - "@smithy/util-endpoints": "^3.3.2", - "@smithy/util-middleware": "^4.2.11", + "@smithy/util-endpoints": "^3.3.3", + "@smithy/util-middleware": "^4.2.12", "tslib": "^2.6.2" }, "engines": { @@ -1224,18 +1130,18 @@ } }, "node_modules/@smithy/core": { - "version": "3.23.9", - "resolved": "https://registry.npmjs.org/@smithy/core/-/core-3.23.9.tgz", - "integrity": "sha512-1Vcut4LEL9HZsdpI0vFiRYIsaoPwZLjAxnVQDUMQK8beMS+EYPLDQCXtbzfxmM5GzSgjfe2Q9M7WaXwIMQllyQ==", + "version": "3.23.11", + "resolved": "https://registry.npmjs.org/@smithy/core/-/core-3.23.11.tgz", + "integrity": "sha512-952rGf7hBRnhUIaeLp6q4MptKW8sPFe5VvkoZ5qIzFAtx6c/QZ/54FS3yootsyUSf9gJX/NBqEBNdNR7jMIlpQ==", "license": "Apache-2.0", "dependencies": { - "@smithy/middleware-serde": 
"^4.2.12", - "@smithy/protocol-http": "^5.3.11", - "@smithy/types": "^4.13.0", + "@smithy/protocol-http": "^5.3.12", + "@smithy/types": "^4.13.1", + "@smithy/url-parser": "^4.2.12", "@smithy/util-base64": "^4.3.2", "@smithy/util-body-length-browser": "^4.2.2", - "@smithy/util-middleware": "^4.2.11", - "@smithy/util-stream": "^4.5.17", + "@smithy/util-middleware": "^4.2.12", + "@smithy/util-stream": "^4.5.19", "@smithy/util-utf8": "^4.2.2", "@smithy/uuid": "^1.1.2", "tslib": "^2.6.2" @@ -1245,15 +1151,15 @@ } }, "node_modules/@smithy/credential-provider-imds": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/credential-provider-imds/-/credential-provider-imds-4.2.11.tgz", - "integrity": "sha512-lBXrS6ku0kTj3xLmsJW0WwqWbGQ6ueooYyp/1L9lkyT0M02C+DWwYwc5aTyXFbRaK38ojALxNixg+LxKSHZc0g==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/credential-provider-imds/-/credential-provider-imds-4.2.12.tgz", + "integrity": "sha512-cr2lR792vNZcYMriSIj+Um3x9KWrjcu98kn234xA6reOAFMmbRpQMOv8KPgEmLLtx3eldU6c5wALKFqNOhugmg==", "license": "Apache-2.0", "dependencies": { - "@smithy/node-config-provider": "^4.3.11", - "@smithy/property-provider": "^4.2.11", - "@smithy/types": "^4.13.0", - "@smithy/url-parser": "^4.2.11", + "@smithy/node-config-provider": "^4.3.12", + "@smithy/property-provider": "^4.2.12", + "@smithy/types": "^4.13.1", + "@smithy/url-parser": "^4.2.12", "tslib": "^2.6.2" }, "engines": { @@ -1261,13 +1167,13 @@ } }, "node_modules/@smithy/eventstream-codec": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/eventstream-codec/-/eventstream-codec-4.2.11.tgz", - "integrity": "sha512-Sf39Ml0iVX+ba/bgMPxaXWAAFmHqYLTmbjAPfLPLY8CrYkRDEqZdUsKC1OwVMCdJXfAt0v4j49GIJ8DoSYAe6w==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-codec/-/eventstream-codec-4.2.12.tgz", + "integrity": 
"sha512-FE3bZdEl62ojmy8x4FHqxq2+BuOHlcxiH5vaZ6aqHJr3AIZzwF5jfx8dEiU/X0a8RboyNDjmXjlbr8AdEyLgiA==", "license": "Apache-2.0", "dependencies": { "@aws-crypto/crc32": "5.2.0", - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "@smithy/util-hex-encoding": "^4.2.2", "tslib": "^2.6.2" }, @@ -1276,13 +1182,13 @@ } }, "node_modules/@smithy/eventstream-serde-browser": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-browser/-/eventstream-serde-browser-4.2.11.tgz", - "integrity": "sha512-3rEpo3G6f/nRS7fQDsZmxw/ius6rnlIpz4UX6FlALEzz8JoSxFmdBt0SZnthis+km7sQo6q5/3e+UJcuQivoXA==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-browser/-/eventstream-serde-browser-4.2.12.tgz", + "integrity": "sha512-XUSuMxlTxV5pp4VpqZf6Sa3vT/Q75FVkLSpSSE3KkWBvAQWeuWt1msTv8fJfgA4/jcJhrbrbMzN1AC/hvPmm5A==", "license": "Apache-2.0", "dependencies": { - "@smithy/eventstream-serde-universal": "^4.2.11", - "@smithy/types": "^4.13.0", + "@smithy/eventstream-serde-universal": "^4.2.12", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1290,12 +1196,12 @@ } }, "node_modules/@smithy/eventstream-serde-config-resolver": { - "version": "4.3.11", - "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-config-resolver/-/eventstream-serde-config-resolver-4.3.11.tgz", - "integrity": "sha512-XeNIA8tcP/GDWnnKkO7qEm/bg0B/bP9lvIXZBXcGZwZ+VYM8h8k9wuDvUODtdQ2Wcp2RcBkPTCSMmaniVHrMlA==", + "version": "4.3.12", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-config-resolver/-/eventstream-serde-config-resolver-4.3.12.tgz", + "integrity": "sha512-7epsAZ3QvfHkngz6RXQYseyZYHlmWXSTPOfPmXkiS+zA6TBNo1awUaMFL9vxyXlGdoELmCZyZe1nQE+imbmV+Q==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1303,13 +1209,13 @@ } }, "node_modules/@smithy/eventstream-serde-node": { - "version": "4.2.11", - 
"resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-node/-/eventstream-serde-node-4.2.11.tgz", - "integrity": "sha512-fzbCh18rscBDTQSCrsp1fGcclLNF//nJyhjldsEl/5wCYmgpHblv5JSppQAyQI24lClsFT0wV06N1Porn0IsEw==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-node/-/eventstream-serde-node-4.2.12.tgz", + "integrity": "sha512-D1pFuExo31854eAvg89KMn9Oab/wEeJR6Buy32B49A9Ogdtx5fwZPqBHUlDzaCDpycTFk2+fSQgX689Qsk7UGA==", "license": "Apache-2.0", "dependencies": { - "@smithy/eventstream-serde-universal": "^4.2.11", - "@smithy/types": "^4.13.0", + "@smithy/eventstream-serde-universal": "^4.2.12", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1317,13 +1223,13 @@ } }, "node_modules/@smithy/eventstream-serde-universal": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-universal/-/eventstream-serde-universal-4.2.11.tgz", - "integrity": "sha512-MJ7HcI+jEkqoWT5vp+uoVaAjBrmxBtKhZTeynDRG/seEjJfqyg3SiqMMqyPnAMzmIfLaeJ/uiuSDP/l9AnMy/Q==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/eventstream-serde-universal/-/eventstream-serde-universal-4.2.12.tgz", + "integrity": "sha512-+yNuTiyBACxOJUTvbsNsSOfH9G9oKbaJE1lNL3YHpGcuucl6rPZMi3nrpehpVOVR2E07YqFFmtwpImtpzlouHQ==", "license": "Apache-2.0", "dependencies": { - "@smithy/eventstream-codec": "^4.2.11", - "@smithy/types": "^4.13.0", + "@smithy/eventstream-codec": "^4.2.12", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1331,14 +1237,14 @@ } }, "node_modules/@smithy/fetch-http-handler": { - "version": "5.3.13", - "resolved": "https://registry.npmjs.org/@smithy/fetch-http-handler/-/fetch-http-handler-5.3.13.tgz", - "integrity": "sha512-U2Hcfl2s3XaYjikN9cT4mPu8ybDbImV3baXR0PkVlC0TTx808bRP3FaPGAzPtB8OByI+JqJ1kyS+7GEgae7+qQ==", + "version": "5.3.15", + "resolved": "https://registry.npmjs.org/@smithy/fetch-http-handler/-/fetch-http-handler-5.3.15.tgz", + "integrity": 
"sha512-T4jFU5N/yiIfrtrsb9uOQn7RdELdM/7HbyLNr6uO/mpkj1ctiVs7CihVr51w4LyQlXWDpXFn4BElf1WmQvZu/A==", "license": "Apache-2.0", "dependencies": { - "@smithy/protocol-http": "^5.3.11", - "@smithy/querystring-builder": "^4.2.11", - "@smithy/types": "^4.13.0", + "@smithy/protocol-http": "^5.3.12", + "@smithy/querystring-builder": "^4.2.12", + "@smithy/types": "^4.13.1", "@smithy/util-base64": "^4.3.2", "tslib": "^2.6.2" }, @@ -1347,12 +1253,12 @@ } }, "node_modules/@smithy/hash-node": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/hash-node/-/hash-node-4.2.11.tgz", - "integrity": "sha512-T+p1pNynRkydpdL015ruIoyPSRw9e/SQOWmSAMmmprfswMrd5Ow5igOWNVlvyVFZlxXqGmyH3NQwfwy8r5Jx0A==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/hash-node/-/hash-node-4.2.12.tgz", + "integrity": "sha512-QhBYbGrbxTkZ43QoTPrK72DoYviDeg6YKDrHTMJbbC+A0sml3kSjzFtXP7BtbyJnXojLfTQldGdUR0RGD8dA3w==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "@smithy/util-buffer-from": "^4.2.2", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" @@ -1362,12 +1268,12 @@ } }, "node_modules/@smithy/invalid-dependency": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/invalid-dependency/-/invalid-dependency-4.2.11.tgz", - "integrity": "sha512-cGNMrgykRmddrNhYy1yBdrp5GwIgEkniS7k9O1VLB38yxQtlvrxpZtUVvo6T4cKpeZsriukBuuxfJcdZQc/f/g==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/invalid-dependency/-/invalid-dependency-4.2.12.tgz", + "integrity": "sha512-/4F1zb7Z8LOu1PalTdESFHR0RbPwHd3FcaG1sI3UEIriQTWakysgJr65lc1jj6QY5ye7aFsisajotH6UhWfm/g==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1387,13 +1293,13 @@ } }, "node_modules/@smithy/middleware-content-length": { - "version": "4.2.11", - "resolved": 
"https://registry.npmjs.org/@smithy/middleware-content-length/-/middleware-content-length-4.2.11.tgz", - "integrity": "sha512-UvIfKYAKhCzr4p6jFevPlKhQwyQwlJ6IeKLDhmV1PlYfcW3RL4ROjNEDtSik4NYMi9kDkH7eSwyTP3vNJ/u/Dw==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/middleware-content-length/-/middleware-content-length-4.2.12.tgz", + "integrity": "sha512-YE58Yz+cvFInWI/wOTrB+DbvUVz/pLn5mC5MvOV4fdRUc6qGwygyngcucRQjAhiCEbmfLOXX0gntSIcgMvAjmA==", "license": "Apache-2.0", "dependencies": { - "@smithy/protocol-http": "^5.3.11", - "@smithy/types": "^4.13.0", + "@smithy/protocol-http": "^5.3.12", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1401,18 +1307,18 @@ } }, "node_modules/@smithy/middleware-endpoint": { - "version": "4.4.23", - "resolved": "https://registry.npmjs.org/@smithy/middleware-endpoint/-/middleware-endpoint-4.4.23.tgz", - "integrity": "sha512-UEFIejZy54T1EJn2aWJ45voB7RP2T+IRzUqocIdM6GFFa5ClZncakYJfcYnoXt3UsQrZZ9ZRauGm77l9UCbBLw==", + "version": "4.4.25", + "resolved": "https://registry.npmjs.org/@smithy/middleware-endpoint/-/middleware-endpoint-4.4.25.tgz", + "integrity": "sha512-dqjLwZs2eBxIUG6Qtw8/YZ4DvzHGIf0DA18wrgtfP6a50UIO7e2nY0FPdcbv5tVJKqWCCU5BmGMOUwT7Puan+A==", "license": "Apache-2.0", "dependencies": { - "@smithy/core": "^3.23.9", - "@smithy/middleware-serde": "^4.2.12", - "@smithy/node-config-provider": "^4.3.11", - "@smithy/shared-ini-file-loader": "^4.4.6", - "@smithy/types": "^4.13.0", - "@smithy/url-parser": "^4.2.11", - "@smithy/util-middleware": "^4.2.11", + "@smithy/core": "^3.23.11", + "@smithy/middleware-serde": "^4.2.14", + "@smithy/node-config-provider": "^4.3.12", + "@smithy/shared-ini-file-loader": "^4.4.7", + "@smithy/types": "^4.13.1", + "@smithy/url-parser": "^4.2.12", + "@smithy/util-middleware": "^4.2.12", "tslib": "^2.6.2" }, "engines": { @@ -1420,18 +1326,18 @@ } }, "node_modules/@smithy/middleware-retry": { - "version": "4.4.40", - "resolved": 
"https://registry.npmjs.org/@smithy/middleware-retry/-/middleware-retry-4.4.40.tgz", - "integrity": "sha512-YhEMakG1Ae57FajERdHNZ4ShOPIY7DsgV+ZoAxo/5BT0KIe+f6DDU2rtIymNNFIj22NJfeeI6LWIifrwM0f+rA==", + "version": "4.4.42", + "resolved": "https://registry.npmjs.org/@smithy/middleware-retry/-/middleware-retry-4.4.42.tgz", + "integrity": "sha512-vbwyqHRIpIZutNXZpLAozakzamcINaRCpEy1MYmK6xBeW3xN+TyPRA123GjXnuxZIjc9848MRRCugVMTXxC4Eg==", "license": "Apache-2.0", "dependencies": { - "@smithy/node-config-provider": "^4.3.11", - "@smithy/protocol-http": "^5.3.11", - "@smithy/service-error-classification": "^4.2.11", - "@smithy/smithy-client": "^4.12.3", - "@smithy/types": "^4.13.0", - "@smithy/util-middleware": "^4.2.11", - "@smithy/util-retry": "^4.2.11", + "@smithy/node-config-provider": "^4.3.12", + "@smithy/protocol-http": "^5.3.12", + "@smithy/service-error-classification": "^4.2.12", + "@smithy/smithy-client": "^4.12.5", + "@smithy/types": "^4.13.1", + "@smithy/util-middleware": "^4.2.12", + "@smithy/util-retry": "^4.2.12", "@smithy/uuid": "^1.1.2", "tslib": "^2.6.2" }, @@ -1440,13 +1346,14 @@ } }, "node_modules/@smithy/middleware-serde": { - "version": "4.2.12", - "resolved": "https://registry.npmjs.org/@smithy/middleware-serde/-/middleware-serde-4.2.12.tgz", - "integrity": "sha512-W9g1bOLui7Xn5FABRVS0o3rXL0gfN37d/8I/W7i0N7oxjx9QecUmXEMSUMADTODwdtka9cN43t5BI2CodLJpng==", + "version": "4.2.14", + "resolved": "https://registry.npmjs.org/@smithy/middleware-serde/-/middleware-serde-4.2.14.tgz", + "integrity": "sha512-+CcaLoLa5apzSRtloOyG7lQvkUw2ZDml3hRh4QiG9WyEPfW5Ke/3tPOPiPjUneuT59Tpn8+c3RVaUvvkkwqZwg==", "license": "Apache-2.0", "dependencies": { - "@smithy/protocol-http": "^5.3.11", - "@smithy/types": "^4.13.0", + "@smithy/core": "^3.23.11", + "@smithy/protocol-http": "^5.3.12", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1454,12 +1361,12 @@ } }, "node_modules/@smithy/middleware-stack": { - "version": "4.2.11", - "resolved": 
"https://registry.npmjs.org/@smithy/middleware-stack/-/middleware-stack-4.2.11.tgz", - "integrity": "sha512-s+eenEPW6RgliDk2IhjD2hWOxIx1NKrOHxEwNUaUXxYBxIyCcDfNULZ2Mu15E3kwcJWBedTET/kEASPV1A1Akg==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/middleware-stack/-/middleware-stack-4.2.12.tgz", + "integrity": "sha512-kruC5gRHwsCOuyCd4ouQxYjgRAym2uDlCvQ5acuMtRrcdfg7mFBg6blaxcJ09STpt3ziEkis6bhg1uwrWU7txw==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1467,14 +1374,14 @@ } }, "node_modules/@smithy/node-config-provider": { - "version": "4.3.11", - "resolved": "https://registry.npmjs.org/@smithy/node-config-provider/-/node-config-provider-4.3.11.tgz", - "integrity": "sha512-xD17eE7kaLgBBGf5CZQ58hh2YmwK1Z0O8YhffwB/De2jsL0U3JklmhVYJ9Uf37OtUDLF2gsW40Xwwag9U869Gg==", + "version": "4.3.12", + "resolved": "https://registry.npmjs.org/@smithy/node-config-provider/-/node-config-provider-4.3.12.tgz", + "integrity": "sha512-tr2oKX2xMcO+rBOjobSwVAkV05SIfUKz8iI53rzxEmgW3GOOPOv0UioSDk+J8OpRQnpnhsO3Af6IEBabQBVmiw==", "license": "Apache-2.0", "dependencies": { - "@smithy/property-provider": "^4.2.11", - "@smithy/shared-ini-file-loader": "^4.4.6", - "@smithy/types": "^4.13.0", + "@smithy/property-provider": "^4.2.12", + "@smithy/shared-ini-file-loader": "^4.4.7", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1482,15 +1389,15 @@ } }, "node_modules/@smithy/node-http-handler": { - "version": "4.4.14", - "resolved": "https://registry.npmjs.org/@smithy/node-http-handler/-/node-http-handler-4.4.14.tgz", - "integrity": "sha512-DamSqaU8nuk0xTJDrYnRzZndHwwRnyj/n/+RqGGCcBKB4qrQem0mSDiWdupaNWdwxzyMU91qxDmHOCazfhtO3A==", + "version": "4.4.16", + "resolved": "https://registry.npmjs.org/@smithy/node-http-handler/-/node-http-handler-4.4.16.tgz", + "integrity": "sha512-ULC8UCS/HivdCB3jhi+kLFYe4B5gxH2gi9vHBfEIiRrT2jfKiZNiETJSlzRtE6B26XbBHjPtc8iZKSNqMol9bw==", 
"license": "Apache-2.0", "dependencies": { - "@smithy/abort-controller": "^4.2.11", - "@smithy/protocol-http": "^5.3.11", - "@smithy/querystring-builder": "^4.2.11", - "@smithy/types": "^4.13.0", + "@smithy/abort-controller": "^4.2.12", + "@smithy/protocol-http": "^5.3.12", + "@smithy/querystring-builder": "^4.2.12", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1498,12 +1405,12 @@ } }, "node_modules/@smithy/property-provider": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/property-provider/-/property-provider-4.2.11.tgz", - "integrity": "sha512-14T1V64o6/ndyrnl1ze1ZhyLzIeYNN47oF/QU6P5m82AEtyOkMJTb0gO1dPubYjyyKuPD6OSVMPDKe+zioOnCg==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/property-provider/-/property-provider-4.2.12.tgz", + "integrity": "sha512-jqve46eYU1v7pZ5BM+fmkbq3DerkSluPr5EhvOcHxygxzD05ByDRppRwRPPpFrsFo5yDtCYLKu+kreHKVrvc7A==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1511,12 +1418,12 @@ } }, "node_modules/@smithy/protocol-http": { - "version": "5.3.11", - "resolved": "https://registry.npmjs.org/@smithy/protocol-http/-/protocol-http-5.3.11.tgz", - "integrity": "sha512-hI+barOVDJBkNt4y0L2mu3Ugc0w7+BpJ2CZuLwXtSltGAAwCb3IvnalGlbDV/UCS6a9ZuT3+exd1WxNdLb5IlQ==", + "version": "5.3.12", + "resolved": "https://registry.npmjs.org/@smithy/protocol-http/-/protocol-http-5.3.12.tgz", + "integrity": "sha512-fit0GZK9I1xoRlR4jXmbLhoN0OdEpa96ul8M65XdmXnxXkuMxM0Y8HDT0Fh0Xb4I85MBvBClOzgSrV1X2s1Hxw==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1524,12 +1431,12 @@ } }, "node_modules/@smithy/querystring-builder": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/querystring-builder/-/querystring-builder-4.2.11.tgz", - "integrity": 
"sha512-7spdikrYiljpket6u0up2Ck2mxhy7dZ0+TDd+S53Dg2DHd6wg+YNJrTCHiLdgZmEXZKI7LJZcwL3721ZRDFiqA==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/querystring-builder/-/querystring-builder-4.2.12.tgz", + "integrity": "sha512-6wTZjGABQufekycfDGMEB84BgtdOE/rCVTov+EDXQ8NHKTUNIp/j27IliwP7tjIU9LR+sSzyGBOXjeEtVgzCHg==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "@smithy/util-uri-escape": "^4.2.2", "tslib": "^2.6.2" }, @@ -1538,12 +1445,12 @@ } }, "node_modules/@smithy/querystring-parser": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/querystring-parser/-/querystring-parser-4.2.11.tgz", - "integrity": "sha512-nE3IRNjDltvGcoThD2abTozI1dkSy8aX+a2N1Rs55en5UsdyyIXgGEmevUL3okZFoJC77JgRGe99xYohhsjivQ==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/querystring-parser/-/querystring-parser-4.2.12.tgz", + "integrity": "sha512-P2OdvrgiAKpkPNKlKUtWbNZKB1XjPxM086NeVhK+W+wI46pIKdWBe5QyXvhUm3MEcyS/rkLvY8rZzyUdmyDZBw==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1551,24 +1458,24 @@ } }, "node_modules/@smithy/service-error-classification": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/service-error-classification/-/service-error-classification-4.2.11.tgz", - "integrity": "sha512-HkMFJZJUhzU3HvND1+Yw/kYWXp4RPDLBWLcK1n+Vqw8xn4y2YiBhdww8IxhkQjP/QlZun5bwm3vcHc8AqIU3zw==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/service-error-classification/-/service-error-classification-4.2.12.tgz", + "integrity": "sha512-LlP29oSQN0Tw0b6D0Xo6BIikBswuIiGYbRACy5ujw/JgWSzTdYj46U83ssf6Ux0GyNJVivs2uReU8pt7Eu9okQ==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0" + "@smithy/types": "^4.13.1" }, "engines": { "node": ">=18.0.0" } }, "node_modules/@smithy/shared-ini-file-loader": { - "version": 
"4.4.6", - "resolved": "https://registry.npmjs.org/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-4.4.6.tgz", - "integrity": "sha512-IB/M5I8G0EeXZTHsAxpx51tMQ5R719F3aq+fjEB6VtNcCHDc0ajFDIGDZw+FW9GxtEkgTduiPpjveJdA/CX7sw==", + "version": "4.4.7", + "resolved": "https://registry.npmjs.org/@smithy/shared-ini-file-loader/-/shared-ini-file-loader-4.4.7.tgz", + "integrity": "sha512-HrOKWsUb+otTeo1HxVWeEb99t5ER1XrBi/xka2Wv6NVmTbuCUC1dvlrksdvxFtODLBjsC+PHK+fuy2x/7Ynyiw==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1576,16 +1483,16 @@ } }, "node_modules/@smithy/signature-v4": { - "version": "5.3.11", - "resolved": "https://registry.npmjs.org/@smithy/signature-v4/-/signature-v4-5.3.11.tgz", - "integrity": "sha512-V1L6N9aKOBAN4wEHLyqjLBnAz13mtILU0SeDrjOaIZEeN6IFa6DxwRt1NNpOdmSpQUfkBj0qeD3m6P77uzMhgQ==", + "version": "5.3.12", + "resolved": "https://registry.npmjs.org/@smithy/signature-v4/-/signature-v4-5.3.12.tgz", + "integrity": "sha512-B/FBwO3MVOL00DaRSXfXfa/TRXRheagt/q5A2NM13u7q+sHS59EOVGQNfG7DkmVtdQm5m3vOosoKAXSqn/OEgw==", "license": "Apache-2.0", "dependencies": { "@smithy/is-array-buffer": "^4.2.2", - "@smithy/protocol-http": "^5.3.11", - "@smithy/types": "^4.13.0", + "@smithy/protocol-http": "^5.3.12", + "@smithy/types": "^4.13.1", "@smithy/util-hex-encoding": "^4.2.2", - "@smithy/util-middleware": "^4.2.11", + "@smithy/util-middleware": "^4.2.12", "@smithy/util-uri-escape": "^4.2.2", "@smithy/util-utf8": "^4.2.2", "tslib": "^2.6.2" @@ -1595,17 +1502,17 @@ } }, "node_modules/@smithy/smithy-client": { - "version": "4.12.3", - "resolved": "https://registry.npmjs.org/@smithy/smithy-client/-/smithy-client-4.12.3.tgz", - "integrity": "sha512-7k4UxjSpHmPN2AxVhvIazRSzFQjWnud3sOsXcFStzagww17j1cFQYqTSiQ8xuYK3vKLR1Ni8FzuT3VlKr3xCNw==", + "version": "4.12.5", + "resolved": "https://registry.npmjs.org/@smithy/smithy-client/-/smithy-client-4.12.5.tgz", + "integrity": 
"sha512-UqwYawyqSr/aog8mnLnfbPurS0gi4G7IYDcD28cUIBhsvWs1+rQcL2IwkUQ+QZ7dibaoRzhNF99fAQ9AUcO00w==", "license": "Apache-2.0", "dependencies": { - "@smithy/core": "^3.23.9", - "@smithy/middleware-endpoint": "^4.4.23", - "@smithy/middleware-stack": "^4.2.11", - "@smithy/protocol-http": "^5.3.11", - "@smithy/types": "^4.13.0", - "@smithy/util-stream": "^4.5.17", + "@smithy/core": "^3.23.11", + "@smithy/middleware-endpoint": "^4.4.25", + "@smithy/middleware-stack": "^4.2.12", + "@smithy/protocol-http": "^5.3.12", + "@smithy/types": "^4.13.1", + "@smithy/util-stream": "^4.5.19", "tslib": "^2.6.2" }, "engines": { @@ -1613,9 +1520,9 @@ } }, "node_modules/@smithy/types": { - "version": "4.13.0", - "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.13.0.tgz", - "integrity": "sha512-COuLsZILbbQsdrwKQpkkpyep7lCsByxwj7m0Mg5v66/ZTyenlfBc40/QFQ5chO0YN/PNEH1Bi3fGtfXPnYNeDw==", + "version": "4.13.1", + "resolved": "https://registry.npmjs.org/@smithy/types/-/types-4.13.1.tgz", + "integrity": "sha512-787F3yzE2UiJIQ+wYW1CVg2odHjmaWLGksnKQHUrK/lYZSEcy1msuLVvxaR/sI2/aDe9U+TBuLsXnr3vod1g0g==", "license": "Apache-2.0", "dependencies": { "tslib": "^2.6.2" @@ -1625,13 +1532,13 @@ } }, "node_modules/@smithy/url-parser": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/url-parser/-/url-parser-4.2.11.tgz", - "integrity": "sha512-oTAGGHo8ZYc5VZsBREzuf5lf2pAurJQsccMusVZ85wDkX66ojEc/XauiGjzCj50A61ObFTPe6d7Pyt6UBYaing==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/url-parser/-/url-parser-4.2.12.tgz", + "integrity": "sha512-wOPKPEpso+doCZGIlr+e1lVI6+9VAKfL4kZWFgzVgGWY2hZxshNKod4l2LXS3PRC9otH/JRSjtEHqQ/7eLciRA==", "license": "Apache-2.0", "dependencies": { - "@smithy/querystring-parser": "^4.2.11", - "@smithy/types": "^4.13.0", + "@smithy/querystring-parser": "^4.2.12", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1702,14 +1609,14 @@ } }, "node_modules/@smithy/util-defaults-mode-browser": { - "version": 
"4.3.39", - "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-4.3.39.tgz", - "integrity": "sha512-ui7/Ho/+VHqS7Km2wBw4/Ab4RktoiSshgcgpJzC4keFPs6tLJS4IQwbeahxQS3E/w98uq6E1mirCH/id9xIXeQ==", + "version": "4.3.41", + "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-browser/-/util-defaults-mode-browser-4.3.41.tgz", + "integrity": "sha512-M1w1Ux0rSVvBOxIIiqbxvZvhnjQ+VUjJrugtORE90BbadSTH+jsQL279KRL3Hv0w69rE7EuYkV/4Lepz/NBW9g==", "license": "Apache-2.0", "dependencies": { - "@smithy/property-provider": "^4.2.11", - "@smithy/smithy-client": "^4.12.3", - "@smithy/types": "^4.13.0", + "@smithy/property-provider": "^4.2.12", + "@smithy/smithy-client": "^4.12.5", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1717,17 +1624,17 @@ } }, "node_modules/@smithy/util-defaults-mode-node": { - "version": "4.2.42", - "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-4.2.42.tgz", - "integrity": "sha512-QDA84CWNe8Akpj15ofLO+1N3Rfg8qa2K5uX0y6HnOp4AnRYRgWrKx/xzbYNbVF9ZsyJUYOfcoaN3y93wA/QJ2A==", + "version": "4.2.44", + "resolved": "https://registry.npmjs.org/@smithy/util-defaults-mode-node/-/util-defaults-mode-node-4.2.44.tgz", + "integrity": "sha512-YPze3/lD1KmWuZsl9JlfhcgGLX7AXhSoaCDtiPntUjNW5/YY0lOHjkcgxyE9x/h5vvS1fzDifMGjzqnNlNiqOQ==", "license": "Apache-2.0", "dependencies": { - "@smithy/config-resolver": "^4.4.10", - "@smithy/credential-provider-imds": "^4.2.11", - "@smithy/node-config-provider": "^4.3.11", - "@smithy/property-provider": "^4.2.11", - "@smithy/smithy-client": "^4.12.3", - "@smithy/types": "^4.13.0", + "@smithy/config-resolver": "^4.4.11", + "@smithy/credential-provider-imds": "^4.2.12", + "@smithy/node-config-provider": "^4.3.12", + "@smithy/property-provider": "^4.2.12", + "@smithy/smithy-client": "^4.12.5", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1735,13 +1642,13 @@ } }, 
"node_modules/@smithy/util-endpoints": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/@smithy/util-endpoints/-/util-endpoints-3.3.2.tgz", - "integrity": "sha512-+4HFLpE5u29AbFlTdlKIT7jfOzZ8PDYZKTb3e+AgLz986OYwqTourQ5H+jg79/66DB69Un1+qKecLnkZdAsYcA==", + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/@smithy/util-endpoints/-/util-endpoints-3.3.3.tgz", + "integrity": "sha512-VACQVe50j0HZPjpwWcjyT51KUQ4AnsvEaQ2lKHOSL4mNLD0G9BjEniQ+yCt1qqfKfiAHRAts26ud7hBjamrwig==", "license": "Apache-2.0", "dependencies": { - "@smithy/node-config-provider": "^4.3.11", - "@smithy/types": "^4.13.0", + "@smithy/node-config-provider": "^4.3.12", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1761,12 +1668,12 @@ } }, "node_modules/@smithy/util-middleware": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/util-middleware/-/util-middleware-4.2.11.tgz", - "integrity": "sha512-r3dtF9F+TpSZUxpOVVtPfk09Rlo4lT6ORBqEvX3IBT6SkQAdDSVKR5GcfmZbtl7WKhKnmb3wbDTQ6ibR2XHClw==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/util-middleware/-/util-middleware-4.2.12.tgz", + "integrity": "sha512-Er805uFUOvgc0l8nv0e0su0VFISoxhJ/AwOn3gL2NWNY2LUEldP5WtVcRYSQBcjg0y9NfG8JYrCJaYDpupBHJQ==", "license": "Apache-2.0", "dependencies": { - "@smithy/types": "^4.13.0", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1774,13 +1681,13 @@ } }, "node_modules/@smithy/util-retry": { - "version": "4.2.11", - "resolved": "https://registry.npmjs.org/@smithy/util-retry/-/util-retry-4.2.11.tgz", - "integrity": "sha512-XSZULmL5x6aCTTii59wJqKsY1l3eMIAomRAccW7Tzh9r8s7T/7rdo03oektuH5jeYRlJMPcNP92EuRDvk9aXbw==", + "version": "4.2.12", + "resolved": "https://registry.npmjs.org/@smithy/util-retry/-/util-retry-4.2.12.tgz", + "integrity": "sha512-1zopLDUEOwumjcHdJ1mwBHddubYF8GMQvstVCLC54Y46rqoHwlIU+8ZzUeaBcD+WCJHyDGSeZ2ml9YSe9aqcoQ==", "license": "Apache-2.0", "dependencies": { - 
"@smithy/service-error-classification": "^4.2.11", - "@smithy/types": "^4.13.0", + "@smithy/service-error-classification": "^4.2.12", + "@smithy/types": "^4.13.1", "tslib": "^2.6.2" }, "engines": { @@ -1788,14 +1695,14 @@ } }, "node_modules/@smithy/util-stream": { - "version": "4.5.17", - "resolved": "https://registry.npmjs.org/@smithy/util-stream/-/util-stream-4.5.17.tgz", - "integrity": "sha512-793BYZ4h2JAQkNHcEnyFxDTcZbm9bVybD0UV/LEWmZ5bkTms7JqjfrLMi2Qy0E5WFcCzLwCAPgcvcvxoeALbAQ==", + "version": "4.5.19", + "resolved": "https://registry.npmjs.org/@smithy/util-stream/-/util-stream-4.5.19.tgz", + "integrity": "sha512-v4sa+3xTweL1CLO2UP0p7tvIMH/Rq1X4KKOxd568mpe6LSLMQCnDHs4uv7m3ukpl3HvcN2JH6jiCS0SNRXKP/w==", "license": "Apache-2.0", "dependencies": { - "@smithy/fetch-http-handler": "^5.3.13", - "@smithy/node-http-handler": "^4.4.14", - "@smithy/types": "^4.13.0", + "@smithy/fetch-http-handler": "^5.3.15", + "@smithy/node-http-handler": "^4.4.16", + "@smithy/types": "^4.13.1", "@smithy/util-base64": "^4.3.2", "@smithy/util-buffer-from": "^4.2.2", "@smithy/util-hex-encoding": "^4.2.2", @@ -1872,6 +1779,20 @@ "integrity": "sha512-C5Mc6rdnsaJDjO3UpGW/CQTHtCKaYlScZTly4JIu97Jxo/odCiH0ITnDXSJPTOrEKk/ycSZ0AOgTmkDtkOsvIA==", "license": "MIT" }, + "node_modules/@types/diff": { + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/@types/diff/-/diff-7.0.2.tgz", + "integrity": "sha512-JSWRMozjFKsGlEjiiKajUjIJVKuKdE3oVy2DNtK+fUo8q82nhFZ2CPQwicAIkXrofahDXrWJ7mjelvZphMS98Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/hosted-git-info": { + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/@types/hosted-git-info/-/hosted-git-info-3.0.5.tgz", + "integrity": "sha512-Dmngh7U003cOHPhKGyA7LWqrnvcTyILNgNPmNCxlx7j8MIi54iBliiT8XqVLIQ3GchoOjVAyBzNJVyuaJjqokg==", + "dev": true, + "license": "MIT" + }, "node_modules/@types/mime-types": { "version": "2.1.4", "resolved": "https://registry.npmjs.org/@types/mime-types/-/mime-types-2.1.4.tgz", @@ -1887,6 
+1808,16 @@ "undici-types": "~6.21.0" } }, + "node_modules/@types/proper-lockfile": { + "version": "4.1.4", + "resolved": "https://registry.npmjs.org/@types/proper-lockfile/-/proper-lockfile-4.1.4.tgz", + "integrity": "sha512-uo2ABllncSqg9F1D4nugVl9v93RmjxF6LJzQLMLDdPaXCUIDPeOJ21Gbqi43xNKzBi/WQ0Q0dICqufzQbMjipQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/retry": "*" + } + }, "node_modules/@types/retry": { "version": "0.12.0", "resolved": "https://registry.npmjs.org/@types/retry/-/retry-0.12.0.tgz", @@ -1903,13 +1834,6 @@ "@types/node": "*" } }, - "node_modules/@yarnpkg/lockfile": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz", - "integrity": "sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==", - "dev": true, - "license": "BSD-2-Clause" - }, "node_modules/agent-base": { "version": "7.1.4", "resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz", @@ -2062,19 +1986,6 @@ "node": "18 || 20 || >=22" } }, - "node_modules/braces": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", - "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", - "dev": true, - "license": "MIT", - "dependencies": { - "fill-range": "^7.1.1" - }, - "engines": { - "node": ">=8" - } - }, "node_modules/buffer-crc32": { "version": "0.2.13", "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", @@ -2090,56 +2001,6 @@ "integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==", "license": "BSD-3-Clause" }, - "node_modules/call-bind": { - "version": "1.0.8", - "resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz", - "integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==", - "dev": true, - "license": "MIT", - 
"dependencies": { - "call-bind-apply-helpers": "^1.0.0", - "es-define-property": "^1.0.0", - "get-intrinsic": "^1.2.4", - "set-function-length": "^1.2.2" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/call-bind-apply-helpers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz", - "integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0", - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/call-bound": { - "version": "1.0.4", - "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", - "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "get-intrinsic": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/chalk": { "version": "5.6.2", "resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz", @@ -2152,22 +2013,6 @@ "url": "https://github.com/chalk/chalk?sponsor=1" } }, - "node_modules/ci-info": { - "version": "3.9.0", - "resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz", - "integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/sibiraj-s" - } - ], - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/cli-highlight": { "version": "2.1.11", "resolved": "https://registry.npmjs.org/cli-highlight/-/cli-highlight-2.1.11.tgz", @@ -2255,21 +2100,6 @@ "integrity": 
"sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", "license": "MIT" }, - "node_modules/cross-spawn": { - "version": "7.0.6", - "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz", - "integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==", - "dev": true, - "license": "MIT", - "dependencies": { - "path-key": "^3.1.0", - "shebang-command": "^2.0.0", - "which": "^2.0.1" - }, - "engines": { - "node": ">= 8" - } - }, "node_modules/data-uri-to-buffer": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz", @@ -2296,24 +2126,6 @@ } } }, - "node_modules/define-data-property": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz", - "integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==", - "dev": true, - "license": "MIT", - "dependencies": { - "es-define-property": "^1.0.0", - "es-errors": "^1.3.0", - "gopd": "^1.0.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/degenerator": { "version": "5.0.1", "resolved": "https://registry.npmjs.org/degenerator/-/degenerator-5.0.1.tgz", @@ -2337,21 +2149,6 @@ "node": ">=0.3.1" } }, - "node_modules/dunder-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz", - "integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "es-errors": "^1.3.0", - "gopd": "^1.2.0" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/ecdsa-sig-formatter": { "version": "1.0.11", "resolved": 
"https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz", @@ -2376,39 +2173,6 @@ "once": "^1.4.0" } }, - "node_modules/es-define-property": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", - "integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-errors": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz", - "integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/es-object-atoms": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz", - "integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==", - "dev": true, - "license": "MIT", - "dependencies": { - "es-errors": "^1.3.0" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/escalade": { "version": "3.2.0", "resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz", @@ -2519,9 +2283,9 @@ "license": "BSD-3-Clause" }, "node_modules/fast-xml-builder": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fast-xml-builder/-/fast-xml-builder-1.1.0.tgz", - "integrity": "sha512-7mtITW/we2/wTUZqMyBOR2F8xP4CRxMiSEcQxPIqdRWdO2L/HZSOlzoNyghmyDwNB8BDxePooV1ZTJpkOUhdRg==", + "version": "1.1.2", + "resolved": "https://registry.npmjs.org/fast-xml-builder/-/fast-xml-builder-1.1.2.tgz", + "integrity": "sha512-NJAmiuVaJEjVa7TjLZKlYd7RqmzOC91EtPFXHvlTcqBVo50Qh7XV5IwvXi1c7NRz2Q/majGX9YLcwJtWgHjtkA==", "funding": [ { "type": "github", @@ -2530,7 +2294,7 @@ ], "license": "MIT", "dependencies": { - "path-expression-matcher": "^1.1.2" + "path-expression-matcher": 
"^1.1.3" } }, "node_modules/fast-xml-parser": { @@ -2602,29 +2366,6 @@ "url": "https://github.com/sindresorhus/file-type?sponsor=1" } }, - "node_modules/fill-range": { - "version": "7.1.1", - "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", - "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", - "dev": true, - "license": "MIT", - "dependencies": { - "to-regex-range": "^5.0.1" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/find-yarn-workspace-root": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/find-yarn-workspace-root/-/find-yarn-workspace-root-2.0.0.tgz", - "integrity": "sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "micromatch": "^4.0.2" - } - }, "node_modules/formdata-polyfill": { "version": "4.0.10", "resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz", @@ -2637,21 +2378,6 @@ "node": ">=12.20.0" } }, - "node_modules/fs-extra": { - "version": "10.1.0", - "resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz", - "integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.2.0", - "jsonfile": "^6.0.1", - "universalify": "^2.0.0" - }, - "engines": { - "node": ">=12" - } - }, "node_modules/fsevents": { "version": "2.3.2", "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz", @@ -2666,16 +2392,6 @@ "node": "^8.16.0 || ^10.6.0 || >=11.0.0" } }, - "node_modules/function-bind": { - "version": "1.1.2", - "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", - "integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==", - "dev": true, - "license": "MIT", - "funding": 
{ - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/gaxios": { "version": "7.1.4", "resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.4.tgz", @@ -2725,45 +2441,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/get-intrinsic": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz", - "integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind-apply-helpers": "^1.0.2", - "es-define-property": "^1.0.1", - "es-errors": "^1.3.0", - "es-object-atoms": "^1.1.1", - "function-bind": "^1.1.2", - "get-proto": "^1.0.1", - "gopd": "^1.2.0", - "has-symbols": "^1.1.0", - "hasown": "^2.0.2", - "math-intrinsics": "^1.1.0" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/get-proto": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz", - "integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==", - "dev": true, - "license": "MIT", - "dependencies": { - "dunder-proto": "^1.0.1", - "es-object-atoms": "^1.0.0" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/get-stream": { "version": "5.2.0", "resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz", @@ -2845,19 +2522,6 @@ "node": ">=14" } }, - "node_modules/gopd": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", - "integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/graceful-fs": { "version": "4.2.11", "resolved": 
"https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz", @@ -2873,45 +2537,6 @@ "node": ">=8" } }, - "node_modules/has-property-descriptors": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz", - "integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==", - "dev": true, - "license": "MIT", - "dependencies": { - "es-define-property": "^1.0.0" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/has-symbols": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", - "integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/hasown": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz", - "integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "function-bind": "^1.1.2" - }, - "engines": { - "node": ">= 0.4" - } - }, "node_modules/highlight.js": { "version": "10.7.3", "resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz", @@ -2997,22 +2622,6 @@ "node": ">= 12" } }, - "node_modules/is-docker": { - "version": "2.2.1", - "resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz", - "integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==", - "dev": true, - "license": "MIT", - "bin": { - "is-docker": "cli.js" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/is-fullwidth-code-point": { "version": "3.0.0", "resolved": 
"https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz", @@ -3022,43 +2631,6 @@ "node": ">=8" } }, - "node_modules/is-number": { - "version": "7.0.0", - "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", - "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.12.0" - } - }, - "node_modules/is-wsl": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz", - "integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-docker": "^2.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/isarray": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz", - "integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==", - "dev": true, - "license": "MIT" - }, - "node_modules/isexe": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", - "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", - "dev": true, - "license": "ISC" - }, "node_modules/json-bigint": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz", @@ -3087,49 +2659,6 @@ "integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==", "license": "MIT" }, - "node_modules/json-stable-stringify": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.3.0.tgz", - "integrity": "sha512-qtYiSSFlwot9XHtF9bD9c7rwKjr+RecWT//ZnPvSmEjpV5mmPOCN4j8UjY5hbjNkOwZ/jQv3J6R1/pL7RwgMsg==", - "dev": true, - "license": "MIT", - "dependencies": { - "call-bind": "^1.0.8", - 
"call-bound": "^1.0.4", - "isarray": "^2.0.5", - "jsonify": "^0.0.1", - "object-keys": "^1.1.1" - }, - "engines": { - "node": ">= 0.4" - }, - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/jsonfile": { - "version": "6.2.0", - "resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz", - "integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==", - "dev": true, - "license": "MIT", - "dependencies": { - "universalify": "^2.0.0" - }, - "optionalDependencies": { - "graceful-fs": "^4.1.6" - } - }, - "node_modules/jsonify": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.1.tgz", - "integrity": "sha512-2/Ki0GcmuqSrgFyelQq9M05y7PS0mEwuIzrf3f1fPqkVDVRvZrPZtVSMHxdgo8Aq0sxAOb/cr2aqqA3LeWHVPg==", - "dev": true, - "license": "Public Domain", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/jwa": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz", @@ -3151,20 +2680,10 @@ "safe-buffer": "^5.0.1" } }, - "node_modules/klaw-sync": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz", - "integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "graceful-fs": "^4.1.11" - } - }, "node_modules/koffi": { - "version": "2.15.1", - "resolved": "https://registry.npmjs.org/koffi/-/koffi-2.15.1.tgz", - "integrity": "sha512-mnc0C0crx/xMSljb5s9QbnLrlFHprioFO1hkXyuSuO/QtbpLDa0l/uM21944UfQunMKmp3/r789DTDxVyyH6aA==", + "version": "2.15.2", + "resolved": "https://registry.npmjs.org/koffi/-/koffi-2.15.2.tgz", + "integrity": "sha512-r9tjJLVRSOhCRWdVyQlF3/Ugzeg13jlzS4czS82MAgLff4W+BcYOW7g8Y62t9O5JYjYOLAjAovAZDNlDfZNu+g==", "hasInstallScript": true, "license": "MIT", "optional": true, @@ -3199,30 +2718,6 @@ "node": ">= 18" } }, - 
"node_modules/math-intrinsics": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", - "integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/micromatch": { - "version": "4.0.8", - "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", - "integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", - "dev": true, - "license": "MIT", - "dependencies": { - "braces": "^3.0.3", - "picomatch": "^2.3.1" - }, - "engines": { - "node": ">=8.6" - } - }, "node_modules/mime-db": { "version": "1.54.0", "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz", @@ -3263,16 +2758,6 @@ "url": "https://github.com/sponsors/isaacs" } }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true, - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, "node_modules/minipass": { "version": "7.1.3", "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.3.tgz", @@ -3355,16 +2840,6 @@ "node": ">=0.10.0" } }, - "node_modules/object-keys": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz", - "integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 0.4" - } - }, "node_modules/once": { "version": "1.4.0", "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", @@ -3374,23 +2849,6 @@ "wrappy": "1" } }, - "node_modules/open": { - "version": "7.4.2", - "resolved": 
"https://registry.npmjs.org/open/-/open-7.4.2.tgz", - "integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-docker": "^2.0.0", - "is-wsl": "^2.1.1" - }, - "engines": { - "node": ">=8" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/openai": { "version": "6.26.0", "resolved": "https://registry.npmjs.org/openai/-/openai-6.26.0.tgz", @@ -3484,57 +2942,10 @@ "integrity": "sha512-Njv/59hHaokb/hRUjce3Hdv12wd60MtM9Z5Olmn+nehe0QDAsRtRbJPvJ0Z91TusF0SuZRIvnM+S4l6EIP8leA==", "license": "MIT" }, - "node_modules/patch-package": { - "version": "8.0.1", - "resolved": "https://registry.npmjs.org/patch-package/-/patch-package-8.0.1.tgz", - "integrity": "sha512-VsKRIA8f5uqHQ7NGhwIna6Bx6D9s/1iXlA1hthBVBEbkq+t4kXD0HHt+rJhf/Z+Ci0F/HCB2hvn0qLdLG+Qxlw==", - "dev": true, - "license": "MIT", - "dependencies": { - "@yarnpkg/lockfile": "^1.1.0", - "chalk": "^4.1.2", - "ci-info": "^3.7.0", - "cross-spawn": "^7.0.3", - "find-yarn-workspace-root": "^2.0.0", - "fs-extra": "^10.0.0", - "json-stable-stringify": "^1.0.2", - "klaw-sync": "^6.0.0", - "minimist": "^1.2.6", - "open": "^7.4.2", - "semver": "^7.5.3", - "slash": "^2.0.0", - "tmp": "^0.2.4", - "yaml": "^2.2.2" - }, - "bin": { - "patch-package": "index.js" - }, - "engines": { - "node": ">=14", - "npm": ">5" - } - }, - "node_modules/patch-package/node_modules/chalk": { - "version": "4.1.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", - "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^4.1.0", - "supports-color": "^7.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/chalk/chalk?sponsor=1" - } - }, "node_modules/path-expression-matcher": { - "version": "1.1.2", - "resolved": 
"https://registry.npmjs.org/path-expression-matcher/-/path-expression-matcher-1.1.2.tgz", - "integrity": "sha512-LXWqJmcpp2BKOEmgt4CyuESFmBfPuhJlAHKJsFzuJU6CxErWk75BrO+Ni77M9OxHN6dCYKM4vj+21Z6cOL96YQ==", + "version": "1.1.3", + "resolved": "https://registry.npmjs.org/path-expression-matcher/-/path-expression-matcher-1.1.3.tgz", + "integrity": "sha512-qdVgY8KXmVdJZRSS1JdEPOKPdTiEK/pi0RkcT2sw1RhXxohdujUlJFPuS1TSkevZ9vzd3ZlL7ULl1MHGTApKzQ==", "funding": [ { "type": "github", @@ -3546,16 +2957,6 @@ "node": ">=14.0.0" } }, - "node_modules/path-key": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", - "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/path-scurry": { "version": "2.0.2", "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.2.tgz", @@ -3584,19 +2985,6 @@ "integrity": "sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==", "license": "ISC" }, - "node_modules/picomatch": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", - "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8.6" - }, - "funding": { - "url": "https://github.com/sponsors/jonschlinkert" - } - }, "node_modules/playwright": { "version": "1.58.2", "resolved": "https://registry.npmjs.org/playwright/-/playwright-1.58.2.tgz", @@ -3762,60 +3150,6 @@ ], "license": "MIT" }, - "node_modules/semver": { - "version": "7.7.4", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz", - "integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - 
"engines": { - "node": ">=10" - } - }, - "node_modules/set-function-length": { - "version": "1.2.2", - "resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz", - "integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==", - "dev": true, - "license": "MIT", - "dependencies": { - "define-data-property": "^1.1.4", - "es-errors": "^1.3.0", - "function-bind": "^1.1.2", - "get-intrinsic": "^1.2.4", - "gopd": "^1.0.1", - "has-property-descriptors": "^1.0.2" - }, - "engines": { - "node": ">= 0.4" - } - }, - "node_modules/shebang-command": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", - "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", - "dev": true, - "license": "MIT", - "dependencies": { - "shebang-regex": "^3.0.0" - }, - "engines": { - "node": ">=8" - } - }, - "node_modules/shebang-regex": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", - "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=8" - } - }, "node_modules/signal-exit": { "version": "3.0.7", "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz", @@ -3828,16 +3162,6 @@ "integrity": "sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==", "license": "MIT" }, - "node_modules/slash": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz", - "integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, "node_modules/smart-buffer": { "version": "4.2.0", "resolved": 
"https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz", @@ -4003,29 +3327,6 @@ "node": ">=0.8" } }, - "node_modules/tmp": { - "version": "0.2.5", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz", - "integrity": "sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.14" - } - }, - "node_modules/to-regex-range": { - "version": "5.0.1", - "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", - "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "is-number": "^7.0.0" - }, - "engines": { - "node": ">=8.0" - } - }, "node_modules/token-types": { "version": "6.1.2", "resolved": "https://registry.npmjs.org/token-types/-/token-types-6.1.2.tgz", @@ -4083,9 +3384,9 @@ } }, "node_modules/undici": { - "version": "7.22.0", - "resolved": "https://registry.npmjs.org/undici/-/undici-7.22.0.tgz", - "integrity": "sha512-RqslV2Us5BrllB+JeiZnK4peryVTndy9Dnqq62S3yYRRTj0tFQCwEniUy2167skdGOy3vqRzEvl1Dm4sV2ReDg==", + "version": "7.24.0", + "resolved": "https://registry.npmjs.org/undici/-/undici-7.24.0.tgz", + "integrity": "sha512-jxytwMHhsbdpBXxLAcuu0fzlQeXCNnWdDyRHpvWsUl8vd98UwYdl9YTyn8/HcpcJPC3pwUveefsa3zTxyD/ERg==", "license": "MIT", "engines": { "node": ">=20.18.1" @@ -4097,16 +3398,6 @@ "integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==", "license": "MIT" }, - "node_modules/universalify": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz", - "integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 10.0.0" - } - }, "node_modules/web-streams-polyfill": { "version": "3.3.3", 
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz", @@ -4116,22 +3407,6 @@ "node": ">= 8" } }, - "node_modules/which": { - "version": "2.0.2", - "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", - "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", - "dev": true, - "license": "ISC", - "dependencies": { - "isexe": "^2.0.0" - }, - "bin": { - "node-which": "bin/node-which" - }, - "engines": { - "node": ">= 8" - } - }, "node_modules/wrap-ansi": { "version": "7.0.0", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz", @@ -4287,6 +3562,79 @@ "peerDependencies": { "zod": "^3.25 || ^4" } + }, + "packages/pi-agent-core": { + "name": "@gsd/pi-agent-core", + "version": "0.57.1", + "dependencies": { + "@gsd/pi-ai": "*" + } + }, + "packages/pi-ai": { + "name": "@gsd/pi-ai", + "version": "0.57.1", + "dependencies": { + "@anthropic-ai/sdk": "^0.73.0", + "@aws-sdk/client-bedrock-runtime": "^3.983.0", + "@google/genai": "^1.40.0", + "@mistralai/mistralai": "1.14.1", + "@sinclair/typebox": "^0.34.41", + "ajv": "^8.17.1", + "ajv-formats": "^3.0.1", + "chalk": "^5.6.2", + "openai": "6.26.0", + "partial-json": "^0.1.7", + "proxy-agent": "^6.5.0", + "undici": "^7.19.1", + "zod-to-json-schema": "^3.24.6" + } + }, + "packages/pi-coding-agent": { + "name": "@gsd/pi-coding-agent", + "version": "0.57.1", + "dependencies": { + "@gsd/pi-agent-core": "*", + "@gsd/pi-ai": "*", + "@gsd/pi-tui": "*", + "@mariozechner/jiti": "^2.6.2", + "@silvia-odwyer/photon-node": "^0.3.4", + "chalk": "^5.5.0", + "cli-highlight": "^2.1.11", + "diff": "^8.0.2", + "extract-zip": "^2.0.1", + "file-type": "^21.1.1", + "glob": "^13.0.1", + "hosted-git-info": "^9.0.2", + "ignore": "^7.0.5", + "marked": "^15.0.12", + "minimatch": "^10.2.3", + "proper-lockfile": "^4.1.2", + "strip-ansi": "^7.1.0", + "undici": "^7.19.1", + "yaml": "^2.8.2" + }, + "devDependencies": { + 
"@types/diff": "^7.0.2", + "@types/hosted-git-info": "^3.0.5", + "@types/proper-lockfile": "^4.1.4" + }, + "optionalDependencies": { + "@mariozechner/clipboard": "^0.3.2" + } + }, + "packages/pi-tui": { + "name": "@gsd/pi-tui", + "version": "0.57.1", + "dependencies": { + "@types/mime-types": "^2.1.4", + "chalk": "^5.5.0", + "get-east-asian-width": "^1.3.0", + "marked": "^15.0.12", + "mime-types": "^3.0.1" + }, + "optionalDependencies": { + "koffi": "^2.9.0" + } } } } diff --git a/package.json b/package.json index ce445ef54..5619d899d 100644 --- a/package.json +++ b/package.json @@ -12,13 +12,16 @@ "url": "https://github.com/glittercowboy/gsd-pi/issues" }, "type": "module", + "workspaces": [ + "packages/*" + ], "bin": { "gsd": "dist/loader.js", "gsd-cli": "dist/loader.js" }, "files": [ "dist", - "patches", + "packages", "pkg", "src/resources", "scripts/postinstall.js", @@ -33,8 +36,13 @@ "node": ">=20.6.0" }, "scripts": { - "build": "tsc && npm run copy-themes", - "copy-themes": "node -e \"const{mkdirSync,cpSync}=require('fs');const{resolve}=require('path');const src=resolve(__dirname,'node_modules/@mariozechner/pi-coding-agent/dist/modes/interactive/theme');mkdirSync('pkg/dist/modes/interactive/theme',{recursive:true});cpSync(src,'pkg/dist/modes/interactive/theme',{recursive:true})\"", + "build:pi-tui": "npm run build -w @gsd/pi-tui", + "build:pi-ai": "npm run build -w @gsd/pi-ai", + "build:pi-agent-core": "npm run build -w @gsd/pi-agent-core", + "build:pi-coding-agent": "npm run build -w @gsd/pi-coding-agent", + "build:pi": "npm run build:pi-tui && npm run build:pi-ai && npm run build:pi-agent-core && npm run build:pi-coding-agent", + "build": "npm run build:pi && tsc && npm run copy-themes", + "copy-themes": "node -e \"const{mkdirSync,cpSync}=require('fs');const{resolve}=require('path');const 
src=resolve(__dirname,'packages/pi-coding-agent/dist/modes/interactive/theme');mkdirSync('pkg/dist/modes/interactive/theme',{recursive:true});cpSync(src,'pkg/dist/modes/interactive/theme',{recursive:true})\"", "test": "node --import ./src/resources/extensions/gsd/tests/resolve-ts.mjs --experimental-strip-types --test src/resources/extensions/gsd/tests/*.test.ts src/resources/extensions/gsd/tests/*.test.mjs src/tests/*.test.ts", "dev": "tsc --watch", "postinstall": "node scripts/postinstall.js", @@ -45,13 +53,11 @@ }, "dependencies": { "@clack/prompts": "^1.1.0", - "@mariozechner/pi-coding-agent": "^0.57.1", "picocolors": "^1.1.1", "playwright": "^1.58.2" }, "devDependencies": { "@types/node": "^22.0.0", - "patch-package": "^8.0.1", "typescript": "^5.4.0" }, "overrides": { diff --git a/packages/pi-agent-core/package.json b/packages/pi-agent-core/package.json new file mode 100644 index 000000000..60202508c --- /dev/null +++ b/packages/pi-agent-core/package.json @@ -0,0 +1,14 @@ +{ + "name": "@gsd/pi-agent-core", + "version": "0.57.1", + "description": "General-purpose agent core (vendored from pi-mono)", + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "scripts": { + "build": "tsc -p tsconfig.json" + }, + "dependencies": { + "@gsd/pi-ai": "*" + } +} diff --git a/packages/pi-agent-core/src/agent-loop.ts b/packages/pi-agent-core/src/agent-loop.ts new file mode 100644 index 000000000..413c20b36 --- /dev/null +++ b/packages/pi-agent-core/src/agent-loop.ts @@ -0,0 +1,417 @@ +/** + * Agent loop that works with AgentMessage throughout. + * Transforms to Message[] only at the LLM call boundary. 
+ */ + +import { + type AssistantMessage, + type Context, + EventStream, + streamSimple, + type ToolResultMessage, + validateToolArguments, +} from "@gsd/pi-ai"; +import type { + AgentContext, + AgentEvent, + AgentLoopConfig, + AgentMessage, + AgentTool, + AgentToolResult, + StreamFn, +} from "./types.js"; + +/** + * Start an agent loop with a new prompt message. + * The prompt is added to the context and events are emitted for it. + */ +export function agentLoop( + prompts: AgentMessage[], + context: AgentContext, + config: AgentLoopConfig, + signal?: AbortSignal, + streamFn?: StreamFn, +): EventStream { + const stream = createAgentStream(); + + (async () => { + const newMessages: AgentMessage[] = [...prompts]; + const currentContext: AgentContext = { + ...context, + messages: [...context.messages, ...prompts], + }; + + stream.push({ type: "agent_start" }); + stream.push({ type: "turn_start" }); + for (const prompt of prompts) { + stream.push({ type: "message_start", message: prompt }); + stream.push({ type: "message_end", message: prompt }); + } + + await runLoop(currentContext, newMessages, config, signal, stream, streamFn); + })(); + + return stream; +} + +/** + * Continue an agent loop from the current context without adding a new message. + * Used for retries - context already has user message or tool results. + * + * **Important:** The last message in context must convert to a `user` or `toolResult` message + * via `convertToLlm`. If it doesn't, the LLM provider will reject the request. + * This cannot be validated here since `convertToLlm` is only called once per turn. 
+ */ +export function agentLoopContinue( + context: AgentContext, + config: AgentLoopConfig, + signal?: AbortSignal, + streamFn?: StreamFn, +): EventStream { + if (context.messages.length === 0) { + throw new Error("Cannot continue: no messages in context"); + } + + if (context.messages[context.messages.length - 1].role === "assistant") { + throw new Error("Cannot continue from message role: assistant"); + } + + const stream = createAgentStream(); + + (async () => { + const newMessages: AgentMessage[] = []; + const currentContext: AgentContext = { ...context }; + + stream.push({ type: "agent_start" }); + stream.push({ type: "turn_start" }); + + await runLoop(currentContext, newMessages, config, signal, stream, streamFn); + })(); + + return stream; +} + +function createAgentStream(): EventStream { + return new EventStream( + (event: AgentEvent) => event.type === "agent_end", + (event: AgentEvent) => (event.type === "agent_end" ? event.messages : []), + ); +} + +/** + * Main loop logic shared by agentLoop and agentLoopContinue. 
+ */ +async function runLoop( + currentContext: AgentContext, + newMessages: AgentMessage[], + config: AgentLoopConfig, + signal: AbortSignal | undefined, + stream: EventStream, + streamFn?: StreamFn, +): Promise { + let firstTurn = true; + // Check for steering messages at start (user may have typed while waiting) + let pendingMessages: AgentMessage[] = (await config.getSteeringMessages?.()) || []; + + // Outer loop: continues when queued follow-up messages arrive after agent would stop + while (true) { + let hasMoreToolCalls = true; + let steeringAfterTools: AgentMessage[] | null = null; + + // Inner loop: process tool calls and steering messages + while (hasMoreToolCalls || pendingMessages.length > 0) { + if (!firstTurn) { + stream.push({ type: "turn_start" }); + } else { + firstTurn = false; + } + + // Process pending messages (inject before next assistant response) + if (pendingMessages.length > 0) { + for (const message of pendingMessages) { + stream.push({ type: "message_start", message }); + stream.push({ type: "message_end", message }); + currentContext.messages.push(message); + newMessages.push(message); + } + pendingMessages = []; + } + + // Stream assistant response + const message = await streamAssistantResponse(currentContext, config, signal, stream, streamFn); + newMessages.push(message); + + if (message.stopReason === "error" || message.stopReason === "aborted") { + stream.push({ type: "turn_end", message, toolResults: [] }); + stream.push({ type: "agent_end", messages: newMessages }); + stream.end(newMessages); + return; + } + + // Check for tool calls + const toolCalls = message.content.filter((c) => c.type === "toolCall"); + hasMoreToolCalls = toolCalls.length > 0; + + const toolResults: ToolResultMessage[] = []; + if (hasMoreToolCalls) { + const toolExecution = await executeToolCalls( + currentContext.tools, + message, + signal, + stream, + config.getSteeringMessages, + ); + toolResults.push(...toolExecution.toolResults); + steeringAfterTools = 
toolExecution.steeringMessages ?? null; + + for (const result of toolResults) { + currentContext.messages.push(result); + newMessages.push(result); + } + } + + stream.push({ type: "turn_end", message, toolResults }); + + // Get steering messages after turn completes + if (steeringAfterTools && steeringAfterTools.length > 0) { + pendingMessages = steeringAfterTools; + steeringAfterTools = null; + } else { + pendingMessages = (await config.getSteeringMessages?.()) || []; + } + } + + // Agent would stop here. Check for follow-up messages. + const followUpMessages = (await config.getFollowUpMessages?.()) || []; + if (followUpMessages.length > 0) { + // Set as pending so inner loop processes them + pendingMessages = followUpMessages; + continue; + } + + // No more messages, exit + break; + } + + stream.push({ type: "agent_end", messages: newMessages }); + stream.end(newMessages); +} + +/** + * Stream an assistant response from the LLM. + * This is where AgentMessage[] gets transformed to Message[] for the LLM. + */ +async function streamAssistantResponse( + context: AgentContext, + config: AgentLoopConfig, + signal: AbortSignal | undefined, + stream: EventStream, + streamFn?: StreamFn, +): Promise { + // Apply context transform if configured (AgentMessage[] → AgentMessage[]) + let messages = context.messages; + if (config.transformContext) { + messages = await config.transformContext(messages, signal); + } + + // Convert to LLM-compatible messages (AgentMessage[] → Message[]) + const llmMessages = await config.convertToLlm(messages); + + // Build LLM context + const llmContext: Context = { + systemPrompt: context.systemPrompt, + messages: llmMessages, + tools: context.tools, + }; + + const streamFunction = streamFn || streamSimple; + + // Resolve API key (important for expiring tokens) + const resolvedApiKey = + (config.getApiKey ? 
await config.getApiKey(config.model.provider) : undefined) || config.apiKey; + + const response = await streamFunction(config.model, llmContext, { + ...config, + apiKey: resolvedApiKey, + signal, + }); + + let partialMessage: AssistantMessage | null = null; + let addedPartial = false; + + for await (const event of response) { + switch (event.type) { + case "start": + partialMessage = event.partial; + context.messages.push(partialMessage); + addedPartial = true; + stream.push({ type: "message_start", message: { ...partialMessage } }); + break; + + case "text_start": + case "text_delta": + case "text_end": + case "thinking_start": + case "thinking_delta": + case "thinking_end": + case "toolcall_start": + case "toolcall_delta": + case "toolcall_end": + if (partialMessage) { + partialMessage = event.partial; + context.messages[context.messages.length - 1] = partialMessage; + stream.push({ + type: "message_update", + assistantMessageEvent: event, + message: { ...partialMessage }, + }); + } + break; + + case "done": + case "error": { + const finalMessage = await response.result(); + if (addedPartial) { + context.messages[context.messages.length - 1] = finalMessage; + } else { + context.messages.push(finalMessage); + } + if (!addedPartial) { + stream.push({ type: "message_start", message: { ...finalMessage } }); + } + stream.push({ type: "message_end", message: finalMessage }); + return finalMessage; + } + } + } + + return await response.result(); +} + +/** + * Execute tool calls from an assistant message. 
+ */ +async function executeToolCalls( + tools: AgentTool[] | undefined, + assistantMessage: AssistantMessage, + signal: AbortSignal | undefined, + stream: EventStream, + getSteeringMessages?: AgentLoopConfig["getSteeringMessages"], +): Promise<{ toolResults: ToolResultMessage[]; steeringMessages?: AgentMessage[] }> { + const toolCalls = assistantMessage.content.filter((c) => c.type === "toolCall"); + const results: ToolResultMessage[] = []; + let steeringMessages: AgentMessage[] | undefined; + + for (let index = 0; index < toolCalls.length; index++) { + const toolCall = toolCalls[index]; + const tool = tools?.find((t) => t.name === toolCall.name); + + stream.push({ + type: "tool_execution_start", + toolCallId: toolCall.id, + toolName: toolCall.name, + args: toolCall.arguments, + }); + + let result: AgentToolResult; + let isError = false; + + try { + if (!tool) throw new Error(`Tool ${toolCall.name} not found`); + + const validatedArgs = validateToolArguments(tool, toolCall); + + result = await tool.execute(toolCall.id, validatedArgs, signal, (partialResult) => { + stream.push({ + type: "tool_execution_update", + toolCallId: toolCall.id, + toolName: toolCall.name, + args: toolCall.arguments, + partialResult, + }); + }); + } catch (e) { + result = { + content: [{ type: "text", text: e instanceof Error ? 
e.message : String(e) }], + details: {}, + }; + isError = true; + } + + stream.push({ + type: "tool_execution_end", + toolCallId: toolCall.id, + toolName: toolCall.name, + result, + isError, + }); + + const toolResultMessage: ToolResultMessage = { + role: "toolResult", + toolCallId: toolCall.id, + toolName: toolCall.name, + content: result.content, + details: result.details, + isError, + timestamp: Date.now(), + }; + + results.push(toolResultMessage); + stream.push({ type: "message_start", message: toolResultMessage }); + stream.push({ type: "message_end", message: toolResultMessage }); + + // Check for steering messages - skip remaining tools if user interrupted + if (getSteeringMessages) { + const steering = await getSteeringMessages(); + if (steering.length > 0) { + steeringMessages = steering; + const remainingCalls = toolCalls.slice(index + 1); + for (const skipped of remainingCalls) { + results.push(skipToolCall(skipped, stream)); + } + break; + } + } + } + + return { toolResults: results, steeringMessages }; +} + +function skipToolCall( + toolCall: Extract, + stream: EventStream, +): ToolResultMessage { + const result: AgentToolResult = { + content: [{ type: "text", text: "Skipped due to queued user message." 
}], + details: {}, + }; + + stream.push({ + type: "tool_execution_start", + toolCallId: toolCall.id, + toolName: toolCall.name, + args: toolCall.arguments, + }); + stream.push({ + type: "tool_execution_end", + toolCallId: toolCall.id, + toolName: toolCall.name, + result, + isError: true, + }); + + const toolResultMessage: ToolResultMessage = { + role: "toolResult", + toolCallId: toolCall.id, + toolName: toolCall.name, + content: result.content, + details: {}, + isError: true, + timestamp: Date.now(), + }; + + stream.push({ type: "message_start", message: toolResultMessage }); + stream.push({ type: "message_end", message: toolResultMessage }); + + return toolResultMessage; +} diff --git a/packages/pi-agent-core/src/agent.ts b/packages/pi-agent-core/src/agent.ts new file mode 100644 index 000000000..5ce47971a --- /dev/null +++ b/packages/pi-agent-core/src/agent.ts @@ -0,0 +1,568 @@ +/** + * Agent class that uses the agent-loop directly. + * No transport abstraction - calls streamSimple via the loop. + */ + +import { + getModel, + type ImageContent, + type Message, + type Model, + type SimpleStreamOptions, + streamSimple, + type TextContent, + type ThinkingBudgets, + type Transport, +} from "@gsd/pi-ai"; +import { agentLoop, agentLoopContinue } from "./agent-loop.js"; +import type { + AgentContext, + AgentEvent, + AgentLoopConfig, + AgentMessage, + AgentState, + AgentTool, + StreamFn, + ThinkingLevel, +} from "./types.js"; + +/** + * Default convertToLlm: Keep only LLM-compatible messages, convert attachments. + */ +function defaultConvertToLlm(messages: AgentMessage[]): Message[] { + return messages.filter((m) => m.role === "user" || m.role === "assistant" || m.role === "toolResult"); +} + +export interface AgentOptions { + initialState?: Partial; + + /** + * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call. + * Default filters to user/assistant/toolResult and converts attachments. 
+ */ + convertToLlm?: (messages: AgentMessage[]) => Message[] | Promise; + + /** + * Optional transform applied to context before convertToLlm. + * Use for context pruning, injecting external context, etc. + */ + transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise; + + /** + * Steering mode: "all" = send all steering messages at once, "one-at-a-time" = one per turn + */ + steeringMode?: "all" | "one-at-a-time"; + + /** + * Follow-up mode: "all" = send all follow-up messages at once, "one-at-a-time" = one per turn + */ + followUpMode?: "all" | "one-at-a-time"; + + /** + * Custom stream function (for proxy backends, etc.). Default uses streamSimple. + */ + streamFn?: StreamFn; + + /** + * Optional session identifier forwarded to LLM providers. + * Used by providers that support session-based caching (e.g., OpenAI Codex). + */ + sessionId?: string; + + /** + * Resolves an API key dynamically for each LLM call. + * Useful for expiring tokens (e.g., GitHub Copilot OAuth). + */ + getApiKey?: (provider: string) => Promise | string | undefined; + + /** + * Inspect or replace provider payloads before they are sent. + */ + onPayload?: SimpleStreamOptions["onPayload"]; + + /** + * Custom token budgets for thinking levels (token-based providers only). + */ + thinkingBudgets?: ThinkingBudgets; + + /** + * Preferred transport for providers that support multiple transports. + */ + transport?: Transport; + + /** + * Maximum delay in milliseconds to wait for a retry when the server requests a long wait. + * If the server's requested delay exceeds this value, the request fails immediately, + * allowing higher-level retry logic to handle it with user visibility. + * Default: 60000 (60 seconds). Set to 0 to disable the cap. 
+ */ + maxRetryDelayMs?: number; +} + +export class Agent { + private _state: AgentState = { + systemPrompt: "", + model: getModel("google", "gemini-2.5-flash-lite-preview-06-17"), + thinkingLevel: "off", + tools: [], + messages: [], + isStreaming: false, + streamMessage: null, + pendingToolCalls: new Set(), + error: undefined, + }; + + private listeners = new Set<(e: AgentEvent) => void>(); + private abortController?: AbortController; + private convertToLlm: (messages: AgentMessage[]) => Message[] | Promise; + private transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise; + private steeringQueue: AgentMessage[] = []; + private followUpQueue: AgentMessage[] = []; + private steeringMode: "all" | "one-at-a-time"; + private followUpMode: "all" | "one-at-a-time"; + public streamFn: StreamFn; + private _sessionId?: string; + public getApiKey?: (provider: string) => Promise | string | undefined; + private _onPayload?: SimpleStreamOptions["onPayload"]; + private runningPrompt?: Promise; + private resolveRunningPrompt?: () => void; + private _thinkingBudgets?: ThinkingBudgets; + private _transport: Transport; + private _maxRetryDelayMs?: number; + + constructor(opts: AgentOptions = {}) { + this._state = { ...this._state, ...opts.initialState }; + this.convertToLlm = opts.convertToLlm || defaultConvertToLlm; + this.transformContext = opts.transformContext; + this.steeringMode = opts.steeringMode || "one-at-a-time"; + this.followUpMode = opts.followUpMode || "one-at-a-time"; + this.streamFn = opts.streamFn || streamSimple; + this._sessionId = opts.sessionId; + this.getApiKey = opts.getApiKey; + this._onPayload = opts.onPayload; + this._thinkingBudgets = opts.thinkingBudgets; + this._transport = opts.transport ?? "sse"; + this._maxRetryDelayMs = opts.maxRetryDelayMs; + } + + /** + * Get the current session ID used for provider caching. 
+ */ + get sessionId(): string | undefined { + return this._sessionId; + } + + /** + * Set the session ID for provider caching. + * Call this when switching sessions (new session, branch, resume). + */ + set sessionId(value: string | undefined) { + this._sessionId = value; + } + + /** + * Get the current thinking budgets. + */ + get thinkingBudgets(): ThinkingBudgets | undefined { + return this._thinkingBudgets; + } + + /** + * Set custom thinking budgets for token-based providers. + */ + set thinkingBudgets(value: ThinkingBudgets | undefined) { + this._thinkingBudgets = value; + } + + /** + * Get the current preferred transport. + */ + get transport(): Transport { + return this._transport; + } + + /** + * Set the preferred transport. + */ + setTransport(value: Transport) { + this._transport = value; + } + + /** + * Get the current max retry delay in milliseconds. + */ + get maxRetryDelayMs(): number | undefined { + return this._maxRetryDelayMs; + } + + /** + * Set the maximum delay to wait for server-requested retries. + * Set to 0 to disable the cap. 
+ */ + set maxRetryDelayMs(value: number | undefined) { + this._maxRetryDelayMs = value; + } + + get state(): AgentState { + return this._state; + } + + subscribe(fn: (e: AgentEvent) => void): () => void { + this.listeners.add(fn); + return () => this.listeners.delete(fn); + } + + // State mutators + setSystemPrompt(v: string) { + this._state.systemPrompt = v; + } + + setModel(m: Model) { + this._state.model = m; + } + + setThinkingLevel(l: ThinkingLevel) { + this._state.thinkingLevel = l; + } + + setSteeringMode(mode: "all" | "one-at-a-time") { + this.steeringMode = mode; + } + + getSteeringMode(): "all" | "one-at-a-time" { + return this.steeringMode; + } + + setFollowUpMode(mode: "all" | "one-at-a-time") { + this.followUpMode = mode; + } + + getFollowUpMode(): "all" | "one-at-a-time" { + return this.followUpMode; + } + + setTools(t: AgentTool[]) { + this._state.tools = t; + } + + replaceMessages(ms: AgentMessage[]) { + this._state.messages = ms.slice(); + } + + appendMessage(m: AgentMessage) { + this._state.messages = [...this._state.messages, m]; + } + + /** + * Queue a steering message to interrupt the agent mid-run. + * Delivered after current tool execution, skips remaining tools. + */ + steer(m: AgentMessage) { + this.steeringQueue.push(m); + } + + /** + * Queue a follow-up message to be processed after the agent finishes. + * Delivered only when agent has no more tool calls or steering messages. 
+ */ + followUp(m: AgentMessage) { + this.followUpQueue.push(m); + } + + clearSteeringQueue() { + this.steeringQueue = []; + } + + clearFollowUpQueue() { + this.followUpQueue = []; + } + + clearAllQueues() { + this.steeringQueue = []; + this.followUpQueue = []; + } + + hasQueuedMessages(): boolean { + return this.steeringQueue.length > 0 || this.followUpQueue.length > 0; + } + + private dequeueSteeringMessages(): AgentMessage[] { + if (this.steeringMode === "one-at-a-time") { + if (this.steeringQueue.length > 0) { + const first = this.steeringQueue[0]; + this.steeringQueue = this.steeringQueue.slice(1); + return [first]; + } + return []; + } + + const steering = this.steeringQueue.slice(); + this.steeringQueue = []; + return steering; + } + + private dequeueFollowUpMessages(): AgentMessage[] { + if (this.followUpMode === "one-at-a-time") { + if (this.followUpQueue.length > 0) { + const first = this.followUpQueue[0]; + this.followUpQueue = this.followUpQueue.slice(1); + return [first]; + } + return []; + } + + const followUp = this.followUpQueue.slice(); + this.followUpQueue = []; + return followUp; + } + + clearMessages() { + this._state.messages = []; + } + + abort() { + this.abortController?.abort(); + } + + waitForIdle(): Promise { + return this.runningPrompt ?? Promise.resolve(); + } + + reset() { + this._state.messages = []; + this._state.isStreaming = false; + this._state.streamMessage = null; + this._state.pendingToolCalls = new Set(); + this._state.error = undefined; + this.steeringQueue = []; + this.followUpQueue = []; + } + + /** Send a prompt with an AgentMessage */ + async prompt(message: AgentMessage | AgentMessage[]): Promise; + async prompt(input: string, images?: ImageContent[]): Promise; + async prompt(input: string | AgentMessage | AgentMessage[], images?: ImageContent[]) { + if (this._state.isStreaming) { + throw new Error( + "Agent is already processing a prompt. 
Use steer() or followUp() to queue messages, or wait for completion.", + ); + } + + const model = this._state.model; + if (!model) throw new Error("No model configured"); + + let msgs: AgentMessage[]; + + if (Array.isArray(input)) { + msgs = input; + } else if (typeof input === "string") { + const content: Array = [{ type: "text", text: input }]; + if (images && images.length > 0) { + content.push(...images); + } + msgs = [ + { + role: "user", + content, + timestamp: Date.now(), + }, + ]; + } else { + msgs = [input]; + } + + await this._runLoop(msgs); + } + + /** + * Continue from current context (used for retries and resuming queued messages). + */ + async continue() { + if (this._state.isStreaming) { + throw new Error("Agent is already processing. Wait for completion before continuing."); + } + + const messages = this._state.messages; + if (messages.length === 0) { + throw new Error("No messages to continue from"); + } + if (messages[messages.length - 1].role === "assistant") { + const queuedSteering = this.dequeueSteeringMessages(); + if (queuedSteering.length > 0) { + await this._runLoop(queuedSteering, { skipInitialSteeringPoll: true }); + return; + } + + const queuedFollowUp = this.dequeueFollowUpMessages(); + if (queuedFollowUp.length > 0) { + await this._runLoop(queuedFollowUp); + return; + } + + throw new Error("Cannot continue from message role: assistant"); + } + + await this._runLoop(undefined); + } + + /** + * Run the agent loop. + * If messages are provided, starts a new conversation turn with those messages. + * Otherwise, continues from existing context. 
+ */ + private async _runLoop(messages?: AgentMessage[], options?: { skipInitialSteeringPoll?: boolean }) { + const model = this._state.model; + if (!model) throw new Error("No model configured"); + + this.runningPrompt = new Promise((resolve) => { + this.resolveRunningPrompt = resolve; + }); + + this.abortController = new AbortController(); + this._state.isStreaming = true; + this._state.streamMessage = null; + this._state.error = undefined; + + const reasoning = this._state.thinkingLevel === "off" ? undefined : this._state.thinkingLevel; + + const context: AgentContext = { + systemPrompt: this._state.systemPrompt, + messages: this._state.messages.slice(), + tools: this._state.tools, + }; + + let skipInitialSteeringPoll = options?.skipInitialSteeringPoll === true; + + const config: AgentLoopConfig = { + model, + reasoning, + sessionId: this._sessionId, + onPayload: this._onPayload, + transport: this._transport, + thinkingBudgets: this._thinkingBudgets, + maxRetryDelayMs: this._maxRetryDelayMs, + convertToLlm: this.convertToLlm, + transformContext: this.transformContext, + getApiKey: this.getApiKey, + getSteeringMessages: async () => { + if (skipInitialSteeringPoll) { + skipInitialSteeringPoll = false; + return []; + } + return this.dequeueSteeringMessages(); + }, + getFollowUpMessages: async () => this.dequeueFollowUpMessages(), + }; + + let partial: AgentMessage | null = null; + + try { + const stream = messages + ? 
agentLoop(messages, context, config, this.abortController.signal, this.streamFn) + : agentLoopContinue(context, config, this.abortController.signal, this.streamFn); + + for await (const event of stream) { + // Update internal state based on events + switch (event.type) { + case "message_start": + partial = event.message; + this._state.streamMessage = event.message; + break; + + case "message_update": + partial = event.message; + this._state.streamMessage = event.message; + break; + + case "message_end": + partial = null; + this._state.streamMessage = null; + this.appendMessage(event.message); + break; + + case "tool_execution_start": { + const s = new Set(this._state.pendingToolCalls); + s.add(event.toolCallId); + this._state.pendingToolCalls = s; + break; + } + + case "tool_execution_end": { + const s = new Set(this._state.pendingToolCalls); + s.delete(event.toolCallId); + this._state.pendingToolCalls = s; + break; + } + + case "turn_end": + if (event.message.role === "assistant" && (event.message as any).errorMessage) { + this._state.error = (event.message as any).errorMessage; + } + break; + + case "agent_end": + this._state.isStreaming = false; + this._state.streamMessage = null; + break; + } + + // Emit to listeners + this.emit(event); + } + + // Handle any remaining partial message + if (partial && partial.role === "assistant" && partial.content.length > 0) { + const onlyEmpty = !partial.content.some( + (c) => + (c.type === "thinking" && c.thinking.trim().length > 0) || + (c.type === "text" && c.text.trim().length > 0) || + (c.type === "toolCall" && c.name.trim().length > 0), + ); + if (!onlyEmpty) { + this.appendMessage(partial); + } else { + if (this.abortController?.signal.aborted) { + throw new Error("Request was aborted"); + } + } + } + } catch (err: any) { + const errorMsg: AgentMessage = { + role: "assistant", + content: [{ type: "text", text: "" }], + api: model.api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + 
cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: this.abortController?.signal.aborted ? "aborted" : "error", + errorMessage: err?.message || String(err), + timestamp: Date.now(), + } as AgentMessage; + + this.appendMessage(errorMsg); + this._state.error = err?.message || String(err); + this.emit({ type: "agent_end", messages: [errorMsg] }); + } finally { + this._state.isStreaming = false; + this._state.streamMessage = null; + this._state.pendingToolCalls = new Set(); + this.abortController = undefined; + this.resolveRunningPrompt?.(); + this.runningPrompt = undefined; + this.resolveRunningPrompt = undefined; + } + } + + private emit(e: AgentEvent) { + for (const listener of this.listeners) { + listener(e); + } + } +} diff --git a/packages/pi-agent-core/src/index.ts b/packages/pi-agent-core/src/index.ts new file mode 100644 index 000000000..d8ed5b8eb --- /dev/null +++ b/packages/pi-agent-core/src/index.ts @@ -0,0 +1,8 @@ +// Core Agent +export * from "./agent.js"; +// Loop functions +export * from "./agent-loop.js"; +// Proxy utilities +export * from "./proxy.js"; +// Types +export * from "./types.js"; diff --git a/packages/pi-agent-core/src/proxy.ts b/packages/pi-agent-core/src/proxy.ts new file mode 100644 index 000000000..ebeb7d926 --- /dev/null +++ b/packages/pi-agent-core/src/proxy.ts @@ -0,0 +1,340 @@ +/** + * Proxy stream function for apps that route LLM calls through a server. + * The server manages auth and proxies requests to LLM providers. 
+ */ + +// Internal import for JSON parsing utility +import { + type AssistantMessage, + type AssistantMessageEvent, + type Context, + EventStream, + type Model, + parseStreamingJson, + type SimpleStreamOptions, + type StopReason, + type ToolCall, +} from "@gsd/pi-ai"; + +// Create stream class matching ProxyMessageEventStream +class ProxyMessageEventStream extends EventStream { + constructor() { + super( + (event) => event.type === "done" || event.type === "error", + (event) => { + if (event.type === "done") return event.message; + if (event.type === "error") return event.error; + throw new Error("Unexpected event type"); + }, + ); + } +} + +/** + * Proxy event types - server sends these with partial field stripped to reduce bandwidth. + */ +export type ProxyAssistantMessageEvent = + | { type: "start" } + | { type: "text_start"; contentIndex: number } + | { type: "text_delta"; contentIndex: number; delta: string } + | { type: "text_end"; contentIndex: number; contentSignature?: string } + | { type: "thinking_start"; contentIndex: number } + | { type: "thinking_delta"; contentIndex: number; delta: string } + | { type: "thinking_end"; contentIndex: number; contentSignature?: string } + | { type: "toolcall_start"; contentIndex: number; id: string; toolName: string } + | { type: "toolcall_delta"; contentIndex: number; delta: string } + | { type: "toolcall_end"; contentIndex: number } + | { + type: "done"; + reason: Extract; + usage: AssistantMessage["usage"]; + } + | { + type: "error"; + reason: Extract; + errorMessage?: string; + usage: AssistantMessage["usage"]; + }; + +export interface ProxyStreamOptions extends SimpleStreamOptions { + /** Auth token for the proxy server */ + authToken: string; + /** Proxy server URL (e.g., "https://genai.example.com") */ + proxyUrl: string; +} + +/** + * Stream function that proxies through a server instead of calling LLM providers directly. + * The server strips the partial field from delta events to reduce bandwidth. 
+ * We reconstruct the partial message client-side. + * + * Use this as the `streamFn` option when creating an Agent that needs to go through a proxy. + * + * @example + * ```typescript + * const agent = new Agent({ + * streamFn: (model, context, options) => + * streamProxy(model, context, { + * ...options, + * authToken: await getAuthToken(), + * proxyUrl: "https://genai.example.com", + * }), + * }); + * ``` + */ +export function streamProxy(model: Model, context: Context, options: ProxyStreamOptions): ProxyMessageEventStream { + const stream = new ProxyMessageEventStream(); + + (async () => { + // Initialize the partial message that we'll build up from events + const partial: AssistantMessage = { + role: "assistant", + stopReason: "stop", + content: [], + api: model.api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + timestamp: Date.now(), + }; + + let reader: ReadableStreamDefaultReader | undefined; + + const abortHandler = () => { + if (reader) { + reader.cancel("Request aborted by user").catch(() => {}); + } + }; + + if (options.signal) { + options.signal.addEventListener("abort", abortHandler); + } + + try { + const response = await fetch(`${options.proxyUrl}/api/stream`, { + method: "POST", + headers: { + Authorization: `Bearer ${options.authToken}`, + "Content-Type": "application/json", + }, + body: JSON.stringify({ + model, + context, + options: { + temperature: options.temperature, + maxTokens: options.maxTokens, + reasoning: options.reasoning, + }, + }), + signal: options.signal, + }); + + if (!response.ok) { + let errorMessage = `Proxy error: ${response.status} ${response.statusText}`; + try { + const errorData = (await response.json()) as { error?: string }; + if (errorData.error) { + errorMessage = `Proxy error: ${errorData.error}`; + } + } catch { + // Couldn't parse error response + } + 
throw new Error(errorMessage); + } + + reader = response.body!.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + + if (options.signal?.aborted) { + throw new Error("Request aborted by user"); + } + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; + + for (const line of lines) { + if (line.startsWith("data: ")) { + const data = line.slice(6).trim(); + if (data) { + const proxyEvent = JSON.parse(data) as ProxyAssistantMessageEvent; + const event = processProxyEvent(proxyEvent, partial); + if (event) { + stream.push(event); + } + } + } + } + } + + if (options.signal?.aborted) { + throw new Error("Request aborted by user"); + } + + stream.end(); + } catch (error) { + const errorMessage = error instanceof Error ? error.message : String(error); + const reason = options.signal?.aborted ? "aborted" : "error"; + partial.stopReason = reason; + partial.errorMessage = errorMessage; + stream.push({ + type: "error", + reason, + error: partial, + }); + stream.end(); + } finally { + if (options.signal) { + options.signal.removeEventListener("abort", abortHandler); + } + } + })(); + + return stream; +} + +/** + * Process a proxy event and update the partial message. 
+ */ +function processProxyEvent( + proxyEvent: ProxyAssistantMessageEvent, + partial: AssistantMessage, +): AssistantMessageEvent | undefined { + switch (proxyEvent.type) { + case "start": + return { type: "start", partial }; + + case "text_start": + partial.content[proxyEvent.contentIndex] = { type: "text", text: "" }; + return { type: "text_start", contentIndex: proxyEvent.contentIndex, partial }; + + case "text_delta": { + const content = partial.content[proxyEvent.contentIndex]; + if (content?.type === "text") { + content.text += proxyEvent.delta; + return { + type: "text_delta", + contentIndex: proxyEvent.contentIndex, + delta: proxyEvent.delta, + partial, + }; + } + throw new Error("Received text_delta for non-text content"); + } + + case "text_end": { + const content = partial.content[proxyEvent.contentIndex]; + if (content?.type === "text") { + content.textSignature = proxyEvent.contentSignature; + return { + type: "text_end", + contentIndex: proxyEvent.contentIndex, + content: content.text, + partial, + }; + } + throw new Error("Received text_end for non-text content"); + } + + case "thinking_start": + partial.content[proxyEvent.contentIndex] = { type: "thinking", thinking: "" }; + return { type: "thinking_start", contentIndex: proxyEvent.contentIndex, partial }; + + case "thinking_delta": { + const content = partial.content[proxyEvent.contentIndex]; + if (content?.type === "thinking") { + content.thinking += proxyEvent.delta; + return { + type: "thinking_delta", + contentIndex: proxyEvent.contentIndex, + delta: proxyEvent.delta, + partial, + }; + } + throw new Error("Received thinking_delta for non-thinking content"); + } + + case "thinking_end": { + const content = partial.content[proxyEvent.contentIndex]; + if (content?.type === "thinking") { + content.thinkingSignature = proxyEvent.contentSignature; + return { + type: "thinking_end", + contentIndex: proxyEvent.contentIndex, + content: content.thinking, + partial, + }; + } + throw new Error("Received 
thinking_end for non-thinking content"); + } + + case "toolcall_start": + partial.content[proxyEvent.contentIndex] = { + type: "toolCall", + id: proxyEvent.id, + name: proxyEvent.toolName, + arguments: {}, + partialJson: "", + } satisfies ToolCall & { partialJson: string } as ToolCall; + return { type: "toolcall_start", contentIndex: proxyEvent.contentIndex, partial }; + + case "toolcall_delta": { + const content = partial.content[proxyEvent.contentIndex]; + if (content?.type === "toolCall") { + (content as any).partialJson += proxyEvent.delta; + content.arguments = parseStreamingJson((content as any).partialJson) || {}; + partial.content[proxyEvent.contentIndex] = { ...content }; // Trigger reactivity + return { + type: "toolcall_delta", + contentIndex: proxyEvent.contentIndex, + delta: proxyEvent.delta, + partial, + }; + } + throw new Error("Received toolcall_delta for non-toolCall content"); + } + + case "toolcall_end": { + const content = partial.content[proxyEvent.contentIndex]; + if (content?.type === "toolCall") { + delete (content as any).partialJson; + return { + type: "toolcall_end", + contentIndex: proxyEvent.contentIndex, + toolCall: content, + partial, + }; + } + return undefined; + } + + case "done": + partial.stopReason = proxyEvent.reason; + partial.usage = proxyEvent.usage; + return { type: "done", reason: proxyEvent.reason, message: partial }; + + case "error": + partial.stopReason = proxyEvent.reason; + partial.errorMessage = proxyEvent.errorMessage; + partial.usage = proxyEvent.usage; + return { type: "error", reason: proxyEvent.reason, error: partial }; + + default: { + const _exhaustiveCheck: never = proxyEvent; + console.warn(`Unhandled proxy event type: ${(proxyEvent as any).type}`); + return undefined; + } + } +} diff --git a/packages/pi-agent-core/src/types.ts b/packages/pi-agent-core/src/types.ts new file mode 100644 index 000000000..a1d5a0d4b --- /dev/null +++ b/packages/pi-agent-core/src/types.ts @@ -0,0 +1,194 @@ +import type { + 
AssistantMessageEvent, + ImageContent, + Message, + Model, + SimpleStreamOptions, + streamSimple, + TextContent, + Tool, + ToolResultMessage, +} from "@gsd/pi-ai"; +import type { Static, TSchema } from "@sinclair/typebox"; + +/** Stream function - can return sync or Promise for async config lookup */ +export type StreamFn = ( + ...args: Parameters +) => ReturnType | Promise>; + +/** + * Configuration for the agent loop. + */ +export interface AgentLoopConfig extends SimpleStreamOptions { + model: Model; + + /** + * Converts AgentMessage[] to LLM-compatible Message[] before each LLM call. + * + * Each AgentMessage must be converted to a UserMessage, AssistantMessage, or ToolResultMessage + * that the LLM can understand. AgentMessages that cannot be converted (e.g., UI-only notifications, + * status messages) should be filtered out. + * + * @example + * ```typescript + * convertToLlm: (messages) => messages.flatMap(m => { + * if (m.role === "custom") { + * // Convert custom message to user message + * return [{ role: "user", content: m.content, timestamp: m.timestamp }]; + * } + * if (m.role === "notification") { + * // Filter out UI-only messages + * return []; + * } + * // Pass through standard LLM messages + * return [m]; + * }) + * ``` + */ + convertToLlm: (messages: AgentMessage[]) => Message[] | Promise; + + /** + * Optional transform applied to the context before `convertToLlm`. + * + * Use this for operations that work at the AgentMessage level: + * - Context window management (pruning old messages) + * - Injecting context from external sources + * + * @example + * ```typescript + * transformContext: async (messages) => { + * if (estimateTokens(messages) > MAX_TOKENS) { + * return pruneOldMessages(messages); + * } + * return messages; + * } + * ``` + */ + transformContext?: (messages: AgentMessage[], signal?: AbortSignal) => Promise; + + /** + * Resolves an API key dynamically for each LLM call. 
+ * + * Useful for short-lived OAuth tokens (e.g., GitHub Copilot) that may expire + * during long-running tool execution phases. + */ + getApiKey?: (provider: string) => Promise | string | undefined; + + /** + * Returns steering messages to inject into the conversation mid-run. + * + * Called after each tool execution to check for user interruptions. + * If messages are returned, remaining tool calls are skipped and + * these messages are added to the context before the next LLM call. + * + * Use this for "steering" the agent while it's working. + */ + getSteeringMessages?: () => Promise; + + /** + * Returns follow-up messages to process after the agent would otherwise stop. + * + * Called when the agent has no more tool calls and no steering messages. + * If messages are returned, they're added to the context and the agent + * continues with another turn. + * + * Use this for follow-up messages that should wait until the agent finishes. + */ + getFollowUpMessages?: () => Promise; +} + +/** + * Thinking/reasoning level for models that support it. + * Note: "xhigh" is only supported by OpenAI gpt-5.1-codex-max, gpt-5.2, gpt-5.2-codex, gpt-5.3, and gpt-5.3-codex models. + */ +export type ThinkingLevel = "off" | "minimal" | "low" | "medium" | "high" | "xhigh"; + +/** + * Extensible interface for custom app messages. + * Apps can extend via declaration merging: + * + * @example + * ```typescript + * declare module "@mariozechner/agent" { + * interface CustomAgentMessages { + * artifact: ArtifactMessage; + * notification: NotificationMessage; + * } + * } + * ``` + */ +export interface CustomAgentMessages { + // Empty by default - apps extend via declaration merging +} + +/** + * AgentMessage: Union of LLM messages + custom messages. + * This abstraction allows apps to add custom message types while maintaining + * type safety and compatibility with the base LLM messages. 
+ */ +export type AgentMessage = Message | CustomAgentMessages[keyof CustomAgentMessages]; + +/** + * Agent state containing all configuration and conversation data. + */ +export interface AgentState { + systemPrompt: string; + model: Model; + thinkingLevel: ThinkingLevel; + tools: AgentTool[]; + messages: AgentMessage[]; // Can include attachments + custom message types + isStreaming: boolean; + streamMessage: AgentMessage | null; + pendingToolCalls: Set; + error?: string; +} + +export interface AgentToolResult { + // Content blocks supporting text and images + content: (TextContent | ImageContent)[]; + // Details to be displayed in a UI or logged + details: T; +} + +// Callback for streaming tool execution updates +export type AgentToolUpdateCallback = (partialResult: AgentToolResult) => void; + +// AgentTool extends Tool but adds the execute function +export interface AgentTool extends Tool { + // A human-readable label for the tool to be displayed in UI + label: string; + execute: ( + toolCallId: string, + params: Static, + signal?: AbortSignal, + onUpdate?: AgentToolUpdateCallback, + ) => Promise>; +} + +// AgentContext is like Context but uses AgentTool +export interface AgentContext { + systemPrompt: string; + messages: AgentMessage[]; + tools?: AgentTool[]; +} + +/** + * Events emitted by the Agent for UI updates. + * These events provide fine-grained lifecycle information for messages, turns, and tool executions. 
+ */ +export type AgentEvent = + // Agent lifecycle + | { type: "agent_start" } + | { type: "agent_end"; messages: AgentMessage[] } + // Turn lifecycle - a turn is one assistant response + any tool calls/results + | { type: "turn_start" } + | { type: "turn_end"; message: AgentMessage; toolResults: ToolResultMessage[] } + // Message lifecycle - emitted for user, assistant, and toolResult messages + | { type: "message_start"; message: AgentMessage } + // Only emitted for assistant messages during streaming + | { type: "message_update"; message: AgentMessage; assistantMessageEvent: AssistantMessageEvent } + | { type: "message_end"; message: AgentMessage } + // Tool execution lifecycle + | { type: "tool_execution_start"; toolCallId: string; toolName: string; args: any } + | { type: "tool_execution_update"; toolCallId: string; toolName: string; args: any; partialResult: any } + | { type: "tool_execution_end"; toolCallId: string; toolName: string; result: any; isError: boolean }; diff --git a/packages/pi-agent-core/tsconfig.json b/packages/pi-agent-core/tsconfig.json new file mode 100644 index 000000000..6f6331d49 --- /dev/null +++ b/packages/pi-agent-core/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + "target": "ES2024", + "module": "Node16", + "lib": ["ES2024"], + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "inlineSources": true, + "inlineSourceMap": false, + "moduleResolution": "Node16", + "resolveJsonModule": true, + "allowImportingTsExtensions": false, + "experimentalDecorators": true, + "emitDecoratorMetadata": true, + "useDefineForClassFields": false, + "types": ["node"], + "outDir": "./dist", + "rootDir": "./src" + }, + "include": ["src/**/*.ts"], + "exclude": ["node_modules", "dist", "**/*.d.ts", "src/**/*.d.ts"] +} diff --git a/packages/pi-ai/bedrock-provider.d.ts b/packages/pi-ai/bedrock-provider.d.ts new file 
mode 100644 index 000000000..a66eabee6 --- /dev/null +++ b/packages/pi-ai/bedrock-provider.d.ts @@ -0,0 +1 @@ +export * from "./dist/bedrock-provider.js"; diff --git a/packages/pi-ai/bedrock-provider.js b/packages/pi-ai/bedrock-provider.js new file mode 100644 index 000000000..a66eabee6 --- /dev/null +++ b/packages/pi-ai/bedrock-provider.js @@ -0,0 +1 @@ +export * from "./dist/bedrock-provider.js"; diff --git a/packages/pi-ai/package.json b/packages/pi-ai/package.json new file mode 100644 index 000000000..fa4bdcf8e --- /dev/null +++ b/packages/pi-ai/package.json @@ -0,0 +1,40 @@ +{ + "name": "@gsd/pi-ai", + "version": "0.57.1", + "description": "Unified LLM API (vendored from pi-mono)", + "type": "module", + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js" + }, + "./oauth": { + "types": "./dist/oauth.d.ts", + "import": "./dist/oauth.js" + }, + "./bedrock-provider": { + "types": "./bedrock-provider.d.ts", + "import": "./bedrock-provider.js" + } + }, + "scripts": { + "build": "tsc -p tsconfig.json" + }, + "dependencies": { + "@anthropic-ai/sdk": "^0.73.0", + "@aws-sdk/client-bedrock-runtime": "^3.983.0", + "@google/genai": "^1.40.0", + "@mistralai/mistralai": "1.14.1", + "@sinclair/typebox": "^0.34.41", + "ajv": "^8.17.1", + "ajv-formats": "^3.0.1", + "chalk": "^5.6.2", + "openai": "6.26.0", + "partial-json": "^0.1.7", + "proxy-agent": "^6.5.0", + "undici": "^7.19.1", + "zod-to-json-schema": "^3.24.6" + } +} diff --git a/packages/pi-ai/src/api-registry.ts b/packages/pi-ai/src/api-registry.ts new file mode 100644 index 000000000..ca0e06107 --- /dev/null +++ b/packages/pi-ai/src/api-registry.ts @@ -0,0 +1,98 @@ +import type { + Api, + AssistantMessageEventStream, + Context, + Model, + SimpleStreamOptions, + StreamFunction, + StreamOptions, +} from "./types.js"; + +export type ApiStreamFunction = ( + model: Model, + context: Context, + options?: StreamOptions, +) => 
AssistantMessageEventStream; + +export type ApiStreamSimpleFunction = ( + model: Model, + context: Context, + options?: SimpleStreamOptions, +) => AssistantMessageEventStream; + +export interface ApiProvider { + api: TApi; + stream: StreamFunction; + streamSimple: StreamFunction; +} + +interface ApiProviderInternal { + api: Api; + stream: ApiStreamFunction; + streamSimple: ApiStreamSimpleFunction; +} + +type RegisteredApiProvider = { + provider: ApiProviderInternal; + sourceId?: string; +}; + +const apiProviderRegistry = new Map(); + +function wrapStream( + api: TApi, + stream: StreamFunction, +): ApiStreamFunction { + return (model, context, options) => { + if (model.api !== api) { + throw new Error(`Mismatched api: ${model.api} expected ${api}`); + } + return stream(model as Model, context, options as TOptions); + }; +} + +function wrapStreamSimple( + api: TApi, + streamSimple: StreamFunction, +): ApiStreamSimpleFunction { + return (model, context, options) => { + if (model.api !== api) { + throw new Error(`Mismatched api: ${model.api} expected ${api}`); + } + return streamSimple(model as Model, context, options); + }; +} + +export function registerApiProvider( + provider: ApiProvider, + sourceId?: string, +): void { + apiProviderRegistry.set(provider.api, { + provider: { + api: provider.api, + stream: wrapStream(provider.api, provider.stream), + streamSimple: wrapStreamSimple(provider.api, provider.streamSimple), + }, + sourceId, + }); +} + +export function getApiProvider(api: Api): ApiProviderInternal | undefined { + return apiProviderRegistry.get(api)?.provider; +} + +export function getApiProviders(): ApiProviderInternal[] { + return Array.from(apiProviderRegistry.values(), (entry) => entry.provider); +} + +export function unregisterApiProviders(sourceId: string): void { + for (const [api, entry] of apiProviderRegistry.entries()) { + if (entry.sourceId === sourceId) { + apiProviderRegistry.delete(api); + } + } +} + +export function clearApiProviders(): void { 
+ apiProviderRegistry.clear(); +} diff --git a/packages/pi-ai/src/bedrock-provider.ts b/packages/pi-ai/src/bedrock-provider.ts new file mode 100644 index 000000000..073054189 --- /dev/null +++ b/packages/pi-ai/src/bedrock-provider.ts @@ -0,0 +1,6 @@ +import { streamBedrock, streamSimpleBedrock } from "./providers/amazon-bedrock.js"; + +export const bedrockProviderModule = { + streamBedrock, + streamSimpleBedrock, +}; diff --git a/packages/pi-ai/src/cli.ts b/packages/pi-ai/src/cli.ts new file mode 100644 index 000000000..71dbfc3fd --- /dev/null +++ b/packages/pi-ai/src/cli.ts @@ -0,0 +1,133 @@ +#!/usr/bin/env node + +import { existsSync, readFileSync, writeFileSync } from "fs"; +import { createInterface } from "readline"; +import { getOAuthProvider, getOAuthProviders } from "./utils/oauth/index.js"; +import type { OAuthCredentials, OAuthProviderId } from "./utils/oauth/types.js"; + +const AUTH_FILE = "auth.json"; +const PROVIDERS = getOAuthProviders(); + +function prompt(rl: ReturnType, question: string): Promise { + return new Promise((resolve) => rl.question(question, resolve)); +} + +function loadAuth(): Record { + if (!existsSync(AUTH_FILE)) return {}; + try { + return JSON.parse(readFileSync(AUTH_FILE, "utf-8")); + } catch { + return {}; + } +} + +function saveAuth(auth: Record): void { + writeFileSync(AUTH_FILE, JSON.stringify(auth, null, 2), "utf-8"); +} + +async function login(providerId: OAuthProviderId): Promise { + const provider = getOAuthProvider(providerId); + if (!provider) { + console.error(`Unknown provider: ${providerId}`); + process.exit(1); + } + + const rl = createInterface({ input: process.stdin, output: process.stdout }); + const promptFn = (msg: string) => prompt(rl, `${msg} `); + + try { + const credentials = await provider.login({ + onAuth: (info) => { + console.log(`\nOpen this URL in your browser:\n${info.url}`); + if (info.instructions) console.log(info.instructions); + console.log(); + }, + onPrompt: async (p) => { + return await 
promptFn(`${p.message}${p.placeholder ? ` (${p.placeholder})` : ""}:`); + }, + onProgress: (msg) => console.log(msg), + }); + + const auth = loadAuth(); + auth[providerId] = { type: "oauth", ...credentials }; + saveAuth(auth); + + console.log(`\nCredentials saved to ${AUTH_FILE}`); + } finally { + rl.close(); + } +} + +async function main(): Promise { + const args = process.argv.slice(2); + const command = args[0]; + + if (!command || command === "help" || command === "--help" || command === "-h") { + const providerList = PROVIDERS.map((p) => ` ${p.id.padEnd(20)} ${p.name}`).join("\n"); + console.log(`Usage: npx @gsd/pi-ai [provider] + +Commands: + login [provider] Login to an OAuth provider + list List available providers + +Providers: +${providerList} + +Examples: + npx @gsd/pi-ai login # interactive provider selection + npx @gsd/pi-ai login anthropic # login to specific provider + npx @gsd/pi-ai list # list providers +`); + return; + } + + if (command === "list") { + console.log("Available OAuth providers:\n"); + for (const p of PROVIDERS) { + console.log(` ${p.id.padEnd(20)} ${p.name}`); + } + return; + } + + if (command === "login") { + let provider = args[1] as OAuthProviderId | undefined; + + if (!provider) { + const rl = createInterface({ input: process.stdin, output: process.stdout }); + console.log("Select a provider:\n"); + for (let i = 0; i < PROVIDERS.length; i++) { + console.log(` ${i + 1}. 
${PROVIDERS[i].name}`); + } + console.log(); + + const choice = await prompt(rl, `Enter number (1-${PROVIDERS.length}): `); + rl.close(); + + const index = parseInt(choice, 10) - 1; + if (index < 0 || index >= PROVIDERS.length) { + console.error("Invalid selection"); + process.exit(1); + } + provider = PROVIDERS[index].id; + } + + if (!PROVIDERS.some((p) => p.id === provider)) { + console.error(`Unknown provider: ${provider}`); + console.error(`Use 'npx @gsd/pi-ai list' to see available providers`); + process.exit(1); + } + + console.log(`Logging in to ${provider}...`); + await login(provider); + return; + } + + console.error(`Unknown command: ${command}`); + console.error(`Use 'npx @gsd/pi-ai --help' for usage`); + process.exit(1); +} + +main().catch((err) => { + console.error("Error:", err.message); + process.exit(1); +}); diff --git a/packages/pi-ai/src/env-api-keys.ts b/packages/pi-ai/src/env-api-keys.ts new file mode 100644 index 000000000..749e074f9 --- /dev/null +++ b/packages/pi-ai/src/env-api-keys.ts @@ -0,0 +1,129 @@ +// NEVER convert to top-level imports - breaks browser/Vite builds (web-ui) +let _existsSync: typeof import("node:fs").existsSync | null = null; +let _homedir: typeof import("node:os").homedir | null = null; +let _join: typeof import("node:path").join | null = null; + +type DynamicImport = (specifier: string) => Promise; + +const dynamicImport: DynamicImport = (specifier) => import(specifier); +const NODE_FS_SPECIFIER = "node:" + "fs"; +const NODE_OS_SPECIFIER = "node:" + "os"; +const NODE_PATH_SPECIFIER = "node:" + "path"; + +// Eagerly load in Node.js/Bun environment only +if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) { + dynamicImport(NODE_FS_SPECIFIER).then((m) => { + _existsSync = (m as typeof import("node:fs")).existsSync; + }); + dynamicImport(NODE_OS_SPECIFIER).then((m) => { + _homedir = (m as typeof import("node:os")).homedir; + }); + dynamicImport(NODE_PATH_SPECIFIER).then((m) => { + _join 
= (m as typeof import("node:path")).join; + }); +} + +import type { KnownProvider } from "./types.js"; + +let cachedVertexAdcCredentialsExists: boolean | null = null; + +function hasVertexAdcCredentials(): boolean { + if (cachedVertexAdcCredentialsExists === null) { + // If node modules haven't loaded yet (async import race at startup), + // return false WITHOUT caching so the next call retries once they're ready. + // Only cache false permanently in a browser environment where fs is never available. + if (!_existsSync || !_homedir || !_join) { + const isNode = typeof process !== "undefined" && (process.versions?.node || process.versions?.bun); + if (!isNode) { + // Definitively in a browser — safe to cache false permanently + cachedVertexAdcCredentialsExists = false; + } + return false; + } + + // Check GOOGLE_APPLICATION_CREDENTIALS env var first (standard way) + const gacPath = process.env.GOOGLE_APPLICATION_CREDENTIALS; + if (gacPath) { + cachedVertexAdcCredentialsExists = _existsSync(gacPath); + } else { + // Fall back to default ADC path (lazy evaluation) + cachedVertexAdcCredentialsExists = _existsSync( + _join(_homedir(), ".config", "gcloud", "application_default_credentials.json"), + ); + } + } + return cachedVertexAdcCredentialsExists; +} + +/** + * Get API key for provider from known environment variables, e.g. OPENAI_API_KEY. + * + * Will not return API keys for providers that require OAuth tokens. 
+ */ +export function getEnvApiKey(provider: KnownProvider): string | undefined; +export function getEnvApiKey(provider: string): string | undefined; +export function getEnvApiKey(provider: any): string | undefined { + // Fall back to environment variables + if (provider === "github-copilot") { + return process.env.COPILOT_GITHUB_TOKEN || process.env.GH_TOKEN || process.env.GITHUB_TOKEN; + } + + // ANTHROPIC_OAUTH_TOKEN takes precedence over ANTHROPIC_API_KEY + if (provider === "anthropic") { + return process.env.ANTHROPIC_OAUTH_TOKEN || process.env.ANTHROPIC_API_KEY; + } + + // Vertex AI uses Application Default Credentials, not API keys. + // Auth is configured via `gcloud auth application-default login`. + if (provider === "google-vertex") { + const hasCredentials = hasVertexAdcCredentials(); + const hasProject = !!(process.env.GOOGLE_CLOUD_PROJECT || process.env.GCLOUD_PROJECT); + const hasLocation = !!process.env.GOOGLE_CLOUD_LOCATION; + + if (hasCredentials && hasProject && hasLocation) { + return ""; + } + } + + if (provider === "amazon-bedrock") { + // Amazon Bedrock supports multiple credential sources: + // 1. AWS_PROFILE - named profile from ~/.aws/credentials + // 2. AWS_ACCESS_KEY_ID + AWS_SECRET_ACCESS_KEY - standard IAM keys + // 3. AWS_BEARER_TOKEN_BEDROCK - Bedrock API keys (bearer token) + // 4. AWS_CONTAINER_CREDENTIALS_RELATIVE_URI - ECS task roles + // 5. AWS_CONTAINER_CREDENTIALS_FULL_URI - ECS task roles (full URI) + // 6. 
AWS_WEB_IDENTITY_TOKEN_FILE - IRSA (IAM Roles for Service Accounts) + if ( + process.env.AWS_PROFILE || + (process.env.AWS_ACCESS_KEY_ID && process.env.AWS_SECRET_ACCESS_KEY) || + process.env.AWS_BEARER_TOKEN_BEDROCK || + process.env.AWS_CONTAINER_CREDENTIALS_RELATIVE_URI || + process.env.AWS_CONTAINER_CREDENTIALS_FULL_URI || + process.env.AWS_WEB_IDENTITY_TOKEN_FILE + ) { + return ""; + } + } + + const envMap: Record = { + openai: "OPENAI_API_KEY", + "azure-openai-responses": "AZURE_OPENAI_API_KEY", + google: "GEMINI_API_KEY", + groq: "GROQ_API_KEY", + cerebras: "CEREBRAS_API_KEY", + xai: "XAI_API_KEY", + openrouter: "OPENROUTER_API_KEY", + "vercel-ai-gateway": "AI_GATEWAY_API_KEY", + zai: "ZAI_API_KEY", + mistral: "MISTRAL_API_KEY", + minimax: "MINIMAX_API_KEY", + "minimax-cn": "MINIMAX_CN_API_KEY", + huggingface: "HF_TOKEN", + opencode: "OPENCODE_API_KEY", + "opencode-go": "OPENCODE_API_KEY", + "kimi-coding": "KIMI_API_KEY", + }; + + const envVar = envMap[provider]; + return envVar ? 
process.env[envVar] : undefined; +} diff --git a/packages/pi-ai/src/index.ts b/packages/pi-ai/src/index.ts new file mode 100644 index 000000000..512451c2d --- /dev/null +++ b/packages/pi-ai/src/index.ts @@ -0,0 +1,32 @@ +export type { Static, TSchema } from "@sinclair/typebox"; +export { Type } from "@sinclair/typebox"; + +export * from "./api-registry.js"; +export * from "./env-api-keys.js"; +export * from "./models.js"; +export * from "./providers/anthropic.js"; +export * from "./providers/azure-openai-responses.js"; +export * from "./providers/google.js"; +export * from "./providers/google-gemini-cli.js"; +export * from "./providers/google-vertex.js"; +export * from "./providers/mistral.js"; +export * from "./providers/openai-completions.js"; +export * from "./providers/openai-responses.js"; +export * from "./providers/register-builtins.js"; +export * from "./stream.js"; +export * from "./types.js"; +export * from "./utils/event-stream.js"; +export * from "./utils/json-parse.js"; +export type { + OAuthAuthInfo, + OAuthCredentials, + OAuthLoginCallbacks, + OAuthPrompt, + OAuthProvider, + OAuthProviderId, + OAuthProviderInfo, + OAuthProviderInterface, +} from "./utils/oauth/types.js"; +export * from "./utils/overflow.js"; +export * from "./utils/typebox-helpers.js"; +export * from "./utils/validation.js"; diff --git a/packages/pi-ai/src/models.generated.ts b/packages/pi-ai/src/models.generated.ts new file mode 100644 index 000000000..63cf92d3d --- /dev/null +++ b/packages/pi-ai/src/models.generated.ts @@ -0,0 +1,13370 @@ +// This file is auto-generated by scripts/generate-models.ts +// Do not edit manually - run 'npm run generate-models' to update + +import type { Model } from "./types.js"; + +export const MODELS = { + "amazon-bedrock": { + "amazon.nova-2-lite-v1:0": { + id: "amazon.nova-2-lite-v1:0", + name: "Nova 2 Lite", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: 
false, + input: ["text", "image"], + cost: { + input: 0.33, + output: 2.75, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "amazon.nova-lite-v1:0": { + id: "amazon.nova-lite-v1:0", + name: "Nova Lite", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.06, + output: 0.24, + cacheRead: 0.015, + cacheWrite: 0, + }, + contextWindow: 300000, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "amazon.nova-micro-v1:0": { + id: "amazon.nova-micro-v1:0", + name: "Nova Micro", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.035, + output: 0.14, + cacheRead: 0.00875, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "amazon.nova-premier-v1:0": { + id: "amazon.nova-premier-v1:0", + name: "Nova Premier", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2.5, + output: 12.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 16384, + } satisfies Model<"bedrock-converse-stream">, + "amazon.nova-pro-v1:0": { + id: "amazon.nova-pro-v1:0", + name: "Nova Pro", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.8, + output: 3.2, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 300000, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "amazon.titan-text-express-v1": { + id: "amazon.titan-text-express-v1", + 
name: "Titan Text G1 - Express", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.2, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "amazon.titan-text-express-v1:0:8k": { + id: "amazon.titan-text-express-v1:0:8k", + name: "Titan Text G1 - Express", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.2, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-3-5-haiku-20241022-v1:0": { + id: "anthropic.claude-3-5-haiku-20241022-v1:0", + name: "Claude Haiku 3.5", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.8, + output: 4, + cacheRead: 0.08, + cacheWrite: 1, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-3-5-sonnet-20240620-v1:0": { + id: "anthropic.claude-3-5-sonnet-20240620-v1:0", + name: "Claude Sonnet 3.5", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-3-5-sonnet-20241022-v2:0": { + id: "anthropic.claude-3-5-sonnet-20241022-v2:0", + name: "Claude Sonnet 3.5 v2", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: 
"https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-3-7-sonnet-20250219-v1:0": { + id: "anthropic.claude-3-7-sonnet-20250219-v1:0", + name: "Claude Sonnet 3.7", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-3-haiku-20240307-v1:0": { + id: "anthropic.claude-3-haiku-20240307-v1:0", + name: "Claude Haiku 3", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.25, + output: 1.25, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-3-opus-20240229-v1:0": { + id: "anthropic.claude-3-opus-20240229-v1:0", + name: "Claude Opus 3", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-3-sonnet-20240229-v1:0": { + id: "anthropic.claude-3-sonnet-20240229-v1:0", + name: "Claude Sonnet 3", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, 
+ cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-haiku-4-5-20251001-v1:0": { + id: "anthropic.claude-haiku-4-5-20251001-v1:0", + name: "Claude Haiku 4.5", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1, + output: 5, + cacheRead: 0.1, + cacheWrite: 1.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-opus-4-1-20250805-v1:0": { + id: "anthropic.claude-opus-4-1-20250805-v1:0", + name: "Claude Opus 4.1", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-opus-4-20250514-v1:0": { + id: "anthropic.claude-opus-4-20250514-v1:0", + name: "Claude Opus 4", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-opus-4-5-20251101-v1:0": { + id: "anthropic.claude-opus-4-5-20251101-v1:0", + name: "Claude Opus 4.5", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies 
Model<"bedrock-converse-stream">, + "anthropic.claude-opus-4-6-v1": { + id: "anthropic.claude-opus-4-6-v1", + name: "Claude Opus 4.6", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-sonnet-4-20250514-v1:0": { + id: "anthropic.claude-sonnet-4-20250514-v1:0", + name: "Claude Sonnet 4", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-sonnet-4-5-20250929-v1:0": { + id: "anthropic.claude-sonnet-4-5-20250929-v1:0", + name: "Claude Sonnet 4.5", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "anthropic.claude-sonnet-4-6": { + id: "anthropic.claude-sonnet-4-6", + name: "Claude Sonnet 4.6", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "cohere.command-r-plus-v1:0": { + id: "cohere.command-r-plus-v1:0", + name: "Command R+", + api: "bedrock-converse-stream", + 
provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 3, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "cohere.command-r-v1:0": { + id: "cohere.command-r-v1:0", + name: "Command R", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.5, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "deepseek.r1-v1:0": { + id: "deepseek.r1-v1:0", + name: "DeepSeek-R1", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 1.35, + output: 5.4, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32768, + } satisfies Model<"bedrock-converse-stream">, + "deepseek.v3-v1:0": { + id: "deepseek.v3-v1:0", + name: "DeepSeek-V3.1", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 0.58, + output: 1.68, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 81920, + } satisfies Model<"bedrock-converse-stream">, + "deepseek.v3.2-v1:0": { + id: "deepseek.v3.2-v1:0", + name: "DeepSeek-V3.2", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 0.62, + output: 1.85, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 81920, + } satisfies Model<"bedrock-converse-stream">, + "eu.anthropic.claude-haiku-4-5-20251001-v1:0": { + 
id: "eu.anthropic.claude-haiku-4-5-20251001-v1:0", + name: "Claude Haiku 4.5 (EU)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1, + output: 5, + cacheRead: 0.1, + cacheWrite: 1.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "eu.anthropic.claude-opus-4-5-20251101-v1:0": { + id: "eu.anthropic.claude-opus-4-5-20251101-v1:0", + name: "Claude Opus 4.5 (EU)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "eu.anthropic.claude-opus-4-6-v1": { + id: "eu.anthropic.claude-opus-4-6-v1", + name: "Claude Opus 4.6 (EU)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"bedrock-converse-stream">, + "eu.anthropic.claude-sonnet-4-20250514-v1:0": { + id: "eu.anthropic.claude-sonnet-4-20250514-v1:0", + name: "Claude Sonnet 4 (EU)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "eu.anthropic.claude-sonnet-4-5-20250929-v1:0": { + id: "eu.anthropic.claude-sonnet-4-5-20250929-v1:0", + name: "Claude Sonnet 4.5 (EU)", + api: 
"bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "eu.anthropic.claude-sonnet-4-6": { + id: "eu.anthropic.claude-sonnet-4-6", + name: "Claude Sonnet 4.6 (EU)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "global.anthropic.claude-haiku-4-5-20251001-v1:0": { + id: "global.anthropic.claude-haiku-4-5-20251001-v1:0", + name: "Claude Haiku 4.5 (Global)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1, + output: 5, + cacheRead: 0.1, + cacheWrite: 1.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "global.anthropic.claude-opus-4-5-20251101-v1:0": { + id: "global.anthropic.claude-opus-4-5-20251101-v1:0", + name: "Claude Opus 4.5 (Global)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "global.anthropic.claude-opus-4-6-v1": { + id: "global.anthropic.claude-opus-4-6-v1", + name: "Claude Opus 4.6 (Global)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: 
"https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"bedrock-converse-stream">, + "global.anthropic.claude-sonnet-4-20250514-v1:0": { + id: "global.anthropic.claude-sonnet-4-20250514-v1:0", + name: "Claude Sonnet 4 (Global)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "global.anthropic.claude-sonnet-4-5-20250929-v1:0": { + id: "global.anthropic.claude-sonnet-4-5-20250929-v1:0", + name: "Claude Sonnet 4.5 (Global)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "global.anthropic.claude-sonnet-4-6": { + id: "global.anthropic.claude-sonnet-4-6", + name: "Claude Sonnet 4.6 (Global)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "google.gemma-3-27b-it": { + id: "google.gemma-3-27b-it", + name: "Google Gemma 3 27B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", 
"image"], + cost: { + input: 0.12, + output: 0.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 202752, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "google.gemma-3-4b-it": { + id: "google.gemma-3-4b-it", + name: "Gemma 3 4B IT", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.04, + output: 0.08, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "meta.llama3-1-70b-instruct-v1:0": { + id: "meta.llama3-1-70b-instruct-v1:0", + name: "Llama 3.1 70B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.72, + output: 0.72, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "meta.llama3-1-8b-instruct-v1:0": { + id: "meta.llama3-1-8b-instruct-v1:0", + name: "Llama 3.1 8B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.22, + output: 0.22, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "meta.llama3-2-11b-instruct-v1:0": { + id: "meta.llama3-2-11b-instruct-v1:0", + name: "Llama 3.2 11B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.16, + output: 0.16, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "meta.llama3-2-1b-instruct-v1:0": 
{ + id: "meta.llama3-2-1b-instruct-v1:0", + name: "Llama 3.2 1B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.1, + output: 0.1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "meta.llama3-2-3b-instruct-v1:0": { + id: "meta.llama3-2-3b-instruct-v1:0", + name: "Llama 3.2 3B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "meta.llama3-2-90b-instruct-v1:0": { + id: "meta.llama3-2-90b-instruct-v1:0", + name: "Llama 3.2 90B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.72, + output: 0.72, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "meta.llama3-3-70b-instruct-v1:0": { + id: "meta.llama3-3-70b-instruct-v1:0", + name: "Llama 3.3 70B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.72, + output: 0.72, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "meta.llama4-maverick-17b-instruct-v1:0": { + id: "meta.llama4-maverick-17b-instruct-v1:0", + name: "Llama 4 Maverick 17B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: 
"https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.24, + output: 0.97, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 16384, + } satisfies Model<"bedrock-converse-stream">, + "meta.llama4-scout-17b-instruct-v1:0": { + id: "meta.llama4-scout-17b-instruct-v1:0", + name: "Llama 4 Scout 17B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.17, + output: 0.66, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 3500000, + maxTokens: 16384, + } satisfies Model<"bedrock-converse-stream">, + "minimax.minimax-m2": { + id: "minimax.minimax-m2", + name: "MiniMax M2", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 204608, + maxTokens: 128000, + } satisfies Model<"bedrock-converse-stream">, + "minimax.minimax-m2.1": { + id: "minimax.minimax-m2.1", + name: "MiniMax M2.1", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"bedrock-converse-stream">, + "mistral.ministral-3-14b-instruct": { + id: "mistral.ministral-3-14b-instruct", + name: "Ministral 14B 3.0", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.2, + output: 0.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies 
Model<"bedrock-converse-stream">, + "mistral.ministral-3-8b-instruct": { + id: "mistral.ministral-3-8b-instruct", + name: "Ministral 3 8B", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "mistral.mistral-large-2402-v1:0": { + id: "mistral.mistral-large-2402-v1:0", + name: "Mistral Large (24.02)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.5, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "mistral.voxtral-mini-3b-2507": { + id: "mistral.voxtral-mini-3b-2507", + name: "Voxtral Mini 3B 2507", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.04, + output: 0.04, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "mistral.voxtral-small-24b-2507": { + id: "mistral.voxtral-small-24b-2507", + name: "Voxtral Small 24B 2507", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.35, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32000, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "moonshot.kimi-k2-thinking": { + id: "moonshot.kimi-k2-thinking", + name: "Kimi K2 Thinking", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: 
"https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"bedrock-converse-stream">, + "moonshotai.kimi-k2.5": { + id: "moonshotai.kimi-k2.5", + name: "Kimi K2.5", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.6, + output: 3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"bedrock-converse-stream">, + "nvidia.nemotron-nano-12b-v2": { + id: "nvidia.nemotron-nano-12b-v2", + name: "NVIDIA Nemotron Nano 12B v2 VL BF16", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.2, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "nvidia.nemotron-nano-9b-v2": { + id: "nvidia.nemotron-nano-9b-v2", + name: "NVIDIA Nemotron Nano 9B v2", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.06, + output: 0.23, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "openai.gpt-oss-120b-1:0": { + id: "openai.gpt-oss-120b-1:0", + name: "gpt-oss-120b", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies 
Model<"bedrock-converse-stream">, + "openai.gpt-oss-20b-1:0": { + id: "openai.gpt-oss-20b-1:0", + name: "gpt-oss-20b", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.07, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "openai.gpt-oss-safeguard-120b": { + id: "openai.gpt-oss-safeguard-120b", + name: "GPT OSS Safeguard 120B", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "openai.gpt-oss-safeguard-20b": { + id: "openai.gpt-oss-safeguard-20b", + name: "GPT OSS Safeguard 20B", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.07, + output: 0.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"bedrock-converse-stream">, + "qwen.qwen3-235b-a22b-2507-v1:0": { + id: "qwen.qwen3-235b-a22b-2507-v1:0", + name: "Qwen3 235B A22B 2507", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.22, + output: 0.88, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 131072, + } satisfies Model<"bedrock-converse-stream">, + "qwen.qwen3-32b-v1:0": { + id: "qwen.qwen3-32b-v1:0", + name: "Qwen3 32B (dense)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", 
+ reasoning: true, + input: ["text"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 16384, + maxTokens: 16384, + } satisfies Model<"bedrock-converse-stream">, + "qwen.qwen3-coder-30b-a3b-v1:0": { + id: "qwen.qwen3-coder-30b-a3b-v1:0", + name: "Qwen3 Coder 30B A3B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 131072, + } satisfies Model<"bedrock-converse-stream">, + "qwen.qwen3-coder-480b-a35b-v1:0": { + id: "qwen.qwen3-coder-480b-a35b-v1:0", + name: "Qwen3 Coder 480B A35B Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.22, + output: 1.8, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 65536, + } satisfies Model<"bedrock-converse-stream">, + "qwen.qwen3-next-80b-a3b": { + id: "qwen.qwen3-next-80b-a3b", + name: "Qwen/Qwen3-Next-80B-A3B-Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.14, + output: 1.4, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262000, + maxTokens: 262000, + } satisfies Model<"bedrock-converse-stream">, + "qwen.qwen3-vl-235b-a22b": { + id: "qwen.qwen3-vl-235b-a22b", + name: "Qwen/Qwen3-VL-235B-A22B-Instruct", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.3, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262000, + maxTokens: 262000, + } satisfies 
Model<"bedrock-converse-stream">, + "us.anthropic.claude-haiku-4-5-20251001-v1:0": { + id: "us.anthropic.claude-haiku-4-5-20251001-v1:0", + name: "Claude Haiku 4.5 (US)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1, + output: 5, + cacheRead: 0.1, + cacheWrite: 1.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "us.anthropic.claude-opus-4-1-20250805-v1:0": { + id: "us.anthropic.claude-opus-4-1-20250805-v1:0", + name: "Claude Opus 4.1 (US)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"bedrock-converse-stream">, + "us.anthropic.claude-opus-4-20250514-v1:0": { + id: "us.anthropic.claude-opus-4-20250514-v1:0", + name: "Claude Opus 4 (US)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"bedrock-converse-stream">, + "us.anthropic.claude-opus-4-5-20251101-v1:0": { + id: "us.anthropic.claude-opus-4-5-20251101-v1:0", + name: "Claude Opus 4.5 (US)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "us.anthropic.claude-opus-4-6-v1": { + id: 
"us.anthropic.claude-opus-4-6-v1", + name: "Claude Opus 4.6 (US)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"bedrock-converse-stream">, + "us.anthropic.claude-sonnet-4-20250514-v1:0": { + id: "us.anthropic.claude-sonnet-4-20250514-v1:0", + name: "Claude Sonnet 4 (US)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "us.anthropic.claude-sonnet-4-5-20250929-v1:0": { + id: "us.anthropic.claude-sonnet-4-5-20250929-v1:0", + name: "Claude Sonnet 4.5 (US)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "us.anthropic.claude-sonnet-4-6": { + id: "us.anthropic.claude-sonnet-4-6", + name: "Claude Sonnet 4.6 (US)", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"bedrock-converse-stream">, + "writer.palmyra-x4-v1:0": { + id: "writer.palmyra-x4-v1:0", + name: "Palmyra X4", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: 
"https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 2.5, + output: 10, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 122880, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "writer.palmyra-x5-v1:0": { + id: "writer.palmyra-x5-v1:0", + name: "Palmyra X5", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1040000, + maxTokens: 8192, + } satisfies Model<"bedrock-converse-stream">, + "zai.glm-4.7": { + id: "zai.glm-4.7", + name: "GLM-4.7", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"bedrock-converse-stream">, + "zai.glm-4.7-flash": { + id: "zai.glm-4.7-flash", + name: "GLM-4.7-Flash", + api: "bedrock-converse-stream", + provider: "amazon-bedrock", + baseUrl: "https://bedrock-runtime.us-east-1.amazonaws.com", + reasoning: true, + input: ["text"], + cost: { + input: 0.07, + output: 0.4, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 131072, + } satisfies Model<"bedrock-converse-stream">, + }, + "anthropic": { + "claude-3-5-haiku-20241022": { + id: "claude-3-5-haiku-20241022", + name: "Claude Haiku 3.5", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.8, + output: 4, + cacheRead: 0.08, + cacheWrite: 1, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "claude-3-5-haiku-latest": { + id: "claude-3-5-haiku-latest", + name: "Claude Haiku 3.5 
(latest)", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.8, + output: 4, + cacheRead: 0.08, + cacheWrite: 1, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "claude-3-5-sonnet-20240620": { + id: "claude-3-5-sonnet-20240620", + name: "Claude Sonnet 3.5", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "claude-3-5-sonnet-20241022": { + id: "claude-3-5-sonnet-20241022", + name: "Claude Sonnet 3.5 v2", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "claude-3-7-sonnet-20250219": { + id: "claude-3-7-sonnet-20250219", + name: "Claude Sonnet 3.7", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-3-7-sonnet-latest": { + id: "claude-3-7-sonnet-latest", + name: "Claude Sonnet 3.7 (latest)", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-3-haiku-20240307": { + id: 
"claude-3-haiku-20240307", + name: "Claude Haiku 3", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.25, + output: 1.25, + cacheRead: 0.03, + cacheWrite: 0.3, + }, + contextWindow: 200000, + maxTokens: 4096, + } satisfies Model<"anthropic-messages">, + "claude-3-opus-20240229": { + id: "claude-3-opus-20240229", + name: "Claude Opus 3", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 4096, + } satisfies Model<"anthropic-messages">, + "claude-3-sonnet-20240229": { + id: "claude-3-sonnet-20240229", + name: "Claude Sonnet 3", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 0.3, + }, + contextWindow: 200000, + maxTokens: 4096, + } satisfies Model<"anthropic-messages">, + "claude-haiku-4-5": { + id: "claude-haiku-4-5", + name: "Claude Haiku 4.5 (latest)", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1, + output: 5, + cacheRead: 0.1, + cacheWrite: 1.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-haiku-4-5-20251001": { + id: "claude-haiku-4-5-20251001", + name: "Claude Haiku 4.5", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1, + output: 5, + cacheRead: 0.1, + cacheWrite: 1.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-0": { + id: 
"claude-opus-4-0", + name: "Claude Opus 4 (latest)", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-1": { + id: "claude-opus-4-1", + name: "Claude Opus 4.1 (latest)", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-1-20250805": { + id: "claude-opus-4-1-20250805", + name: "Claude Opus 4.1", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-20250514": { + id: "claude-opus-4-20250514", + name: "Claude Opus 4", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-5": { + id: "claude-opus-4-5", + name: "Claude Opus 4.5 (latest)", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-5-20251101": { + id: 
"claude-opus-4-5-20251101", + name: "Claude Opus 4.5", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-6": { + id: "claude-opus-4-6", + name: "Claude Opus 4.6", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4-0": { + id: "claude-sonnet-4-0", + name: "Claude Sonnet 4 (latest)", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4-20250514": { + id: "claude-sonnet-4-20250514", + name: "Claude Sonnet 4", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4-5": { + id: "claude-sonnet-4-5", + name: "Claude Sonnet 4.5 (latest)", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4-5-20250929": { + id: 
"claude-sonnet-4-5-20250929", + name: "Claude Sonnet 4.5", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4-6": { + id: "claude-sonnet-4-6", + name: "Claude Sonnet 4.6", + api: "anthropic-messages", + provider: "anthropic", + baseUrl: "https://api.anthropic.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + }, + "azure-openai-responses": { + "codex-mini-latest": { + id: "codex-mini-latest", + name: "Codex Mini", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text"], + cost: { + input: 1.5, + output: 6, + cacheRead: 0.375, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"azure-openai-responses">, + "gpt-4": { + id: "gpt-4", + name: "GPT-4", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text"], + cost: { + input: 30, + output: 60, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 8192, + } satisfies Model<"azure-openai-responses">, + "gpt-4-turbo": { + id: "gpt-4-turbo", + name: "GPT-4 Turbo", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text", "image"], + cost: { + input: 10, + output: 30, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"azure-openai-responses">, + "gpt-4.1": { + id: "gpt-4.1", + name: "GPT-4.1", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + 
reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"azure-openai-responses">, + "gpt-4.1-mini": { + id: "gpt-4.1-mini", + name: "GPT-4.1 mini", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.4, + output: 1.6, + cacheRead: 0.1, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"azure-openai-responses">, + "gpt-4.1-nano": { + id: "gpt-4.1-nano", + name: "GPT-4.1 nano", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.4, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"azure-openai-responses">, + "gpt-4o": { + id: "gpt-4o", + name: "GPT-4o", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"azure-openai-responses">, + "gpt-4o-2024-05-13": { + id: "gpt-4o-2024-05-13", + name: "GPT-4o (2024-05-13)", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text", "image"], + cost: { + input: 5, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"azure-openai-responses">, + "gpt-4o-2024-08-06": { + id: "gpt-4o-2024-08-06", + name: "GPT-4o (2024-08-06)", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + 
contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"azure-openai-responses">, + "gpt-4o-2024-11-20": { + id: "gpt-4o-2024-11-20", + name: "GPT-4o (2024-11-20)", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"azure-openai-responses">, + "gpt-4o-mini": { + id: "gpt-4o-mini", + name: "GPT-4o mini", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0.08, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"azure-openai-responses">, + "gpt-5": { + id: "gpt-5", + name: "GPT-5", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5-chat-latest": { + id: "gpt-5-chat-latest", + name: "GPT-5 Chat Latest", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: false, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"azure-openai-responses">, + "gpt-5-codex": { + id: "gpt-5-codex", + name: "GPT-5-Codex", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5-mini": { + id: "gpt-5-mini", + 
name: "GPT-5 Mini", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5-nano": { + id: "gpt-5-nano", + name: "GPT-5 Nano", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.05, + output: 0.4, + cacheRead: 0.005, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5-pro": { + id: "gpt-5-pro", + name: "GPT-5 Pro", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 120, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 272000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.1": { + id: "gpt-5.1", + name: "GPT-5.1", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.1-chat-latest": { + id: "gpt-5.1-chat-latest", + name: "GPT-5.1 Chat", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"azure-openai-responses">, + "gpt-5.1-codex": { + id: "gpt-5.1-codex", + name: "GPT-5.1 Codex", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + 
cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.1-codex-max": { + id: "gpt-5.1-codex-max", + name: "GPT-5.1 Codex Max", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.1-codex-mini": { + id: "gpt-5.1-codex-mini", + name: "GPT-5.1 Codex mini", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.2": { + id: "gpt-5.2", + name: "GPT-5.2", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.2-chat-latest": { + id: "gpt-5.2-chat-latest", + name: "GPT-5.2 Chat", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"azure-openai-responses">, + "gpt-5.2-codex": { + id: "gpt-5.2-codex", + name: "GPT-5.2 Codex", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 
400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.2-pro": { + id: "gpt-5.2-pro", + name: "GPT-5.2 Pro", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 21, + output: 168, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.3-codex": { + id: "gpt-5.3-codex", + name: "GPT-5.3 Codex", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.3-codex-spark": { + id: "gpt-5.3-codex-spark", + name: "GPT-5.3 Codex Spark", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.4": { + id: "gpt-5.4", + name: "GPT-5.4", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2.5, + output: 15, + cacheRead: 0.25, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "gpt-5.4-pro": { + id: "gpt-5.4-pro", + name: "GPT-5.4 Pro", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 30, + output: 180, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1050000, + maxTokens: 128000, + } satisfies Model<"azure-openai-responses">, + "o1": { + id: "o1", + name: "o1", + api: 
"azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 60, + cacheRead: 7.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"azure-openai-responses">, + "o1-pro": { + id: "o1-pro", + name: "o1-pro", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 150, + output: 600, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"azure-openai-responses">, + "o3": { + id: "o3", + name: "o3", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"azure-openai-responses">, + "o3-deep-research": { + id: "o3-deep-research", + name: "o3-deep-research", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 10, + output: 40, + cacheRead: 2.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"azure-openai-responses">, + "o3-mini": { + id: "o3-mini", + name: "o3-mini", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.55, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"azure-openai-responses">, + "o3-pro": { + id: "o3-pro", + name: "o3-pro", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 20, + output: 80, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + 
maxTokens: 100000, + } satisfies Model<"azure-openai-responses">, + "o4-mini": { + id: "o4-mini", + name: "o4-mini", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.28, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"azure-openai-responses">, + "o4-mini-deep-research": { + id: "o4-mini-deep-research", + name: "o4-mini-deep-research", + api: "azure-openai-responses", + provider: "azure-openai-responses", + baseUrl: "", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"azure-openai-responses">, + }, + "cerebras": { + "gpt-oss-120b": { + id: "gpt-oss-120b", + name: "GPT OSS 120B", + api: "openai-completions", + provider: "cerebras", + baseUrl: "https://api.cerebras.ai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.25, + output: 0.69, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "llama3.1-8b": { + id: "llama3.1-8b", + name: "Llama 3.1 8B", + api: "openai-completions", + provider: "cerebras", + baseUrl: "https://api.cerebras.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.1, + output: 0.1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32000, + maxTokens: 8000, + } satisfies Model<"openai-completions">, + "qwen-3-235b-a22b-instruct-2507": { + id: "qwen-3-235b-a22b-instruct-2507", + name: "Qwen 3 235B Instruct", + api: "openai-completions", + provider: "cerebras", + baseUrl: "https://api.cerebras.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.6, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131000, + maxTokens: 32000, + } satisfies Model<"openai-completions">, + "zai-glm-4.7": { + id: 
"zai-glm-4.7", + name: "Z.AI GLM-4.7", + api: "openai-completions", + provider: "cerebras", + baseUrl: "https://api.cerebras.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2.25, + output: 2.75, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 40000, + } satisfies Model<"openai-completions">, + }, + "github-copilot": { + "claude-haiku-4.5": { + id: "claude-haiku-4.5", + name: "Claude Haiku 4.5", + api: "anthropic-messages", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4.5": { + id: "claude-opus-4.5", + name: "Claude Opus 4.5", + api: "anthropic-messages", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4.6": { + id: "claude-opus-4.6", + name: "Claude Opus 4.6", + api: "anthropic-messages", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + 
contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4": { + id: "claude-sonnet-4", + name: "Claude Sonnet 4", + api: "anthropic-messages", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4.5": { + id: "claude-sonnet-4.5", + name: "Claude Sonnet 4.5", + api: "anthropic-messages", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4.6": { + id: "claude-sonnet-4.6", + name: "Claude Sonnet 4.6", + api: "anthropic-messages", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "gemini-2.5-pro": { + id: "gemini-2.5-pro", + name: "Gemini 2.5 Pro", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + 
headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + compat: {"supportsStore":false,"supportsDeveloperRole":false,"supportsReasoningEffort":false}, + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "gemini-3-flash-preview": { + id: "gemini-3-flash-preview", + name: "Gemini 3 Flash", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + compat: {"supportsStore":false,"supportsDeveloperRole":false,"supportsReasoningEffort":false}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "gemini-3-pro-preview": { + id: "gemini-3-pro-preview", + name: "Gemini 3 Pro Preview", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + compat: {"supportsStore":false,"supportsDeveloperRole":false,"supportsReasoningEffort":false}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "gemini-3.1-pro-preview": { + id: "gemini-3.1-pro-preview", + name: "Gemini 3.1 Pro Preview", + api: "openai-completions", + provider: "github-copilot", + 
baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + compat: {"supportsStore":false,"supportsDeveloperRole":false,"supportsReasoningEffort":false}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "gpt-4.1": { + id: "gpt-4.1", + name: "GPT-4.1", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + compat: {"supportsStore":false,"supportsDeveloperRole":false,"supportsReasoningEffort":false}, + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 64000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "gpt-4o": { + id: "gpt-4o", + name: "GPT-4o", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + compat: {"supportsStore":false,"supportsDeveloperRole":false,"supportsReasoningEffort":false}, + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 64000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "gpt-5": { + id: "gpt-5", + name: "GPT-5", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: 
{"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5-mini": { + id: "gpt-5-mini", + name: "GPT-5-mini", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-responses">, + "gpt-5.1": { + id: "gpt-5.1", + name: "GPT-5.1", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-responses">, + "gpt-5.1-codex": { + id: "gpt-5.1-codex", + name: "GPT-5.1-Codex", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies 
Model<"openai-responses">, + "gpt-5.1-codex-max": { + id: "gpt-5.1-codex-max", + name: "GPT-5.1-Codex-max", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.1-codex-mini": { + id: "gpt-5.1-codex-mini", + name: "GPT-5.1-Codex-mini", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.2": { + id: "gpt-5.2", + name: "GPT-5.2", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-responses">, + "gpt-5.2-codex": { + id: "gpt-5.2-codex", + name: "GPT-5.2-Codex", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: 
{"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.3-codex": { + id: "gpt-5.3-codex", + name: "GPT-5.3-Codex", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.4": { + id: "gpt-5.4", + name: "GPT-5.4", + api: "openai-responses", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "grok-code-fast-1": { + id: "grok-code-fast-1", + name: "Grok Code Fast 1", + api: "openai-completions", + provider: "github-copilot", + baseUrl: "https://api.individual.githubcopilot.com", + headers: {"User-Agent":"GitHubCopilotChat/0.35.0","Editor-Version":"vscode/1.107.0","Editor-Plugin-Version":"copilot-chat/0.35.0","Copilot-Integration-Id":"vscode-chat"}, + compat: {"supportsStore":false,"supportsDeveloperRole":false,"supportsReasoningEffort":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0, + 
output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + }, + "google": { + "gemini-1.5-flash": { + id: "gemini-1.5-flash", + name: "Gemini 1.5 Flash", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.075, + output: 0.3, + cacheRead: 0.01875, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 8192, + } satisfies Model<"google-generative-ai">, + "gemini-1.5-flash-8b": { + id: "gemini-1.5-flash-8b", + name: "Gemini 1.5 Flash-8B", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.0375, + output: 0.15, + cacheRead: 0.01, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 8192, + } satisfies Model<"google-generative-ai">, + "gemini-1.5-pro": { + id: "gemini-1.5-pro", + name: "Gemini 1.5 Pro", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: false, + input: ["text", "image"], + cost: { + input: 1.25, + output: 5, + cacheRead: 0.3125, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 8192, + } satisfies Model<"google-generative-ai">, + "gemini-2.0-flash": { + id: "gemini-2.0-flash", + name: "Gemini 2.0 Flash", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.4, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 8192, + } satisfies Model<"google-generative-ai">, + "gemini-2.0-flash-lite": { + id: "gemini-2.0-flash-lite", + name: "Gemini 2.0 Flash Lite", + api: "google-generative-ai", + provider: "google", + baseUrl: 
"https://generativelanguage.googleapis.com/v1beta", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.075, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 8192, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-flash": { + id: "gemini-2.5-flash", + name: "Gemini 2.5 Flash", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 2.5, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-flash-lite": { + id: "gemini-2.5-flash-lite", + name: "Gemini 2.5 Flash Lite", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.4, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-flash-lite-preview-06-17": { + id: "gemini-2.5-flash-lite-preview-06-17", + name: "Gemini 2.5 Flash Lite Preview 06-17", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.4, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-flash-lite-preview-09-2025": { + id: "gemini-2.5-flash-lite-preview-09-2025", + name: "Gemini 2.5 Flash Lite Preview 09-25", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.4, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 1048576, + 
maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-flash-preview-04-17": { + id: "gemini-2.5-flash-preview-04-17", + name: "Gemini 2.5 Flash Preview 04-17", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0.0375, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-flash-preview-05-20": { + id: "gemini-2.5-flash-preview-05-20", + name: "Gemini 2.5 Flash Preview 05-20", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0.0375, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-flash-preview-09-2025": { + id: "gemini-2.5-flash-preview-09-2025", + name: "Gemini 2.5 Flash Preview 09-25", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 2.5, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-pro": { + id: "gemini-2.5-pro", + name: "Gemini 2.5 Pro", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.31, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-pro-preview-05-06": { + id: "gemini-2.5-pro-preview-05-06", + name: "Gemini 2.5 Pro Preview 05-06", + api: "google-generative-ai", + provider: 
"google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.31, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-2.5-pro-preview-06-05": { + id: "gemini-2.5-pro-preview-06-05", + name: "Gemini 2.5 Pro Preview 06-05", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.31, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-3-flash-preview": { + id: "gemini-3-flash-preview", + name: "Gemini 3 Flash Preview", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.5, + output: 3, + cacheRead: 0.05, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-3-pro-preview": { + id: "gemini-3-pro-preview", + name: "Gemini 3 Pro Preview", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"google-generative-ai">, + "gemini-3.1-flash-lite-preview": { + id: "gemini-3.1-flash-lite-preview", + name: "Gemini 3.1 Flash Lite Preview", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies 
Model<"google-generative-ai">, + "gemini-3.1-pro-preview": { + id: "gemini-3.1-pro-preview", + name: "Gemini 3.1 Pro Preview", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-3.1-pro-preview-customtools": { + id: "gemini-3.1-pro-preview-customtools", + name: "Gemini 3.1 Pro Preview Custom Tools", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-flash-latest": { + id: "gemini-flash-latest", + name: "Gemini Flash Latest", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 2.5, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-flash-lite-latest": { + id: "gemini-flash-lite-latest", + name: "Gemini Flash-Lite Latest", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.4, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-live-2.5-flash": { + id: "gemini-live-2.5-flash", + name: "Gemini Live 2.5 Flash", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + 
reasoning: true, + input: ["text", "image"], + cost: { + input: 0.5, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 8000, + } satisfies Model<"google-generative-ai">, + "gemini-live-2.5-flash-preview-native-audio": { + id: "gemini-live-2.5-flash-preview-native-audio", + name: "Gemini Live 2.5 Flash Preview Native Audio", + api: "google-generative-ai", + provider: "google", + baseUrl: "https://generativelanguage.googleapis.com/v1beta", + reasoning: true, + input: ["text"], + cost: { + input: 0.5, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + }, + "google-antigravity": { + "claude-opus-4-5-thinking": { + id: "claude-opus-4-5-thinking", + name: "Claude Opus 4.5 Thinking (Antigravity)", + api: "google-gemini-cli", + provider: "google-antigravity", + baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"google-gemini-cli">, + "claude-opus-4-6-thinking": { + id: "claude-opus-4-6-thinking", + name: "Claude Opus 4.6 Thinking (Antigravity)", + api: "google-gemini-cli", + provider: "google-antigravity", + baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"google-gemini-cli">, + "claude-sonnet-4-5": { + id: "claude-sonnet-4-5", + name: "Claude Sonnet 4.5 (Antigravity)", + api: "google-gemini-cli", + provider: "google-antigravity", + baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, 
+ maxTokens: 64000, + } satisfies Model<"google-gemini-cli">, + "claude-sonnet-4-5-thinking": { + id: "claude-sonnet-4-5-thinking", + name: "Claude Sonnet 4.5 Thinking (Antigravity)", + api: "google-gemini-cli", + provider: "google-antigravity", + baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"google-gemini-cli">, + "claude-sonnet-4-6": { + id: "claude-sonnet-4-6", + name: "Claude Sonnet 4.6 (Antigravity)", + api: "google-gemini-cli", + provider: "google-antigravity", + baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"google-gemini-cli">, + "gemini-3-flash": { + id: "gemini-3-flash", + name: "Gemini 3 Flash (Antigravity)", + api: "google-gemini-cli", + provider: "google-antigravity", + baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.5, + output: 3, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"google-gemini-cli">, + "gemini-3.1-pro-high": { + id: "gemini-3.1-pro-high", + name: "Gemini 3.1 Pro High (Antigravity)", + api: "google-gemini-cli", + provider: "google-antigravity", + baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.2, + cacheWrite: 2.375, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"google-gemini-cli">, + "gemini-3.1-pro-low": { + id: "gemini-3.1-pro-low", + name: "Gemini 3.1 Pro Low (Antigravity)", + api: "google-gemini-cli", + provider: "google-antigravity", + baseUrl: 
"https://daily-cloudcode-pa.sandbox.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.2, + cacheWrite: 2.375, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"google-gemini-cli">, + "gpt-oss-120b-medium": { + id: "gpt-oss-120b-medium", + name: "GPT-OSS 120B Medium (Antigravity)", + api: "google-gemini-cli", + provider: "google-antigravity", + baseUrl: "https://daily-cloudcode-pa.sandbox.googleapis.com", + reasoning: false, + input: ["text"], + cost: { + input: 0.09, + output: 0.36, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies Model<"google-gemini-cli">, + }, + "google-gemini-cli": { + "gemini-2.0-flash": { + id: "gemini-2.0-flash", + name: "Gemini 2.0 Flash (Cloud Code Assist)", + api: "google-gemini-cli", + provider: "google-gemini-cli", + baseUrl: "https://cloudcode-pa.googleapis.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 8192, + } satisfies Model<"google-gemini-cli">, + "gemini-2.5-flash": { + id: "gemini-2.5-flash", + name: "Gemini 2.5 Flash (Cloud Code Assist)", + api: "google-gemini-cli", + provider: "google-gemini-cli", + baseUrl: "https://cloudcode-pa.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"google-gemini-cli">, + "gemini-2.5-pro": { + id: "gemini-2.5-pro", + name: "Gemini 2.5 Pro (Cloud Code Assist)", + api: "google-gemini-cli", + provider: "google-gemini-cli", + baseUrl: "https://cloudcode-pa.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"google-gemini-cli">, + 
"gemini-3-flash-preview": { + id: "gemini-3-flash-preview", + name: "Gemini 3 Flash Preview (Cloud Code Assist)", + api: "google-gemini-cli", + provider: "google-gemini-cli", + baseUrl: "https://cloudcode-pa.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"google-gemini-cli">, + "gemini-3-pro-preview": { + id: "gemini-3-pro-preview", + name: "Gemini 3 Pro Preview (Cloud Code Assist)", + api: "google-gemini-cli", + provider: "google-gemini-cli", + baseUrl: "https://cloudcode-pa.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"google-gemini-cli">, + "gemini-3.1-pro-preview": { + id: "gemini-3.1-pro-preview", + name: "Gemini 3.1 Pro Preview (Cloud Code Assist)", + api: "google-gemini-cli", + provider: "google-gemini-cli", + baseUrl: "https://cloudcode-pa.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"google-gemini-cli">, + }, + "google-vertex": { + "gemini-1.5-flash": { + id: "gemini-1.5-flash", + name: "Gemini 1.5 Flash (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.075, + output: 0.3, + cacheRead: 0.01875, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 8192, + } satisfies Model<"google-vertex">, + "gemini-1.5-flash-8b": { + id: "gemini-1.5-flash-8b", + name: "Gemini 1.5 Flash-8B (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: false, + input: ["text", "image"], + 
cost: { + input: 0.0375, + output: 0.15, + cacheRead: 0.01, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 8192, + } satisfies Model<"google-vertex">, + "gemini-1.5-pro": { + id: "gemini-1.5-pro", + name: "Gemini 1.5 Pro (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 1.25, + output: 5, + cacheRead: 0.3125, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 8192, + } satisfies Model<"google-vertex">, + "gemini-2.0-flash": { + id: "gemini-2.0-flash", + name: "Gemini 2.0 Flash (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0.0375, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 8192, + } satisfies Model<"google-vertex">, + "gemini-2.0-flash-lite": { + id: "gemini-2.0-flash-lite", + name: "Gemini 2.0 Flash Lite (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.075, + output: 0.3, + cacheRead: 0.01875, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-vertex">, + "gemini-2.5-flash": { + id: "gemini-2.5-flash", + name: "Gemini 2.5 Flash (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 2.5, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-vertex">, + "gemini-2.5-flash-lite": { + id: "gemini-2.5-flash-lite", + name: "Gemini 2.5 Flash Lite (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: 
"https://{location}-aiplatform.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.4, + cacheRead: 0.01, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-vertex">, + "gemini-2.5-flash-lite-preview-09-2025": { + id: "gemini-2.5-flash-lite-preview-09-2025", + name: "Gemini 2.5 Flash Lite Preview 09-25 (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.4, + cacheRead: 0.01, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-vertex">, + "gemini-2.5-pro": { + id: "gemini-2.5-pro", + name: "Gemini 2.5 Pro (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-vertex">, + "gemini-3-flash-preview": { + id: "gemini-3-flash-preview", + name: "Gemini 3 Flash Preview (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.5, + output: 3, + cacheRead: 0.05, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-vertex">, + "gemini-3-pro-preview": { + id: "gemini-3-pro-preview", + name: "Gemini 3 Pro Preview (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"google-vertex">, + 
"gemini-3.1-pro-preview": { + id: "gemini-3.1-pro-preview", + name: "Gemini 3.1 Pro Preview (Vertex)", + api: "google-vertex", + provider: "google-vertex", + baseUrl: "https://{location}-aiplatform.googleapis.com", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-vertex">, + }, + "groq": { + "deepseek-r1-distill-llama-70b": { + id: "deepseek-r1-distill-llama-70b", + name: "DeepSeek R1 Distill Llama 70B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.75, + output: 0.99, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "gemma2-9b-it": { + id: "gemma2-9b-it", + name: "Gemma 2 9B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.2, + output: 0.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "llama-3.1-8b-instant": { + id: "llama-3.1-8b-instant", + name: "Llama 3.1 8B Instant", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.05, + output: 0.08, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "llama-3.3-70b-versatile": { + id: "llama-3.3-70b-versatile", + name: "Llama 3.3 70B Versatile", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.59, + output: 0.79, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies 
Model<"openai-completions">, + "llama3-70b-8192": { + id: "llama3-70b-8192", + name: "Llama 3 70B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.59, + output: 0.79, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "llama3-8b-8192": { + id: "llama3-8b-8192", + name: "Llama 3 8B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.05, + output: 0.08, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "meta-llama/llama-4-maverick-17b-128e-instruct": { + id: "meta-llama/llama-4-maverick-17b-128e-instruct", + name: "Llama 4 Maverick 17B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.2, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "meta-llama/llama-4-scout-17b-16e-instruct": { + id: "meta-llama/llama-4-scout-17b-16e-instruct", + name: "Llama 4 Scout 17B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.11, + output: 0.34, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "mistral-saba-24b": { + id: "mistral-saba-24b", + name: "Mistral Saba 24B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.79, + output: 0.79, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 32768, + } satisfies 
Model<"openai-completions">, + "moonshotai/kimi-k2-instruct": { + id: "moonshotai/kimi-k2-instruct", + name: "Kimi K2 Instruct", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1, + output: 3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "moonshotai/kimi-k2-instruct-0905": { + id: "moonshotai/kimi-k2-instruct-0905", + name: "Kimi K2 Instruct 0905", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1, + output: 3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-oss-120b": { + id: "openai/gpt-oss-120b", + name: "GPT OSS 120B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "openai/gpt-oss-20b": { + id: "openai/gpt-oss-20b", + name: "GPT OSS 20B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.075, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen-qwq-32b": { + id: "qwen-qwq-32b", + name: "Qwen QwQ 32B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.29, + output: 0.39, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "qwen/qwen3-32b": { + id: "qwen/qwen3-32b", + 
name: "Qwen3 32B", + api: "openai-completions", + provider: "groq", + baseUrl: "https://api.groq.com/openai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.29, + output: 0.59, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + }, + "huggingface": { + "MiniMaxAI/MiniMax-M2.1": { + id: "MiniMaxAI/MiniMax-M2.1", + name: "MiniMax-M2.1", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "MiniMaxAI/MiniMax-M2.5": { + id: "MiniMaxAI/MiniMax-M2.5", + name: "MiniMax-M2.5", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "Qwen/Qwen3-235B-A22B-Thinking-2507": { + id: "Qwen/Qwen3-235B-A22B-Thinking-2507", + name: "Qwen3-235B-A22B-Thinking-2507", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "Qwen/Qwen3-Coder-480B-A35B-Instruct": { + id: "Qwen/Qwen3-Coder-480B-A35B-Instruct", + name: "Qwen3-Coder-480B-A35B-Instruct", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: false, + input: 
["text"], + cost: { + input: 2, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "Qwen/Qwen3-Coder-Next": { + id: "Qwen/Qwen3-Coder-Next", + name: "Qwen3-Coder-Next", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: false, + input: ["text"], + cost: { + input: 0.2, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "Qwen/Qwen3-Next-80B-A3B-Instruct": { + id: "Qwen/Qwen3-Next-80B-A3B-Instruct", + name: "Qwen3-Next-80B-A3B-Instruct", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: false, + input: ["text"], + cost: { + input: 0.25, + output: 1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "Qwen/Qwen3-Next-80B-A3B-Thinking": { + id: "Qwen/Qwen3-Next-80B-A3B-Thinking", + name: "Qwen3-Next-80B-A3B-Thinking", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "Qwen/Qwen3.5-397B-A17B": { + id: "Qwen/Qwen3.5-397B-A17B", + name: "Qwen3.5-397B-A17B", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.6, + output: 3.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + 
"XiaomiMiMo/MiMo-V2-Flash": { + id: "XiaomiMiMo/MiMo-V2-Flash", + name: "MiMo-V2-Flash", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0.1, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "deepseek-ai/DeepSeek-R1-0528": { + id: "deepseek-ai/DeepSeek-R1-0528", + name: "DeepSeek-R1-0528", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 3, + output: 5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 163840, + } satisfies Model<"openai-completions">, + "deepseek-ai/DeepSeek-V3.2": { + id: "deepseek-ai/DeepSeek-V3.2", + name: "DeepSeek-V3.2", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0.28, + output: 0.4, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "moonshotai/Kimi-K2-Instruct": { + id: "moonshotai/Kimi-K2-Instruct", + name: "Kimi-K2-Instruct", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: false, + input: ["text"], + cost: { + input: 1, + output: 3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "moonshotai/Kimi-K2-Instruct-0905": { + id: "moonshotai/Kimi-K2-Instruct-0905", + name: "Kimi-K2-Instruct-0905", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + 
compat: {"supportsDeveloperRole":false}, + reasoning: false, + input: ["text"], + cost: { + input: 1, + output: 3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "moonshotai/Kimi-K2-Thinking": { + id: "moonshotai/Kimi-K2-Thinking", + name: "Kimi-K2-Thinking", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.5, + cacheRead: 0.15, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 262144, + } satisfies Model<"openai-completions">, + "moonshotai/Kimi-K2.5": { + id: "moonshotai/Kimi-K2.5", + name: "Kimi-K2.5", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.6, + output: 3, + cacheRead: 0.1, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 262144, + } satisfies Model<"openai-completions">, + "zai-org/GLM-4.7": { + id: "zai-org/GLM-4.7", + name: "GLM-4.7", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.2, + cacheRead: 0.11, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "zai-org/GLM-4.7-Flash": { + id: "zai-org/GLM-4.7-Flash", + name: "GLM-4.7-Flash", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + 
"zai-org/GLM-5": { + id: "zai-org/GLM-5", + name: "GLM-5", + api: "openai-completions", + provider: "huggingface", + baseUrl: "https://router.huggingface.co/v1", + compat: {"supportsDeveloperRole":false}, + reasoning: true, + input: ["text"], + cost: { + input: 1, + output: 3.2, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 202752, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + }, + "kimi-coding": { + "k2p5": { + id: "k2p5", + name: "Kimi K2.5", + api: "anthropic-messages", + provider: "kimi-coding", + baseUrl: "https://api.kimi.com/coding", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 32768, + } satisfies Model<"anthropic-messages">, + "kimi-k2-thinking": { + id: "kimi-k2-thinking", + name: "Kimi K2 Thinking", + api: "anthropic-messages", + provider: "kimi-coding", + baseUrl: "https://api.kimi.com/coding", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 32768, + } satisfies Model<"anthropic-messages">, + }, + "minimax": { + "MiniMax-M2": { + id: "MiniMax-M2", + name: "MiniMax-M2", + api: "anthropic-messages", + provider: "minimax", + baseUrl: "https://api.minimax.io/anthropic", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 196608, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "MiniMax-M2.1": { + id: "MiniMax-M2.1", + name: "MiniMax-M2.1", + api: "anthropic-messages", + provider: "minimax", + baseUrl: "https://api.minimax.io/anthropic", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "MiniMax-M2.5": { + id: "MiniMax-M2.5", + name: "MiniMax-M2.5", + api: 
"anthropic-messages", + provider: "minimax", + baseUrl: "https://api.minimax.io/anthropic", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0.03, + cacheWrite: 0.375, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "MiniMax-M2.5-highspeed": { + id: "MiniMax-M2.5-highspeed", + name: "MiniMax-M2.5-highspeed", + api: "anthropic-messages", + provider: "minimax", + baseUrl: "https://api.minimax.io/anthropic", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.4, + cacheRead: 0.06, + cacheWrite: 0.375, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + }, + "minimax-cn": { + "MiniMax-M2": { + id: "MiniMax-M2", + name: "MiniMax-M2", + api: "anthropic-messages", + provider: "minimax-cn", + baseUrl: "https://api.minimaxi.com/anthropic", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 196608, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "MiniMax-M2.1": { + id: "MiniMax-M2.1", + name: "MiniMax-M2.1", + api: "anthropic-messages", + provider: "minimax-cn", + baseUrl: "https://api.minimaxi.com/anthropic", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "MiniMax-M2.5": { + id: "MiniMax-M2.5", + name: "MiniMax-M2.5", + api: "anthropic-messages", + provider: "minimax-cn", + baseUrl: "https://api.minimaxi.com/anthropic", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0.03, + cacheWrite: 0.375, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "MiniMax-M2.5-highspeed": { + id: "MiniMax-M2.5-highspeed", + name: "MiniMax-M2.5-highspeed", + api: "anthropic-messages", + provider: 
"minimax-cn", + baseUrl: "https://api.minimaxi.com/anthropic", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.4, + cacheRead: 0.06, + cacheWrite: 0.375, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + }, + "mistral": { + "codestral-latest": { + id: "codestral-latest", + name: "Codestral", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.3, + output: 0.9, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 4096, + } satisfies Model<"mistral-conversations">, + "devstral-2512": { + id: "devstral-2512", + name: "Devstral 2", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.4, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 262144, + } satisfies Model<"mistral-conversations">, + "devstral-medium-2507": { + id: "devstral-medium-2507", + name: "Devstral Medium", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.4, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"mistral-conversations">, + "devstral-medium-latest": { + id: "devstral-medium-latest", + name: "Devstral 2", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.4, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 262144, + } satisfies Model<"mistral-conversations">, + "devstral-small-2505": { + id: "devstral-small-2505", + name: "Devstral Small 2505", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: 
["text"], + cost: { + input: 0.1, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"mistral-conversations">, + "devstral-small-2507": { + id: "devstral-small-2507", + name: "Devstral Small", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.1, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"mistral-conversations">, + "labs-devstral-small-2512": { + id: "labs-devstral-small-2512", + name: "Devstral Small 2", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"mistral-conversations">, + "magistral-medium-latest": { + id: "magistral-medium-latest", + name: "Magistral Medium", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: true, + input: ["text"], + cost: { + input: 2, + output: 5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"mistral-conversations">, + "magistral-small": { + id: "magistral-small", + name: "Magistral Small", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: true, + input: ["text"], + cost: { + input: 0.5, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"mistral-conversations">, + "ministral-3b-latest": { + id: "ministral-3b-latest", + name: "Ministral 3B", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.04, + output: 0.04, + cacheRead: 0, + 
cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"mistral-conversations">, + "ministral-8b-latest": { + id: "ministral-8b-latest", + name: "Ministral 8B", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.1, + output: 0.1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"mistral-conversations">, + "mistral-large-2411": { + id: "mistral-large-2411", + name: "Mistral Large 2.1", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"mistral-conversations">, + "mistral-large-2512": { + id: "mistral-large-2512", + name: "Mistral Large 3", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.5, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 262144, + } satisfies Model<"mistral-conversations">, + "mistral-large-latest": { + id: "mistral-large-latest", + name: "Mistral Large", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.5, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 262144, + } satisfies Model<"mistral-conversations">, + "mistral-medium-2505": { + id: "mistral-medium-2505", + name: "Mistral Medium 3", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.4, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + 
} satisfies Model<"mistral-conversations">, + "mistral-medium-2508": { + id: "mistral-medium-2508", + name: "Mistral Medium 3.1", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.4, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 262144, + } satisfies Model<"mistral-conversations">, + "mistral-medium-latest": { + id: "mistral-medium-latest", + name: "Mistral Medium", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.4, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"mistral-conversations">, + "mistral-nemo": { + id: "mistral-nemo", + name: "Mistral Nemo", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"mistral-conversations">, + "mistral-small-2506": { + id: "mistral-small-2506", + name: "Mistral Small 3.2", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"mistral-conversations">, + "mistral-small-latest": { + id: "mistral-small-latest", + name: "Mistral Small", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"mistral-conversations">, + "open-mistral-7b": 
{ + id: "open-mistral-7b", + name: "Mistral 7B", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.25, + output: 0.25, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8000, + maxTokens: 8000, + } satisfies Model<"mistral-conversations">, + "open-mixtral-8x22b": { + id: "open-mixtral-8x22b", + name: "Mixtral 8x22B", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 64000, + maxTokens: 64000, + } satisfies Model<"mistral-conversations">, + "open-mixtral-8x7b": { + id: "open-mixtral-8x7b", + name: "Mixtral 8x7B", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text"], + cost: { + input: 0.7, + output: 0.7, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32000, + maxTokens: 32000, + } satisfies Model<"mistral-conversations">, + "pixtral-12b": { + id: "pixtral-12b", + name: "Pixtral 12B", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"mistral-conversations">, + "pixtral-large-latest": { + id: "pixtral-large-latest", + name: "Pixtral Large", + api: "mistral-conversations", + provider: "mistral", + baseUrl: "https://api.mistral.ai", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"mistral-conversations">, + }, + "openai": { + "codex-mini-latest": { + id: "codex-mini-latest", + name: "Codex Mini", + api: "openai-responses", + provider: "openai", + 
baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text"], + cost: { + input: 1.5, + output: 6, + cacheRead: 0.375, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-responses">, + "gpt-4": { + id: "gpt-4", + name: "GPT-4", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text"], + cost: { + input: 30, + output: 60, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 8192, + } satisfies Model<"openai-responses">, + "gpt-4-turbo": { + id: "gpt-4-turbo", + name: "GPT-4 Turbo", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 10, + output: 30, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-responses">, + "gpt-4.1": { + id: "gpt-4.1", + name: "GPT-4.1", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"openai-responses">, + "gpt-4.1-mini": { + id: "gpt-4.1-mini", + name: "GPT-4.1 mini", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.4, + output: 1.6, + cacheRead: 0.1, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"openai-responses">, + "gpt-4.1-nano": { + id: "gpt-4.1-nano", + name: "GPT-4.1 nano", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.1, + output: 0.4, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies 
Model<"openai-responses">, + "gpt-4o": { + id: "gpt-4o", + name: "GPT-4o", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-responses">, + "gpt-4o-2024-05-13": { + id: "gpt-4o-2024-05-13", + name: "GPT-4o (2024-05-13)", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 5, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-responses">, + "gpt-4o-2024-08-06": { + id: "gpt-4o-2024-08-06", + name: "GPT-4o (2024-08-06)", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-responses">, + "gpt-4o-2024-11-20": { + id: "gpt-4o-2024-11-20", + name: "GPT-4o (2024-11-20)", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-responses">, + "gpt-4o-mini": { + id: "gpt-4o-mini", + name: "GPT-4o mini", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0.08, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-responses">, + "gpt-5": { + id: "gpt-5", + name: "GPT-5", + api: "openai-responses", + provider: "openai", + baseUrl: 
"https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5-chat-latest": { + id: "gpt-5-chat-latest", + name: "GPT-5 Chat Latest", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-responses">, + "gpt-5-codex": { + id: "gpt-5-codex", + name: "GPT-5-Codex", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5-mini": { + id: "gpt-5-mini", + name: "GPT-5 Mini", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5-nano": { + id: "gpt-5-nano", + name: "GPT-5 Nano", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.05, + output: 0.4, + cacheRead: 0.005, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5-pro": { + id: "gpt-5-pro", + name: "GPT-5 Pro", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 120, + cacheRead: 0, + cacheWrite: 0, + }, + 
contextWindow: 400000, + maxTokens: 272000, + } satisfies Model<"openai-responses">, + "gpt-5.1": { + id: "gpt-5.1", + name: "GPT-5.1", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.1-chat-latest": { + id: "gpt-5.1-chat-latest", + name: "GPT-5.1 Chat", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-responses">, + "gpt-5.1-codex": { + id: "gpt-5.1-codex", + name: "GPT-5.1 Codex", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.1-codex-max": { + id: "gpt-5.1-codex-max", + name: "GPT-5.1 Codex Max", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.1-codex-mini": { + id: "gpt-5.1-codex-mini", + name: "GPT-5.1 Codex mini", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.2": { + id: "gpt-5.2", 
+ name: "GPT-5.2", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.2-chat-latest": { + id: "gpt-5.2-chat-latest", + name: "GPT-5.2 Chat", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-responses">, + "gpt-5.2-codex": { + id: "gpt-5.2-codex", + name: "GPT-5.2 Codex", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.2-pro": { + id: "gpt-5.2-pro", + name: "GPT-5.2 Pro", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 21, + output: 168, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.3-codex": { + id: "gpt-5.3-codex", + name: "GPT-5.3 Codex", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.3-codex-spark": { + id: "gpt-5.3-codex-spark", + name: "GPT-5.3 Codex Spark", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + 
reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32000, + } satisfies Model<"openai-responses">, + "gpt-5.4": { + id: "gpt-5.4", + name: "GPT-5.4", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2.5, + output: 15, + cacheRead: 0.25, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.4-pro": { + id: "gpt-5.4-pro", + name: "GPT-5.4 Pro", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 30, + output: 180, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1050000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "o1": { + id: "o1", + name: "o1", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 60, + cacheRead: 7.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-responses">, + "o1-pro": { + id: "o1-pro", + name: "o1-pro", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 150, + output: 600, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-responses">, + "o3": { + id: "o3", + name: "o3", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-responses">, + "o3-deep-research": { + id: "o3-deep-research", + 
name: "o3-deep-research", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 10, + output: 40, + cacheRead: 2.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-responses">, + "o3-mini": { + id: "o3-mini", + name: "o3-mini", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.55, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-responses">, + "o3-pro": { + id: "o3-pro", + name: "o3-pro", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 20, + output: 80, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-responses">, + "o4-mini": { + id: "o4-mini", + name: "o4-mini", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.28, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-responses">, + "o4-mini-deep-research": { + id: "o4-mini-deep-research", + name: "o4-mini-deep-research", + api: "openai-responses", + provider: "openai", + baseUrl: "https://api.openai.com/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-responses">, + }, + "openai-codex": { + "gpt-5.1": { + id: "gpt-5.1", + name: "GPT-5.1", + api: "openai-codex-responses", + provider: "openai-codex", + baseUrl: "https://chatgpt.com/backend-api", + reasoning: true, + input: ["text", "image"], + cost: 
{ + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-codex-responses">, + "gpt-5.1-codex-max": { + id: "gpt-5.1-codex-max", + name: "GPT-5.1 Codex Max", + api: "openai-codex-responses", + provider: "openai-codex", + baseUrl: "https://chatgpt.com/backend-api", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-codex-responses">, + "gpt-5.1-codex-mini": { + id: "gpt-5.1-codex-mini", + name: "GPT-5.1 Codex Mini", + api: "openai-codex-responses", + provider: "openai-codex", + baseUrl: "https://chatgpt.com/backend-api", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-codex-responses">, + "gpt-5.2": { + id: "gpt-5.2", + name: "GPT-5.2", + api: "openai-codex-responses", + provider: "openai-codex", + baseUrl: "https://chatgpt.com/backend-api", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-codex-responses">, + "gpt-5.2-codex": { + id: "gpt-5.2-codex", + name: "GPT-5.2 Codex", + api: "openai-codex-responses", + provider: "openai-codex", + baseUrl: "https://chatgpt.com/backend-api", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-codex-responses">, + "gpt-5.3-codex": { + id: "gpt-5.3-codex", + name: "GPT-5.3 Codex", + api: "openai-codex-responses", + provider: "openai-codex", + baseUrl: "https://chatgpt.com/backend-api", + reasoning: true, + input: ["text", "image"], + cost: { + 
input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-codex-responses">, + "gpt-5.3-codex-spark": { + id: "gpt-5.3-codex-spark", + name: "GPT-5.3 Codex Spark", + api: "openai-codex-responses", + provider: "openai-codex", + baseUrl: "https://chatgpt.com/backend-api", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-codex-responses">, + "gpt-5.4": { + id: "gpt-5.4", + name: "GPT-5.4", + api: "openai-codex-responses", + provider: "openai-codex", + baseUrl: "https://chatgpt.com/backend-api", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2.5, + output: 15, + cacheRead: 0.25, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-codex-responses">, + }, + "opencode": { + "big-pickle": { + id: "big-pickle", + name: "Big Pickle", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "claude-3-5-haiku": { + id: "claude-3-5-haiku", + name: "Claude Haiku 3.5", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.8, + output: 4, + cacheRead: 0.08, + cacheWrite: 1, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "claude-haiku-4-5": { + id: "claude-haiku-4-5", + name: "Claude Haiku 4.5", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1, + output: 5, + cacheRead: 0.1, + cacheWrite: 1.25, + }, + 
contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-1": { + id: "claude-opus-4-1", + name: "Claude Opus 4.1", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-5": { + id: "claude-opus-4-5", + name: "Claude Opus 4.5", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-opus-4-6": { + id: "claude-opus-4-6", + name: "Claude Opus 4.6", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4": { + id: "claude-sonnet-4", + name: "Claude Sonnet 4", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "claude-sonnet-4-5": { + id: "claude-sonnet-4-5", + name: "Claude Sonnet 4.5", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + 
"claude-sonnet-4-6": { + id: "claude-sonnet-4-6", + name: "Claude Sonnet 4.6", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "gemini-3-flash": { + id: "gemini-3-flash", + name: "Gemini 3 Flash", + api: "google-generative-ai", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.5, + output: 3, + cacheRead: 0.05, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-3-pro": { + id: "gemini-3-pro", + name: "Gemini 3 Pro", + api: "google-generative-ai", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "gemini-3.1-pro": { + id: "gemini-3.1-pro", + name: "Gemini 3.1 Pro Preview", + api: "google-generative-ai", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"google-generative-ai">, + "glm-4.6": { + id: "glm-4.6", + name: "GLM-4.6", + api: "openai-completions", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.2, + cacheRead: 0.1, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "glm-4.7": { + id: "glm-4.7", + name: "GLM-4.7", + api: "openai-completions", + provider: "opencode", + 
baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.2, + cacheRead: 0.1, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "glm-5": { + id: "glm-5", + name: "GLM-5", + api: "openai-completions", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text"], + cost: { + input: 1, + output: 3.2, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "gpt-5": { + id: "gpt-5", + name: "GPT-5", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.07, + output: 8.5, + cacheRead: 0.107, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5-codex": { + id: "gpt-5-codex", + name: "GPT-5 Codex", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.07, + output: 8.5, + cacheRead: 0.107, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5-nano": { + id: "gpt-5-nano", + name: "GPT-5 Nano", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.1": { + id: "gpt-5.1", + name: "GPT-5.1", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.07, + output: 8.5, + cacheRead: 0.107, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } 
satisfies Model<"openai-responses">, + "gpt-5.1-codex": { + id: "gpt-5.1-codex", + name: "GPT-5.1 Codex", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.07, + output: 8.5, + cacheRead: 0.107, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.1-codex-max": { + id: "gpt-5.1-codex-max", + name: "GPT-5.1 Codex Max", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.1-codex-mini": { + id: "gpt-5.1-codex-mini", + name: "GPT-5.1 Codex Mini", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.025, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.2": { + id: "gpt-5.2", + name: "GPT-5.2", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.2-codex": { + id: "gpt-5.2-codex", + name: "GPT-5.2 Codex", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.3-codex": { + id: "gpt-5.3-codex", + name: "GPT-5.3 Codex", + 
api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.4": { + id: "gpt-5.4", + name: "GPT-5.4", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2.5, + output: 15, + cacheRead: 0.25, + cacheWrite: 0, + }, + contextWindow: 272000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "gpt-5.4-pro": { + id: "gpt-5.4-pro", + name: "GPT-5.4 Pro", + api: "openai-responses", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 30, + output: 180, + cacheRead: 30, + cacheWrite: 0, + }, + contextWindow: 1050000, + maxTokens: 128000, + } satisfies Model<"openai-responses">, + "kimi-k2.5": { + id: "kimi-k2.5", + name: "Kimi K2.5", + api: "openai-completions", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.6, + output: 3, + cacheRead: 0.08, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "minimax-m2.1": { + id: "minimax-m2.1", + name: "MiniMax M2.1", + api: "openai-completions", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0.1, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "minimax-m2.5": { + id: "minimax-m2.5", + name: "MiniMax M2.5", + api: "openai-completions", + provider: "opencode", + baseUrl: "https://opencode.ai/zen/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + 
cacheRead: 0.06, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "minimax-m2.5-free": { + id: "minimax-m2.5-free", + name: "MiniMax M2.5 Free", + api: "anthropic-messages", + provider: "opencode", + baseUrl: "https://opencode.ai/zen", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + }, + "opencode-go": { + "glm-5": { + id: "glm-5", + name: "GLM-5", + api: "openai-completions", + provider: "opencode-go", + baseUrl: "https://opencode.ai/zen/go/v1", + reasoning: true, + input: ["text"], + cost: { + input: 1, + output: 3.2, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "kimi-k2.5": { + id: "kimi-k2.5", + name: "Kimi K2.5", + api: "openai-completions", + provider: "opencode-go", + baseUrl: "https://opencode.ai/zen/go/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.6, + output: 3, + cacheRead: 0.1, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "minimax-m2.5": { + id: "minimax-m2.5", + name: "MiniMax M2.5", + api: "anthropic-messages", + provider: "opencode-go", + baseUrl: "https://opencode.ai/zen/go", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + }, + "openrouter": { + "ai21/jamba-large-1.7": { + id: "ai21/jamba-large-1.7", + name: "AI21: Jamba Large 1.7", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 8, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 4096, + } satisfies 
Model<"openai-completions">, + "alibaba/tongyi-deepresearch-30b-a3b": { + id: "alibaba/tongyi-deepresearch-30b-a3b", + name: "Tongyi DeepResearch 30B A3B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.09, + output: 0.44999999999999996, + cacheRead: 0.09, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "allenai/olmo-3.1-32b-instruct": { + id: "allenai/olmo-3.1-32b-instruct", + name: "AllenAI: Olmo 3.1 32B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 65536, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "amazon/nova-2-lite-v1": { + id: "amazon/nova-2-lite-v1", + name: "Amazon: Nova 2 Lite", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 2.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 65535, + } satisfies Model<"openai-completions">, + "amazon/nova-lite-v1": { + id: "amazon/nova-lite-v1", + name: "Amazon: Nova Lite 1.0", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.06, + output: 0.24, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 300000, + maxTokens: 5120, + } satisfies Model<"openai-completions">, + "amazon/nova-micro-v1": { + id: "amazon/nova-micro-v1", + name: "Amazon: Nova Micro 1.0", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.035, + output: 0.14, + cacheRead: 
0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 5120, + } satisfies Model<"openai-completions">, + "amazon/nova-premier-v1": { + id: "amazon/nova-premier-v1", + name: "Amazon: Nova Premier 1.0", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 12.5, + cacheRead: 0.625, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 32000, + } satisfies Model<"openai-completions">, + "amazon/nova-pro-v1": { + id: "amazon/nova-pro-v1", + name: "Amazon: Nova Pro 1.0", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.7999999999999999, + output: 3.1999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 300000, + maxTokens: 5120, + } satisfies Model<"openai-completions">, + "anthropic/claude-3-haiku": { + id: "anthropic/claude-3-haiku", + name: "Anthropic: Claude 3 Haiku", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.25, + output: 1.25, + cacheRead: 0.03, + cacheWrite: 0.3, + }, + contextWindow: 200000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "anthropic/claude-3.5-haiku": { + id: "anthropic/claude-3.5-haiku", + name: "Anthropic: Claude 3.5 Haiku", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.7999999999999999, + output: 4, + cacheRead: 0.08, + cacheWrite: 1, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "anthropic/claude-3.5-sonnet": { + id: "anthropic/claude-3.5-sonnet", + name: "Anthropic: Claude 3.5 Sonnet", + api: "openai-completions", + provider: "openrouter", + baseUrl: 
"https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 6, + output: 30, + cacheRead: 0.6, + cacheWrite: 7.5, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "anthropic/claude-3.7-sonnet": { + id: "anthropic/claude-3.7-sonnet", + name: "Anthropic: Claude 3.7 Sonnet", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "anthropic/claude-3.7-sonnet:thinking": { + id: "anthropic/claude-3.7-sonnet:thinking", + name: "Anthropic: Claude 3.7 Sonnet (thinking)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "anthropic/claude-haiku-4.5": { + id: "anthropic/claude-haiku-4.5", + name: "Anthropic: Claude Haiku 4.5", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1, + output: 5, + cacheRead: 0.09999999999999999, + cacheWrite: 1.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "anthropic/claude-opus-4": { + id: "anthropic/claude-opus-4", + name: "Anthropic: Claude Opus 4", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"openai-completions">, + 
"anthropic/claude-opus-4.1": { + id: "anthropic/claude-opus-4.1", + name: "Anthropic: Claude Opus 4.1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"openai-completions">, + "anthropic/claude-opus-4.5": { + id: "anthropic/claude-opus-4.5", + name: "Anthropic: Claude Opus 4.5", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "anthropic/claude-opus-4.6": { + id: "anthropic/claude-opus-4.6", + name: "Anthropic: Claude Opus 4.6", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 1000000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "anthropic/claude-sonnet-4": { + id: "anthropic/claude-sonnet-4", + name: "Anthropic: Claude Sonnet 4", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "anthropic/claude-sonnet-4.5": { + id: "anthropic/claude-sonnet-4.5", + name: "Anthropic: Claude Sonnet 4.5", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 
3.75, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "anthropic/claude-sonnet-4.6": { + id: "anthropic/claude-sonnet-4.6", + name: "Anthropic: Claude Sonnet 4.6", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 1000000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "arcee-ai/trinity-large-preview:free": { + id: "arcee-ai/trinity-large-preview:free", + name: "Arcee AI: Trinity Large Preview (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "arcee-ai/trinity-mini": { + id: "arcee-ai/trinity-mini", + name: "Arcee AI: Trinity Mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.045, + output: 0.15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "arcee-ai/trinity-mini:free": { + id: "arcee-ai/trinity-mini:free", + name: "Arcee AI: Trinity Mini (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "arcee-ai/virtuoso-large": { + id: "arcee-ai/virtuoso-large", + name: "Arcee AI: Virtuoso Large", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: 
["text"], + cost: { + input: 0.75, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "auto": { + id: "auto", + name: "Auto", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 30000, + } satisfies Model<"openai-completions">, + "baidu/ernie-4.5-21b-a3b": { + id: "baidu/ernie-4.5-21b-a3b", + name: "Baidu: ERNIE 4.5 21B A3B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.07, + output: 0.28, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 120000, + maxTokens: 8000, + } satisfies Model<"openai-completions">, + "baidu/ernie-4.5-vl-28b-a3b": { + id: "baidu/ernie-4.5-vl-28b-a3b", + name: "Baidu: ERNIE 4.5 VL 28B A3B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.14, + output: 0.56, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 30000, + maxTokens: 8000, + } satisfies Model<"openai-completions">, + "bytedance-seed/seed-1.6": { + id: "bytedance-seed/seed-1.6", + name: "ByteDance Seed: Seed 1.6", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "bytedance-seed/seed-1.6-flash": { + id: "bytedance-seed/seed-1.6-flash", + name: "ByteDance Seed: Seed 1.6 Flash", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: 
["text", "image"], + cost: { + input: 0.075, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "bytedance-seed/seed-2.0-mini": { + id: "bytedance-seed/seed-2.0-mini", + name: "ByteDance Seed: Seed-2.0-Mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "cohere/command-r-08-2024": { + id: "cohere/command-r-08-2024", + name: "Cohere: Command R (08-2024)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4000, + } satisfies Model<"openai-completions">, + "cohere/command-r-plus-08-2024": { + id: "cohere/command-r-plus-08-2024", + name: "Cohere: Command R+ (08-2024)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2.5, + output: 10, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4000, + } satisfies Model<"openai-completions">, + "deepseek/deepseek-chat": { + id: "deepseek/deepseek-chat", + name: "DeepSeek: DeepSeek V3", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.32, + output: 0.8899999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 163840, + } satisfies Model<"openai-completions">, + "deepseek/deepseek-chat-v3-0324": { + id: "deepseek/deepseek-chat-v3-0324", + name: "DeepSeek: DeepSeek V3 0324", + 
api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 0.77, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 163840, + } satisfies Model<"openai-completions">, + "deepseek/deepseek-chat-v3.1": { + id: "deepseek/deepseek-chat-v3.1", + name: "DeepSeek: DeepSeek V3.1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.15, + output: 0.75, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 7168, + } satisfies Model<"openai-completions">, + "deepseek/deepseek-r1": { + id: "deepseek/deepseek-r1", + name: "DeepSeek: R1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.7, + output: 2.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 64000, + maxTokens: 16000, + } satisfies Model<"openai-completions">, + "deepseek/deepseek-r1-0528": { + id: "deepseek/deepseek-r1-0528", + name: "DeepSeek: R1 0528", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.44999999999999996, + output: 2.1500000000000004, + cacheRead: 0.22499999999999998, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "deepseek/deepseek-v3.1-terminus": { + id: "deepseek/deepseek-v3.1-terminus", + name: "DeepSeek: DeepSeek V3.1 Terminus", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.21, + output: 0.7899999999999999, + cacheRead: 0.1300000002, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 4096, + } satisfies 
Model<"openai-completions">, + "deepseek/deepseek-v3.1-terminus:exacto": { + id: "deepseek/deepseek-v3.1-terminus:exacto", + name: "DeepSeek: DeepSeek V3.1 Terminus (exacto)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.21, + output: 0.7899999999999999, + cacheRead: 0.16799999999999998, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "deepseek/deepseek-v3.2": { + id: "deepseek/deepseek-v3.2", + name: "DeepSeek: DeepSeek V3.2", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.25, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "deepseek/deepseek-v3.2-exp": { + id: "deepseek/deepseek-v3.2-exp", + name: "DeepSeek: DeepSeek V3.2 Exp", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.27, + output: 0.41, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "essentialai/rnj-1-instruct": { + id: "essentialai/rnj-1-instruct", + name: "EssentialAI: Rnj 1 Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.15, + output: 0.15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "google/gemini-2.0-flash-001": { + id: "google/gemini-2.0-flash-001", + name: "Google: Gemini 2.0 Flash", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", 
"image"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0.024999999999999998, + cacheWrite: 0.08333333333333334, + }, + contextWindow: 1048576, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "google/gemini-2.0-flash-lite-001": { + id: "google/gemini-2.0-flash-lite-001", + name: "Google: Gemini 2.0 Flash Lite", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.075, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "google/gemini-2.5-flash": { + id: "google/gemini-2.5-flash", + name: "Google: Gemini 2.5 Flash", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 2.5, + cacheRead: 0.03, + cacheWrite: 0.08333333333333334, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"openai-completions">, + "google/gemini-2.5-flash-lite": { + id: "google/gemini-2.5-flash-lite", + name: "Google: Gemini 2.5 Flash Lite", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0.01, + cacheWrite: 0.08333333333333334, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"openai-completions">, + "google/gemini-2.5-flash-lite-preview-09-2025": { + id: "google/gemini-2.5-flash-lite-preview-09-2025", + name: "Google: Gemini 2.5 Flash Lite Preview 09-2025", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0.01, + 
cacheWrite: 0.08333333333333334, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "google/gemini-2.5-pro": { + id: "google/gemini-2.5-pro", + name: "Google: Gemini 2.5 Pro", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0.375, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "google/gemini-2.5-pro-preview": { + id: "google/gemini-2.5-pro-preview", + name: "Google: Gemini 2.5 Pro Preview 06-05", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0.375, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "google/gemini-2.5-pro-preview-05-06": { + id: "google/gemini-2.5-pro-preview-05-06", + name: "Google: Gemini 2.5 Pro Preview 05-06", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0.375, + }, + contextWindow: 1048576, + maxTokens: 65535, + } satisfies Model<"openai-completions">, + "google/gemini-3-flash-preview": { + id: "google/gemini-3-flash-preview", + name: "Google: Gemini 3 Flash Preview", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.5, + output: 3, + cacheRead: 0.049999999999999996, + cacheWrite: 0.08333333333333334, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "google/gemini-3-pro-preview": { + id: "google/gemini-3-pro-preview", + name: "Google: Gemini 3 
Pro Preview", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.19999999999999998, + cacheWrite: 0.375, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "google/gemini-3.1-flash-lite-preview": { + id: "google/gemini-3.1-flash-lite-preview", + name: "Google: Gemini 3.1 Flash Lite Preview", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 1.5, + cacheRead: 0.024999999999999998, + cacheWrite: 0.08333333333333334, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "google/gemini-3.1-pro-preview": { + id: "google/gemini-3.1-pro-preview", + name: "Google: Gemini 3.1 Pro Preview", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.19999999999999998, + cacheWrite: 0.375, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "google/gemini-3.1-pro-preview-customtools": { + id: "google/gemini-3.1-pro-preview-customtools", + name: "Google: Gemini 3.1 Pro Preview Custom Tools", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.19999999999999998, + cacheWrite: 0.375, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "google/gemma-3-27b-it": { + id: "google/gemma-3-27b-it", + name: "Google: Gemma 3 27B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", 
"image"], + cost: { + input: 0.04, + output: 0.15, + cacheRead: 0.02, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "google/gemma-3-27b-it:free": { + id: "google/gemma-3-27b-it:free", + name: "Google: Gemma 3 27B (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "inception/mercury": { + id: "inception/mercury", + name: "Inception: Mercury", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.25, + output: 0.75, + cacheRead: 0.024999999999999998, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32000, + } satisfies Model<"openai-completions">, + "inception/mercury-2": { + id: "inception/mercury-2", + name: "Inception: Mercury 2", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.25, + output: 0.75, + cacheRead: 0.024999999999999998, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 50000, + } satisfies Model<"openai-completions">, + "inception/mercury-coder": { + id: "inception/mercury-coder", + name: "Inception: Mercury Coder", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.25, + output: 0.75, + cacheRead: 0.024999999999999998, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32000, + } satisfies Model<"openai-completions">, + "kwaipilot/kat-coder-pro": { + id: "kwaipilot/kat-coder-pro", + name: "Kwaipilot: KAT-Coder-Pro V1", + api: "openai-completions", + provider: "openrouter", + 
baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.207, + output: 0.828, + cacheRead: 0.0414, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "meituan/longcat-flash-chat": { + id: "meituan/longcat-flash-chat", + name: "Meituan: LongCat Flash Chat", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 0.7999999999999999, + cacheRead: 0.19999999999999998, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "meta-llama/llama-3-8b-instruct": { + id: "meta-llama/llama-3-8b-instruct", + name: "Meta: Llama 3 8B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.03, + output: 0.04, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "meta-llama/llama-3.1-405b-instruct": { + id: "meta-llama/llama-3.1-405b-instruct", + name: "Meta: Llama 3.1 405B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 4, + output: 4, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "meta-llama/llama-3.1-70b-instruct": { + id: "meta-llama/llama-3.1-70b-instruct", + name: "Meta: Llama 3.1 70B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.39999999999999997, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies 
Model<"openai-completions">, + "meta-llama/llama-3.1-8b-instruct": { + id: "meta-llama/llama-3.1-8b-instruct", + name: "Meta: Llama 3.1 8B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.02, + output: 0.049999999999999996, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 16384, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "meta-llama/llama-3.3-70b-instruct": { + id: "meta-llama/llama-3.3-70b-instruct", + name: "Meta: Llama 3.3 70B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.32, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "meta-llama/llama-3.3-70b-instruct:free": { + id: "meta-llama/llama-3.3-70b-instruct:free", + name: "Meta: Llama 3.3 70B Instruct (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "meta-llama/llama-4-maverick": { + id: "meta-llama/llama-4-maverick", + name: "Meta: Llama 4 Maverick", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "meta-llama/llama-4-scout": { + id: "meta-llama/llama-4-scout", + name: "Meta: Llama 4 Scout", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: 
["text", "image"], + cost: { + input: 0.08, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 327680, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "minimax/minimax-m1": { + id: "minimax/minimax-m1", + name: "MiniMax: MiniMax M1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.39999999999999997, + output: 2.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 40000, + } satisfies Model<"openai-completions">, + "minimax/minimax-m2": { + id: "minimax/minimax-m2", + name: "MiniMax: MiniMax M2", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.255, + output: 1, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 196608, + maxTokens: 196608, + } satisfies Model<"openai-completions">, + "minimax/minimax-m2.1": { + id: "minimax/minimax-m2.1", + name: "MiniMax: MiniMax M2.1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.27, + output: 0.95, + cacheRead: 0.0290000007, + cacheWrite: 0, + }, + contextWindow: 196608, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "minimax/minimax-m2.5": { + id: "minimax/minimax-m2.5", + name: "MiniMax: MiniMax M2.5", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.295, + output: 1.2, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 196608, + maxTokens: 196608, + } satisfies Model<"openai-completions">, + "mistralai/codestral-2508": { + id: "mistralai/codestral-2508", + name: "Mistral: Codestral 2508", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, 
+ input: ["text"], + cost: { + input: 0.3, + output: 0.8999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/devstral-2512": { + id: "mistralai/devstral-2512", + name: "Mistral: Devstral 2 2512", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.39999999999999997, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/devstral-medium": { + id: "mistralai/devstral-medium", + name: "Mistral: Devstral Medium", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.39999999999999997, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/devstral-small": { + id: "mistralai/devstral-small", + name: "Mistral: Devstral Small 1.1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/ministral-14b-2512": { + id: "mistralai/ministral-14b-2512", + name: "Mistral: Ministral 3 14B 2512", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.19999999999999998, + output: 0.19999999999999998, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/ministral-3b-2512": { + id: "mistralai/ministral-3b-2512", + name: 
"Mistral: Ministral 3 3B 2512", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.09999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/ministral-8b-2512": { + id: "mistralai/ministral-8b-2512", + name: "Mistral: Ministral 3 8B 2512", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mistral-large": { + id: "mistralai/mistral-large", + name: "Mistral Large", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mistral-large-2407": { + id: "mistralai/mistral-large-2407", + name: "Mistral Large 2407", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mistral-large-2411": { + id: "mistralai/mistral-large-2411", + name: "Mistral Large 2411", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + 
"mistralai/mistral-large-2512": { + id: "mistralai/mistral-large-2512", + name: "Mistral: Mistral Large 3 2512", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.5, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mistral-medium-3": { + id: "mistralai/mistral-medium-3", + name: "Mistral: Mistral Medium 3", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.39999999999999997, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mistral-medium-3.1": { + id: "mistralai/mistral-medium-3.1", + name: "Mistral: Mistral Medium 3.1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.39999999999999997, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mistral-nemo": { + id: "mistralai/mistral-nemo", + name: "Mistral: Mistral Nemo", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.02, + output: 0.04, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "mistralai/mistral-saba": { + id: "mistralai/mistral-saba", + name: "Mistral: Saba", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 0.6, + cacheRead: 0, + cacheWrite: 
0, + }, + contextWindow: 32768, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mistral-small-24b-instruct-2501": { + id: "mistralai/mistral-small-24b-instruct-2501", + name: "Mistral: Mistral Small 3", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.049999999999999996, + output: 0.08, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "mistralai/mistral-small-3.1-24b-instruct:free": { + id: "mistralai/mistral-small-3.1-24b-instruct:free", + name: "Mistral: Mistral Small 3.1 24B (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mistral-small-3.2-24b-instruct": { + id: "mistralai/mistral-small-3.2-24b-instruct", + name: "Mistral: Mistral Small 3.2 24B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.06, + output: 0.18, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "mistralai/mistral-small-creative": { + id: "mistralai/mistral-small-creative", + name: "Mistral: Mistral Small Creative", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mixtral-8x22b-instruct": { + id: "mistralai/mixtral-8x22b-instruct", + name: 
"Mistral: Mixtral 8x22B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 65536, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/mixtral-8x7b-instruct": { + id: "mistralai/mixtral-8x7b-instruct", + name: "Mistral: Mixtral 8x7B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.54, + output: 0.54, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "mistralai/pixtral-large-2411": { + id: "mistralai/pixtral-large-2411", + name: "Mistral: Pixtral Large 2411", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "mistralai/voxtral-small-24b-2507": { + id: "mistralai/voxtral-small-24b-2507", + name: "Mistral: Voxtral Small 24B 2507", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "moonshotai/kimi-k2": { + id: "moonshotai/kimi-k2", + name: "MoonshotAI: Kimi K2 0711", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.55, + output: 2.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131000, + maxTokens: 4096, + } satisfies 
Model<"openai-completions">, + "moonshotai/kimi-k2-0905": { + id: "moonshotai/kimi-k2-0905", + name: "MoonshotAI: Kimi K2 0905", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.39999999999999997, + output: 2, + cacheRead: 0.15, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "moonshotai/kimi-k2-0905:exacto": { + id: "moonshotai/kimi-k2-0905:exacto", + name: "MoonshotAI: Kimi K2 0905 (exacto)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.6, + output: 2.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "moonshotai/kimi-k2-thinking": { + id: "moonshotai/kimi-k2-thinking", + name: "MoonshotAI: Kimi K2 Thinking", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.47, + output: 2, + cacheRead: 0.14100000000000001, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "moonshotai/kimi-k2.5": { + id: "moonshotai/kimi-k2.5", + name: "MoonshotAI: Kimi K2.5", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.41, + output: 2.06, + cacheRead: 0.07, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "nex-agi/deepseek-v3.1-nex-n1": { + id: "nex-agi/deepseek-v3.1-nex-n1", + name: "Nex AGI: DeepSeek V3.1 Nex N1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.27, + output: 1, + 
cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 163840, + } satisfies Model<"openai-completions">, + "nvidia/llama-3.1-nemotron-70b-instruct": { + id: "nvidia/llama-3.1-nemotron-70b-instruct", + name: "NVIDIA: Llama 3.1 Nemotron 70B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1.2, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "nvidia/llama-3.3-nemotron-super-49b-v1.5": { + id: "nvidia/llama-3.3-nemotron-super-49b-v1.5", + name: "NVIDIA: Llama 3.3 Nemotron Super 49B V1.5", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "nvidia/nemotron-3-nano-30b-a3b": { + id: "nvidia/nemotron-3-nano-30b-a3b", + name: "NVIDIA: Nemotron 3 Nano 30B A3B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.049999999999999996, + output: 0.19999999999999998, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "nvidia/nemotron-3-nano-30b-a3b:free": { + id: "nvidia/nemotron-3-nano-30b-a3b:free", + name: "NVIDIA: Nemotron 3 Nano 30B A3B (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "nvidia/nemotron-nano-12b-v2-vl:free": { + id: 
"nvidia/nemotron-nano-12b-v2-vl:free", + name: "NVIDIA: Nemotron Nano 12B 2 VL (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "nvidia/nemotron-nano-9b-v2": { + id: "nvidia/nemotron-nano-9b-v2", + name: "NVIDIA: Nemotron Nano 9B V2", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.04, + output: 0.16, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "nvidia/nemotron-nano-9b-v2:free": { + id: "nvidia/nemotron-nano-9b-v2:free", + name: "NVIDIA: Nemotron Nano 9B V2 (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-3.5-turbo": { + id: "openai/gpt-3.5-turbo", + name: "OpenAI: GPT-3.5 Turbo", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.5, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 16385, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-3.5-turbo-0613": { + id: "openai/gpt-3.5-turbo-0613", + name: "OpenAI: GPT-3.5 Turbo (older v0613)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 4095, + maxTokens: 4096, + } satisfies 
Model<"openai-completions">, + "openai/gpt-3.5-turbo-16k": { + id: "openai/gpt-3.5-turbo-16k", + name: "OpenAI: GPT-3.5 Turbo 16k", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 3, + output: 4, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 16385, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-4": { + id: "openai/gpt-4", + name: "OpenAI: GPT-4", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 30, + output: 60, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8191, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-4-0314": { + id: "openai/gpt-4-0314", + name: "OpenAI: GPT-4 (older v0314)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 30, + output: 60, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8191, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-4-1106-preview": { + id: "openai/gpt-4-1106-preview", + name: "OpenAI: GPT-4 Turbo (older v1106)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 10, + output: 30, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-4-turbo": { + id: "openai/gpt-4-turbo", + name: "OpenAI: GPT-4 Turbo", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 10, + output: 30, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + 
"openai/gpt-4-turbo-preview": { + id: "openai/gpt-4-turbo-preview", + name: "OpenAI: GPT-4 Turbo Preview", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 10, + output: 30, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-4.1": { + id: "openai/gpt-4.1", + name: "OpenAI: GPT-4.1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "openai/gpt-4.1-mini": { + id: "openai/gpt-4.1-mini", + name: "OpenAI: GPT-4.1 Mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.39999999999999997, + output: 1.5999999999999999, + cacheRead: 0.09999999999999999, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "openai/gpt-4.1-nano": { + id: "openai/gpt-4.1-nano", + name: "OpenAI: GPT-4.1 Nano", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0.024999999999999998, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "openai/gpt-4o": { + id: "openai/gpt-4o", + name: "OpenAI: GPT-4o", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + 
contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-4o-2024-05-13": { + id: "openai/gpt-4o-2024-05-13", + name: "OpenAI: GPT-4o (2024-05-13)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 5, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-4o-2024-08-06": { + id: "openai/gpt-4o-2024-08-06", + name: "OpenAI: GPT-4o (2024-08-06)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-4o-2024-11-20": { + id: "openai/gpt-4o-2024-11-20", + name: "OpenAI: GPT-4o (2024-11-20)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-4o-audio-preview": { + id: "openai/gpt-4o-audio-preview", + name: "OpenAI: GPT-4o Audio", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2.5, + output: 10, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-4o-mini": { + id: "openai/gpt-4o-mini", + name: "OpenAI: GPT-4o-mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + 
output: 0.6, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-4o-mini-2024-07-18": { + id: "openai/gpt-4o-mini-2024-07-18", + name: "OpenAI: GPT-4o-mini (2024-07-18)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-4o:extended": { + id: "openai/gpt-4o:extended", + name: "OpenAI: GPT-4o (extended)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 6, + output: 18, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "openai/gpt-5": { + id: "openai/gpt-5", + name: "OpenAI: GPT-5", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5-codex": { + id: "openai/gpt-5-codex", + name: "OpenAI: GPT-5 Codex", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5-image": { + id: "openai/gpt-5-image", + name: "OpenAI: GPT-5 Image", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + 
cost: { + input: 10, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5-image-mini": { + id: "openai/gpt-5-image-mini", + name: "OpenAI: GPT-5 Image Mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2.5, + output: 2, + cacheRead: 0.25, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5-mini": { + id: "openai/gpt-5-mini", + name: "OpenAI: GPT-5 Mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.024999999999999998, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5-nano": { + id: "openai/gpt-5-nano", + name: "OpenAI: GPT-5 Nano", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.049999999999999996, + output: 0.39999999999999997, + cacheRead: 0.005, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5-pro": { + id: "openai/gpt-5-pro", + name: "OpenAI: GPT-5 Pro", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 120, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.1": { + id: "openai/gpt-5.1", + name: "OpenAI: GPT-5.1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + 
input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.1-chat": { + id: "openai/gpt-5.1-chat", + name: "OpenAI: GPT-5.1 Chat", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-5.1-codex": { + id: "openai/gpt-5.1-codex", + name: "OpenAI: GPT-5.1-Codex", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.1-codex-max": { + id: "openai/gpt-5.1-codex-max", + name: "OpenAI: GPT-5.1-Codex-Max", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.1-codex-mini": { + id: "openai/gpt-5.1-codex-mini", + name: "OpenAI: GPT-5.1-Codex-Mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.024999999999999998, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.2": { + id: "openai/gpt-5.2", + name: "OpenAI: GPT-5.2", + api: "openai-completions", + provider: "openrouter", + baseUrl: 
"https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.2-chat": { + id: "openai/gpt-5.2-chat", + name: "OpenAI: GPT-5.2 Chat", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-5.2-codex": { + id: "openai/gpt-5.2-codex", + name: "OpenAI: GPT-5.2-Codex", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.2-pro": { + id: "openai/gpt-5.2-pro", + name: "OpenAI: GPT-5.2 Pro", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 21, + output: 168, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.3-chat": { + id: "openai/gpt-5.3-chat", + name: "OpenAI: GPT-5.3 Chat", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "openai/gpt-5.3-codex": { + id: "openai/gpt-5.3-codex", + name: "OpenAI: GPT-5.3-Codex", + api: "openai-completions", + provider: 
"openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.4": { + id: "openai/gpt-5.4", + name: "OpenAI: GPT-5.4", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2.5, + output: 15, + cacheRead: 0.25, + cacheWrite: 0, + }, + contextWindow: 1050000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-5.4-pro": { + id: "openai/gpt-5.4-pro", + name: "OpenAI: GPT-5.4 Pro", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 30, + output: 180, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1050000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "openai/gpt-oss-120b": { + id: "openai/gpt-oss-120b", + name: "OpenAI: gpt-oss-120b", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.039, + output: 0.19, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-oss-120b:exacto": { + id: "openai/gpt-oss-120b:exacto", + name: "OpenAI: gpt-oss-120b (exacto)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.039, + output: 0.19, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-oss-120b:free": { + id: "openai/gpt-oss-120b:free", + name: "OpenAI: gpt-oss-120b (free)", + api: "openai-completions", + 
provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "openai/gpt-oss-20b": { + id: "openai/gpt-oss-20b", + name: "OpenAI: gpt-oss-20b", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.03, + output: 0.14, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openai/gpt-oss-20b:free": { + id: "openai/gpt-oss-20b:free", + name: "OpenAI: gpt-oss-20b (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "openai/gpt-oss-safeguard-20b": { + id: "openai/gpt-oss-safeguard-20b", + name: "OpenAI: gpt-oss-safeguard-20b", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.075, + output: 0.3, + cacheRead: 0.037, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "openai/o1": { + id: "openai/o1", + name: "OpenAI: o1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 15, + output: 60, + cacheRead: 7.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openai/o3": { + id: "openai/o3", + name: "OpenAI: o3", + api: "openai-completions", + provider: "openrouter", + baseUrl: 
"https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openai/o3-deep-research": { + id: "openai/o3-deep-research", + name: "OpenAI: o3 Deep Research", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 10, + output: 40, + cacheRead: 2.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openai/o3-mini": { + id: "openai/o3-mini", + name: "OpenAI: o3 Mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.55, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openai/o3-mini-high": { + id: "openai/o3-mini-high", + name: "OpenAI: o3 Mini High", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.55, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openai/o3-pro": { + id: "openai/o3-pro", + name: "OpenAI: o3 Pro", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 20, + output: 80, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openai/o4-mini": { + id: "openai/o4-mini", + name: "OpenAI: o4 Mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + 
input: ["text", "image"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.275, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openai/o4-mini-deep-research": { + id: "openai/o4-mini-deep-research", + name: "OpenAI: o4 Mini Deep Research", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openai/o4-mini-high": { + id: "openai/o4-mini-high", + name: "OpenAI: o4 Mini High", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.275, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"openai-completions">, + "openrouter/auto": { + id: "openrouter/auto", + name: "Auto Router", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: -1000000, + output: -1000000, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "openrouter/free": { + id: "openrouter/free", + name: "Free Models Router", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "prime-intellect/intellect-3": { + id: "prime-intellect/intellect-3", + name: "Prime Intellect: INTELLECT-3", + api: "openai-completions", + provider: "openrouter", + baseUrl: 
"https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 1.1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "qwen/qwen-2.5-72b-instruct": { + id: "qwen/qwen-2.5-72b-instruct", + name: "Qwen2.5 72B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.12, + output: 0.39, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "qwen/qwen-2.5-7b-instruct": { + id: "qwen/qwen-2.5-7b-instruct", + name: "Qwen: Qwen2.5 7B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.04, + output: 0.09999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "qwen/qwen-max": { + id: "qwen/qwen-max", + name: "Qwen: Qwen-Max ", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1.04, + output: 4.16, + cacheRead: 0.20800000000000002, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "qwen/qwen-plus": { + id: "qwen/qwen-plus", + name: "Qwen: Qwen-Plus", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.39999999999999997, + output: 1.2, + cacheRead: 0.08, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen-plus-2025-07-28": { + id: "qwen/qwen-plus-2025-07-28", + name: "Qwen: Qwen Plus 0728", + api: "openai-completions", + 
provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.26, + output: 0.78, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen-plus-2025-07-28:thinking": { + id: "qwen/qwen-plus-2025-07-28:thinking", + name: "Qwen: Qwen Plus 0728 (thinking)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.26, + output: 0.78, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen-turbo": { + id: "qwen/qwen-turbo", + name: "Qwen: Qwen-Turbo", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.0325, + output: 0.13, + cacheRead: 0.006500000000000001, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "qwen/qwen-vl-max": { + id: "qwen/qwen-vl-max", + name: "Qwen: Qwen VL Max", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.7999999999999999, + output: 3.1999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3-14b": { + id: "qwen/qwen3-14b", + name: "Qwen: Qwen3 14B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.06, + output: 0.24, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 40960, + maxTokens: 40960, + } satisfies Model<"openai-completions">, + "qwen/qwen3-235b-a22b": { + id: "qwen/qwen3-235b-a22b", + name: "Qwen: Qwen3 235B A22B", + api: 
"openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.45499999999999996, + output: 1.8199999999999998, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "qwen/qwen3-235b-a22b-2507": { + id: "qwen/qwen3-235b-a22b-2507", + name: "Qwen: Qwen3 235B A22B Instruct 2507", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.071, + output: 0.09999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "qwen/qwen3-235b-a22b-thinking-2507": { + id: "qwen/qwen3-235b-a22b-thinking-2507", + name: "Qwen: Qwen3 235B A22B Thinking 2507", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.11, + output: 0.6, + cacheRead: 0.055, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 262144, + } satisfies Model<"openai-completions">, + "qwen/qwen3-30b-a3b": { + id: "qwen/qwen3-30b-a3b", + name: "Qwen: Qwen3 30B A3B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.08, + output: 0.28, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 40960, + maxTokens: 40960, + } satisfies Model<"openai-completions">, + "qwen/qwen3-30b-a3b-instruct-2507": { + id: "qwen/qwen3-30b-a3b-instruct-2507", + name: "Qwen: Qwen3 30B A3B Instruct 2507", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.09, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 262144, + } satisfies 
Model<"openai-completions">, + "qwen/qwen3-30b-a3b-thinking-2507": { + id: "qwen/qwen3-30b-a3b-thinking-2507", + name: "Qwen: Qwen3 30B A3B Thinking 2507", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.051, + output: 0.33999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "qwen/qwen3-32b": { + id: "qwen/qwen3-32b", + name: "Qwen: Qwen3 32B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.08, + output: 0.24, + cacheRead: 0.04, + cacheWrite: 0, + }, + contextWindow: 40960, + maxTokens: 40960, + } satisfies Model<"openai-completions">, + "qwen/qwen3-4b:free": { + id: "qwen/qwen3-4b:free", + name: "Qwen: Qwen3 4B (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 40960, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "qwen/qwen3-8b": { + id: "qwen/qwen3-8b", + name: "Qwen: Qwen3 8B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.049999999999999996, + output: 0.39999999999999997, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 40960, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "qwen/qwen3-coder": { + id: "qwen/qwen3-coder", + name: "Qwen: Qwen3 Coder 480B A35B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.22, + output: 1, + cacheRead: 0.022, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 
4096, + } satisfies Model<"openai-completions">, + "qwen/qwen3-coder-30b-a3b-instruct": { + id: "qwen/qwen3-coder-30b-a3b-instruct", + name: "Qwen: Qwen3 Coder 30B A3B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.07, + output: 0.27, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 160000, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3-coder-flash": { + id: "qwen/qwen3-coder-flash", + name: "Qwen: Qwen3 Coder Flash", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.195, + output: 0.975, + cacheRead: 0.039, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwen3-coder-next": { + id: "qwen/qwen3-coder-next", + name: "Qwen: Qwen3 Coder Next", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.12, + output: 0.75, + cacheRead: 0.06, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwen3-coder-plus": { + id: "qwen/qwen3-coder-plus", + name: "Qwen: Qwen3 Coder Plus", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.65, + output: 3.25, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwen3-coder:exacto": { + id: "qwen/qwen3-coder:exacto", + name: "Qwen: Qwen3 Coder 480B A35B (exacto)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.22, + output: 1.7999999999999998, + 
cacheRead: 0.022, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwen3-coder:free": { + id: "qwen/qwen3-coder:free", + name: "Qwen: Qwen3 Coder 480B A35B (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262000, + maxTokens: 262000, + } satisfies Model<"openai-completions">, + "qwen/qwen3-max": { + id: "qwen/qwen3-max", + name: "Qwen: Qwen3 Max", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1.2, + output: 6, + cacheRead: 0.24, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3-max-thinking": { + id: "qwen/qwen3-max-thinking", + name: "Qwen: Qwen3 Max Thinking", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.78, + output: 3.9, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3-next-80b-a3b-instruct": { + id: "qwen/qwen3-next-80b-a3b-instruct", + name: "Qwen: Qwen3 Next 80B A3B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.09, + output: 1.1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "qwen/qwen3-next-80b-a3b-instruct:free": { + id: "qwen/qwen3-next-80b-a3b-instruct:free", + name: "Qwen: Qwen3 Next 80B A3B Instruct (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: 
false, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "qwen/qwen3-next-80b-a3b-thinking": { + id: "qwen/qwen3-next-80b-a3b-thinking", + name: "Qwen: Qwen3 Next 80B A3B Thinking", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.15, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "qwen/qwen3-vl-235b-a22b-instruct": { + id: "qwen/qwen3-vl-235b-a22b-instruct", + name: "Qwen: Qwen3 VL 235B A22B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.19999999999999998, + output: 0.88, + cacheRead: 0.11, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "qwen/qwen3-vl-235b-a22b-thinking": { + id: "qwen/qwen3-vl-235b-a22b-thinking", + name: "Qwen: Qwen3 VL 235B A22B Thinking", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3-vl-30b-a3b-instruct": { + id: "qwen/qwen3-vl-30b-a3b-instruct", + name: "Qwen: Qwen3 VL 30B A3B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.13, + output: 0.52, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3-vl-30b-a3b-thinking": { + id: 
"qwen/qwen3-vl-30b-a3b-thinking", + name: "Qwen: Qwen3 VL 30B A3B Thinking", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3-vl-32b-instruct": { + id: "qwen/qwen3-vl-32b-instruct", + name: "Qwen: Qwen3 VL 32B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.10400000000000001, + output: 0.41600000000000004, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3-vl-8b-instruct": { + id: "qwen/qwen3-vl-8b-instruct", + name: "Qwen: Qwen3 VL 8B Instruct", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.08, + output: 0.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3-vl-8b-thinking": { + id: "qwen/qwen3-vl-8b-thinking", + name: "Qwen: Qwen3 VL 8B Thinking", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.117, + output: 1.365, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "qwen/qwen3.5-122b-a10b": { + id: "qwen/qwen3.5-122b-a10b", + name: "Qwen: Qwen3.5-122B-A10B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.26, + output: 2.08, + cacheRead: 0, + cacheWrite: 0, + }, + 
contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwen3.5-27b": { + id: "qwen/qwen3.5-27b", + name: "Qwen: Qwen3.5-27B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.195, + output: 1.56, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwen3.5-35b-a3b": { + id: "qwen/qwen3.5-35b-a3b", + name: "Qwen: Qwen3.5-35B-A3B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.1625, + output: 1.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwen3.5-397b-a17b": { + id: "qwen/qwen3.5-397b-a17b", + name: "Qwen: Qwen3.5 397B A17B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.39, + output: 2.34, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwen3.5-flash-02-23": { + id: "qwen/qwen3.5-flash-02-23", + name: "Qwen: Qwen3.5-Flash", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwen3.5-plus-02-15": { + id: "qwen/qwen3.5-plus-02-15", + name: "Qwen: Qwen3.5 Plus 2026-02-15", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 
0.26, + output: 1.56, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "qwen/qwq-32b": { + id: "qwen/qwq-32b", + name: "Qwen: QwQ 32B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.15, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "relace/relace-search": { + id: "relace/relace-search", + name: "Relace: Relace Search", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1, + output: 3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 128000, + } satisfies Model<"openai-completions">, + "sao10k/l3-euryale-70b": { + id: "sao10k/l3-euryale-70b", + name: "Sao10k: Llama 3 Euryale 70B v2.1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 1.48, + output: 1.48, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "sao10k/l3.1-euryale-70b": { + id: "sao10k/l3.1-euryale-70b", + name: "Sao10K: Llama 3.1 Euryale 70B v2.2", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.65, + output: 0.75, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "stepfun/step-3.5-flash": { + id: "stepfun/step-3.5-flash", + name: "StepFun: Step 3.5 Flash", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 
0.09999999999999999, + output: 0.3, + cacheRead: 0.02, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"openai-completions">, + "stepfun/step-3.5-flash:free": { + id: "stepfun/step-3.5-flash:free", + name: "StepFun: Step 3.5 Flash (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"openai-completions">, + "thedrummer/rocinante-12b": { + id: "thedrummer/rocinante-12b", + name: "TheDrummer: Rocinante 12B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.16999999999999998, + output: 0.43, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "thedrummer/unslopnemo-12b": { + id: "thedrummer/unslopnemo-12b", + name: "TheDrummer: UnslopNemo 12B", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 0.39999999999999997, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "tngtech/deepseek-r1t2-chimera": { + id: "tngtech/deepseek-r1t2-chimera", + name: "TNG: DeepSeek R1T2 Chimera", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.25, + output: 0.85, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 163840, + } satisfies Model<"openai-completions">, + "upstage/solar-pro-3": { + id: "upstage/solar-pro-3", + name: "Upstage: Solar Pro 3", + api: "openai-completions", + provider: 
"openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0.015, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "x-ai/grok-3": { + id: "x-ai/grok-3", + name: "xAI: Grok 3", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 3, + output: 15, + cacheRead: 0.75, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "x-ai/grok-3-beta": { + id: "x-ai/grok-3-beta", + name: "xAI: Grok 3 Beta", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: false, + input: ["text"], + cost: { + input: 3, + output: 15, + cacheRead: 0.75, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "x-ai/grok-3-mini": { + id: "x-ai/grok-3-mini", + name: "xAI: Grok 3 Mini", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 0.5, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "x-ai/grok-3-mini-beta": { + id: "x-ai/grok-3-mini-beta", + name: "xAI: Grok 3 Mini Beta", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 0.5, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "x-ai/grok-4": { + id: "x-ai/grok-4", + name: "xAI: Grok 4", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + 
cost: { + input: 3, + output: 15, + cacheRead: 0.75, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "x-ai/grok-4-fast": { + id: "x-ai/grok-4-fast", + name: "xAI: Grok 4 Fast", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.19999999999999998, + output: 0.5, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 30000, + } satisfies Model<"openai-completions">, + "x-ai/grok-4.1-fast": { + id: "x-ai/grok-4.1-fast", + name: "xAI: Grok 4.1 Fast", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.19999999999999998, + output: 0.5, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 30000, + } satisfies Model<"openai-completions">, + "x-ai/grok-code-fast-1": { + id: "x-ai/grok-code-fast-1", + name: "xAI: Grok Code Fast 1", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 1.5, + cacheRead: 0.02, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 10000, + } satisfies Model<"openai-completions">, + "xiaomi/mimo-v2-flash": { + id: "xiaomi/mimo-v2-flash", + name: "Xiaomi: MiMo-V2-Flash", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.09, + output: 0.29, + cacheRead: 0.045, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 65536, + } satisfies Model<"openai-completions">, + "z-ai/glm-4-32b": { + id: "z-ai/glm-4-32b", + name: "Z.ai: GLM 4 32B ", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", 
+ reasoning: false, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.09999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "z-ai/glm-4.5": { + id: "z-ai/glm-4.5", + name: "Z.ai: GLM 4.5", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.2, + cacheRead: 0.11, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 98304, + } satisfies Model<"openai-completions">, + "z-ai/glm-4.5-air": { + id: "z-ai/glm-4.5-air", + name: "Z.ai: GLM 4.5 Air", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.13, + output: 0.85, + cacheRead: 0.024999999999999998, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 98304, + } satisfies Model<"openai-completions">, + "z-ai/glm-4.5-air:free": { + id: "z-ai/glm-4.5-air:free", + name: "Z.ai: GLM 4.5 Air (free)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 96000, + } satisfies Model<"openai-completions">, + "z-ai/glm-4.5v": { + id: "z-ai/glm-4.5v", + name: "Z.ai: GLM 4.5V", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.6, + output: 1.7999999999999998, + cacheRead: 0.11, + cacheWrite: 0, + }, + contextWindow: 65536, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "z-ai/glm-4.6": { + id: "z-ai/glm-4.6", + name: "Z.ai: GLM 4.6", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], 
+ cost: { + input: 0.39, + output: 1.9, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 204800, + } satisfies Model<"openai-completions">, + "z-ai/glm-4.6:exacto": { + id: "z-ai/glm-4.6:exacto", + name: "Z.ai: GLM 4.6 (exacto)", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.44, + output: 1.76, + cacheRead: 0.11, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "z-ai/glm-4.6v": { + id: "z-ai/glm-4.6v", + name: "Z.ai: GLM 4.6V", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 0.8999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "z-ai/glm-4.7": { + id: "z-ai/glm-4.7", + name: "Z.ai: GLM 4.7", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.38, + output: 1.9800000000000002, + cacheRead: 0.19, + cacheWrite: 0, + }, + contextWindow: 202752, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "z-ai/glm-4.7-flash": { + id: "z-ai/glm-4.7-flash", + name: "Z.ai: GLM 4.7 Flash", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.06, + output: 0.39999999999999997, + cacheRead: 0.0100000002, + cacheWrite: 0, + }, + contextWindow: 202752, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "z-ai/glm-5": { + id: "z-ai/glm-5", + name: "Z.ai: GLM 5", + api: "openai-completions", + provider: "openrouter", + baseUrl: "https://openrouter.ai/api/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 1.9, + 
cacheRead: 0.119, + cacheWrite: 0, + }, + contextWindow: 202752, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + }, + "vercel-ai-gateway": { + "alibaba/qwen-3-14b": { + id: "alibaba/qwen-3-14b", + name: "Qwen3-14B", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.06, + output: 0.24, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 40960, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen-3-235b": { + id: "alibaba/qwen-3-235b", + name: "Qwen3-235B-A22B", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.071, + output: 0.463, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 40960, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen-3-30b": { + id: "alibaba/qwen-3-30b", + name: "Qwen3-30B-A3B", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.08, + output: 0.29, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 40960, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen-3-32b": { + id: "alibaba/qwen-3-32b", + name: "Qwen 3 32B", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 40960, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3-235b-a22b-thinking": { + id: "alibaba/qwen3-235b-a22b-thinking", + name: "Qwen3 235B A22B Thinking 2507", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + 
cost: { + input: 0.3, + output: 2.9000000000000004, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262114, + maxTokens: 262114, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3-coder": { + id: "alibaba/qwen3-coder", + name: "Qwen3 Coder 480B A35B Instruct", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.39999999999999997, + output: 1.5999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 66536, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3-coder-30b-a3b": { + id: "alibaba/qwen3-coder-30b-a3b", + name: "Qwen 3 Coder 30B A3B Instruct", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.07, + output: 0.27, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 160000, + maxTokens: 32768, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3-coder-next": { + id: "alibaba/qwen3-coder-next", + name: "Qwen3 Coder Next", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.5, + output: 1.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3-coder-plus": { + id: "alibaba/qwen3-coder-plus", + name: "Qwen3 Coder Plus", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 1, + output: 5, + cacheRead: 0.19999999999999998, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 65536, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3-max-preview": { + id: "alibaba/qwen3-max-preview", + name: "Qwen3 Max Preview", + api: "anthropic-messages", + provider: 
"vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 1.2, + output: 6, + cacheRead: 0.24, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 32768, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3-max-thinking": { + id: "alibaba/qwen3-max-thinking", + name: "Qwen 3 Max Thinking", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 1.2, + output: 6, + cacheRead: 0.24, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 65536, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3-vl-thinking": { + id: "alibaba/qwen3-vl-thinking", + name: "Qwen3 VL 235B A22B Thinking", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.22, + output: 0.88, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3.5-flash": { + id: "alibaba/qwen3.5-flash", + name: "Qwen 3.5 Flash", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0.001, + cacheWrite: 0.125, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "alibaba/qwen3.5-plus": { + id: "alibaba/qwen3.5-plus", + name: "Qwen 3.5 Plus", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.39999999999999997, + output: 2.4, + cacheRead: 0.04, + cacheWrite: 0.5, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + 
"anthropic/claude-3-haiku": { + id: "anthropic/claude-3-haiku", + name: "Claude 3 Haiku", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.25, + output: 1.25, + cacheRead: 0.03, + cacheWrite: 0.3, + }, + contextWindow: 200000, + maxTokens: 4096, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-3.5-haiku": { + id: "anthropic/claude-3.5-haiku", + name: "Claude 3.5 Haiku", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.7999999999999999, + output: 4, + cacheRead: 0.08, + cacheWrite: 1, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-3.5-sonnet": { + id: "anthropic/claude-3.5-sonnet", + name: "Claude 3.5 Sonnet", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-3.5-sonnet-20240620": { + id: "anthropic/claude-3.5-sonnet-20240620", + name: "Claude 3.5 Sonnet (2024-06-20)", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-3.7-sonnet": { + id: "anthropic/claude-3.7-sonnet", + name: "Claude 3.7 Sonnet", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, 
+ cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 200000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-haiku-4.5": { + id: "anthropic/claude-haiku-4.5", + name: "Claude Haiku 4.5", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1, + output: 5, + cacheRead: 0.09999999999999999, + cacheWrite: 1.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-opus-4": { + id: "anthropic/claude-opus-4", + name: "Claude Opus 4", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-opus-4.1": { + id: "anthropic/claude-opus-4.1", + name: "Claude Opus 4.1", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 75, + cacheRead: 1.5, + cacheWrite: 18.75, + }, + contextWindow: 200000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-opus-4.5": { + id: "anthropic/claude-opus-4.5", + name: "Claude Opus 4.5", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 200000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-opus-4.6": { + id: "anthropic/claude-opus-4.6", + name: "Claude Opus 4.6", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: 
"https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 5, + output: 25, + cacheRead: 0.5, + cacheWrite: 6.25, + }, + contextWindow: 1000000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-sonnet-4": { + id: "anthropic/claude-sonnet-4", + name: "Claude Sonnet 4", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-sonnet-4.5": { + id: "anthropic/claude-sonnet-4.5", + name: "Claude Sonnet 4.5", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "anthropic/claude-sonnet-4.6": { + id: "anthropic/claude-sonnet-4.6", + name: "Claude Sonnet 4.6", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0.3, + cacheWrite: 3.75, + }, + contextWindow: 1000000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "arcee-ai/trinity-large-preview": { + id: "arcee-ai/trinity-large-preview", + name: "Trinity Large Preview", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.25, + output: 1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131000, + maxTokens: 131000, + } satisfies Model<"anthropic-messages">, + "bytedance/seed-1.6": { + id: "bytedance/seed-1.6", + name: "Seed 1.6", 
+ api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "cohere/command-a": { + id: "cohere/command-a", + name: "Command A", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 2.5, + output: 10, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 8000, + } satisfies Model<"anthropic-messages">, + "deepseek/deepseek-v3": { + id: "deepseek/deepseek-v3", + name: "DeepSeek V3 0324", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.77, + output: 0.77, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "deepseek/deepseek-v3.1": { + id: "deepseek/deepseek-v3.1", + name: "DeepSeek-V3.1", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.21, + output: 0.7899999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 163840, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "deepseek/deepseek-v3.1-terminus": { + id: "deepseek/deepseek-v3.1-terminus", + name: "DeepSeek V3.1 Terminus", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.27, + output: 1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 65536, + } satisfies Model<"anthropic-messages">, + "deepseek/deepseek-v3.2": { + id: "deepseek/deepseek-v3.2", + 
name: "DeepSeek V3.2", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.26, + output: 0.38, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 8000, + } satisfies Model<"anthropic-messages">, + "deepseek/deepseek-v3.2-thinking": { + id: "deepseek/deepseek-v3.2-thinking", + name: "DeepSeek V3.2 Thinking", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.28, + output: 0.42, + cacheRead: 0.028, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "google/gemini-2.5-flash": { + id: "google/gemini-2.5-flash", + name: "Gemini 2.5 Flash", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 2.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 65536, + } satisfies Model<"anthropic-messages">, + "google/gemini-2.5-flash-lite": { + id: "google/gemini-2.5-flash-lite", + name: "Gemini 2.5 Flash Lite", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0.01, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"anthropic-messages">, + "google/gemini-2.5-flash-lite-preview-09-2025": { + id: "google/gemini-2.5-flash-lite-preview-09-2025", + name: "Gemini 2.5 Flash Lite Preview 09-2025", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 
0.39999999999999997, + cacheRead: 0.01, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"anthropic-messages">, + "google/gemini-2.5-flash-preview-09-2025": { + id: "google/gemini-2.5-flash-preview-09-2025", + name: "Gemini 2.5 Flash Preview 09-2025", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 2.5, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 65536, + } satisfies Model<"anthropic-messages">, + "google/gemini-2.5-pro": { + id: "google/gemini-2.5-pro", + name: "Gemini 2.5 Pro", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1048576, + maxTokens: 65536, + } satisfies Model<"anthropic-messages">, + "google/gemini-3-flash": { + id: "google/gemini-3-flash", + name: "Gemini 3 Flash", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.5, + output: 3, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "google/gemini-3-pro-preview": { + id: "google/gemini-3-pro-preview", + name: "Gemini 3 Pro Preview", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.19999999999999998, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "google/gemini-3.1-flash-lite-preview": { + id: "google/gemini-3.1-flash-lite-preview", + name: "Gemini 3.1 Flash Lite Preview", + 
api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 65000, + } satisfies Model<"anthropic-messages">, + "google/gemini-3.1-pro-preview": { + id: "google/gemini-3.1-pro-preview", + name: "Gemini 3.1 Pro Preview", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 12, + cacheRead: 0.19999999999999998, + cacheWrite: 0, + }, + contextWindow: 1000000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "inception/mercury-2": { + id: "inception/mercury-2", + name: "Mercury 2", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.25, + output: 0.75, + cacheRead: 0.024999999999999998, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "inception/mercury-coder-small": { + id: "inception/mercury-coder-small", + name: "Mercury Coder Small Beta", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.25, + output: 1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32000, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "meituan/longcat-flash-chat": { + id: "meituan/longcat-flash-chat", + name: "LongCat Flash Chat", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + 
"meituan/longcat-flash-thinking": { + id: "meituan/longcat-flash-thinking", + name: "LongCat Flash Thinking", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.15, + output: 1.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "meta/llama-3.1-70b": { + id: "meta/llama-3.1-70b", + name: "Llama 3.1 70B Instruct", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.39999999999999997, + output: 0.39999999999999997, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "meta/llama-3.1-8b": { + id: "meta/llama-3.1-8b", + name: "Llama 3.1 8B Instruct", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.03, + output: 0.049999999999999996, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "meta/llama-3.2-11b": { + id: "meta/llama-3.2-11b", + name: "Llama 3.2 11B Vision Instruct", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.16, + output: 0.16, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "meta/llama-3.2-90b": { + id: "meta/llama-3.2-90b", + name: "Llama 3.2 90B Vision Instruct", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.72, + output: 0.72, + cacheRead: 0, + cacheWrite: 0, + }, 
+ contextWindow: 128000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "meta/llama-3.3-70b": { + id: "meta/llama-3.3-70b", + name: "Llama 3.3 70B Instruct", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.72, + output: 0.72, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "meta/llama-4-maverick": { + id: "meta/llama-4-maverick", + name: "Llama 4 Maverick 17B Instruct", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "meta/llama-4-scout": { + id: "meta/llama-4-scout", + name: "Llama 4 Scout 17B Instruct", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.08, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "minimax/minimax-m2": { + id: "minimax/minimax-m2", + name: "MiniMax M2", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0.03, + cacheWrite: 0.375, + }, + contextWindow: 205000, + maxTokens: 205000, + } satisfies Model<"anthropic-messages">, + "minimax/minimax-m2.1": { + id: "minimax/minimax-m2.1", + name: "MiniMax M2.1", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0.15, + 
cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "minimax/minimax-m2.1-lightning": { + id: "minimax/minimax-m2.1-lightning", + name: "MiniMax M2.1 Lightning", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 2.4, + cacheRead: 0.03, + cacheWrite: 0.375, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "minimax/minimax-m2.5": { + id: "minimax/minimax-m2.5", + name: "MiniMax M2.5", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 1.2, + cacheRead: 0.03, + cacheWrite: 0.375, + }, + contextWindow: 204800, + maxTokens: 131000, + } satisfies Model<"anthropic-messages">, + "minimax/minimax-m2.5-highspeed": { + id: "minimax/minimax-m2.5-highspeed", + name: "MiniMax M2.5 High Speed", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.4, + cacheRead: 0.03, + cacheWrite: 0.375, + }, + contextWindow: 4096, + maxTokens: 4096, + } satisfies Model<"anthropic-messages">, + "mistral/codestral": { + id: "mistral/codestral", + name: "Mistral Codestral", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.3, + output: 0.8999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4000, + } satisfies Model<"anthropic-messages">, + "mistral/devstral-2": { + id: "mistral/devstral-2", + name: "Devstral 2", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + 
input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"anthropic-messages">, + "mistral/devstral-small": { + id: "mistral/devstral-small", + name: "Devstral Small 1.1", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "mistral/devstral-small-2": { + id: "mistral/devstral-small-2", + name: "Devstral Small 2", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"anthropic-messages">, + "mistral/ministral-3b": { + id: "mistral/ministral-3b", + name: "Ministral 3B", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.04, + output: 0.04, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4000, + } satisfies Model<"anthropic-messages">, + "mistral/ministral-8b": { + id: "mistral/ministral-8b", + name: "Ministral 8B", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.09999999999999999, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4000, + } satisfies Model<"anthropic-messages">, + "mistral/mistral-medium": { + id: "mistral/mistral-medium", + name: "Mistral Medium 3.1", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + 
input: ["text", "image"], + cost: { + input: 0.39999999999999997, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 64000, + } satisfies Model<"anthropic-messages">, + "mistral/mistral-small": { + id: "mistral/mistral-small", + name: "Mistral Small", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32000, + maxTokens: 4000, + } satisfies Model<"anthropic-messages">, + "mistral/pixtral-12b": { + id: "mistral/pixtral-12b", + name: "Pixtral 12B 2409", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4000, + } satisfies Model<"anthropic-messages">, + "mistral/pixtral-large": { + id: "mistral/pixtral-large", + name: "Pixtral Large", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4000, + } satisfies Model<"anthropic-messages">, + "moonshotai/kimi-k2": { + id: "moonshotai/kimi-k2", + name: "Kimi K2", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.5, + output: 2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "moonshotai/kimi-k2-thinking": { + id: "moonshotai/kimi-k2-thinking", + name: "Kimi K2 Thinking", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: 
"https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.47, + output: 2, + cacheRead: 0.14100000000000001, + cacheWrite: 0, + }, + contextWindow: 216144, + maxTokens: 216144, + } satisfies Model<"anthropic-messages">, + "moonshotai/kimi-k2-thinking-turbo": { + id: "moonshotai/kimi-k2-thinking-turbo", + name: "Kimi K2 Thinking Turbo", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 1.15, + output: 8, + cacheRead: 0.15, + cacheWrite: 0, + }, + contextWindow: 262114, + maxTokens: 262114, + } satisfies Model<"anthropic-messages">, + "moonshotai/kimi-k2-turbo": { + id: "moonshotai/kimi-k2-turbo", + name: "Kimi K2 Turbo", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 2.4, + output: 10, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "moonshotai/kimi-k2.5": { + id: "moonshotai/kimi-k2.5", + name: "Kimi K2.5", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.5, + output: 2.8, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"anthropic-messages">, + "nvidia/nemotron-nano-12b-v2-vl": { + id: "nvidia/nemotron-nano-12b-v2-vl", + name: "Nvidia Nemotron Nano 12B V2 VL", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.19999999999999998, + output: 0.6, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "nvidia/nemotron-nano-9b-v2": { + id: 
"nvidia/nemotron-nano-9b-v2", + name: "Nvidia Nemotron Nano 9B V2", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.04, + output: 0.16, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "openai/codex-mini": { + id: "openai/codex-mini", + name: "Codex Mini", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.5, + output: 6, + cacheRead: 0.375, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-4-turbo": { + id: "openai/gpt-4-turbo", + name: "GPT-4 Turbo", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 10, + output: 30, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 4096, + } satisfies Model<"anthropic-messages">, + "openai/gpt-4.1": { + id: "openai/gpt-4.1", + name: "GPT-4.1", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"anthropic-messages">, + "openai/gpt-4.1-mini": { + id: "openai/gpt-4.1-mini", + name: "GPT-4.1 mini", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.39999999999999997, + output: 1.5999999999999999, + cacheRead: 0.09999999999999999, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"anthropic-messages">, + 
"openai/gpt-4.1-nano": { + id: "openai/gpt-4.1-nano", + name: "GPT-4.1 nano", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.09999999999999999, + output: 0.39999999999999997, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 1047576, + maxTokens: 32768, + } satisfies Model<"anthropic-messages">, + "openai/gpt-4o": { + id: "openai/gpt-4o", + name: "GPT-4o", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2.5, + output: 10, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "openai/gpt-4o-mini": { + id: "openai/gpt-4o-mini", + name: "GPT-4o mini", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.15, + output: 0.6, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5": { + id: "openai/gpt-5", + name: "GPT-5", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5-chat": { + id: "openai/gpt-5-chat", + name: "GPT 5 Chat", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + 
"openai/gpt-5-codex": { + id: "openai/gpt-5-codex", + name: "GPT-5-Codex", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5-mini": { + id: "openai/gpt-5-mini", + name: "GPT-5 mini", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5-nano": { + id: "openai/gpt-5-nano", + name: "GPT-5 nano", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.049999999999999996, + output: 0.39999999999999997, + cacheRead: 0.01, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5-pro": { + id: "openai/gpt-5-pro", + name: "GPT-5 pro", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 120, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 272000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.1-codex": { + id: "openai/gpt-5.1-codex", + name: "GPT-5.1-Codex", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies 
Model<"anthropic-messages">, + "openai/gpt-5.1-codex-max": { + id: "openai/gpt-5.1-codex-max", + name: "GPT 5.1 Codex Max", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.125, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.1-codex-mini": { + id: "openai/gpt-5.1-codex-mini", + name: "GPT 5.1 Codex Mini", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.25, + output: 2, + cacheRead: 0.024999999999999998, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.1-instant": { + id: "openai/gpt-5.1-instant", + name: "GPT-5.1 Instant", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.1-thinking": { + id: "openai/gpt-5.1-thinking", + name: "GPT 5.1 Thinking", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.25, + output: 10, + cacheRead: 0.13, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.2": { + id: "openai/gpt-5.2", + name: "GPT 5.2", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.18, + cacheWrite: 0, + }, 
+ contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.2-chat": { + id: "openai/gpt-5.2-chat", + name: "GPT 5.2 Chat", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.2-codex": { + id: "openai/gpt-5.2-codex", + name: "GPT 5.2 Codex", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.2-pro": { + id: "openai/gpt-5.2-pro", + name: "GPT 5.2 ", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 21, + output: 168, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.3-chat": { + id: "openai/gpt-5.3-chat", + name: "GPT-5.3 Chat", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.3-codex": { + id: "openai/gpt-5.3-codex", + name: "GPT 5.3 Codex", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.75, + output: 14, + cacheRead: 0.175, + cacheWrite: 0, + 
}, + contextWindow: 400000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.4": { + id: "openai/gpt-5.4", + name: "GPT 5.4", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2.5, + output: 15, + cacheRead: 0.25, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-5.4-pro": { + id: "openai/gpt-5.4-pro", + name: "GPT 5.4 Pro", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 30, + output: 180, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 1050000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "openai/gpt-oss-120b": { + id: "openai/gpt-oss-120b", + name: "gpt-oss-120b", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.09999999999999999, + output: 0.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "openai/gpt-oss-20b": { + id: "openai/gpt-oss-20b", + name: "gpt-oss-20b", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.07, + output: 0.3, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 8192, + } satisfies Model<"anthropic-messages">, + "openai/gpt-oss-safeguard-20b": { + id: "openai/gpt-oss-safeguard-20b", + name: "gpt-oss-safeguard-20b", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.075, + output: 0.3, + cacheRead: 0.037, + cacheWrite: 0, + }, + 
contextWindow: 131072, + maxTokens: 65536, + } satisfies Model<"anthropic-messages">, + "openai/o1": { + id: "openai/o1", + name: "o1", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 15, + output: 60, + cacheRead: 7.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"anthropic-messages">, + "openai/o3": { + id: "openai/o3", + name: "o3", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 2, + output: 8, + cacheRead: 0.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"anthropic-messages">, + "openai/o3-deep-research": { + id: "openai/o3-deep-research", + name: "o3-deep-research", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 10, + output: 40, + cacheRead: 2.5, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"anthropic-messages">, + "openai/o3-mini": { + id: "openai/o3-mini", + name: "o3-mini", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.55, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"anthropic-messages">, + "openai/o3-pro": { + id: "openai/o3-pro", + name: "o3 Pro", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 20, + output: 80, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"anthropic-messages">, + 
"openai/o4-mini": { + id: "openai/o4-mini", + name: "o4-mini", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 1.1, + output: 4.4, + cacheRead: 0.275, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 100000, + } satisfies Model<"anthropic-messages">, + "perplexity/sonar": { + id: "perplexity/sonar", + name: "Sonar", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 1, + output: 1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 127000, + maxTokens: 8000, + } satisfies Model<"anthropic-messages">, + "perplexity/sonar-pro": { + id: "perplexity/sonar-pro", + name: "Sonar Pro", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 8000, + } satisfies Model<"anthropic-messages">, + "prime-intellect/intellect-3": { + id: "prime-intellect/intellect-3", + name: "INTELLECT 3", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 1.1, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "vercel/v0-1.0-md": { + id: "vercel/v0-1.0-md", + name: "v0-1.0-md", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "vercel/v0-1.5-md": { + id: 
"vercel/v0-1.5-md", + name: "v0-1.5-md", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32768, + } satisfies Model<"anthropic-messages">, + "xai/grok-2-vision": { + id: "xai/grok-2-vision", + name: "Grok 2 Vision", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 10, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 32768, + maxTokens: 32768, + } satisfies Model<"anthropic-messages">, + "xai/grok-3": { + id: "xai/grok-3", + name: "Grok 3 Beta", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 3, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "xai/grok-3-fast": { + id: "xai/grok-3-fast", + name: "Grok 3 Fast Beta", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 5, + output: 25, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "xai/grok-3-mini": { + id: "xai/grok-3-mini", + name: "Grok 3 Mini Beta", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.3, + output: 0.5, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "xai/grok-3-mini-fast": { + id: "xai/grok-3-mini-fast", + name: "Grok 3 Mini Fast Beta", + api: 
"anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.6, + output: 4, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "xai/grok-4": { + id: "xai/grok-4", + name: "Grok 4", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 3, + output: 15, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"anthropic-messages">, + "xai/grok-4-fast-non-reasoning": { + id: "xai/grok-4-fast-non-reasoning", + name: "Grok 4 Fast Non-Reasoning", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 0.5, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 256000, + } satisfies Model<"anthropic-messages">, + "xai/grok-4-fast-reasoning": { + id: "xai/grok-4-fast-reasoning", + name: "Grok 4 Fast Reasoning", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 0.5, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 256000, + } satisfies Model<"anthropic-messages">, + "xai/grok-4.1-fast-non-reasoning": { + id: "xai/grok-4.1-fast-non-reasoning", + name: "Grok 4.1 Fast Non-Reasoning", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: false, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 0.5, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 30000, + 
} satisfies Model<"anthropic-messages">, + "xai/grok-4.1-fast-reasoning": { + id: "xai/grok-4.1-fast-reasoning", + name: "Grok 4.1 Fast Reasoning", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 0.5, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 30000, + } satisfies Model<"anthropic-messages">, + "xai/grok-code-fast-1": { + id: "xai/grok-code-fast-1", + name: "Grok Code Fast 1", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 1.5, + cacheRead: 0.02, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 256000, + } satisfies Model<"anthropic-messages">, + "xiaomi/mimo-v2-flash": { + id: "xiaomi/mimo-v2-flash", + name: "MiMo V2 Flash", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.09, + output: 0.29, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 262144, + maxTokens: 32000, + } satisfies Model<"anthropic-messages">, + "zai/glm-4.5": { + id: "zai/glm-4.5", + name: "GLM-4.5", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.2, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + "zai/glm-4.5-air": { + id: "zai/glm-4.5-air", + name: "GLM 4.5 Air", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.19999999999999998, + output: 1.1, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 
128000, + maxTokens: 96000, + } satisfies Model<"anthropic-messages">, + "zai/glm-4.5v": { + id: "zai/glm-4.5v", + name: "GLM 4.5V", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.6, + output: 1.7999999999999998, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 65536, + maxTokens: 16384, + } satisfies Model<"anthropic-messages">, + "zai/glm-4.6": { + id: "zai/glm-4.6", + name: "GLM 4.6", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.44999999999999996, + output: 1.7999999999999998, + cacheRead: 0.11, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 96000, + } satisfies Model<"anthropic-messages">, + "zai/glm-4.6v": { + id: "zai/glm-4.6v", + name: "GLM-4.6V", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 0.8999999999999999, + cacheRead: 0.049999999999999996, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 24000, + } satisfies Model<"anthropic-messages">, + "zai/glm-4.6v-flash": { + id: "zai/glm-4.6v-flash", + name: "GLM-4.6V-Flash", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 24000, + } satisfies Model<"anthropic-messages">, + "zai/glm-4.7": { + id: "zai/glm-4.7", + name: "GLM 4.7", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.43, + output: 1.75, + cacheRead: 0.08, + cacheWrite: 0, + }, + contextWindow: 202752, + maxTokens: 
120000, + } satisfies Model<"anthropic-messages">, + "zai/glm-4.7-flashx": { + id: "zai/glm-4.7-flashx", + name: "GLM 4.7 FlashX", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 0.06, + output: 0.39999999999999997, + cacheRead: 0.01, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 128000, + } satisfies Model<"anthropic-messages">, + "zai/glm-5": { + id: "zai/glm-5", + name: "GLM-5", + api: "anthropic-messages", + provider: "vercel-ai-gateway", + baseUrl: "https://ai-gateway.vercel.sh", + reasoning: true, + input: ["text"], + cost: { + input: 1, + output: 3.1999999999999997, + cacheRead: 0.19999999999999998, + cacheWrite: 0, + }, + contextWindow: 202800, + maxTokens: 131072, + } satisfies Model<"anthropic-messages">, + }, + "xai": { + "grok-2": { + id: "grok-2", + name: "Grok 2", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 10, + cacheRead: 2, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-2-1212": { + id: "grok-2-1212", + name: "Grok 2 (1212)", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 10, + cacheRead: 2, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-2-latest": { + id: "grok-2-latest", + name: "Grok 2 Latest", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 2, + output: 10, + cacheRead: 2, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-2-vision": { + id: "grok-2-vision", + name: "Grok 2 Vision", + api: "openai-completions", + 
provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 10, + cacheRead: 2, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "grok-2-vision-1212": { + id: "grok-2-vision-1212", + name: "Grok 2 Vision (1212)", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 10, + cacheRead: 2, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "grok-2-vision-latest": { + id: "grok-2-vision-latest", + name: "Grok 2 Vision Latest", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 2, + output: 10, + cacheRead: 2, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "grok-3": { + id: "grok-3", + name: "Grok 3", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 3, + output: 15, + cacheRead: 0.75, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-3-fast": { + id: "grok-3-fast", + name: "Grok 3 Fast", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 5, + output: 25, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-3-fast-latest": { + id: "grok-3-fast-latest", + name: "Grok 3 Fast Latest", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 5, + output: 25, + cacheRead: 1.25, + cacheWrite: 0, + }, + contextWindow: 131072, 
+ maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-3-latest": { + id: "grok-3-latest", + name: "Grok 3 Latest", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 3, + output: 15, + cacheRead: 0.75, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-3-mini": { + id: "grok-3-mini", + name: "Grok 3 Mini", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 0.5, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-3-mini-fast": { + id: "grok-3-mini-fast", + name: "Grok 3 Mini Fast", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 4, + cacheRead: 0.15, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-3-mini-fast-latest": { + id: "grok-3-mini-fast-latest", + name: "Grok 3 Mini Fast Latest", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 4, + cacheRead: 0.15, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-3-mini-latest": { + id: "grok-3-mini-latest", + name: "Grok 3 Mini Latest", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.3, + output: 0.5, + cacheRead: 0.075, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 8192, + } satisfies Model<"openai-completions">, + "grok-4": { + id: "grok-4", + name: "Grok 4", + api: "openai-completions", + provider: "xai", + baseUrl: 
"https://api.x.ai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 3, + output: 15, + cacheRead: 0.75, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 64000, + } satisfies Model<"openai-completions">, + "grok-4-1-fast": { + id: "grok-4-1-fast", + name: "Grok 4.1 Fast", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.2, + output: 0.5, + cacheRead: 0.05, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 30000, + } satisfies Model<"openai-completions">, + "grok-4-1-fast-non-reasoning": { + id: "grok-4-1-fast-non-reasoning", + name: "Grok 4.1 Fast (Non-Reasoning)", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.2, + output: 0.5, + cacheRead: 0.05, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 30000, + } satisfies Model<"openai-completions">, + "grok-4-fast": { + id: "grok-4-fast", + name: "Grok 4 Fast", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.2, + output: 0.5, + cacheRead: 0.05, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 30000, + } satisfies Model<"openai-completions">, + "grok-4-fast-non-reasoning": { + id: "grok-4-fast-non-reasoning", + name: "Grok 4 Fast (Non-Reasoning)", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 0.2, + output: 0.5, + cacheRead: 0.05, + cacheWrite: 0, + }, + contextWindow: 2000000, + maxTokens: 30000, + } satisfies Model<"openai-completions">, + "grok-beta": { + id: "grok-beta", + name: "Grok Beta", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text"], + cost: { + input: 5, + output: 15, + cacheRead: 
5, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + "grok-code-fast-1": { + id: "grok-code-fast-1", + name: "Grok Code Fast 1", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: true, + input: ["text"], + cost: { + input: 0.2, + output: 1.5, + cacheRead: 0.02, + cacheWrite: 0, + }, + contextWindow: 256000, + maxTokens: 10000, + } satisfies Model<"openai-completions">, + "grok-vision-beta": { + id: "grok-vision-beta", + name: "Grok Vision Beta", + api: "openai-completions", + provider: "xai", + baseUrl: "https://api.x.ai/v1", + reasoning: false, + input: ["text", "image"], + cost: { + input: 5, + output: 15, + cacheRead: 5, + cacheWrite: 0, + }, + contextWindow: 8192, + maxTokens: 4096, + } satisfies Model<"openai-completions">, + }, + "zai": { + "glm-4.5": { + id: "glm-4.5", + name: "GLM-4.5", + api: "openai-completions", + provider: "zai", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false,"thinkingFormat":"zai"}, + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.2, + cacheRead: 0.11, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 98304, + } satisfies Model<"openai-completions">, + "glm-4.5-air": { + id: "glm-4.5-air", + name: "GLM-4.5-Air", + api: "openai-completions", + provider: "zai", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false,"thinkingFormat":"zai"}, + reasoning: true, + input: ["text"], + cost: { + input: 0.2, + output: 1.1, + cacheRead: 0.03, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 98304, + } satisfies Model<"openai-completions">, + "glm-4.5-flash": { + id: "glm-4.5-flash", + name: "GLM-4.5-Flash", + api: "openai-completions", + provider: "zai", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false,"thinkingFormat":"zai"}, + reasoning: true, + input: ["text"], + cost: { + 
input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 131072, + maxTokens: 98304, + } satisfies Model<"openai-completions">, + "glm-4.5v": { + id: "glm-4.5v", + name: "GLM-4.5V", + api: "openai-completions", + provider: "zai", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false,"thinkingFormat":"zai"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.6, + output: 1.8, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 64000, + maxTokens: 16384, + } satisfies Model<"openai-completions">, + "glm-4.6": { + id: "glm-4.6", + name: "GLM-4.6", + api: "openai-completions", + provider: "zai", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false,"thinkingFormat":"zai"}, + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.2, + cacheRead: 0.11, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "glm-4.6v": { + id: "glm-4.6v", + name: "GLM-4.6V", + api: "openai-completions", + provider: "zai", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false,"thinkingFormat":"zai"}, + reasoning: true, + input: ["text", "image"], + cost: { + input: 0.3, + output: 0.9, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 128000, + maxTokens: 32768, + } satisfies Model<"openai-completions">, + "glm-4.7": { + id: "glm-4.7", + name: "GLM-4.7", + api: "openai-completions", + provider: "zai", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false,"thinkingFormat":"zai"}, + reasoning: true, + input: ["text"], + cost: { + input: 0.6, + output: 2.2, + cacheRead: 0.11, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "glm-4.7-flash": { + id: "glm-4.7-flash", + name: "GLM-4.7-Flash", + api: "openai-completions", + provider: "zai", + baseUrl: 
"https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false,"thinkingFormat":"zai"}, + reasoning: true, + input: ["text"], + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + }, + contextWindow: 200000, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + "glm-5": { + id: "glm-5", + name: "GLM-5", + api: "openai-completions", + provider: "zai", + baseUrl: "https://api.z.ai/api/coding/paas/v4", + compat: {"supportsDeveloperRole":false,"thinkingFormat":"zai"}, + reasoning: true, + input: ["text"], + cost: { + input: 1, + output: 3.2, + cacheRead: 0.2, + cacheWrite: 0, + }, + contextWindow: 204800, + maxTokens: 131072, + } satisfies Model<"openai-completions">, + }, +} as const; diff --git a/packages/pi-ai/src/models.ts b/packages/pi-ai/src/models.ts new file mode 100644 index 000000000..3c06c0cc6 --- /dev/null +++ b/packages/pi-ai/src/models.ts @@ -0,0 +1,77 @@ +import { MODELS } from "./models.generated.js"; +import type { Api, KnownProvider, Model, Usage } from "./types.js"; + +const modelRegistry: Map>> = new Map(); + +// Initialize registry from MODELS on module load +for (const [provider, models] of Object.entries(MODELS)) { + const providerModels = new Map>(); + for (const [id, model] of Object.entries(models)) { + providerModels.set(id, model as Model); + } + modelRegistry.set(provider, providerModels); +} + +type ModelApi< + TProvider extends KnownProvider, + TModelId extends keyof (typeof MODELS)[TProvider], +> = (typeof MODELS)[TProvider][TModelId] extends { api: infer TApi } ? (TApi extends Api ? 
TApi : never) : never; + +export function getModel( + provider: TProvider, + modelId: TModelId, +): Model> { + const providerModels = modelRegistry.get(provider); + return providerModels?.get(modelId as string) as Model>; +} + +export function getProviders(): KnownProvider[] { + return Array.from(modelRegistry.keys()) as KnownProvider[]; +} + +export function getModels( + provider: TProvider, +): Model>[] { + const models = modelRegistry.get(provider); + return models ? (Array.from(models.values()) as Model>[]) : []; +} + +export function calculateCost(model: Model, usage: Usage): Usage["cost"] { + usage.cost.input = (model.cost.input / 1000000) * usage.input; + usage.cost.output = (model.cost.output / 1000000) * usage.output; + usage.cost.cacheRead = (model.cost.cacheRead / 1000000) * usage.cacheRead; + usage.cost.cacheWrite = (model.cost.cacheWrite / 1000000) * usage.cacheWrite; + usage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite; + return usage.cost; +} + +/** + * Check if a model supports xhigh thinking level. + * + * Supported today: + * - GPT-5.2 / GPT-5.3 / GPT-5.4 model families + * - Anthropic Messages API Opus 4.6 models (xhigh maps to adaptive effort "max") + */ +export function supportsXhigh(model: Model): boolean { + if (model.id.includes("gpt-5.2") || model.id.includes("gpt-5.3") || model.id.includes("gpt-5.4")) { + return true; + } + + if (model.api === "anthropic-messages") { + return model.id.includes("opus-4-6") || model.id.includes("opus-4.6"); + } + + return false; +} + +/** + * Check if two models are equal by comparing both their id and provider. + * Returns false if either model is null or undefined. 
+ */ +export function modelsAreEqual( + a: Model | null | undefined, + b: Model | null | undefined, +): boolean { + if (!a || !b) return false; + return a.id === b.id && a.provider === b.provider; +} diff --git a/packages/pi-ai/src/oauth.ts b/packages/pi-ai/src/oauth.ts new file mode 100644 index 000000000..d768a0fe6 --- /dev/null +++ b/packages/pi-ai/src/oauth.ts @@ -0,0 +1 @@ +export * from "./utils/oauth/index.js"; diff --git a/packages/pi-ai/src/providers/amazon-bedrock.ts b/packages/pi-ai/src/providers/amazon-bedrock.ts new file mode 100644 index 000000000..52b42b4d1 --- /dev/null +++ b/packages/pi-ai/src/providers/amazon-bedrock.ts @@ -0,0 +1,751 @@ +import { + BedrockRuntimeClient, + type BedrockRuntimeClientConfig, + StopReason as BedrockStopReason, + type Tool as BedrockTool, + CachePointType, + CacheTTL, + type ContentBlock, + type ContentBlockDeltaEvent, + type ContentBlockStartEvent, + type ContentBlockStopEvent, + ConversationRole, + ConverseStreamCommand, + type ConverseStreamMetadataEvent, + ImageFormat, + type Message, + type SystemContentBlock, + type ToolChoice, + type ToolConfiguration, + ToolResultStatus, +} from "@aws-sdk/client-bedrock-runtime"; + +import { calculateCost } from "../models.js"; +import type { + Api, + AssistantMessage, + CacheRetention, + Context, + Model, + SimpleStreamOptions, + StopReason, + StreamFunction, + StreamOptions, + TextContent, + ThinkingBudgets, + ThinkingContent, + ThinkingLevel, + Tool, + ToolCall, + ToolResultMessage, +} from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { parseStreamingJson } from "../utils/json-parse.js"; +import { sanitizeSurrogates } from "../utils/sanitize-unicode.js"; +import { adjustMaxTokensForThinking, buildBaseOptions, clampReasoning } from "./simple-options.js"; +import { transformMessages } from "./transform-messages.js"; + +export interface BedrockOptions extends StreamOptions { + region?: string; + profile?: string; + toolChoice?: 
"auto" | "any" | "none" | { type: "tool"; name: string }; + /* See https://docs.aws.amazon.com/bedrock/latest/userguide/inference-reasoning.html for supported models. */ + reasoning?: ThinkingLevel; + /* Custom token budgets per thinking level. Overrides default budgets. */ + thinkingBudgets?: ThinkingBudgets; + /* Only supported by Claude 4.x models, see https://docs.aws.amazon.com/bedrock/latest/userguide/claude-messages-extended-thinking.html#claude-messages-extended-thinking-tool-use-interleaved */ + interleavedThinking?: boolean; +} + +type Block = (TextContent | ThinkingContent | ToolCall) & { index?: number; partialJson?: string }; + +export const streamBedrock: StreamFunction<"bedrock-converse-stream", BedrockOptions> = ( + model: Model<"bedrock-converse-stream">, + context: Context, + options: BedrockOptions = {}, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + (async () => { + const output: AssistantMessage = { + role: "assistant", + content: [], + api: "bedrock-converse-stream" as Api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "stop", + timestamp: Date.now(), + }; + + const blocks = output.content as Block[]; + + const config: BedrockRuntimeClientConfig = { + profile: options.profile, + }; + + // in Node.js/Bun environment only + if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) { + // Region resolution: explicit option > env vars > SDK default chain. + // When AWS_PROFILE is set, we leave region undefined so the SDK can + // resolve it from aws profile configs. Otherwise fall back to us-east-1.
+ const explicitRegion = options.region || process.env.AWS_REGION || process.env.AWS_DEFAULT_REGION; + if (explicitRegion) { + config.region = explicitRegion; + } else if (!process.env.AWS_PROFILE) { + config.region = "us-east-1"; + } + + // Support proxies that don't need authentication + if (process.env.AWS_BEDROCK_SKIP_AUTH === "1") { + config.credentials = { + accessKeyId: "dummy-access-key", + secretAccessKey: "dummy-secret-key", + }; + } + + if ( + process.env.HTTP_PROXY || + process.env.HTTPS_PROXY || + process.env.NO_PROXY || + process.env.http_proxy || + process.env.https_proxy || + process.env.no_proxy + ) { + const nodeHttpHandler = await import("@smithy/node-http-handler"); + const proxyAgent = await import("proxy-agent"); + + const agent = new proxyAgent.ProxyAgent(); + + // Bedrock runtime uses NodeHttp2Handler by default since v3.798.0, which is based + // on `http2` module and has no support for http agent. + // Use NodeHttpHandler to support http agent. + config.requestHandler = new nodeHttpHandler.NodeHttpHandler({ + httpAgent: agent, + httpsAgent: agent, + }); + } else if (process.env.AWS_BEDROCK_FORCE_HTTP1 === "1") { + // Some custom endpoints require HTTP/1.1 instead of HTTP/2 + const nodeHttpHandler = await import("@smithy/node-http-handler"); + config.requestHandler = new nodeHttpHandler.NodeHttpHandler(); + } + } else { + // Non-Node environment (browser): fall back to us-east-1 since + // there's no config file resolution available. 
+ config.region = options.region || "us-east-1"; + } + + try { + const client = new BedrockRuntimeClient(config); + + const cacheRetention = resolveCacheRetention(options.cacheRetention); + let commandInput = { + modelId: model.id, + messages: convertMessages(context, model, cacheRetention), + system: buildSystemPrompt(context.systemPrompt, model, cacheRetention), + inferenceConfig: { maxTokens: options.maxTokens, temperature: options.temperature }, + toolConfig: convertToolConfig(context.tools, options.toolChoice), + additionalModelRequestFields: buildAdditionalModelRequestFields(model, options), + }; + const nextCommandInput = await options?.onPayload?.(commandInput, model); + if (nextCommandInput !== undefined) { + commandInput = nextCommandInput as typeof commandInput; + } + const command = new ConverseStreamCommand(commandInput); + + const response = await client.send(command, { abortSignal: options.signal }); + + for await (const item of response.stream!) { + if (item.messageStart) { + if (item.messageStart.role !== ConversationRole.ASSISTANT) { + throw new Error("Unexpected assistant message start but got user message start instead"); + } + stream.push({ type: "start", partial: output }); + } else if (item.contentBlockStart) { + handleContentBlockStart(item.contentBlockStart, blocks, output, stream); + } else if (item.contentBlockDelta) { + handleContentBlockDelta(item.contentBlockDelta, blocks, output, stream); + } else if (item.contentBlockStop) { + handleContentBlockStop(item.contentBlockStop, blocks, output, stream); + } else if (item.messageStop) { + output.stopReason = mapStopReason(item.messageStop.stopReason); + } else if (item.metadata) { + handleMetadata(item.metadata, model, output); + } else if (item.internalServerException) { + throw new Error(`Internal server error: ${item.internalServerException.message}`); + } else if (item.modelStreamErrorException) { + throw new Error(`Model stream error: ${item.modelStreamErrorException.message}`); + } 
else if (item.validationException) { + throw new Error(`Validation error: ${item.validationException.message}`); + } else if (item.throttlingException) { + throw new Error(`Throttling error: ${item.throttlingException.message}`); + } else if (item.serviceUnavailableException) { + throw new Error(`Service unavailable: ${item.serviceUnavailableException.message}`); + } + } + + if (options.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (output.stopReason === "error" || output.stopReason === "aborted") { + throw new Error("An unknown error occurred"); + } + + stream.push({ type: "done", reason: output.stopReason, message: output }); + stream.end(); + } catch (error) { + for (const block of output.content) { + delete (block as Block).index; + delete (block as Block).partialJson; + } + output.stopReason = options.signal?.aborted ? "aborted" : "error"; + output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error); + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +export const streamSimpleBedrock: StreamFunction<"bedrock-converse-stream", SimpleStreamOptions> = ( + model: Model<"bedrock-converse-stream">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const base = buildBaseOptions(model, options, undefined); + if (!options?.reasoning) { + return streamBedrock(model, context, { ...base, reasoning: undefined } satisfies BedrockOptions); + } + + if (model.id.includes("anthropic.claude") || model.id.includes("anthropic/claude")) { + if (supportsAdaptiveThinking(model.id)) { + return streamBedrock(model, context, { + ...base, + reasoning: options.reasoning, + thinkingBudgets: options.thinkingBudgets, + } satisfies BedrockOptions); + } + + const adjusted = adjustMaxTokensForThinking( + base.maxTokens || 0, + model.maxTokens, + options.reasoning, + options.thinkingBudgets, + ); + + return 
streamBedrock(model, context, { + ...base, + maxTokens: adjusted.maxTokens, + reasoning: options.reasoning, + thinkingBudgets: { + ...(options.thinkingBudgets || {}), + [clampReasoning(options.reasoning)!]: adjusted.thinkingBudget, + }, + } satisfies BedrockOptions); + } + + return streamBedrock(model, context, { + ...base, + reasoning: options.reasoning, + thinkingBudgets: options.thinkingBudgets, + } satisfies BedrockOptions); +}; + +function handleContentBlockStart( + event: ContentBlockStartEvent, + blocks: Block[], + output: AssistantMessage, + stream: AssistantMessageEventStream, +): void { + const index = event.contentBlockIndex!; + const start = event.start; + + if (start?.toolUse) { + const block: Block = { + type: "toolCall", + id: start.toolUse.toolUseId || "", + name: start.toolUse.name || "", + arguments: {}, + partialJson: "", + index, + }; + output.content.push(block); + stream.push({ type: "toolcall_start", contentIndex: blocks.length - 1, partial: output }); + } +} + +function handleContentBlockDelta( + event: ContentBlockDeltaEvent, + blocks: Block[], + output: AssistantMessage, + stream: AssistantMessageEventStream, +): void { + const contentBlockIndex = event.contentBlockIndex!; + const delta = event.delta; + let index = blocks.findIndex((b) => b.index === contentBlockIndex); + let block = blocks[index]; + + if (delta?.text !== undefined) { + // If no text block exists yet, create one, as `handleContentBlockStart` is not sent for text blocks + if (!block) { + const newBlock: Block = { type: "text", text: "", index: contentBlockIndex }; + output.content.push(newBlock); + index = blocks.length - 1; + block = blocks[index]; + stream.push({ type: "text_start", contentIndex: index, partial: output }); + } + if (block.type === "text") { + block.text += delta.text; + stream.push({ type: "text_delta", contentIndex: index, delta: delta.text, partial: output }); + } + } else if (delta?.toolUse && block?.type === "toolCall") { + block.partialJson = 
(block.partialJson || "") + (delta.toolUse.input || ""); + block.arguments = parseStreamingJson(block.partialJson); + stream.push({ type: "toolcall_delta", contentIndex: index, delta: delta.toolUse.input || "", partial: output }); + } else if (delta?.reasoningContent) { + let thinkingBlock = block; + let thinkingIndex = index; + + if (!thinkingBlock) { + const newBlock: Block = { type: "thinking", thinking: "", thinkingSignature: "", index: contentBlockIndex }; + output.content.push(newBlock); + thinkingIndex = blocks.length - 1; + thinkingBlock = blocks[thinkingIndex]; + stream.push({ type: "thinking_start", contentIndex: thinkingIndex, partial: output }); + } + + if (thinkingBlock?.type === "thinking") { + if (delta.reasoningContent.text) { + thinkingBlock.thinking += delta.reasoningContent.text; + stream.push({ + type: "thinking_delta", + contentIndex: thinkingIndex, + delta: delta.reasoningContent.text, + partial: output, + }); + } + if (delta.reasoningContent.signature) { + thinkingBlock.thinkingSignature = + (thinkingBlock.thinkingSignature || "") + delta.reasoningContent.signature; + } + } + } +} + +function handleMetadata( + event: ConverseStreamMetadataEvent, + model: Model<"bedrock-converse-stream">, + output: AssistantMessage, +): void { + if (event.usage) { + output.usage.input = event.usage.inputTokens || 0; + output.usage.output = event.usage.outputTokens || 0; + output.usage.cacheRead = event.usage.cacheReadInputTokens || 0; + output.usage.cacheWrite = event.usage.cacheWriteInputTokens || 0; + output.usage.totalTokens = event.usage.totalTokens || output.usage.input + output.usage.output; + calculateCost(model, output.usage); + } +} + +function handleContentBlockStop( + event: ContentBlockStopEvent, + blocks: Block[], + output: AssistantMessage, + stream: AssistantMessageEventStream, +): void { + const index = blocks.findIndex((b) => b.index === event.contentBlockIndex); + const block = blocks[index]; + if (!block) return; + delete (block as 
Block).index; + + switch (block.type) { + case "text": + stream.push({ type: "text_end", contentIndex: index, content: block.text, partial: output }); + break; + case "thinking": + stream.push({ type: "thinking_end", contentIndex: index, content: block.thinking, partial: output }); + break; + case "toolCall": + block.arguments = parseStreamingJson(block.partialJson); + delete (block as Block).partialJson; + stream.push({ type: "toolcall_end", contentIndex: index, toolCall: block, partial: output }); + break; + } +} + +/** + * Check if the model supports adaptive thinking (Opus 4.6 and Sonnet 4.6). + */ +function supportsAdaptiveThinking(modelId: string): boolean { + return ( + modelId.includes("opus-4-6") || + modelId.includes("opus-4.6") || + modelId.includes("sonnet-4-6") || + modelId.includes("sonnet-4.6") + ); +} + +function mapThinkingLevelToEffort( + level: SimpleStreamOptions["reasoning"], + modelId: string, +): "low" | "medium" | "high" | "max" { + switch (level) { + case "minimal": + case "low": + return "low"; + case "medium": + return "medium"; + case "high": + return "high"; + case "xhigh": + return modelId.includes("opus-4-6") || modelId.includes("opus-4.6") ? "max" : "high"; + default: + return "high"; + } +} + +/** + * Resolve cache retention preference. + * Defaults to "short" and uses PI_CACHE_RETENTION for backward compatibility. + */ +function resolveCacheRetention(cacheRetention?: CacheRetention): CacheRetention { + if (cacheRetention) { + return cacheRetention; + } + if (typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long") { + return "long"; + } + return "short"; +} + +/** + * Check if the model supports prompt caching. 
+ * Supported: Claude 3.5 Haiku, Claude 3.7 Sonnet, Claude 4.x models + */ +function supportsPromptCaching(model: Model<"bedrock-converse-stream">): boolean { + if (model.cost.cacheRead || model.cost.cacheWrite) { + return true; + } + + const id = model.id.toLowerCase(); + // Claude 4.x models (opus-4, sonnet-4, haiku-4) + if (id.includes("claude") && (id.includes("-4-") || id.includes("-4."))) return true; + // Claude 3.7 Sonnet + if (id.includes("claude-3-7-sonnet")) return true; + // Claude 3.5 Haiku + if (id.includes("claude-3-5-haiku")) return true; + return false; +} + +/** + * Check if the model supports thinking signatures in reasoningContent. + * Only Anthropic Claude models support the signature field. + * Other models (OpenAI, Qwen, Minimax, Moonshot, etc.) reject it with: + * "This model doesn't support the reasoningContent.reasoningText.signature field" + */ +function supportsThinkingSignature(model: Model<"bedrock-converse-stream">): boolean { + const id = model.id.toLowerCase(); + return id.includes("anthropic.claude") || id.includes("anthropic/claude"); +} + +function buildSystemPrompt( + systemPrompt: string | undefined, + model: Model<"bedrock-converse-stream">, + cacheRetention: CacheRetention, +): SystemContentBlock[] | undefined { + if (!systemPrompt) return undefined; + + const blocks: SystemContentBlock[] = [{ text: sanitizeSurrogates(systemPrompt) }]; + + // Add cache point for supported Claude models when caching is enabled + if (cacheRetention !== "none" && supportsPromptCaching(model)) { + blocks.push({ + cachePoint: { type: CachePointType.DEFAULT, ...(cacheRetention === "long" ? { ttl: CacheTTL.ONE_HOUR } : {}) }, + }); + } + + return blocks; +} + +function normalizeToolCallId(id: string): string { + const sanitized = id.replace(/[^a-zA-Z0-9_-]/g, "_"); + return sanitized.length > 64 ? 
sanitized.slice(0, 64) : sanitized; +} + +function convertMessages( + context: Context, + model: Model<"bedrock-converse-stream">, + cacheRetention: CacheRetention, +): Message[] { + const result: Message[] = []; + const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId); + + for (let i = 0; i < transformedMessages.length; i++) { + const m = transformedMessages[i]; + + switch (m.role) { + case "user": + result.push({ + role: ConversationRole.USER, + content: + typeof m.content === "string" + ? [{ text: sanitizeSurrogates(m.content) }] + : m.content.map((c) => { + switch (c.type) { + case "text": + return { text: sanitizeSurrogates(c.text) }; + case "image": + return { image: createImageBlock(c.mimeType, c.data) }; + default: + throw new Error("Unknown user content type"); + } + }), + }); + break; + case "assistant": { + // Skip assistant messages with empty content (e.g., from aborted requests) + // Bedrock rejects messages with empty content arrays + if (m.content.length === 0) { + continue; + } + const contentBlocks: ContentBlock[] = []; + for (const c of m.content) { + switch (c.type) { + case "text": + // Skip empty text blocks + if (c.text.trim().length === 0) continue; + contentBlocks.push({ text: sanitizeSurrogates(c.text) }); + break; + case "toolCall": + contentBlocks.push({ + toolUse: { toolUseId: c.id, name: c.name, input: c.arguments }, + }); + break; + case "thinking": + // Skip empty thinking blocks + if (c.thinking.trim().length === 0) continue; + // Only Anthropic models support the signature field in reasoningText. 
+ // For other models, we omit the signature to avoid errors like: + // "This model doesn't support the reasoningContent.reasoningText.signature field" + if (supportsThinkingSignature(model)) { + contentBlocks.push({ + reasoningContent: { + reasoningText: { text: sanitizeSurrogates(c.thinking), signature: c.thinkingSignature }, + }, + }); + } else { + contentBlocks.push({ + reasoningContent: { + reasoningText: { text: sanitizeSurrogates(c.thinking) }, + }, + }); + } + break; + default: + throw new Error("Unknown assistant content type"); + } + } + // Skip if all content blocks were filtered out + if (contentBlocks.length === 0) { + continue; + } + result.push({ + role: ConversationRole.ASSISTANT, + content: contentBlocks, + }); + break; + } + case "toolResult": { + // Collect all consecutive toolResult messages into a single user message + // Bedrock requires all tool results to be in one message + const toolResults: ContentBlock.ToolResultMember[] = []; + + // Add current tool result with all content blocks combined + toolResults.push({ + toolResult: { + toolUseId: m.toolCallId, + content: m.content.map((c) => + c.type === "image" + ? { image: createImageBlock(c.mimeType, c.data) } + : { text: sanitizeSurrogates(c.text) }, + ), + status: m.isError ? ToolResultStatus.ERROR : ToolResultStatus.SUCCESS, + }, + }); + + // Look ahead for consecutive toolResult messages + let j = i + 1; + while (j < transformedMessages.length && transformedMessages[j].role === "toolResult") { + const nextMsg = transformedMessages[j] as ToolResultMessage; + toolResults.push({ + toolResult: { + toolUseId: nextMsg.toolCallId, + content: nextMsg.content.map((c) => + c.type === "image" + ? { image: createImageBlock(c.mimeType, c.data) } + : { text: sanitizeSurrogates(c.text) }, + ), + status: nextMsg.isError ? 
ToolResultStatus.ERROR : ToolResultStatus.SUCCESS, + }, + }); + j++; + } + + // Skip the messages we've already processed + i = j - 1; + + result.push({ + role: ConversationRole.USER, + content: toolResults, + }); + break; + } + default: + throw new Error("Unknown message role"); + } + } + + // Add cache point to the last user message for supported Claude models when caching is enabled + if (cacheRetention !== "none" && supportsPromptCaching(model) && result.length > 0) { + const lastMessage = result[result.length - 1]; + if (lastMessage.role === ConversationRole.USER && lastMessage.content) { + (lastMessage.content as ContentBlock[]).push({ + cachePoint: { + type: CachePointType.DEFAULT, + ...(cacheRetention === "long" ? { ttl: CacheTTL.ONE_HOUR } : {}), + }, + }); + } + } + + return result; +} + +function convertToolConfig( + tools: Tool[] | undefined, + toolChoice: BedrockOptions["toolChoice"], +): ToolConfiguration | undefined { + if (!tools?.length || toolChoice === "none") return undefined; + + const bedrockTools: BedrockTool[] = tools.map((tool) => ({ + toolSpec: { + name: tool.name, + description: tool.description, + inputSchema: { json: tool.parameters }, + }, + })); + + let bedrockToolChoice: ToolChoice | undefined; + switch (toolChoice) { + case "auto": + bedrockToolChoice = { auto: {} }; + break; + case "any": + bedrockToolChoice = { any: {} }; + break; + default: + if (toolChoice?.type === "tool") { + bedrockToolChoice = { tool: { name: toolChoice.name } }; + } + } + + return { tools: bedrockTools, toolChoice: bedrockToolChoice }; +} + +function mapStopReason(reason: string | undefined): StopReason { + switch (reason) { + case BedrockStopReason.END_TURN: + case BedrockStopReason.STOP_SEQUENCE: + return "stop"; + case BedrockStopReason.MAX_TOKENS: + case BedrockStopReason.MODEL_CONTEXT_WINDOW_EXCEEDED: + return "length"; + case BedrockStopReason.TOOL_USE: + return "toolUse"; + default: + return "error"; + } +} + +function 
buildAdditionalModelRequestFields( + model: Model<"bedrock-converse-stream">, + options: BedrockOptions, +): Record | undefined { + if (!options.reasoning || !model.reasoning) { + return undefined; + } + + if (model.id.includes("anthropic.claude") || model.id.includes("anthropic/claude")) { + const result: Record = supportsAdaptiveThinking(model.id) + ? { + thinking: { type: "adaptive" }, + output_config: { effort: mapThinkingLevelToEffort(options.reasoning, model.id) }, + } + : (() => { + const defaultBudgets: Record = { + minimal: 1024, + low: 2048, + medium: 8192, + high: 16384, + xhigh: 16384, // Claude doesn't support xhigh, clamp to high + }; + + // Custom budgets override defaults (xhigh not in ThinkingBudgets, use high) + const level = options.reasoning === "xhigh" ? "high" : options.reasoning; + const budget = options.thinkingBudgets?.[level] ?? defaultBudgets[options.reasoning]; + + return { + thinking: { + type: "enabled", + budget_tokens: budget, + }, + }; + })(); + + if (!supportsAdaptiveThinking(model.id) && (options.interleavedThinking ?? 
true)) { + result.anthropic_beta = ["interleaved-thinking-2025-05-14"]; + } + + return result; + } + + return undefined; +} + +function createImageBlock(mimeType: string, data: string) { + let format: ImageFormat; + switch (mimeType) { + case "image/jpeg": + case "image/jpg": + format = ImageFormat.JPEG; + break; + case "image/png": + format = ImageFormat.PNG; + break; + case "image/gif": + format = ImageFormat.GIF; + break; + case "image/webp": + format = ImageFormat.WEBP; + break; + default: + throw new Error(`Unknown image type: ${mimeType}`); + } + + const binaryString = atob(data); + const bytes = new Uint8Array(binaryString.length); + for (let i = 0; i < binaryString.length; i++) { + bytes[i] = binaryString.charCodeAt(i); + } + + return { source: { bytes }, format }; +} diff --git a/packages/pi-ai/src/providers/anthropic.ts b/packages/pi-ai/src/providers/anthropic.ts new file mode 100644 index 000000000..ba59f1478 --- /dev/null +++ b/packages/pi-ai/src/providers/anthropic.ts @@ -0,0 +1,883 @@ +import Anthropic from "@anthropic-ai/sdk"; +import type { + ContentBlockParam, + MessageCreateParamsStreaming, + MessageParam, +} from "@anthropic-ai/sdk/resources/messages.js"; +import { getEnvApiKey } from "../env-api-keys.js"; +import { calculateCost } from "../models.js"; +import type { + Api, + AssistantMessage, + CacheRetention, + Context, + ImageContent, + Message, + Model, + SimpleStreamOptions, + StopReason, + StreamFunction, + StreamOptions, + TextContent, + ThinkingContent, + Tool, + ToolCall, + ToolResultMessage, +} from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { parseStreamingJson } from "../utils/json-parse.js"; +import { sanitizeSurrogates } from "../utils/sanitize-unicode.js"; + +import { buildCopilotDynamicHeaders, hasCopilotVisionInput } from "./github-copilot-headers.js"; +import { adjustMaxTokensForThinking, buildBaseOptions } from "./simple-options.js"; +import { transformMessages } from 
"./transform-messages.js"; + +/** + * Resolve cache retention preference. + * Defaults to "short" and uses PI_CACHE_RETENTION for backward compatibility. + */ +function resolveCacheRetention(cacheRetention?: CacheRetention): CacheRetention { + if (cacheRetention) { + return cacheRetention; + } + if (typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long") { + return "long"; + } + return "short"; +} + +function getCacheControl( + baseUrl: string, + cacheRetention?: CacheRetention, +): { retention: CacheRetention; cacheControl?: { type: "ephemeral"; ttl?: "1h" } } { + const retention = resolveCacheRetention(cacheRetention); + if (retention === "none") { + return { retention }; + } + const ttl = retention === "long" && baseUrl.includes("api.anthropic.com") ? "1h" : undefined; + return { + retention, + cacheControl: { type: "ephemeral", ...(ttl && { ttl }) }, + }; +} + +// Stealth mode: Mimic Claude Code's tool naming exactly +const claudeCodeVersion = "2.1.62"; + +// Claude Code 2.x tool names (canonical casing) +// Source: https://cchistory.mariozechner.at/data/prompts-2.1.11.md +// To update: https://github.com/badlogic/cchistory +const claudeCodeTools = [ + "Read", + "Write", + "Edit", + "Bash", + "Grep", + "Glob", + "AskUserQuestion", + "EnterPlanMode", + "ExitPlanMode", + "KillShell", + "NotebookEdit", + "Skill", + "Task", + "TaskOutput", + "TodoWrite", + "WebFetch", + "WebSearch", +]; + +const ccToolLookup = new Map(claudeCodeTools.map((t) => [t.toLowerCase(), t])); + +// Convert tool name to CC canonical casing if it matches (case-insensitive) +const toClaudeCodeName = (name: string) => ccToolLookup.get(name.toLowerCase()) ?? 
name; +const fromClaudeCodeName = (name: string, tools?: Tool[]) => { + if (tools && tools.length > 0) { + const lowerName = name.toLowerCase(); + const matchedTool = tools.find((tool) => tool.name.toLowerCase() === lowerName); + if (matchedTool) return matchedTool.name; + } + return name; +}; + +/** + * Convert content blocks to Anthropic API format + */ +function convertContentBlocks(content: (TextContent | ImageContent)[]): + | string + | Array< + | { type: "text"; text: string } + | { + type: "image"; + source: { + type: "base64"; + media_type: "image/jpeg" | "image/png" | "image/gif" | "image/webp"; + data: string; + }; + } + > { + // If only text blocks, return as concatenated string for simplicity + const hasImages = content.some((c) => c.type === "image"); + if (!hasImages) { + return sanitizeSurrogates(content.map((c) => (c as TextContent).text).join("\n")); + } + + // If we have images, convert to content block array + const blocks = content.map((block) => { + if (block.type === "text") { + return { + type: "text" as const, + text: sanitizeSurrogates(block.text), + }; + } + return { + type: "image" as const, + source: { + type: "base64" as const, + media_type: block.mimeType as "image/jpeg" | "image/png" | "image/gif" | "image/webp", + data: block.data, + }, + }; + }); + + // If only images (no text), add placeholder text block + const hasText = blocks.some((b) => b.type === "text"); + if (!hasText) { + blocks.unshift({ + type: "text" as const, + text: "(see attached image)", + }); + } + + return blocks; +} + +export type AnthropicEffort = "low" | "medium" | "high" | "max"; + +export interface AnthropicOptions extends StreamOptions { + /** + * Enable extended thinking. + * For Opus 4.6 and Sonnet 4.6: uses adaptive thinking (model decides when/how much to think). + * For older models: uses budget-based thinking with thinkingBudgetTokens. + */ + thinkingEnabled?: boolean; + /** + * Token budget for extended thinking (older models only). 
+ * Ignored for Opus 4.6 and Sonnet 4.6, which use adaptive thinking. + */ + thinkingBudgetTokens?: number; + /** + * Effort level for adaptive thinking (Opus 4.6 and Sonnet 4.6). + * Controls how much thinking Claude allocates: + * - "max": Always thinks with no constraints (Opus 4.6 only) + * - "high": Always thinks, deep reasoning (default) + * - "medium": Moderate thinking, may skip for simple queries + * - "low": Minimal thinking, skips for simple tasks + * Ignored for older models. + */ + effort?: AnthropicEffort; + interleavedThinking?: boolean; + toolChoice?: "auto" | "any" | "none" | { type: "tool"; name: string }; +} + +function mergeHeaders(...headerSources: (Record | undefined)[]): Record { + const merged: Record = {}; + for (const headers of headerSources) { + if (headers) { + Object.assign(merged, headers); + } + } + return merged; +} + +export const streamAnthropic: StreamFunction<"anthropic-messages", AnthropicOptions> = ( + model: Model<"anthropic-messages">, + context: Context, + options?: AnthropicOptions, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + (async () => { + const output: AssistantMessage = { + role: "assistant", + content: [], + api: model.api as Api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "stop", + timestamp: Date.now(), + }; + + try { + const apiKey = options?.apiKey ?? getEnvApiKey(model.provider) ?? ""; + + let copilotDynamicHeaders: Record | undefined; + if (model.provider === "github-copilot") { + const hasImages = hasCopilotVisionInput(context.messages); + copilotDynamicHeaders = buildCopilotDynamicHeaders({ + messages: context.messages, + hasImages, + }); + } + + const { client, isOAuthToken } = createClient( + model, + apiKey, + options?.interleavedThinking ?? 
true, + options?.headers, + copilotDynamicHeaders, + ); + let params = buildParams(model, context, isOAuthToken, options); + const nextParams = await options?.onPayload?.(params, model); + if (nextParams !== undefined) { + params = nextParams as MessageCreateParamsStreaming; + } + const anthropicStream = client.messages.stream({ ...params, stream: true }, { signal: options?.signal }); + stream.push({ type: "start", partial: output }); + + type Block = (ThinkingContent | TextContent | (ToolCall & { partialJson: string })) & { index: number }; + const blocks = output.content as Block[]; + + for await (const event of anthropicStream) { + if (event.type === "message_start") { + // Capture initial token usage from message_start event + // This ensures we have input token counts even if the stream is aborted early + output.usage.input = event.message.usage.input_tokens || 0; + output.usage.output = event.message.usage.output_tokens || 0; + output.usage.cacheRead = event.message.usage.cache_read_input_tokens || 0; + output.usage.cacheWrite = event.message.usage.cache_creation_input_tokens || 0; + // Anthropic doesn't provide total_tokens, compute from components + output.usage.totalTokens = + output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite; + calculateCost(model, output.usage); + } else if (event.type === "content_block_start") { + if (event.content_block.type === "text") { + const block: Block = { + type: "text", + text: "", + index: event.index, + }; + output.content.push(block); + stream.push({ type: "text_start", contentIndex: output.content.length - 1, partial: output }); + } else if (event.content_block.type === "thinking") { + const block: Block = { + type: "thinking", + thinking: "", + thinkingSignature: "", + index: event.index, + }; + output.content.push(block); + stream.push({ type: "thinking_start", contentIndex: output.content.length - 1, partial: output }); + } else if (event.content_block.type === 
"redacted_thinking") { + const block: Block = { + type: "thinking", + thinking: "[Reasoning redacted]", + thinkingSignature: event.content_block.data, + redacted: true, + index: event.index, + }; + output.content.push(block); + stream.push({ type: "thinking_start", contentIndex: output.content.length - 1, partial: output }); + } else if (event.content_block.type === "tool_use") { + const block: Block = { + type: "toolCall", + id: event.content_block.id, + name: isOAuthToken + ? fromClaudeCodeName(event.content_block.name, context.tools) + : event.content_block.name, + arguments: (event.content_block.input as Record) ?? {}, + partialJson: "", + index: event.index, + }; + output.content.push(block); + stream.push({ type: "toolcall_start", contentIndex: output.content.length - 1, partial: output }); + } + } else if (event.type === "content_block_delta") { + if (event.delta.type === "text_delta") { + const index = blocks.findIndex((b) => b.index === event.index); + const block = blocks[index]; + if (block && block.type === "text") { + block.text += event.delta.text; + stream.push({ + type: "text_delta", + contentIndex: index, + delta: event.delta.text, + partial: output, + }); + } + } else if (event.delta.type === "thinking_delta") { + const index = blocks.findIndex((b) => b.index === event.index); + const block = blocks[index]; + if (block && block.type === "thinking") { + block.thinking += event.delta.thinking; + stream.push({ + type: "thinking_delta", + contentIndex: index, + delta: event.delta.thinking, + partial: output, + }); + } + } else if (event.delta.type === "input_json_delta") { + const index = blocks.findIndex((b) => b.index === event.index); + const block = blocks[index]; + if (block && block.type === "toolCall") { + block.partialJson += event.delta.partial_json; + block.arguments = parseStreamingJson(block.partialJson); + stream.push({ + type: "toolcall_delta", + contentIndex: index, + delta: event.delta.partial_json, + partial: output, + }); + } + } 
else if (event.delta.type === "signature_delta") { + const index = blocks.findIndex((b) => b.index === event.index); + const block = blocks[index]; + if (block && block.type === "thinking") { + block.thinkingSignature = block.thinkingSignature || ""; + block.thinkingSignature += event.delta.signature; + } + } + } else if (event.type === "content_block_stop") { + const index = blocks.findIndex((b) => b.index === event.index); + const block = blocks[index]; + if (block) { + delete (block as any).index; + if (block.type === "text") { + stream.push({ + type: "text_end", + contentIndex: index, + content: block.text, + partial: output, + }); + } else if (block.type === "thinking") { + stream.push({ + type: "thinking_end", + contentIndex: index, + content: block.thinking, + partial: output, + }); + } else if (block.type === "toolCall") { + block.arguments = parseStreamingJson(block.partialJson); + delete (block as any).partialJson; + stream.push({ + type: "toolcall_end", + contentIndex: index, + toolCall: block, + partial: output, + }); + } + } + } else if (event.type === "message_delta") { + if (event.delta.stop_reason) { + output.stopReason = mapStopReason(event.delta.stop_reason); + } + // Only update usage fields if present (not null). + // Preserves input_tokens from message_start when proxies omit it in message_delta. 
+ if (event.usage.input_tokens != null) { + output.usage.input = event.usage.input_tokens; + } + if (event.usage.output_tokens != null) { + output.usage.output = event.usage.output_tokens; + } + if (event.usage.cache_read_input_tokens != null) { + output.usage.cacheRead = event.usage.cache_read_input_tokens; + } + if (event.usage.cache_creation_input_tokens != null) { + output.usage.cacheWrite = event.usage.cache_creation_input_tokens; + } + // Anthropic doesn't provide total_tokens, compute from components + output.usage.totalTokens = + output.usage.input + output.usage.output + output.usage.cacheRead + output.usage.cacheWrite; + calculateCost(model, output.usage); + } + } + + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (output.stopReason === "aborted" || output.stopReason === "error") { + throw new Error("An unknown error occurred"); + } + + stream.push({ type: "done", reason: output.stopReason, message: output }); + stream.end(); + } catch (error) { + for (const block of output.content) delete (block as any).index; + output.stopReason = options?.signal?.aborted ? "aborted" : "error"; + output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error); + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +/** + * Check if a model supports adaptive thinking (Opus 4.6 and Sonnet 4.6) + */ +function supportsAdaptiveThinking(modelId: string): boolean { + // Opus 4.6 and Sonnet 4.6 model IDs (with or without date suffix) + return ( + modelId.includes("opus-4-6") || + modelId.includes("opus-4.6") || + modelId.includes("sonnet-4-6") || + modelId.includes("sonnet-4.6") + ); +} + +/** + * Map ThinkingLevel to Anthropic effort levels for adaptive thinking. + * Note: effort "max" is only valid on Opus 4.6. 
+ */ +function mapThinkingLevelToEffort(level: SimpleStreamOptions["reasoning"], modelId: string): AnthropicEffort { + switch (level) { + case "minimal": + return "low"; + case "low": + return "low"; + case "medium": + return "medium"; + case "high": + return "high"; + case "xhigh": + return modelId.includes("opus-4-6") || modelId.includes("opus-4.6") ? "max" : "high"; + default: + return "high"; + } +} + +export const streamSimpleAnthropic: StreamFunction<"anthropic-messages", SimpleStreamOptions> = ( + model: Model<"anthropic-messages">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const apiKey = options?.apiKey || getEnvApiKey(model.provider); + if (!apiKey) { + throw new Error(`No API key for provider: ${model.provider}`); + } + + const base = buildBaseOptions(model, options, apiKey); + if (!options?.reasoning) { + return streamAnthropic(model, context, { ...base, thinkingEnabled: false } satisfies AnthropicOptions); + } + + // For Opus 4.6 and Sonnet 4.6: use adaptive thinking with effort level + // For older models: use budget-based thinking + if (supportsAdaptiveThinking(model.id)) { + const effort = mapThinkingLevelToEffort(options.reasoning, model.id); + return streamAnthropic(model, context, { + ...base, + thinkingEnabled: true, + effort, + } satisfies AnthropicOptions); + } + + const adjusted = adjustMaxTokensForThinking( + base.maxTokens || 0, + model.maxTokens, + options.reasoning, + options.thinkingBudgets, + ); + + return streamAnthropic(model, context, { + ...base, + maxTokens: adjusted.maxTokens, + thinkingEnabled: true, + thinkingBudgetTokens: adjusted.thinkingBudget, + } satisfies AnthropicOptions); +}; + +function isOAuthToken(apiKey: string): boolean { + return apiKey.includes("sk-ant-oat"); +} + +function createClient( + model: Model<"anthropic-messages">, + apiKey: string, + interleavedThinking: boolean, + optionsHeaders?: Record, + dynamicHeaders?: Record, +): { client: Anthropic; isOAuthToken: 
boolean } { + // Adaptive thinking models (Opus 4.6, Sonnet 4.6) have interleaved thinking built-in. + // The beta header is deprecated on Opus 4.6 and redundant on Sonnet 4.6, so skip it. + const needsInterleavedBeta = interleavedThinking && !supportsAdaptiveThinking(model.id); + + // Copilot: Bearer auth, selective betas (no fine-grained-tool-streaming) + if (model.provider === "github-copilot") { + const betaFeatures: string[] = []; + if (needsInterleavedBeta) { + betaFeatures.push("interleaved-thinking-2025-05-14"); + } + + const client = new Anthropic({ + apiKey: null, + authToken: apiKey, + baseURL: model.baseUrl, + dangerouslyAllowBrowser: true, + defaultHeaders: mergeHeaders( + { + accept: "application/json", + "anthropic-dangerous-direct-browser-access": "true", + ...(betaFeatures.length > 0 ? { "anthropic-beta": betaFeatures.join(",") } : {}), + }, + model.headers, + dynamicHeaders, + optionsHeaders, + ), + }); + + return { client, isOAuthToken: false }; + } + + const betaFeatures = ["fine-grained-tool-streaming-2025-05-14"]; + if (needsInterleavedBeta) { + betaFeatures.push("interleaved-thinking-2025-05-14"); + } + + // OAuth: Bearer auth, Claude Code identity headers + if (isOAuthToken(apiKey)) { + const client = new Anthropic({ + apiKey: null, + authToken: apiKey, + baseURL: model.baseUrl, + dangerouslyAllowBrowser: true, + defaultHeaders: mergeHeaders( + { + accept: "application/json", + "anthropic-dangerous-direct-browser-access": "true", + "anthropic-beta": `claude-code-20250219,oauth-2025-04-20,${betaFeatures.join(",")}`, + "user-agent": `claude-cli/${claudeCodeVersion}`, + "x-app": "cli", + }, + model.headers, + optionsHeaders, + ), + }); + + return { client, isOAuthToken: true }; + } + + // API key auth + const client = new Anthropic({ + apiKey, + baseURL: model.baseUrl, + dangerouslyAllowBrowser: true, + defaultHeaders: mergeHeaders( + { + accept: "application/json", + "anthropic-dangerous-direct-browser-access": "true", + "anthropic-beta": 
betaFeatures.join(","), + }, + model.headers, + optionsHeaders, + ), + }); + + return { client, isOAuthToken: false }; +} + +function buildParams( + model: Model<"anthropic-messages">, + context: Context, + isOAuthToken: boolean, + options?: AnthropicOptions, +): MessageCreateParamsStreaming { + const { cacheControl } = getCacheControl(model.baseUrl, options?.cacheRetention); + const params: MessageCreateParamsStreaming = { + model: model.id, + messages: convertMessages(context.messages, model, isOAuthToken, cacheControl), + max_tokens: options?.maxTokens || (model.maxTokens / 3) | 0, + stream: true, + }; + + // For OAuth tokens, we MUST include Claude Code identity + if (isOAuthToken) { + params.system = [ + { + type: "text", + text: "You are Claude Code, Anthropic's official CLI for Claude.", + ...(cacheControl ? { cache_control: cacheControl } : {}), + }, + ]; + if (context.systemPrompt) { + params.system.push({ + type: "text", + text: sanitizeSurrogates(context.systemPrompt), + ...(cacheControl ? { cache_control: cacheControl } : {}), + }); + } + } else if (context.systemPrompt) { + // Add cache control to system prompt for non-OAuth tokens + params.system = [ + { + type: "text", + text: sanitizeSurrogates(context.systemPrompt), + ...(cacheControl ? { cache_control: cacheControl } : {}), + }, + ]; + } + + // Temperature is incompatible with extended thinking (adaptive or budget-based). 
+ if (options?.temperature !== undefined && !options?.thinkingEnabled) { + params.temperature = options.temperature; + } + + if (context.tools) { + params.tools = convertTools(context.tools, isOAuthToken); + } + + // Configure thinking mode: adaptive (Opus 4.6 and Sonnet 4.6) or budget-based (older models) + if (options?.thinkingEnabled && model.reasoning) { + if (supportsAdaptiveThinking(model.id)) { + // Adaptive thinking: Claude decides when and how much to think + params.thinking = { type: "adaptive" }; + if (options.effort) { + params.output_config = { effort: options.effort }; + } + } else { + // Budget-based thinking for older models + params.thinking = { + type: "enabled", + budget_tokens: options.thinkingBudgetTokens || 1024, + }; + } + } + + if (options?.metadata) { + const userId = options.metadata.user_id; + if (typeof userId === "string") { + params.metadata = { user_id: userId }; + } + } + + if (options?.toolChoice) { + if (typeof options.toolChoice === "string") { + params.tool_choice = { type: options.toolChoice }; + } else { + params.tool_choice = options.toolChoice; + } + } + + return params; +} + +// Normalize tool call IDs to match Anthropic's required pattern and length +function normalizeToolCallId(id: string): string { + return id.replace(/[^a-zA-Z0-9_-]/g, "_").slice(0, 64); +} + +function convertMessages( + messages: Message[], + model: Model<"anthropic-messages">, + isOAuthToken: boolean, + cacheControl?: { type: "ephemeral"; ttl?: "1h" }, +): MessageParam[] { + const params: MessageParam[] = []; + + // Transform messages for cross-provider compatibility + const transformedMessages = transformMessages(messages, model, normalizeToolCallId); + + for (let i = 0; i < transformedMessages.length; i++) { + const msg = transformedMessages[i]; + + if (msg.role === "user") { + if (typeof msg.content === "string") { + if (msg.content.trim().length > 0) { + params.push({ + role: "user", + content: sanitizeSurrogates(msg.content), + }); + } + } else { 
+ const blocks: ContentBlockParam[] = msg.content.map((item) => { + if (item.type === "text") { + return { + type: "text", + text: sanitizeSurrogates(item.text), + }; + } else { + return { + type: "image", + source: { + type: "base64", + media_type: item.mimeType as "image/jpeg" | "image/png" | "image/gif" | "image/webp", + data: item.data, + }, + }; + } + }); + let filteredBlocks = !model?.input.includes("image") ? blocks.filter((b) => b.type !== "image") : blocks; + filteredBlocks = filteredBlocks.filter((b) => { + if (b.type === "text") { + return b.text.trim().length > 0; + } + return true; + }); + if (filteredBlocks.length === 0) continue; + params.push({ + role: "user", + content: filteredBlocks, + }); + } + } else if (msg.role === "assistant") { + const blocks: ContentBlockParam[] = []; + + for (const block of msg.content) { + if (block.type === "text") { + if (block.text.trim().length === 0) continue; + blocks.push({ + type: "text", + text: sanitizeSurrogates(block.text), + }); + } else if (block.type === "thinking") { + // Redacted thinking: pass the opaque payload back as redacted_thinking + if (block.redacted) { + blocks.push({ + type: "redacted_thinking", + data: block.thinkingSignature!, + }); + continue; + } + if (block.thinking.trim().length === 0) continue; + // If thinking signature is missing/empty (e.g., from aborted stream), + // convert to plain text block without tags to avoid API rejection + // and prevent Claude from mimicking the tags in responses + if (!block.thinkingSignature || block.thinkingSignature.trim().length === 0) { + blocks.push({ + type: "text", + text: sanitizeSurrogates(block.thinking), + }); + } else { + blocks.push({ + type: "thinking", + thinking: sanitizeSurrogates(block.thinking), + signature: block.thinkingSignature, + }); + } + } else if (block.type === "toolCall") { + blocks.push({ + type: "tool_use", + id: block.id, + name: isOAuthToken ? toClaudeCodeName(block.name) : block.name, + input: block.arguments ?? 
{}, + }); + } + } + if (blocks.length === 0) continue; + params.push({ + role: "assistant", + content: blocks, + }); + } else if (msg.role === "toolResult") { + // Collect all consecutive toolResult messages, needed for z.ai Anthropic endpoint + const toolResults: ContentBlockParam[] = []; + + // Add the current tool result + toolResults.push({ + type: "tool_result", + tool_use_id: msg.toolCallId, + content: convertContentBlocks(msg.content), + is_error: msg.isError, + }); + + // Look ahead for consecutive toolResult messages + let j = i + 1; + while (j < transformedMessages.length && transformedMessages[j].role === "toolResult") { + const nextMsg = transformedMessages[j] as ToolResultMessage; // We know it's a toolResult + toolResults.push({ + type: "tool_result", + tool_use_id: nextMsg.toolCallId, + content: convertContentBlocks(nextMsg.content), + is_error: nextMsg.isError, + }); + j++; + } + + // Skip the messages we've already processed + i = j - 1; + + // Add a single user message with all tool results + params.push({ + role: "user", + content: toolResults, + }); + } + } + + // Add cache_control to the last user message to cache conversation history + if (cacheControl && params.length > 0) { + const lastMessage = params[params.length - 1]; + if (lastMessage.role === "user") { + if (Array.isArray(lastMessage.content)) { + const lastBlock = lastMessage.content[lastMessage.content.length - 1]; + if ( + lastBlock && + (lastBlock.type === "text" || lastBlock.type === "image" || lastBlock.type === "tool_result") + ) { + (lastBlock as any).cache_control = cacheControl; + } + } else if (typeof lastMessage.content === "string") { + lastMessage.content = [ + { + type: "text", + text: lastMessage.content, + cache_control: cacheControl, + }, + ] as any; + } + } + } + + return params; +} + +function convertTools(tools: Tool[], isOAuthToken: boolean): Anthropic.Messages.Tool[] { + if (!tools) return []; + + return tools.map((tool) => { + const jsonSchema = tool.parameters 
as any; // TypeBox already generates JSON Schema + + return { + name: isOAuthToken ? toClaudeCodeName(tool.name) : tool.name, + description: tool.description, + input_schema: { + type: "object" as const, + properties: jsonSchema.properties || {}, + required: jsonSchema.required || [], + }, + }; + }); +} + +function mapStopReason(reason: Anthropic.Messages.StopReason | string): StopReason { + switch (reason) { + case "end_turn": + return "stop"; + case "max_tokens": + return "length"; + case "tool_use": + return "toolUse"; + case "refusal": + return "error"; + case "pause_turn": // Stop is good enough -> resubmit + return "stop"; + case "stop_sequence": + return "stop"; // We don't supply stop sequences, so this should never happen + case "sensitive": // Content flagged by safety filters (not yet in SDK types) + return "error"; + default: + // Handle unknown stop reasons gracefully (API may add new values) + throw new Error(`Unhandled stop reason: ${reason}`); + } +} diff --git a/packages/pi-ai/src/providers/azure-openai-responses.ts b/packages/pi-ai/src/providers/azure-openai-responses.ts new file mode 100644 index 000000000..4f474e1a5 --- /dev/null +++ b/packages/pi-ai/src/providers/azure-openai-responses.ts @@ -0,0 +1,259 @@ +import { AzureOpenAI } from "openai"; +import type { ResponseCreateParamsStreaming } from "openai/resources/responses/responses.js"; +import { getEnvApiKey } from "../env-api-keys.js"; +import { supportsXhigh } from "../models.js"; +import type { + Api, + AssistantMessage, + Context, + Model, + SimpleStreamOptions, + StreamFunction, + StreamOptions, +} from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { convertResponsesMessages, convertResponsesTools, processResponsesStream } from "./openai-responses-shared.js"; +import { buildBaseOptions, clampReasoning } from "./simple-options.js"; + +const DEFAULT_AZURE_API_VERSION = "v1"; +const AZURE_TOOL_CALL_PROVIDERS = new Set(["openai", 
"openai-codex", "opencode", "azure-openai-responses"]); + +function parseDeploymentNameMap(value: string | undefined): Map { + const map = new Map(); + if (!value) return map; + for (const entry of value.split(",")) { + const trimmed = entry.trim(); + if (!trimmed) continue; + const [modelId, deploymentName] = trimmed.split("=", 2); + if (!modelId || !deploymentName) continue; + map.set(modelId.trim(), deploymentName.trim()); + } + return map; +} + +function resolveDeploymentName(model: Model<"azure-openai-responses">, options?: AzureOpenAIResponsesOptions): string { + if (options?.azureDeploymentName) { + return options.azureDeploymentName; + } + const mappedDeployment = parseDeploymentNameMap(process.env.AZURE_OPENAI_DEPLOYMENT_NAME_MAP).get(model.id); + return mappedDeployment || model.id; +} + +// Azure OpenAI Responses-specific options +export interface AzureOpenAIResponsesOptions extends StreamOptions { + reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh"; + reasoningSummary?: "auto" | "detailed" | "concise" | null; + azureApiVersion?: string; + azureResourceName?: string; + azureBaseUrl?: string; + azureDeploymentName?: string; +} + +/** + * Generate function for Azure OpenAI Responses API + */ +export const streamAzureOpenAIResponses: StreamFunction<"azure-openai-responses", AzureOpenAIResponsesOptions> = ( + model: Model<"azure-openai-responses">, + context: Context, + options?: AzureOpenAIResponsesOptions, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + // Start async processing + (async () => { + const deploymentName = resolveDeploymentName(model, options); + + const output: AssistantMessage = { + role: "assistant", + content: [], + api: "azure-openai-responses" as Api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: 
"stop", + timestamp: Date.now(), + }; + + try { + // Create Azure OpenAI client + const apiKey = options?.apiKey || getEnvApiKey(model.provider) || ""; + const client = createClient(model, apiKey, options); + let params = buildParams(model, context, options, deploymentName); + const nextParams = await options?.onPayload?.(params, model); + if (nextParams !== undefined) { + params = nextParams as ResponseCreateParamsStreaming; + } + const openaiStream = await client.responses.create( + params, + options?.signal ? { signal: options.signal } : undefined, + ); + stream.push({ type: "start", partial: output }); + + await processResponsesStream(openaiStream, output, stream, model); + + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (output.stopReason === "aborted" || output.stopReason === "error") { + throw new Error("An unknown error occurred"); + } + + stream.push({ type: "done", reason: output.stopReason, message: output }); + stream.end(); + } catch (error) { + for (const block of output.content) delete (block as { index?: number }).index; + output.stopReason = options?.signal?.aborted ? "aborted" : "error"; + output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error); + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +export const streamSimpleAzureOpenAIResponses: StreamFunction<"azure-openai-responses", SimpleStreamOptions> = ( + model: Model<"azure-openai-responses">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const apiKey = options?.apiKey || getEnvApiKey(model.provider); + if (!apiKey) { + throw new Error(`No API key for provider: ${model.provider}`); + } + + const base = buildBaseOptions(model, options, apiKey); + const reasoningEffort = supportsXhigh(model) ? 
options?.reasoning : clampReasoning(options?.reasoning); + + return streamAzureOpenAIResponses(model, context, { + ...base, + reasoningEffort, + } satisfies AzureOpenAIResponsesOptions); +}; + +function normalizeAzureBaseUrl(baseUrl: string): string { + return baseUrl.replace(/\/+$/, ""); +} + +function buildDefaultBaseUrl(resourceName: string): string { + return `https://${resourceName}.openai.azure.com/openai/v1`; +} + +function resolveAzureConfig( + model: Model<"azure-openai-responses">, + options?: AzureOpenAIResponsesOptions, +): { baseUrl: string; apiVersion: string } { + const apiVersion = options?.azureApiVersion || process.env.AZURE_OPENAI_API_VERSION || DEFAULT_AZURE_API_VERSION; + + const baseUrl = options?.azureBaseUrl?.trim() || process.env.AZURE_OPENAI_BASE_URL?.trim() || undefined; + const resourceName = options?.azureResourceName || process.env.AZURE_OPENAI_RESOURCE_NAME; + + let resolvedBaseUrl = baseUrl; + + if (!resolvedBaseUrl && resourceName) { + resolvedBaseUrl = buildDefaultBaseUrl(resourceName); + } + + if (!resolvedBaseUrl && model.baseUrl) { + resolvedBaseUrl = model.baseUrl; + } + + if (!resolvedBaseUrl) { + throw new Error( + "Azure OpenAI base URL is required. Set AZURE_OPENAI_BASE_URL or AZURE_OPENAI_RESOURCE_NAME, or pass azureBaseUrl, azureResourceName, or model.baseUrl.", + ); + } + + return { + baseUrl: normalizeAzureBaseUrl(resolvedBaseUrl), + apiVersion, + }; +} + +function createClient(model: Model<"azure-openai-responses">, apiKey: string, options?: AzureOpenAIResponsesOptions) { + if (!apiKey) { + if (!process.env.AZURE_OPENAI_API_KEY) { + throw new Error( + "Azure OpenAI API key is required. 
Set AZURE_OPENAI_API_KEY environment variable or pass it as an argument.", + ); + } + apiKey = process.env.AZURE_OPENAI_API_KEY; + } + + const headers = { ...model.headers }; + + if (options?.headers) { + Object.assign(headers, options.headers); + } + + const { baseUrl, apiVersion } = resolveAzureConfig(model, options); + + return new AzureOpenAI({ + apiKey, + apiVersion, + dangerouslyAllowBrowser: true, + defaultHeaders: headers, + baseURL: baseUrl, + }); +} + +function buildParams( + model: Model<"azure-openai-responses">, + context: Context, + options: AzureOpenAIResponsesOptions | undefined, + deploymentName: string, +) { + const messages = convertResponsesMessages(model, context, AZURE_TOOL_CALL_PROVIDERS); + + const params: ResponseCreateParamsStreaming = { + model: deploymentName, + input: messages, + stream: true, + prompt_cache_key: options?.sessionId, + }; + + if (options?.maxTokens) { + params.max_output_tokens = options?.maxTokens; + } + + if (options?.temperature !== undefined) { + params.temperature = options?.temperature; + } + + if (context.tools) { + params.tools = convertResponsesTools(context.tools); + } + + if (model.reasoning) { + if (options?.reasoningEffort || options?.reasoningSummary) { + params.reasoning = { + effort: options?.reasoningEffort || "medium", + summary: options?.reasoningSummary || "auto", + }; + params.include = ["reasoning.encrypted_content"]; + } else { + if (model.name.toLowerCase().startsWith("gpt-5")) { + // Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7 + messages.push({ + role: "developer", + content: [ + { + type: "input_text", + text: "# Juice: 0 !important", + }, + ], + }); + } + } + } + + return params; +} diff --git a/packages/pi-ai/src/providers/github-copilot-headers.ts b/packages/pi-ai/src/providers/github-copilot-headers.ts new file mode 100644 index 000000000..4f01a9d2a --- /dev/null +++ b/packages/pi-ai/src/providers/github-copilot-headers.ts @@ -0,0 +1,37 @@ 
+import type { Message } from "../types.js"; + +// Copilot expects X-Initiator to indicate whether the request is user-initiated +// or agent-initiated (e.g. follow-up after assistant/tool messages). +export function inferCopilotInitiator(messages: Message[]): "user" | "agent" { + const last = messages[messages.length - 1]; + return last && last.role !== "user" ? "agent" : "user"; +} + +// Copilot requires Copilot-Vision-Request header when sending images +export function hasCopilotVisionInput(messages: Message[]): boolean { + return messages.some((msg) => { + if (msg.role === "user" && Array.isArray(msg.content)) { + return msg.content.some((c) => c.type === "image"); + } + if (msg.role === "toolResult" && Array.isArray(msg.content)) { + return msg.content.some((c) => c.type === "image"); + } + return false; + }); +} + +export function buildCopilotDynamicHeaders(params: { + messages: Message[]; + hasImages: boolean; +}): Record { + const headers: Record = { + "X-Initiator": inferCopilotInitiator(params.messages), + "Openai-Intent": "conversation-edits", + }; + + if (params.hasImages) { + headers["Copilot-Vision-Request"] = "true"; + } + + return headers; +} diff --git a/packages/pi-ai/src/providers/google-gemini-cli.ts b/packages/pi-ai/src/providers/google-gemini-cli.ts new file mode 100644 index 000000000..67eccdbcc --- /dev/null +++ b/packages/pi-ai/src/providers/google-gemini-cli.ts @@ -0,0 +1,967 @@ +/** + * Google Gemini CLI / Antigravity provider. + * Shared implementation for both google-gemini-cli and google-antigravity providers. + * Uses the Cloud Code Assist API endpoint to access Gemini and Claude models. 
+ */ + +import type { Content, ThinkingConfig } from "@google/genai"; +import { calculateCost } from "../models.js"; +import type { + Api, + AssistantMessage, + Context, + Model, + SimpleStreamOptions, + StreamFunction, + StreamOptions, + TextContent, + ThinkingBudgets, + ThinkingContent, + ThinkingLevel, + ToolCall, +} from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { sanitizeSurrogates } from "../utils/sanitize-unicode.js"; +import { + convertMessages, + convertTools, + isThinkingPart, + mapStopReasonString, + mapToolChoice, + retainThoughtSignature, +} from "./google-shared.js"; +import { buildBaseOptions, clampReasoning } from "./simple-options.js"; + +/** + * Thinking level for Gemini 3 models. + * Mirrors Google's ThinkingLevel enum values. + */ +export type GoogleThinkingLevel = "THINKING_LEVEL_UNSPECIFIED" | "MINIMAL" | "LOW" | "MEDIUM" | "HIGH"; + +export interface GoogleGeminiCliOptions extends StreamOptions { + toolChoice?: "auto" | "none" | "any"; + /** + * Thinking/reasoning configuration. + * - Gemini 2.x models: use `budgetTokens` to set the thinking budget + * - Gemini 3 models (gemini-3-pro-*, gemini-3-flash-*): use `level` instead + * + * When using `streamSimple`, this is handled automatically based on the model. + */ + thinking?: { + enabled: boolean; + /** Thinking budget in tokens. Use for Gemini 2.x models. */ + budgetTokens?: number; + /** Thinking level. Use for Gemini 3 models (LOW/HIGH for Pro, MINIMAL/LOW/MEDIUM/HIGH for Flash). 
*/ + level?: GoogleThinkingLevel; + }; + projectId?: string; +} + +const DEFAULT_ENDPOINT = "https://cloudcode-pa.googleapis.com"; +const ANTIGRAVITY_DAILY_ENDPOINT = "https://daily-cloudcode-pa.sandbox.googleapis.com"; +const ANTIGRAVITY_AUTOPUSH_ENDPOINT = "https://autopush-cloudcode-pa.sandbox.googleapis.com"; +const ANTIGRAVITY_ENDPOINT_FALLBACKS = [ + ANTIGRAVITY_DAILY_ENDPOINT, + ANTIGRAVITY_AUTOPUSH_ENDPOINT, + DEFAULT_ENDPOINT, +] as const; +// Headers for Gemini CLI (prod endpoint) +const GEMINI_CLI_HEADERS = { + "User-Agent": "google-cloud-sdk vscode_cloudshelleditor/0.1", + "X-Goog-Api-Client": "gl-node/22.17.0", + "Client-Metadata": JSON.stringify({ + ideType: "IDE_UNSPECIFIED", + platform: "PLATFORM_UNSPECIFIED", + pluginType: "GEMINI", + }), +}; + +// Headers for Antigravity (sandbox endpoint) - requires specific User-Agent +const DEFAULT_ANTIGRAVITY_VERSION = "1.18.4"; + +function getAntigravityHeaders() { + const version = process.env.PI_AI_ANTIGRAVITY_VERSION || DEFAULT_ANTIGRAVITY_VERSION; + return { + "User-Agent": `antigravity/${version} darwin/arm64`, + }; +} + +// Antigravity system instruction (compact version from CLIProxyAPI). +const ANTIGRAVITY_SYSTEM_INSTRUCTION = + "You are Antigravity, a powerful agentic AI coding assistant designed by the Google Deepmind team working on Advanced Agentic Coding." + + "You are pair programming with a USER to solve their coding task. The task may require creating a new codebase, modifying or debugging an existing codebase, or simply answering a question." + + "**Absolute paths only**" + + "**Proactiveness**"; + +// Counter for generating unique tool call IDs +let toolCallCounter = 0; + +// Retry configuration +const MAX_RETRIES = 3; +const BASE_DELAY_MS = 1000; +const MAX_EMPTY_STREAM_RETRIES = 2; +const EMPTY_STREAM_BASE_DELAY_MS = 500; +const CLAUDE_THINKING_BETA_HEADER = "interleaved-thinking-2025-05-14"; + +/** + * Extract retry delay from Gemini error response (in milliseconds). 
+ * Checks headers first (Retry-After, x-ratelimit-reset, x-ratelimit-reset-after), + * then parses body patterns like: + * - "Your quota will reset after 39s" + * - "Your quota will reset after 18h31m10s" + * - "Please retry in Xs" or "Please retry in Xms" + * - "retryDelay": "34.074824224s" (JSON field) + */ +export function extractRetryDelay(errorText: string, response?: Response | Headers): number | undefined { + const normalizeDelay = (ms: number): number | undefined => (ms > 0 ? Math.ceil(ms + 1000) : undefined); + + const headers = response instanceof Headers ? response : response?.headers; + if (headers) { + const retryAfter = headers.get("retry-after"); + if (retryAfter) { + const retryAfterSeconds = Number(retryAfter); + if (Number.isFinite(retryAfterSeconds)) { + const delay = normalizeDelay(retryAfterSeconds * 1000); + if (delay !== undefined) { + return delay; + } + } + const retryAfterDate = new Date(retryAfter); + const retryAfterMs = retryAfterDate.getTime(); + if (!Number.isNaN(retryAfterMs)) { + const delay = normalizeDelay(retryAfterMs - Date.now()); + if (delay !== undefined) { + return delay; + } + } + } + + const rateLimitReset = headers.get("x-ratelimit-reset"); + if (rateLimitReset) { + const resetSeconds = Number.parseInt(rateLimitReset, 10); + if (!Number.isNaN(resetSeconds)) { + const delay = normalizeDelay(resetSeconds * 1000 - Date.now()); + if (delay !== undefined) { + return delay; + } + } + } + + const rateLimitResetAfter = headers.get("x-ratelimit-reset-after"); + if (rateLimitResetAfter) { + const resetAfterSeconds = Number(rateLimitResetAfter); + if (Number.isFinite(resetAfterSeconds)) { + const delay = normalizeDelay(resetAfterSeconds * 1000); + if (delay !== undefined) { + return delay; + } + } + } + } + + // Pattern 1: "Your quota will reset after ..." 
(formats: "18h31m10s", "10m15s", "6s", "39s") + const durationMatch = errorText.match(/reset after (?:(\d+)h)?(?:(\d+)m)?(\d+(?:\.\d+)?)s/i); + if (durationMatch) { + const hours = durationMatch[1] ? parseInt(durationMatch[1], 10) : 0; + const minutes = durationMatch[2] ? parseInt(durationMatch[2], 10) : 0; + const seconds = parseFloat(durationMatch[3]); + if (!Number.isNaN(seconds)) { + const totalMs = ((hours * 60 + minutes) * 60 + seconds) * 1000; + const delay = normalizeDelay(totalMs); + if (delay !== undefined) { + return delay; + } + } + } + + // Pattern 2: "Please retry in X[ms|s]" + const retryInMatch = errorText.match(/Please retry in ([0-9.]+)(ms|s)/i); + if (retryInMatch?.[1]) { + const value = parseFloat(retryInMatch[1]); + if (!Number.isNaN(value) && value > 0) { + const ms = retryInMatch[2].toLowerCase() === "ms" ? value : value * 1000; + const delay = normalizeDelay(ms); + if (delay !== undefined) { + return delay; + } + } + } + + // Pattern 3: "retryDelay": "34.074824224s" (JSON field in error details) + const retryDelayMatch = errorText.match(/"retryDelay":\s*"([0-9.]+)(ms|s)"/i); + if (retryDelayMatch?.[1]) { + const value = parseFloat(retryDelayMatch[1]); + if (!Number.isNaN(value) && value > 0) { + const ms = retryDelayMatch[2].toLowerCase() === "ms" ? 
value : value * 1000; + const delay = normalizeDelay(ms); + if (delay !== undefined) { + return delay; + } + } + } + + return undefined; +} + +function needsClaudeThinkingBetaHeader(model: Model<"google-gemini-cli">): boolean { + return model.provider === "google-antigravity" && model.id.startsWith("claude-") && model.reasoning; +} + +function isGemini3ProModel(modelId: string): boolean { + return /gemini-3(?:\.1)?-pro/.test(modelId.toLowerCase()); +} + +function isGemini3FlashModel(modelId: string): boolean { + return /gemini-3(?:\.1)?-flash/.test(modelId.toLowerCase()); +} + +function isGemini3Model(modelId: string): boolean { + return isGemini3ProModel(modelId) || isGemini3FlashModel(modelId); +} + +/** + * Check if an error is retryable (rate limit, server error, network error, etc.) + */ +function isRetryableError(status: number, errorText: string): boolean { + if (status === 429 || status === 500 || status === 502 || status === 503 || status === 504) { + return true; + } + return /resource.?exhausted|rate.?limit|overloaded|service.?unavailable|other.?side.?closed/i.test(errorText); +} + +/** + * Extract a clean, user-friendly error message from Google API error response. + * Parses JSON error responses and returns just the message field. + */ +function extractErrorMessage(errorText: string): string { + try { + const parsed = JSON.parse(errorText) as { error?: { message?: string } }; + if (parsed.error?.message) { + return parsed.error.message; + } + } catch { + // Not JSON, return as-is + } + return errorText; +} + +/** + * Sleep for a given number of milliseconds, respecting abort signal. 
+ */ +function sleep(ms: number, signal?: AbortSignal): Promise { + return new Promise((resolve, reject) => { + if (signal?.aborted) { + reject(new Error("Request was aborted")); + return; + } + const timeout = setTimeout(resolve, ms); + signal?.addEventListener("abort", () => { + clearTimeout(timeout); + reject(new Error("Request was aborted")); + }); + }); +} + +interface CloudCodeAssistRequest { + project: string; + model: string; + request: { + contents: Content[]; + sessionId?: string; + systemInstruction?: { role?: string; parts: { text: string }[] }; + generationConfig?: { + maxOutputTokens?: number; + temperature?: number; + thinkingConfig?: ThinkingConfig; + }; + tools?: ReturnType; + toolConfig?: { + functionCallingConfig: { + mode: ReturnType; + }; + }; + }; + requestType?: string; + userAgent?: string; + requestId?: string; +} + +interface CloudCodeAssistResponseChunk { + response?: { + candidates?: Array<{ + content?: { + role: string; + parts?: Array<{ + text?: string; + thought?: boolean; + thoughtSignature?: string; + functionCall?: { + name: string; + args: Record; + id?: string; + }; + }>; + }; + finishReason?: string; + }>; + usageMetadata?: { + promptTokenCount?: number; + candidatesTokenCount?: number; + thoughtsTokenCount?: number; + totalTokenCount?: number; + cachedContentTokenCount?: number; + }; + modelVersion?: string; + responseId?: string; + }; + traceId?: string; +} + +export const streamGoogleGeminiCli: StreamFunction<"google-gemini-cli", GoogleGeminiCliOptions> = ( + model: Model<"google-gemini-cli">, + context: Context, + options?: GoogleGeminiCliOptions, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + (async () => { + const output: AssistantMessage = { + role: "assistant", + content: [], + api: "google-gemini-cli" as Api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, 
cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "stop", + timestamp: Date.now(), + }; + + try { + // apiKey is JSON-encoded: { token, projectId } + const apiKeyRaw = options?.apiKey; + if (!apiKeyRaw) { + throw new Error("Google Cloud Code Assist requires OAuth authentication. Use /login to authenticate."); + } + + let accessToken: string; + let projectId: string; + + try { + const parsed = JSON.parse(apiKeyRaw) as { token: string; projectId: string }; + accessToken = parsed.token; + projectId = parsed.projectId; + } catch { + throw new Error("Invalid Google Cloud Code Assist credentials. Use /login to re-authenticate."); + } + + if (!accessToken || !projectId) { + throw new Error("Missing token or projectId in Google Cloud credentials. Use /login to re-authenticate."); + } + + const isAntigravity = model.provider === "google-antigravity"; + const baseUrl = model.baseUrl?.trim(); + const endpoints = baseUrl ? [baseUrl] : isAntigravity ? ANTIGRAVITY_ENDPOINT_FALLBACKS : [DEFAULT_ENDPOINT]; + + let requestBody = buildRequest(model, context, projectId, options, isAntigravity); + const nextRequestBody = await options?.onPayload?.(requestBody, model); + if (nextRequestBody !== undefined) { + requestBody = nextRequestBody as CloudCodeAssistRequest; + } + const headers = isAntigravity ? getAntigravityHeaders() : GEMINI_CLI_HEADERS; + + const requestHeaders = { + Authorization: `Bearer ${accessToken}`, + "Content-Type": "application/json", + Accept: "text/event-stream", + ...headers, + ...(needsClaudeThinkingBetaHeader(model) ? { "anthropic-beta": CLAUDE_THINKING_BETA_HEADER } : {}), + ...options?.headers, + }; + const requestBodyJson = JSON.stringify(requestBody); + + // Fetch with retry logic for rate limits, transient errors, and endpoint fallbacks. + // On 403/404, immediately try the next endpoint (no delay). + // On 429/5xx, retry with backoff on the same or next endpoint. 
+ let response: Response | undefined; + let lastError: Error | undefined; + let requestUrl: string | undefined; + let endpointIndex = 0; + + for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) { + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + try { + const endpoint = endpoints[endpointIndex]; + requestUrl = `${endpoint}/v1internal:streamGenerateContent?alt=sse`; + response = await fetch(requestUrl, { + method: "POST", + headers: requestHeaders, + body: requestBodyJson, + signal: options?.signal, + }); + + if (response.ok) { + break; // Success, exit retry loop + } + + const errorText = await response.text(); + + // On 403/404, cascade to the next endpoint immediately (no delay) + if ((response.status === 403 || response.status === 404) && endpointIndex < endpoints.length - 1) { + endpointIndex++; + continue; + } + + // Check if retryable (429, 5xx, network patterns) + if (attempt < MAX_RETRIES && isRetryableError(response.status, errorText)) { + // Advance endpoint if possible + if (endpointIndex < endpoints.length - 1) { + endpointIndex++; + } + + // Use server-provided delay or exponential backoff + const serverDelay = extractRetryDelay(errorText, response); + const delayMs = serverDelay ?? BASE_DELAY_MS * 2 ** attempt; + + // Check if server delay exceeds max allowed (default: 60s) + const maxDelayMs = options?.maxRetryDelayMs ?? 60000; + if (maxDelayMs > 0 && serverDelay && serverDelay > maxDelayMs) { + const delaySeconds = Math.ceil(serverDelay / 1000); + throw new Error( + `Server requested ${delaySeconds}s retry delay (max: ${Math.ceil(maxDelayMs / 1000)}s). 
${extractErrorMessage(errorText)}`, + ); + } + + await sleep(delayMs, options?.signal); + continue; + } + + // Not retryable or max retries exceeded + throw new Error(`Cloud Code Assist API error (${response.status}): ${extractErrorMessage(errorText)}`); + } catch (error) { + // Check for abort - fetch throws AbortError, our code throws "Request was aborted" + if (error instanceof Error) { + if (error.name === "AbortError" || error.message === "Request was aborted") { + throw new Error("Request was aborted"); + } + } + // Extract detailed error message from fetch errors (Node includes cause) + lastError = error instanceof Error ? error : new Error(String(error)); + if (lastError.message === "fetch failed" && lastError.cause instanceof Error) { + lastError = new Error(`Network error: ${lastError.cause.message}`); + } + // Network errors are retryable + if (attempt < MAX_RETRIES) { + const delayMs = BASE_DELAY_MS * 2 ** attempt; + await sleep(delayMs, options?.signal); + continue; + } + throw lastError; + } + } + + if (!response || !response.ok) { + throw lastError ?? 
new Error("Failed to get response after retries"); + } + + let started = false; + const ensureStarted = () => { + if (!started) { + stream.push({ type: "start", partial: output }); + started = true; + } + }; + + const resetOutput = () => { + output.content = []; + output.usage = { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }; + output.stopReason = "stop"; + output.errorMessage = undefined; + output.timestamp = Date.now(); + started = false; + }; + + const streamResponse = async (activeResponse: Response): Promise => { + if (!activeResponse.body) { + throw new Error("No response body"); + } + + let hasContent = false; + let currentBlock: TextContent | ThinkingContent | null = null; + const blocks = output.content; + const blockIndex = () => blocks.length - 1; + + // Read SSE stream + const reader = activeResponse.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + // Set up abort handler to cancel reader when signal fires + const abortHandler = () => { + void reader.cancel().catch(() => {}); + }; + options?.signal?.addEventListener("abort", abortHandler); + + try { + while (true) { + // Check abort signal before each read + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + const { done, value } = await reader.read(); + if (done) break; + + buffer += decoder.decode(value, { stream: true }); + const lines = buffer.split("\n"); + buffer = lines.pop() || ""; + + for (const line of lines) { + if (!line.startsWith("data:")) continue; + + const jsonStr = line.slice(5).trim(); + if (!jsonStr) continue; + + let chunk: CloudCodeAssistResponseChunk; + try { + chunk = JSON.parse(jsonStr); + } catch { + continue; + } + + // Unwrap the response + const responseData = chunk.response; + if (!responseData) continue; + + const candidate = responseData.candidates?.[0]; + if (candidate?.content?.parts) { + for (const part 
of candidate.content.parts) { + if (part.text !== undefined) { + hasContent = true; + const isThinking = isThinkingPart(part); + if ( + !currentBlock || + (isThinking && currentBlock.type !== "thinking") || + (!isThinking && currentBlock.type !== "text") + ) { + if (currentBlock) { + if (currentBlock.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blocks.length - 1, + content: currentBlock.text, + partial: output, + }); + } else { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: currentBlock.thinking, + partial: output, + }); + } + } + if (isThinking) { + currentBlock = { type: "thinking", thinking: "", thinkingSignature: undefined }; + output.content.push(currentBlock); + ensureStarted(); + stream.push({ + type: "thinking_start", + contentIndex: blockIndex(), + partial: output, + }); + } else { + currentBlock = { type: "text", text: "" }; + output.content.push(currentBlock); + ensureStarted(); + stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output }); + } + } + if (currentBlock.type === "thinking") { + currentBlock.thinking += part.text; + currentBlock.thinkingSignature = retainThoughtSignature( + currentBlock.thinkingSignature, + part.thoughtSignature, + ); + stream.push({ + type: "thinking_delta", + contentIndex: blockIndex(), + delta: part.text, + partial: output, + }); + } else { + currentBlock.text += part.text; + currentBlock.textSignature = retainThoughtSignature( + currentBlock.textSignature, + part.thoughtSignature, + ); + stream.push({ + type: "text_delta", + contentIndex: blockIndex(), + delta: part.text, + partial: output, + }); + } + } + + if (part.functionCall) { + hasContent = true; + if (currentBlock) { + if (currentBlock.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blockIndex(), + content: currentBlock.text, + partial: output, + }); + } else { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: 
currentBlock.thinking, + partial: output, + }); + } + currentBlock = null; + } + + const providedId = part.functionCall.id; + const needsNewId = + !providedId || + output.content.some((b) => b.type === "toolCall" && b.id === providedId); + const toolCallId = needsNewId + ? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}` + : providedId; + + const toolCall: ToolCall = { + type: "toolCall", + id: toolCallId, + name: part.functionCall.name || "", + arguments: (part.functionCall.args as Record) ?? {}, + ...(part.thoughtSignature && { thoughtSignature: part.thoughtSignature }), + }; + + output.content.push(toolCall); + ensureStarted(); + stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output }); + stream.push({ + type: "toolcall_delta", + contentIndex: blockIndex(), + delta: JSON.stringify(toolCall.arguments), + partial: output, + }); + stream.push({ + type: "toolcall_end", + contentIndex: blockIndex(), + toolCall, + partial: output, + }); + } + } + } + + if (candidate?.finishReason) { + output.stopReason = mapStopReasonString(candidate.finishReason); + if (output.content.some((b) => b.type === "toolCall")) { + output.stopReason = "toolUse"; + } + } + + if (responseData.usageMetadata) { + // promptTokenCount includes cachedContentTokenCount, so subtract to get fresh input + const promptTokens = responseData.usageMetadata.promptTokenCount || 0; + const cacheReadTokens = responseData.usageMetadata.cachedContentTokenCount || 0; + output.usage = { + input: promptTokens - cacheReadTokens, + output: + (responseData.usageMetadata.candidatesTokenCount || 0) + + (responseData.usageMetadata.thoughtsTokenCount || 0), + cacheRead: cacheReadTokens, + cacheWrite: 0, + totalTokens: responseData.usageMetadata.totalTokenCount || 0, + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + total: 0, + }, + }; + calculateCost(model, output.usage); + } + } + } + } finally { + options?.signal?.removeEventListener("abort", abortHandler); + 
} + + if (currentBlock) { + if (currentBlock.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blockIndex(), + content: currentBlock.text, + partial: output, + }); + } else { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: currentBlock.thinking, + partial: output, + }); + } + } + + return hasContent; + }; + + let receivedContent = false; + let currentResponse = response; + + for (let emptyAttempt = 0; emptyAttempt <= MAX_EMPTY_STREAM_RETRIES; emptyAttempt++) { + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (emptyAttempt > 0) { + const backoffMs = EMPTY_STREAM_BASE_DELAY_MS * 2 ** (emptyAttempt - 1); + await sleep(backoffMs, options?.signal); + + if (!requestUrl) { + throw new Error("Missing request URL"); + } + + currentResponse = await fetch(requestUrl, { + method: "POST", + headers: requestHeaders, + body: requestBodyJson, + signal: options?.signal, + }); + + if (!currentResponse.ok) { + const retryErrorText = await currentResponse.text(); + throw new Error(`Cloud Code Assist API error (${currentResponse.status}): ${retryErrorText}`); + } + } + + const streamed = await streamResponse(currentResponse); + if (streamed) { + receivedContent = true; + break; + } + + if (emptyAttempt < MAX_EMPTY_STREAM_RETRIES) { + resetOutput(); + } + } + + if (!receivedContent) { + throw new Error("Cloud Code Assist API returned an empty response"); + } + + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (output.stopReason === "aborted" || output.stopReason === "error") { + throw new Error("An unknown error occurred"); + } + + stream.push({ type: "done", reason: output.stopReason, message: output }); + stream.end(); + } catch (error) { + for (const block of output.content) { + if ("index" in block) { + delete (block as { index?: number }).index; + } + } + output.stopReason = options?.signal?.aborted ? 
"aborted" : "error"; + output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error); + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +export const streamSimpleGoogleGeminiCli: StreamFunction<"google-gemini-cli", SimpleStreamOptions> = ( + model: Model<"google-gemini-cli">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const apiKey = options?.apiKey; + if (!apiKey) { + throw new Error("Google Cloud Code Assist requires OAuth authentication. Use /login to authenticate."); + } + + const base = buildBaseOptions(model, options, apiKey); + if (!options?.reasoning) { + return streamGoogleGeminiCli(model, context, { + ...base, + thinking: { enabled: false }, + } satisfies GoogleGeminiCliOptions); + } + + const effort = clampReasoning(options.reasoning)!; + if (isGemini3Model(model.id)) { + return streamGoogleGeminiCli(model, context, { + ...base, + thinking: { + enabled: true, + level: getGeminiCliThinkingLevel(effort, model.id), + }, + } satisfies GoogleGeminiCliOptions); + } + + const defaultBudgets: ThinkingBudgets = { + minimal: 1024, + low: 2048, + medium: 8192, + high: 16384, + }; + const budgets = { ...defaultBudgets, ...options.thinkingBudgets }; + + const minOutputTokens = 1024; + let thinkingBudget = budgets[effort]!; + const maxTokens = Math.min((base.maxTokens || 0) + thinkingBudget, model.maxTokens); + + if (maxTokens <= thinkingBudget) { + thinkingBudget = Math.max(0, maxTokens - minOutputTokens); + } + + return streamGoogleGeminiCli(model, context, { + ...base, + maxTokens, + thinking: { + enabled: true, + budgetTokens: thinkingBudget, + }, + } satisfies GoogleGeminiCliOptions); +}; + +export function buildRequest( + model: Model<"google-gemini-cli">, + context: Context, + projectId: string, + options: GoogleGeminiCliOptions = {}, + isAntigravity = false, +): CloudCodeAssistRequest { + const contents = 
convertMessages(model, context); + + const generationConfig: CloudCodeAssistRequest["request"]["generationConfig"] = {}; + if (options.temperature !== undefined) { + generationConfig.temperature = options.temperature; + } + if (options.maxTokens !== undefined) { + generationConfig.maxOutputTokens = options.maxTokens; + } + + // Thinking config + if (options.thinking?.enabled && model.reasoning) { + generationConfig.thinkingConfig = { + includeThoughts: true, + }; + // Gemini 3 models use thinkingLevel, older models use thinkingBudget + if (options.thinking.level !== undefined) { + // Cast to any since our GoogleThinkingLevel mirrors Google's ThinkingLevel enum values + generationConfig.thinkingConfig.thinkingLevel = options.thinking.level as any; + } else if (options.thinking.budgetTokens !== undefined) { + generationConfig.thinkingConfig.thinkingBudget = options.thinking.budgetTokens; + } + } + + const request: CloudCodeAssistRequest["request"] = { + contents, + }; + + request.sessionId = options.sessionId; + + // System instruction must be object with parts, not plain string + if (context.systemPrompt) { + request.systemInstruction = { + parts: [{ text: sanitizeSurrogates(context.systemPrompt) }], + }; + } + + if (Object.keys(generationConfig).length > 0) { + request.generationConfig = generationConfig; + } + + if (context.tools && context.tools.length > 0) { + // Claude models on Cloud Code Assist need the legacy `parameters` field; + // the API translates it into Anthropic's `input_schema`. + const useParameters = model.id.startsWith("claude-"); + request.tools = convertTools(context.tools, useParameters); + if (options.toolChoice) { + request.toolConfig = { + functionCallingConfig: { + mode: mapToolChoice(options.toolChoice), + }, + }; + } + } + + if (isAntigravity) { + const existingParts = request.systemInstruction?.parts ?? 
[]; + request.systemInstruction = { + role: "user", + parts: [ + { text: ANTIGRAVITY_SYSTEM_INSTRUCTION }, + { text: `Please ignore following [ignore]${ANTIGRAVITY_SYSTEM_INSTRUCTION}[/ignore]` }, + ...existingParts, + ], + }; + } + + return { + project: projectId, + model: model.id, + request, + ...(isAntigravity ? { requestType: "agent" } : {}), + userAgent: isAntigravity ? "antigravity" : "pi-coding-agent", + requestId: `${isAntigravity ? "agent" : "pi"}-${Date.now()}-${Math.random().toString(36).slice(2, 11)}`, + }; +} + +type ClampedThinkingLevel = Exclude; + +function getGeminiCliThinkingLevel(effort: ClampedThinkingLevel, modelId: string): GoogleThinkingLevel { + if (isGemini3ProModel(modelId)) { + switch (effort) { + case "minimal": + case "low": + return "LOW"; + case "medium": + case "high": + return "HIGH"; + } + } + switch (effort) { + case "minimal": + return "MINIMAL"; + case "low": + return "LOW"; + case "medium": + return "MEDIUM"; + case "high": + return "HIGH"; + } +} diff --git a/packages/pi-ai/src/providers/google-shared.ts b/packages/pi-ai/src/providers/google-shared.ts new file mode 100644 index 000000000..e942314f9 --- /dev/null +++ b/packages/pi-ai/src/providers/google-shared.ts @@ -0,0 +1,313 @@ +/** + * Shared utilities for Google Generative AI and Google Cloud Code Assist providers. + */ + +import { type Content, FinishReason, FunctionCallingConfigMode, type Part } from "@google/genai"; +import type { Context, ImageContent, Model, StopReason, TextContent, Tool } from "../types.js"; +import { sanitizeSurrogates } from "../utils/sanitize-unicode.js"; +import { transformMessages } from "./transform-messages.js"; + +type GoogleApiType = "google-generative-ai" | "google-gemini-cli" | "google-vertex"; + +/** + * Determines whether a streamed Gemini `Part` should be treated as "thinking". + * + * Protocol note (Gemini / Vertex AI thought signatures): + * - `thought: true` is the definitive marker for thinking content (thought summaries). 
+ * - `thoughtSignature` is an encrypted representation of the model's internal thought process + * used to preserve reasoning context across multi-turn interactions. + * - `thoughtSignature` can appear on ANY part type (text, functionCall, etc.) - it does NOT + * indicate the part itself is thinking content. + * - For non-functionCall responses, the signature appears on the last part for context replay. + * - When persisting/replaying model outputs, signature-bearing parts must be preserved as-is; + * do not merge/move signatures across parts. + * + * See: https://ai.google.dev/gemini-api/docs/thought-signatures + */ +export function isThinkingPart(part: Pick): boolean { + return part.thought === true; +} + +/** + * Retain thought signatures during streaming. + * + * Some backends only send `thoughtSignature` on the first delta for a given part/block; later deltas may omit it. + * This helper preserves the last non-empty signature for the current block. + * + * Note: this does NOT merge or move signatures across distinct response parts. It only prevents + * a signature from being overwritten with `undefined` within the same streamed block. + */ +export function retainThoughtSignature(existing: string | undefined, incoming: string | undefined): string | undefined { + if (typeof incoming === "string" && incoming.length > 0) return incoming; + return existing; +} + +// Thought signatures must be base64 for Google APIs (TYPE_BYTES). +const base64SignaturePattern = /^[A-Za-z0-9+/]+={0,2}$/; + +// Sentinel value that tells the Gemini API to skip thought signature validation. +// Used for unsigned function call parts (e.g. replayed from providers without thought signatures). 
+// See: https://ai.google.dev/gemini-api/docs/thought-signatures +const SKIP_THOUGHT_SIGNATURE = "skip_thought_signature_validator"; + +function isValidThoughtSignature(signature: string | undefined): boolean { + if (!signature) return false; + if (signature.length % 4 !== 0) return false; + return base64SignaturePattern.test(signature); +} + +/** + * Only keep signatures from the same provider/model and with valid base64. + */ +function resolveThoughtSignature(isSameProviderAndModel: boolean, signature: string | undefined): string | undefined { + return isSameProviderAndModel && isValidThoughtSignature(signature) ? signature : undefined; +} + +/** + * Models via Google APIs that require explicit tool call IDs in function calls/responses. + */ +export function requiresToolCallId(modelId: string): boolean { + return modelId.startsWith("claude-") || modelId.startsWith("gpt-oss-"); +} + +/** + * Convert internal messages to Gemini Content[] format. + */ +export function convertMessages(model: Model, context: Context): Content[] { + const contents: Content[] = []; + const normalizeToolCallId = (id: string): string => { + if (!requiresToolCallId(model.id)) return id; + return id.replace(/[^a-zA-Z0-9_-]/g, "_").slice(0, 64); + }; + + const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId); + + for (const msg of transformedMessages) { + if (msg.role === "user") { + if (typeof msg.content === "string") { + contents.push({ + role: "user", + parts: [{ text: sanitizeSurrogates(msg.content) }], + }); + } else { + const parts: Part[] = msg.content.map((item) => { + if (item.type === "text") { + return { text: sanitizeSurrogates(item.text) }; + } else { + return { + inlineData: { + mimeType: item.mimeType, + data: item.data, + }, + }; + } + }); + const filteredParts = !model.input.includes("image") ? 
parts.filter((p) => p.text !== undefined) : parts; + if (filteredParts.length === 0) continue; + contents.push({ + role: "user", + parts: filteredParts, + }); + } + } else if (msg.role === "assistant") { + const parts: Part[] = []; + // Check if message is from same provider and model - only then keep thinking blocks + const isSameProviderAndModel = msg.provider === model.provider && msg.model === model.id; + + for (const block of msg.content) { + if (block.type === "text") { + // Skip empty text blocks - they can cause issues with some models (e.g. Claude via Antigravity) + if (!block.text || block.text.trim() === "") continue; + const thoughtSignature = resolveThoughtSignature(isSameProviderAndModel, block.textSignature); + parts.push({ + text: sanitizeSurrogates(block.text), + ...(thoughtSignature && { thoughtSignature }), + }); + } else if (block.type === "thinking") { + // Skip empty thinking blocks + if (!block.thinking || block.thinking.trim() === "") continue; + // Only keep as thinking block if same provider AND same model + // Otherwise convert to plain text (no tags to avoid model mimicking them) + if (isSameProviderAndModel) { + const thoughtSignature = resolveThoughtSignature(isSameProviderAndModel, block.thinkingSignature); + parts.push({ + thought: true, + text: sanitizeSurrogates(block.thinking), + ...(thoughtSignature && { thoughtSignature }), + }); + } else { + parts.push({ + text: sanitizeSurrogates(block.thinking), + }); + } + } else if (block.type === "toolCall") { + const thoughtSignature = resolveThoughtSignature(isSameProviderAndModel, block.thoughtSignature); + // Gemini 3 requires thoughtSignature on all function calls when thinking mode is enabled. + // Use the skip_thought_signature_validator sentinel for unsigned function calls + // (e.g. replayed from providers without thought signatures like Claude via Antigravity). 
+ const isGemini3 = model.id.toLowerCase().includes("gemini-3"); + const effectiveSignature = thoughtSignature || (isGemini3 ? SKIP_THOUGHT_SIGNATURE : undefined); + const part: Part = { + functionCall: { + name: block.name, + args: block.arguments ?? {}, + ...(requiresToolCallId(model.id) ? { id: block.id } : {}), + }, + ...(effectiveSignature && { thoughtSignature: effectiveSignature }), + }; + parts.push(part); + } + } + + if (parts.length === 0) continue; + contents.push({ + role: "model", + parts, + }); + } else if (msg.role === "toolResult") { + // Extract text and image content + const textContent = msg.content.filter((c): c is TextContent => c.type === "text"); + const textResult = textContent.map((c) => c.text).join("\n"); + const imageContent = model.input.includes("image") + ? msg.content.filter((c): c is ImageContent => c.type === "image") + : []; + + const hasText = textResult.length > 0; + const hasImages = imageContent.length > 0; + + // Gemini 3 supports multimodal function responses with images nested inside functionResponse.parts + // See: https://ai.google.dev/gemini-api/docs/function-calling#multimodal + // Older models don't support this, so we put images in a separate user message. + const supportsMultimodalFunctionResponse = model.id.includes("gemini-3"); + + // Use "output" key for success, "error" key for errors as per SDK documentation + const responseValue = hasText ? sanitizeSurrogates(textResult) : hasImages ? "(see attached image)" : ""; + + const imageParts: Part[] = imageContent.map((imageBlock) => ({ + inlineData: { + mimeType: imageBlock.mimeType, + data: imageBlock.data, + }, + })); + + const includeId = requiresToolCallId(model.id); + const functionResponsePart: Part = { + functionResponse: { + name: msg.toolName, + response: msg.isError ? 
{ error: responseValue } : { output: responseValue }, + // Nest images inside functionResponse.parts for Gemini 3 + ...(hasImages && supportsMultimodalFunctionResponse && { parts: imageParts }), + ...(includeId ? { id: msg.toolCallId } : {}), + }, + }; + + // Cloud Code Assist API requires all function responses to be in a single user turn. + // Check if the last content is already a user turn with function responses and merge. + const lastContent = contents[contents.length - 1]; + if (lastContent?.role === "user" && lastContent.parts?.some((p) => p.functionResponse)) { + lastContent.parts.push(functionResponsePart); + } else { + contents.push({ + role: "user", + parts: [functionResponsePart], + }); + } + + // For older models, add images in a separate user message + if (hasImages && !supportsMultimodalFunctionResponse) { + contents.push({ + role: "user", + parts: [{ text: "Tool result image:" }, ...imageParts], + }); + } + } + } + + return contents; +} + +/** + * Convert tools to Gemini function declarations format. + * + * By default uses `parametersJsonSchema` which supports full JSON Schema (including + * anyOf, oneOf, const, etc.). Set `useParameters` to true to use the legacy `parameters` + * field instead (OpenAPI 3.03 Schema). This is needed for Cloud Code Assist with Claude + * models, where the API translates `parameters` into Anthropic's `input_schema`. + */ +export function convertTools( + tools: Tool[], + useParameters = false, +): { functionDeclarations: Record[] }[] | undefined { + if (tools.length === 0) return undefined; + return [ + { + functionDeclarations: tools.map((tool) => ({ + name: tool.name, + description: tool.description, + ...(useParameters ? { parameters: tool.parameters } : { parametersJsonSchema: tool.parameters }), + })), + }, + ]; +} + +/** + * Map tool choice string to Gemini FunctionCallingConfigMode. 
+ */ +export function mapToolChoice(choice: string): FunctionCallingConfigMode { + switch (choice) { + case "auto": + return FunctionCallingConfigMode.AUTO; + case "none": + return FunctionCallingConfigMode.NONE; + case "any": + return FunctionCallingConfigMode.ANY; + default: + return FunctionCallingConfigMode.AUTO; + } +} + +/** + * Map Gemini FinishReason to our StopReason. + */ +export function mapStopReason(reason: FinishReason): StopReason { + switch (reason) { + case FinishReason.STOP: + return "stop"; + case FinishReason.MAX_TOKENS: + return "length"; + case FinishReason.BLOCKLIST: + case FinishReason.PROHIBITED_CONTENT: + case FinishReason.SPII: + case FinishReason.SAFETY: + case FinishReason.IMAGE_SAFETY: + case FinishReason.IMAGE_PROHIBITED_CONTENT: + case FinishReason.IMAGE_RECITATION: + case FinishReason.IMAGE_OTHER: + case FinishReason.RECITATION: + case FinishReason.FINISH_REASON_UNSPECIFIED: + case FinishReason.OTHER: + case FinishReason.LANGUAGE: + case FinishReason.MALFORMED_FUNCTION_CALL: + case FinishReason.UNEXPECTED_TOOL_CALL: + case FinishReason.NO_IMAGE: + return "error"; + default: { + const _exhaustive: never = reason; + throw new Error(`Unhandled stop reason: ${_exhaustive}`); + } + } +} + +/** + * Map string finish reason to our StopReason (for raw API responses). 
+ */ +export function mapStopReasonString(reason: string): StopReason { + switch (reason) { + case "STOP": + return "stop"; + case "MAX_TOKENS": + return "length"; + default: + return "error"; + } +} diff --git a/packages/pi-ai/src/providers/google-vertex.ts b/packages/pi-ai/src/providers/google-vertex.ts new file mode 100644 index 000000000..7bdaf5ebd --- /dev/null +++ b/packages/pi-ai/src/providers/google-vertex.ts @@ -0,0 +1,485 @@ +import { + type GenerateContentConfig, + type GenerateContentParameters, + GoogleGenAI, + type ThinkingConfig, + ThinkingLevel, +} from "@google/genai"; +import { calculateCost } from "../models.js"; +import type { + Api, + AssistantMessage, + Context, + Model, + ThinkingLevel as PiThinkingLevel, + SimpleStreamOptions, + StreamFunction, + StreamOptions, + TextContent, + ThinkingBudgets, + ThinkingContent, + ToolCall, +} from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { sanitizeSurrogates } from "../utils/sanitize-unicode.js"; +import type { GoogleThinkingLevel } from "./google-gemini-cli.js"; +import { + convertMessages, + convertTools, + isThinkingPart, + mapStopReason, + mapToolChoice, + retainThoughtSignature, +} from "./google-shared.js"; +import { buildBaseOptions, clampReasoning } from "./simple-options.js"; + +export interface GoogleVertexOptions extends StreamOptions { + toolChoice?: "auto" | "none" | "any"; + thinking?: { + enabled: boolean; + budgetTokens?: number; // -1 for dynamic, 0 to disable + level?: GoogleThinkingLevel; + }; + project?: string; + location?: string; +} + +const API_VERSION = "v1"; + +const THINKING_LEVEL_MAP: Record = { + THINKING_LEVEL_UNSPECIFIED: ThinkingLevel.THINKING_LEVEL_UNSPECIFIED, + MINIMAL: ThinkingLevel.MINIMAL, + LOW: ThinkingLevel.LOW, + MEDIUM: ThinkingLevel.MEDIUM, + HIGH: ThinkingLevel.HIGH, +}; + +// Counter for generating unique tool call IDs +let toolCallCounter = 0; + +export const streamGoogleVertex: 
StreamFunction<"google-vertex", GoogleVertexOptions> = ( + model: Model<"google-vertex">, + context: Context, + options?: GoogleVertexOptions, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + (async () => { + const output: AssistantMessage = { + role: "assistant", + content: [], + api: "google-vertex" as Api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "stop", + timestamp: Date.now(), + }; + + try { + const project = resolveProject(options); + const location = resolveLocation(options); + const client = createClient(model, project, location, options?.headers); + let params = buildParams(model, context, options); + const nextParams = await options?.onPayload?.(params, model); + if (nextParams !== undefined) { + params = nextParams as GenerateContentParameters; + } + const googleStream = await client.models.generateContentStream(params); + + stream.push({ type: "start", partial: output }); + let currentBlock: TextContent | ThinkingContent | null = null; + const blocks = output.content; + const blockIndex = () => blocks.length - 1; + for await (const chunk of googleStream) { + const candidate = chunk.candidates?.[0]; + if (candidate?.content?.parts) { + for (const part of candidate.content.parts) { + if (part.text !== undefined) { + const isThinking = isThinkingPart(part); + if ( + !currentBlock || + (isThinking && currentBlock.type !== "thinking") || + (!isThinking && currentBlock.type !== "text") + ) { + if (currentBlock) { + if (currentBlock.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blocks.length - 1, + content: currentBlock.text, + partial: output, + }); + } else { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: currentBlock.thinking, + partial: output, + }); + } + } + if 
(isThinking) { + currentBlock = { type: "thinking", thinking: "", thinkingSignature: undefined }; + output.content.push(currentBlock); + stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output }); + } else { + currentBlock = { type: "text", text: "" }; + output.content.push(currentBlock); + stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output }); + } + } + if (currentBlock.type === "thinking") { + currentBlock.thinking += part.text; + currentBlock.thinkingSignature = retainThoughtSignature( + currentBlock.thinkingSignature, + part.thoughtSignature, + ); + stream.push({ + type: "thinking_delta", + contentIndex: blockIndex(), + delta: part.text, + partial: output, + }); + } else { + currentBlock.text += part.text; + currentBlock.textSignature = retainThoughtSignature( + currentBlock.textSignature, + part.thoughtSignature, + ); + stream.push({ + type: "text_delta", + contentIndex: blockIndex(), + delta: part.text, + partial: output, + }); + } + } + + if (part.functionCall) { + if (currentBlock) { + if (currentBlock.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blockIndex(), + content: currentBlock.text, + partial: output, + }); + } else { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: currentBlock.thinking, + partial: output, + }); + } + currentBlock = null; + } + + const providedId = part.functionCall.id; + const needsNewId = + !providedId || output.content.some((b) => b.type === "toolCall" && b.id === providedId); + const toolCallId = needsNewId + ? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}` + : providedId; + + const toolCall: ToolCall = { + type: "toolCall", + id: toolCallId, + name: part.functionCall.name || "", + arguments: (part.functionCall.args as Record) ?? 
{}, + ...(part.thoughtSignature && { thoughtSignature: part.thoughtSignature }), + }; + + output.content.push(toolCall); + stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output }); + stream.push({ + type: "toolcall_delta", + contentIndex: blockIndex(), + delta: JSON.stringify(toolCall.arguments), + partial: output, + }); + stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output }); + } + } + } + + if (candidate?.finishReason) { + output.stopReason = mapStopReason(candidate.finishReason); + if (output.content.some((b) => b.type === "toolCall")) { + output.stopReason = "toolUse"; + } + } + + if (chunk.usageMetadata) { + output.usage = { + input: chunk.usageMetadata.promptTokenCount || 0, + output: + (chunk.usageMetadata.candidatesTokenCount || 0) + (chunk.usageMetadata.thoughtsTokenCount || 0), + cacheRead: chunk.usageMetadata.cachedContentTokenCount || 0, + cacheWrite: 0, + totalTokens: chunk.usageMetadata.totalTokenCount || 0, + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + total: 0, + }, + }; + calculateCost(model, output.usage); + } + } + + if (currentBlock) { + if (currentBlock.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blockIndex(), + content: currentBlock.text, + partial: output, + }); + } else { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: currentBlock.thinking, + partial: output, + }); + } + } + + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (output.stopReason === "aborted" || output.stopReason === "error") { + throw new Error("An unknown error occurred"); + } + + stream.push({ type: "done", reason: output.stopReason, message: output }); + stream.end(); + } catch (error) { + // Remove internal index property used during streaming + for (const block of output.content) { + if ("index" in block) { + delete (block as { index?: number }).index; + } + } + output.stopReason = 
options?.signal?.aborted ? "aborted" : "error"; + output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error); + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +export const streamSimpleGoogleVertex: StreamFunction<"google-vertex", SimpleStreamOptions> = ( + model: Model<"google-vertex">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const base = buildBaseOptions(model, options, undefined); + if (!options?.reasoning) { + return streamGoogleVertex(model, context, { + ...base, + thinking: { enabled: false }, + } satisfies GoogleVertexOptions); + } + + const effort = clampReasoning(options.reasoning)!; + const geminiModel = model as unknown as Model<"google-generative-ai">; + + if (isGemini3ProModel(geminiModel) || isGemini3FlashModel(geminiModel)) { + return streamGoogleVertex(model, context, { + ...base, + thinking: { + enabled: true, + level: getGemini3ThinkingLevel(effort, geminiModel), + }, + } satisfies GoogleVertexOptions); + } + + return streamGoogleVertex(model, context, { + ...base, + thinking: { + enabled: true, + budgetTokens: getGoogleBudget(geminiModel, effort, options.thinkingBudgets), + }, + } satisfies GoogleVertexOptions); +}; + +function createClient( + model: Model<"google-vertex">, + project: string, + location: string, + optionsHeaders?: Record, +): GoogleGenAI { + const httpOptions: { headers?: Record } = {}; + + if (model.headers || optionsHeaders) { + httpOptions.headers = { ...model.headers, ...optionsHeaders }; + } + + const hasHttpOptions = Object.values(httpOptions).some(Boolean); + + return new GoogleGenAI({ + vertexai: true, + project, + location, + apiVersion: API_VERSION, + httpOptions: hasHttpOptions ? 
httpOptions : undefined, + }); +} + +function resolveProject(options?: GoogleVertexOptions): string { + const project = options?.project || process.env.GOOGLE_CLOUD_PROJECT || process.env.GCLOUD_PROJECT; + if (!project) { + throw new Error( + "Vertex AI requires a project ID. Set GOOGLE_CLOUD_PROJECT/GCLOUD_PROJECT or pass project in options.", + ); + } + return project; +} + +function resolveLocation(options?: GoogleVertexOptions): string { + const location = options?.location || process.env.GOOGLE_CLOUD_LOCATION; + if (!location) { + throw new Error("Vertex AI requires a location. Set GOOGLE_CLOUD_LOCATION or pass location in options."); + } + return location; +} + +function buildParams( + model: Model<"google-vertex">, + context: Context, + options: GoogleVertexOptions = {}, +): GenerateContentParameters { + const contents = convertMessages(model, context); + + const generationConfig: GenerateContentConfig = {}; + if (options.temperature !== undefined) { + generationConfig.temperature = options.temperature; + } + if (options.maxTokens !== undefined) { + generationConfig.maxOutputTokens = options.maxTokens; + } + + const config: GenerateContentConfig = { + ...(Object.keys(generationConfig).length > 0 && generationConfig), + ...(context.systemPrompt && { systemInstruction: sanitizeSurrogates(context.systemPrompt) }), + ...(context.tools && context.tools.length > 0 && { tools: convertTools(context.tools) }), + }; + + if (context.tools && context.tools.length > 0 && options.toolChoice) { + config.toolConfig = { + functionCallingConfig: { + mode: mapToolChoice(options.toolChoice), + }, + }; + } else { + config.toolConfig = undefined; + } + + if (options.thinking?.enabled && model.reasoning) { + const thinkingConfig: ThinkingConfig = { includeThoughts: true }; + if (options.thinking.level !== undefined) { + thinkingConfig.thinkingLevel = THINKING_LEVEL_MAP[options.thinking.level]; + } else if (options.thinking.budgetTokens !== undefined) { + 
thinkingConfig.thinkingBudget = options.thinking.budgetTokens; + } + config.thinkingConfig = thinkingConfig; + } + + if (options.signal) { + if (options.signal.aborted) { + throw new Error("Request aborted"); + } + config.abortSignal = options.signal; + } + + const params: GenerateContentParameters = { + model: model.id, + contents, + config, + }; + + return params; +} + +type ClampedThinkingLevel = Exclude; + +function isGemini3ProModel(model: Model<"google-generative-ai">): boolean { + return /gemini-3(?:\.\d+)?-pro/.test(model.id.toLowerCase()); +} + +function isGemini3FlashModel(model: Model<"google-generative-ai">): boolean { + return /gemini-3(?:\.\d+)?-flash/.test(model.id.toLowerCase()); +} + +function getGemini3ThinkingLevel( + effort: ClampedThinkingLevel, + model: Model<"google-generative-ai">, +): GoogleThinkingLevel { + if (isGemini3ProModel(model)) { + switch (effort) { + case "minimal": + case "low": + return "LOW"; + case "medium": + case "high": + return "HIGH"; + } + } + switch (effort) { + case "minimal": + return "MINIMAL"; + case "low": + return "LOW"; + case "medium": + return "MEDIUM"; + case "high": + return "HIGH"; + } +} + +function getGoogleBudget( + model: Model<"google-generative-ai">, + effort: ClampedThinkingLevel, + customBudgets?: ThinkingBudgets, +): number { + if (customBudgets?.[effort] !== undefined) { + return customBudgets[effort]!; + } + + if (model.id.includes("2.5-pro")) { + const budgets: Record = { + minimal: 128, + low: 2048, + medium: 8192, + high: 32768, + }; + return budgets[effort]; + } + + if (model.id.includes("2.5-flash")) { + const budgets: Record = { + minimal: 128, + low: 2048, + medium: 8192, + high: 24576, + }; + return budgets[effort]; + } + + return -1; +} diff --git a/packages/pi-ai/src/providers/google.ts b/packages/pi-ai/src/providers/google.ts new file mode 100644 index 000000000..991d5c90d --- /dev/null +++ b/packages/pi-ai/src/providers/google.ts @@ -0,0 +1,455 @@ +import { + type 
GenerateContentConfig, + type GenerateContentParameters, + GoogleGenAI, + type ThinkingConfig, +} from "@google/genai"; +import { getEnvApiKey } from "../env-api-keys.js"; +import { calculateCost } from "../models.js"; +import type { + Api, + AssistantMessage, + Context, + Model, + SimpleStreamOptions, + StreamFunction, + StreamOptions, + TextContent, + ThinkingBudgets, + ThinkingContent, + ThinkingLevel, + ToolCall, +} from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { sanitizeSurrogates } from "../utils/sanitize-unicode.js"; +import type { GoogleThinkingLevel } from "./google-gemini-cli.js"; +import { + convertMessages, + convertTools, + isThinkingPart, + mapStopReason, + mapToolChoice, + retainThoughtSignature, +} from "./google-shared.js"; +import { buildBaseOptions, clampReasoning } from "./simple-options.js"; + +export interface GoogleOptions extends StreamOptions { + toolChoice?: "auto" | "none" | "any"; + thinking?: { + enabled: boolean; + budgetTokens?: number; // -1 for dynamic, 0 to disable + level?: GoogleThinkingLevel; + }; +} + +// Counter for generating unique tool call IDs +let toolCallCounter = 0; + +export const streamGoogle: StreamFunction<"google-generative-ai", GoogleOptions> = ( + model: Model<"google-generative-ai">, + context: Context, + options?: GoogleOptions, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + (async () => { + const output: AssistantMessage = { + role: "assistant", + content: [], + api: "google-generative-ai" as Api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "stop", + timestamp: Date.now(), + }; + + try { + const apiKey = options?.apiKey || getEnvApiKey(model.provider) || ""; + const client = createClient(model, apiKey, options?.headers); + let params 
= buildParams(model, context, options); + const nextParams = await options?.onPayload?.(params, model); + if (nextParams !== undefined) { + params = nextParams as GenerateContentParameters; + } + const googleStream = await client.models.generateContentStream(params); + + stream.push({ type: "start", partial: output }); + let currentBlock: TextContent | ThinkingContent | null = null; + const blocks = output.content; + const blockIndex = () => blocks.length - 1; + for await (const chunk of googleStream) { + const candidate = chunk.candidates?.[0]; + if (candidate?.content?.parts) { + for (const part of candidate.content.parts) { + if (part.text !== undefined) { + const isThinking = isThinkingPart(part); + if ( + !currentBlock || + (isThinking && currentBlock.type !== "thinking") || + (!isThinking && currentBlock.type !== "text") + ) { + if (currentBlock) { + if (currentBlock.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blocks.length - 1, + content: currentBlock.text, + partial: output, + }); + } else { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: currentBlock.thinking, + partial: output, + }); + } + } + if (isThinking) { + currentBlock = { type: "thinking", thinking: "", thinkingSignature: undefined }; + output.content.push(currentBlock); + stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output }); + } else { + currentBlock = { type: "text", text: "" }; + output.content.push(currentBlock); + stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output }); + } + } + if (currentBlock.type === "thinking") { + currentBlock.thinking += part.text; + currentBlock.thinkingSignature = retainThoughtSignature( + currentBlock.thinkingSignature, + part.thoughtSignature, + ); + stream.push({ + type: "thinking_delta", + contentIndex: blockIndex(), + delta: part.text, + partial: output, + }); + } else { + currentBlock.text += part.text; + currentBlock.textSignature = 
retainThoughtSignature( + currentBlock.textSignature, + part.thoughtSignature, + ); + stream.push({ + type: "text_delta", + contentIndex: blockIndex(), + delta: part.text, + partial: output, + }); + } + } + + if (part.functionCall) { + if (currentBlock) { + if (currentBlock.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blockIndex(), + content: currentBlock.text, + partial: output, + }); + } else { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: currentBlock.thinking, + partial: output, + }); + } + currentBlock = null; + } + + // Generate unique ID if not provided or if it's a duplicate + const providedId = part.functionCall.id; + const needsNewId = + !providedId || output.content.some((b) => b.type === "toolCall" && b.id === providedId); + const toolCallId = needsNewId + ? `${part.functionCall.name}_${Date.now()}_${++toolCallCounter}` + : providedId; + + const toolCall: ToolCall = { + type: "toolCall", + id: toolCallId, + name: part.functionCall.name || "", + arguments: (part.functionCall.args as Record) ?? 
{}, + ...(part.thoughtSignature && { thoughtSignature: part.thoughtSignature }), + }; + + output.content.push(toolCall); + stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output }); + stream.push({ + type: "toolcall_delta", + contentIndex: blockIndex(), + delta: JSON.stringify(toolCall.arguments), + partial: output, + }); + stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output }); + } + } + } + + if (candidate?.finishReason) { + output.stopReason = mapStopReason(candidate.finishReason); + if (output.content.some((b) => b.type === "toolCall")) { + output.stopReason = "toolUse"; + } + } + + if (chunk.usageMetadata) { + output.usage = { + input: chunk.usageMetadata.promptTokenCount || 0, + output: + (chunk.usageMetadata.candidatesTokenCount || 0) + (chunk.usageMetadata.thoughtsTokenCount || 0), + cacheRead: chunk.usageMetadata.cachedContentTokenCount || 0, + cacheWrite: 0, + totalTokens: chunk.usageMetadata.totalTokenCount || 0, + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + total: 0, + }, + }; + calculateCost(model, output.usage); + } + } + + if (currentBlock) { + if (currentBlock.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blockIndex(), + content: currentBlock.text, + partial: output, + }); + } else { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: currentBlock.thinking, + partial: output, + }); + } + } + + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (output.stopReason === "aborted" || output.stopReason === "error") { + throw new Error("An unknown error occurred"); + } + + stream.push({ type: "done", reason: output.stopReason, message: output }); + stream.end(); + } catch (error) { + // Remove internal index property used during streaming + for (const block of output.content) { + if ("index" in block) { + delete (block as { index?: number }).index; + } + } + output.stopReason = 
options?.signal?.aborted ? "aborted" : "error"; + output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error); + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +export const streamSimpleGoogle: StreamFunction<"google-generative-ai", SimpleStreamOptions> = ( + model: Model<"google-generative-ai">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const apiKey = options?.apiKey || getEnvApiKey(model.provider); + if (!apiKey) { + throw new Error(`No API key for provider: ${model.provider}`); + } + + const base = buildBaseOptions(model, options, apiKey); + if (!options?.reasoning) { + return streamGoogle(model, context, { ...base, thinking: { enabled: false } } satisfies GoogleOptions); + } + + const effort = clampReasoning(options.reasoning)!; + const googleModel = model as Model<"google-generative-ai">; + + if (isGemini3ProModel(googleModel) || isGemini3FlashModel(googleModel)) { + return streamGoogle(model, context, { + ...base, + thinking: { + enabled: true, + level: getGemini3ThinkingLevel(effort, googleModel), + }, + } satisfies GoogleOptions); + } + + return streamGoogle(model, context, { + ...base, + thinking: { + enabled: true, + budgetTokens: getGoogleBudget(googleModel, effort, options.thinkingBudgets), + }, + } satisfies GoogleOptions); +}; + +function createClient( + model: Model<"google-generative-ai">, + apiKey?: string, + optionsHeaders?: Record, +): GoogleGenAI { + const httpOptions: { baseUrl?: string; apiVersion?: string; headers?: Record } = {}; + if (model.baseUrl) { + httpOptions.baseUrl = model.baseUrl; + httpOptions.apiVersion = ""; // baseUrl already includes version path, don't append + } + if (model.headers || optionsHeaders) { + httpOptions.headers = { ...model.headers, ...optionsHeaders }; + } + + return new GoogleGenAI({ + apiKey, + httpOptions: Object.keys(httpOptions).length > 0 ? 
httpOptions : undefined, + }); +} + +function buildParams( + model: Model<"google-generative-ai">, + context: Context, + options: GoogleOptions = {}, +): GenerateContentParameters { + const contents = convertMessages(model, context); + + const generationConfig: GenerateContentConfig = {}; + if (options.temperature !== undefined) { + generationConfig.temperature = options.temperature; + } + if (options.maxTokens !== undefined) { + generationConfig.maxOutputTokens = options.maxTokens; + } + + const config: GenerateContentConfig = { + ...(Object.keys(generationConfig).length > 0 && generationConfig), + ...(context.systemPrompt && { systemInstruction: sanitizeSurrogates(context.systemPrompt) }), + ...(context.tools && context.tools.length > 0 && { tools: convertTools(context.tools) }), + }; + + if (context.tools && context.tools.length > 0 && options.toolChoice) { + config.toolConfig = { + functionCallingConfig: { + mode: mapToolChoice(options.toolChoice), + }, + }; + } else { + config.toolConfig = undefined; + } + + if (options.thinking?.enabled && model.reasoning) { + const thinkingConfig: ThinkingConfig = { includeThoughts: true }; + if (options.thinking.level !== undefined) { + // Cast to any since our GoogleThinkingLevel mirrors Google's ThinkingLevel enum values + thinkingConfig.thinkingLevel = options.thinking.level as any; + } else if (options.thinking.budgetTokens !== undefined) { + thinkingConfig.thinkingBudget = options.thinking.budgetTokens; + } + config.thinkingConfig = thinkingConfig; + } + + if (options.signal) { + if (options.signal.aborted) { + throw new Error("Request aborted"); + } + config.abortSignal = options.signal; + } + + const params: GenerateContentParameters = { + model: model.id, + contents, + config, + }; + + return params; +} + +type ClampedThinkingLevel = Exclude; + +function isGemini3ProModel(model: Model<"google-generative-ai">): boolean { + return /gemini-3(?:\.\d+)?-pro/.test(model.id.toLowerCase()); +} + +function 
isGemini3FlashModel(model: Model<"google-generative-ai">): boolean { + return /gemini-3(?:\.\d+)?-flash/.test(model.id.toLowerCase()); +} + +function getGemini3ThinkingLevel( + effort: ClampedThinkingLevel, + model: Model<"google-generative-ai">, +): GoogleThinkingLevel { + if (isGemini3ProModel(model)) { + switch (effort) { + case "minimal": + case "low": + return "LOW"; + case "medium": + case "high": + return "HIGH"; + } + } + switch (effort) { + case "minimal": + return "MINIMAL"; + case "low": + return "LOW"; + case "medium": + return "MEDIUM"; + case "high": + return "HIGH"; + } +} + +function getGoogleBudget( + model: Model<"google-generative-ai">, + effort: ClampedThinkingLevel, + customBudgets?: ThinkingBudgets, +): number { + if (customBudgets?.[effort] !== undefined) { + return customBudgets[effort]!; + } + + if (model.id.includes("2.5-pro")) { + const budgets: Record = { + minimal: 128, + low: 2048, + medium: 8192, + high: 32768, + }; + return budgets[effort]; + } + + if (model.id.includes("2.5-flash")) { + const budgets: Record = { + minimal: 128, + low: 2048, + medium: 8192, + high: 24576, + }; + return budgets[effort]; + } + + return -1; +} diff --git a/packages/pi-ai/src/providers/mistral.ts b/packages/pi-ai/src/providers/mistral.ts new file mode 100644 index 000000000..95d3f839e --- /dev/null +++ b/packages/pi-ai/src/providers/mistral.ts @@ -0,0 +1,582 @@ +import { Mistral } from "@mistralai/mistralai"; +import type { RequestOptions } from "@mistralai/mistralai/lib/sdks.js"; +import type { + ChatCompletionStreamRequest, + ChatCompletionStreamRequestMessages, + CompletionEvent, + ContentChunk, + FunctionTool, +} from "@mistralai/mistralai/models/components/index.js"; +import { getEnvApiKey } from "../env-api-keys.js"; +import { calculateCost } from "../models.js"; +import type { + AssistantMessage, + Context, + Message, + Model, + SimpleStreamOptions, + StopReason, + StreamFunction, + StreamOptions, + TextContent, + ThinkingContent, + Tool, + 
ToolCall, +} from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { shortHash } from "../utils/hash.js"; +import { parseStreamingJson } from "../utils/json-parse.js"; +import { sanitizeSurrogates } from "../utils/sanitize-unicode.js"; +import { buildBaseOptions, clampReasoning } from "./simple-options.js"; +import { transformMessages } from "./transform-messages.js"; + +const MISTRAL_TOOL_CALL_ID_LENGTH = 9; +const MAX_MISTRAL_ERROR_BODY_CHARS = 4000; + +/** + * Provider-specific options for the Mistral API. + */ +export interface MistralOptions extends StreamOptions { + toolChoice?: "auto" | "none" | "any" | "required" | { type: "function"; function: { name: string } }; + promptMode?: "reasoning"; +} + +/** + * Stream responses from Mistral using `chat.stream`. + */ +export const streamMistral: StreamFunction<"mistral-conversations", MistralOptions> = ( + model: Model<"mistral-conversations">, + context: Context, + options?: MistralOptions, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + (async () => { + const output = createOutput(model); + + try { + const apiKey = options?.apiKey || getEnvApiKey(model.provider); + if (!apiKey) { + throw new Error(`No API key for provider: ${model.provider}`); + } + + // Intentionally per-request: avoids shared SDK mutable state across concurrent consumers. 
+ const mistral = new Mistral({ + apiKey, + serverURL: model.baseUrl, + }); + + const normalizeMistralToolCallId = createMistralToolCallIdNormalizer(); + const transformedMessages = transformMessages(context.messages, model, (id) => normalizeMistralToolCallId(id)); + + let payload = buildChatPayload(model, context, transformedMessages, options); + const nextPayload = await options?.onPayload?.(payload, model); + if (nextPayload !== undefined) { + payload = nextPayload as ChatCompletionStreamRequest; + } + const mistralStream = await mistral.chat.stream(payload, buildRequestOptions(model, options)); + stream.push({ type: "start", partial: output }); + await consumeChatStream(model, output, stream, mistralStream); + + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (output.stopReason === "aborted" || output.stopReason === "error") { + throw new Error("An unknown error occurred"); + } + + stream.push({ type: "done", reason: output.stopReason, message: output }); + stream.end(); + } catch (error) { + output.stopReason = options?.signal?.aborted ? "aborted" : "error"; + output.errorMessage = formatMistralError(error); + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +/** + * Maps provider-agnostic `SimpleStreamOptions` to Mistral options. + */ +export const streamSimpleMistral: StreamFunction<"mistral-conversations", SimpleStreamOptions> = ( + model: Model<"mistral-conversations">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const apiKey = options?.apiKey || getEnvApiKey(model.provider); + if (!apiKey) { + throw new Error(`No API key for provider: ${model.provider}`); + } + + const base = buildBaseOptions(model, options, apiKey); + const reasoning = clampReasoning(options?.reasoning); + + return streamMistral(model, context, { + ...base, + promptMode: model.reasoning && reasoning ? 
"reasoning" : undefined, + } satisfies MistralOptions); +}; + +function createOutput(model: Model<"mistral-conversations">): AssistantMessage { + return { + role: "assistant", + content: [], + api: model.api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "stop", + timestamp: Date.now(), + }; +} + +function createMistralToolCallIdNormalizer(): (id: string) => string { + const idMap = new Map(); + const reverseMap = new Map(); + + return (id: string): string => { + const existing = idMap.get(id); + if (existing) return existing; + + let attempt = 0; + while (true) { + const candidate = deriveMistralToolCallId(id, attempt); + const owner = reverseMap.get(candidate); + if (!owner || owner === id) { + idMap.set(id, candidate); + reverseMap.set(candidate, id); + return candidate; + } + attempt++; + } + }; +} + +function deriveMistralToolCallId(id: string, attempt: number): string { + const normalized = id.replace(/[^a-zA-Z0-9]/g, ""); + if (attempt === 0 && normalized.length === MISTRAL_TOOL_CALL_ID_LENGTH) return normalized; + const seedBase = normalized || id; + const seed = attempt === 0 ? seedBase : `${seedBase}:${attempt}`; + return shortHash(seed) + .replace(/[^a-zA-Z0-9]/g, "") + .slice(0, MISTRAL_TOOL_CALL_ID_LENGTH); +} + +function formatMistralError(error: unknown): string { + if (error instanceof Error) { + const sdkError = error as Error & { statusCode?: unknown; body?: unknown }; + const statusCode = typeof sdkError.statusCode === "number" ? sdkError.statusCode : undefined; + const bodyText = typeof sdkError.body === "string" ? 
sdkError.body.trim() : undefined; + if (statusCode !== undefined && bodyText) { + return `Mistral API error (${statusCode}): ${truncateErrorText(bodyText, MAX_MISTRAL_ERROR_BODY_CHARS)}`; + } + if (statusCode !== undefined) return `Mistral API error (${statusCode}): ${error.message}`; + return error.message; + } + return safeJsonStringify(error); +} + +function truncateErrorText(text: string, maxChars: number): string { + if (text.length <= maxChars) return text; + return `${text.slice(0, maxChars)}... [truncated ${text.length - maxChars} chars]`; +} + +function safeJsonStringify(value: unknown): string { + try { + const serialized = JSON.stringify(value); + return serialized === undefined ? String(value) : serialized; + } catch { + return String(value); + } +} + +function buildRequestOptions(model: Model<"mistral-conversations">, options?: MistralOptions): RequestOptions { + const requestOptions: RequestOptions = {}; + if (options?.signal) requestOptions.signal = options.signal; + requestOptions.retries = { strategy: "none" }; + + const headers: Record = {}; + if (model.headers) Object.assign(headers, model.headers); + if (options?.headers) Object.assign(headers, options.headers); + + // Mistral infrastructure uses `x-affinity` for KV-cache reuse (prefix caching). + // Respect explicit caller-provided header values. 
+ if (options?.sessionId && !headers["x-affinity"]) { + headers["x-affinity"] = options.sessionId; + } + + if (Object.keys(headers).length > 0) { + requestOptions.headers = headers; + } + + return requestOptions; +} + +function buildChatPayload( + model: Model<"mistral-conversations">, + context: Context, + messages: Message[], + options?: MistralOptions, +): ChatCompletionStreamRequest { + const payload: ChatCompletionStreamRequest = { + model: model.id, + stream: true, + messages: toChatMessages(messages, model.input.includes("image")), + }; + + if (context.tools?.length) payload.tools = toFunctionTools(context.tools); + if (options?.temperature !== undefined) payload.temperature = options.temperature; + if (options?.maxTokens !== undefined) payload.maxTokens = options.maxTokens; + if (options?.toolChoice) payload.toolChoice = mapToolChoice(options.toolChoice); + if (options?.promptMode) payload.promptMode = options.promptMode as any; + + if (context.systemPrompt) { + payload.messages.unshift({ + role: "system", + content: sanitizeSurrogates(context.systemPrompt), + }); + } + + return payload; +} + +async function consumeChatStream( + model: Model<"mistral-conversations">, + output: AssistantMessage, + stream: AssistantMessageEventStream, + mistralStream: AsyncIterable, +): Promise { + let currentBlock: TextContent | ThinkingContent | null = null; + const blocks = output.content; + const blockIndex = () => blocks.length - 1; + const toolBlocksByKey = new Map(); + + const finishCurrentBlock = (block?: typeof currentBlock) => { + if (!block) return; + if (block.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blockIndex(), + content: block.text, + partial: output, + }); + return; + } + if (block.type === "thinking") { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: block.thinking, + partial: output, + }); + } + }; + + for await (const event of mistralStream) { + const chunk = event.data; + + if (chunk.usage) { 
+ output.usage.input = chunk.usage.promptTokens || 0; + output.usage.output = chunk.usage.completionTokens || 0; + output.usage.cacheRead = 0; + output.usage.cacheWrite = 0; + output.usage.totalTokens = chunk.usage.totalTokens || output.usage.input + output.usage.output; + calculateCost(model, output.usage); + } + + const choice = chunk.choices[0]; + if (!choice) continue; + + if (choice.finishReason) { + output.stopReason = mapChatStopReason(choice.finishReason); + } + + const delta = choice.delta; + if (delta.content !== null && delta.content !== undefined) { + const contentItems = typeof delta.content === "string" ? [delta.content] : delta.content; + for (const item of contentItems) { + if (typeof item === "string") { + const textDelta = sanitizeSurrogates(item); + if (!currentBlock || currentBlock.type !== "text") { + finishCurrentBlock(currentBlock); + currentBlock = { type: "text", text: "" }; + output.content.push(currentBlock); + stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output }); + } + currentBlock.text += textDelta; + stream.push({ + type: "text_delta", + contentIndex: blockIndex(), + delta: textDelta, + partial: output, + }); + continue; + } + + if (item.type === "thinking") { + const deltaText = item.thinking + .map((part) => ("text" in part ? 
part.text : "")) + .filter((text) => text.length > 0) + .join(""); + const thinkingDelta = sanitizeSurrogates(deltaText); + if (!thinkingDelta) continue; + if (!currentBlock || currentBlock.type !== "thinking") { + finishCurrentBlock(currentBlock); + currentBlock = { type: "thinking", thinking: "" }; + output.content.push(currentBlock); + stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output }); + } + currentBlock.thinking += thinkingDelta; + stream.push({ + type: "thinking_delta", + contentIndex: blockIndex(), + delta: thinkingDelta, + partial: output, + }); + continue; + } + + if (item.type === "text") { + const textDelta = sanitizeSurrogates(item.text); + if (!currentBlock || currentBlock.type !== "text") { + finishCurrentBlock(currentBlock); + currentBlock = { type: "text", text: "" }; + output.content.push(currentBlock); + stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output }); + } + currentBlock.text += textDelta; + stream.push({ + type: "text_delta", + contentIndex: blockIndex(), + delta: textDelta, + partial: output, + }); + } + } + } + + const toolCalls = delta.toolCalls || []; + for (const toolCall of toolCalls) { + if (currentBlock) { + finishCurrentBlock(currentBlock); + currentBlock = null; + } + const callId = + toolCall.id && toolCall.id !== "null" + ? toolCall.id + : deriveMistralToolCallId(`toolcall:${toolCall.index ?? 
0}`, 0); + const key = `${callId}:${toolCall.index || 0}`; + const existingIndex = toolBlocksByKey.get(key); + let block: (ToolCall & { partialArgs?: string }) | undefined; + + if (existingIndex !== undefined) { + const existing = output.content[existingIndex]; + if (existing?.type === "toolCall") { + block = existing as ToolCall & { partialArgs?: string }; + } + } + + if (!block) { + block = { + type: "toolCall", + id: callId, + name: toolCall.function.name, + arguments: {}, + partialArgs: "", + }; + output.content.push(block); + toolBlocksByKey.set(key, output.content.length - 1); + stream.push({ type: "toolcall_start", contentIndex: output.content.length - 1, partial: output }); + } + + const argsDelta = + typeof toolCall.function.arguments === "string" + ? toolCall.function.arguments + : JSON.stringify(toolCall.function.arguments || {}); + block.partialArgs = (block.partialArgs || "") + argsDelta; + block.arguments = parseStreamingJson>(block.partialArgs); + stream.push({ + type: "toolcall_delta", + contentIndex: toolBlocksByKey.get(key)!, + delta: argsDelta, + partial: output, + }); + } + } + + finishCurrentBlock(currentBlock); + for (const index of toolBlocksByKey.values()) { + const block = output.content[index]; + if (block.type !== "toolCall") continue; + const toolBlock = block as ToolCall & { partialArgs?: string }; + toolBlock.arguments = parseStreamingJson>(toolBlock.partialArgs); + delete toolBlock.partialArgs; + stream.push({ + type: "toolcall_end", + contentIndex: index, + toolCall: toolBlock, + partial: output, + }); + } +} + +function toFunctionTools(tools: Tool[]): Array { + return tools.map((tool) => ({ + type: "function", + function: { + name: tool.name, + description: tool.description, + parameters: tool.parameters as unknown as Record, + strict: false, + }, + })); +} + +function toChatMessages(messages: Message[], supportsImages: boolean): ChatCompletionStreamRequestMessages[] { + const result: ChatCompletionStreamRequestMessages[] = []; + + 
for (const msg of messages) { + if (msg.role === "user") { + if (typeof msg.content === "string") { + result.push({ role: "user", content: sanitizeSurrogates(msg.content) }); + continue; + } + const hadImages = msg.content.some((item) => item.type === "image"); + const content: ContentChunk[] = msg.content + .filter((item) => item.type === "text" || supportsImages) + .map((item) => { + if (item.type === "text") return { type: "text", text: sanitizeSurrogates(item.text) }; + return { type: "image_url", imageUrl: `data:${item.mimeType};base64,${item.data}` }; + }); + if (content.length > 0) { + result.push({ role: "user", content }); + continue; + } + if (hadImages && !supportsImages) { + result.push({ role: "user", content: "(image omitted: model does not support images)" }); + } + continue; + } + + if (msg.role === "assistant") { + const contentParts: ContentChunk[] = []; + const toolCalls: Array<{ id: string; type: "function"; function: { name: string; arguments: string } }> = []; + + for (const block of msg.content) { + if (block.type === "text") { + if (block.text.trim().length > 0) { + contentParts.push({ type: "text", text: sanitizeSurrogates(block.text) }); + } + continue; + } + if (block.type === "thinking") { + if (block.thinking.trim().length > 0) { + contentParts.push({ + type: "thinking", + thinking: [{ type: "text", text: sanitizeSurrogates(block.thinking) }], + }); + } + continue; + } + toolCalls.push({ + id: block.id, + type: "function", + function: { name: block.name, arguments: JSON.stringify(block.arguments || {}) }, + }); + } + + const assistantMessage: ChatCompletionStreamRequestMessages = { role: "assistant" }; + if (contentParts.length > 0) assistantMessage.content = contentParts; + if (toolCalls.length > 0) assistantMessage.toolCalls = toolCalls; + if (contentParts.length > 0 || toolCalls.length > 0) result.push(assistantMessage); + continue; + } + + const toolContent: ContentChunk[] = []; + const textResult = msg.content + .filter((part) => 
part.type === "text") + .map((part) => (part.type === "text" ? sanitizeSurrogates(part.text) : "")) + .join("\n"); + const hasImages = msg.content.some((part) => part.type === "image"); + const toolText = buildToolResultText(textResult, hasImages, supportsImages, msg.isError); + toolContent.push({ type: "text", text: toolText }); + for (const part of msg.content) { + if (!supportsImages) continue; + if (part.type !== "image") continue; + toolContent.push({ + type: "image_url", + imageUrl: `data:${part.mimeType};base64,${part.data}`, + }); + } + result.push({ + role: "tool", + toolCallId: msg.toolCallId, + name: msg.toolName, + content: toolContent, + }); + } + + return result; +} + +function buildToolResultText(text: string, hasImages: boolean, supportsImages: boolean, isError: boolean): string { + const trimmed = text.trim(); + const errorPrefix = isError ? "[tool error] " : ""; + + if (trimmed.length > 0) { + const imageSuffix = hasImages && !supportsImages ? "\n[tool image omitted: model does not support images]" : ""; + return `${errorPrefix}${trimmed}${imageSuffix}`; + } + + if (hasImages) { + if (supportsImages) { + return isError ? "[tool error] (see attached image)" : "(see attached image)"; + } + return isError + ? "[tool error] (image omitted: model does not support images)" + : "(image omitted: model does not support images)"; + } + + return isError ? 
"[tool error] (no tool output)" : "(no tool output)"; +} + +function mapToolChoice( + choice: MistralOptions["toolChoice"], +): "auto" | "none" | "any" | "required" | { type: "function"; function: { name: string } } | undefined { + if (!choice) return undefined; + if (choice === "auto" || choice === "none" || choice === "any" || choice === "required") { + return choice as any; + } + return { + type: "function", + function: { name: choice.function.name }, + }; +} + +function mapChatStopReason(reason: string | null): StopReason { + if (reason === null) return "stop"; + switch (reason) { + case "stop": + return "stop"; + case "length": + case "model_length": + return "length"; + case "tool_calls": + return "toolUse"; + case "error": + return "error"; + default: + return "stop"; + } +} diff --git a/packages/pi-ai/src/providers/openai-codex-responses.ts b/packages/pi-ai/src/providers/openai-codex-responses.ts new file mode 100644 index 000000000..8c9b8aae5 --- /dev/null +++ b/packages/pi-ai/src/providers/openai-codex-responses.ts @@ -0,0 +1,875 @@ +import type * as NodeOs from "node:os"; +import type { Tool as OpenAITool, ResponseInput, ResponseStreamEvent } from "openai/resources/responses/responses.js"; + +// NEVER convert to top-level runtime imports - breaks browser/Vite builds (web-ui) +let _os: typeof NodeOs | null = null; + +type DynamicImport = (specifier: string) => Promise; + +const dynamicImport: DynamicImport = (specifier) => import(specifier); +const NODE_OS_SPECIFIER = "node:" + "os"; + +if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) { + dynamicImport(NODE_OS_SPECIFIER).then((m) => { + _os = m as typeof NodeOs; + }); +} + +import { getEnvApiKey } from "../env-api-keys.js"; +import { supportsXhigh } from "../models.js"; +import type { + Api, + AssistantMessage, + Context, + Model, + SimpleStreamOptions, + StreamFunction, + StreamOptions, +} from "../types.js"; +import { AssistantMessageEventStream } from 
"../utils/event-stream.js"; +import { convertResponsesMessages, convertResponsesTools, processResponsesStream } from "./openai-responses-shared.js"; +import { buildBaseOptions, clampReasoning } from "./simple-options.js"; + +// ============================================================================ +// Configuration +// ============================================================================ + +const DEFAULT_CODEX_BASE_URL = "https://chatgpt.com/backend-api"; +const JWT_CLAIM_PATH = "https://api.openai.com/auth" as const; +const MAX_RETRIES = 3; +const BASE_DELAY_MS = 1000; +const CODEX_TOOL_CALL_PROVIDERS = new Set(["openai", "openai-codex", "opencode"]); + +const CODEX_RESPONSE_STATUSES = new Set([ + "completed", + "incomplete", + "failed", + "cancelled", + "queued", + "in_progress", +]); + +// ============================================================================ +// Types +// ============================================================================ + +export interface OpenAICodexResponsesOptions extends StreamOptions { + reasoningEffort?: "none" | "minimal" | "low" | "medium" | "high" | "xhigh"; + reasoningSummary?: "auto" | "concise" | "detailed" | "off" | "on" | null; + textVerbosity?: "low" | "medium" | "high"; +} + +type CodexResponseStatus = "completed" | "incomplete" | "failed" | "cancelled" | "queued" | "in_progress"; + +interface RequestBody { + model: string; + store?: boolean; + stream?: boolean; + instructions?: string; + input?: ResponseInput; + tools?: OpenAITool[]; + tool_choice?: "auto"; + parallel_tool_calls?: boolean; + temperature?: number; + reasoning?: { effort?: string; summary?: string }; + text?: { verbosity?: string }; + include?: string[]; + prompt_cache_key?: string; + [key: string]: unknown; +} + +// ============================================================================ +// Retry Helpers +// ============================================================================ + +function isRetryableError(status: number, 
errorText: string): boolean { + if (status === 429 || status === 500 || status === 502 || status === 503 || status === 504) { + return true; + } + return /rate.?limit|overloaded|service.?unavailable|upstream.?connect|connection.?refused/i.test(errorText); +} + +function sleep(ms: number, signal?: AbortSignal): Promise { + return new Promise((resolve, reject) => { + if (signal?.aborted) { + reject(new Error("Request was aborted")); + return; + } + const timeout = setTimeout(resolve, ms); + signal?.addEventListener("abort", () => { + clearTimeout(timeout); + reject(new Error("Request was aborted")); + }); + }); +} + +// ============================================================================ +// Main Stream Function +// ============================================================================ + +export const streamOpenAICodexResponses: StreamFunction<"openai-codex-responses", OpenAICodexResponsesOptions> = ( + model: Model<"openai-codex-responses">, + context: Context, + options?: OpenAICodexResponsesOptions, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + (async () => { + const output: AssistantMessage = { + role: "assistant", + content: [], + api: "openai-codex-responses" as Api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "stop", + timestamp: Date.now(), + }; + + try { + const apiKey = options?.apiKey || getEnvApiKey(model.provider) || ""; + if (!apiKey) { + throw new Error(`No API key for provider: ${model.provider}`); + } + + const accountId = extractAccountId(apiKey); + let body = buildRequestBody(model, context, options); + const nextBody = await options?.onPayload?.(body, model); + if (nextBody !== undefined) { + body = nextBody as RequestBody; + } + const headers = buildHeaders(model.headers, options?.headers, accountId, apiKey, 
options?.sessionId); + const bodyJson = JSON.stringify(body); + const transport = options?.transport || "sse"; + + if (transport !== "sse") { + let websocketStarted = false; + try { + await processWebSocketStream( + resolveCodexWebSocketUrl(model.baseUrl), + body, + headers, + output, + stream, + model, + () => { + websocketStarted = true; + }, + options, + ); + + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + stream.push({ + type: "done", + reason: output.stopReason as "stop" | "length" | "toolUse", + message: output, + }); + stream.end(); + return; + } catch (error) { + if (transport === "websocket" || websocketStarted) { + throw error; + } + } + } + + // Fetch with retry logic for rate limits and transient errors + let response: Response | undefined; + let lastError: Error | undefined; + + for (let attempt = 0; attempt <= MAX_RETRIES; attempt++) { + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + try { + response = await fetch(resolveCodexUrl(model.baseUrl), { + method: "POST", + headers, + body: bodyJson, + signal: options?.signal, + }); + + if (response.ok) { + break; + } + + const errorText = await response.text(); + if (attempt < MAX_RETRIES && isRetryableError(response.status, errorText)) { + const delayMs = BASE_DELAY_MS * 2 ** attempt; + await sleep(delayMs, options?.signal); + continue; + } + + // Parse error for friendly message on final attempt or non-retryable error + const fakeResponse = new Response(errorText, { + status: response.status, + statusText: response.statusText, + }); + const info = await parseErrorResponse(fakeResponse); + throw new Error(info.friendlyMessage || info.message); + } catch (error) { + if (error instanceof Error) { + if (error.name === "AbortError" || error.message === "Request was aborted") { + throw new Error("Request was aborted"); + } + } + lastError = error instanceof Error ? 
error : new Error(String(error)); + // Network errors are retryable + if (attempt < MAX_RETRIES && !lastError.message.includes("usage limit")) { + const delayMs = BASE_DELAY_MS * 2 ** attempt; + await sleep(delayMs, options?.signal); + continue; + } + throw lastError; + } + } + + if (!response?.ok) { + throw lastError ?? new Error("Failed after retries"); + } + + if (!response.body) { + throw new Error("No response body"); + } + + stream.push({ type: "start", partial: output }); + await processStream(response, output, stream, model); + + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + stream.push({ type: "done", reason: output.stopReason as "stop" | "length" | "toolUse", message: output }); + stream.end(); + } catch (error) { + output.stopReason = options?.signal?.aborted ? "aborted" : "error"; + output.errorMessage = error instanceof Error ? error.message : String(error); + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +export const streamSimpleOpenAICodexResponses: StreamFunction<"openai-codex-responses", SimpleStreamOptions> = ( + model: Model<"openai-codex-responses">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const apiKey = options?.apiKey || getEnvApiKey(model.provider); + if (!apiKey) { + throw new Error(`No API key for provider: ${model.provider}`); + } + + const base = buildBaseOptions(model, options, apiKey); + const reasoningEffort = supportsXhigh(model) ? 
options?.reasoning : clampReasoning(options?.reasoning); + + return streamOpenAICodexResponses(model, context, { + ...base, + reasoningEffort, + } satisfies OpenAICodexResponsesOptions); +}; + +// ============================================================================ +// Request Building +// ============================================================================ + +function buildRequestBody( + model: Model<"openai-codex-responses">, + context: Context, + options?: OpenAICodexResponsesOptions, +): RequestBody { + const messages = convertResponsesMessages(model, context, CODEX_TOOL_CALL_PROVIDERS, { + includeSystemPrompt: false, + }); + + const body: RequestBody = { + model: model.id, + store: false, + stream: true, + instructions: context.systemPrompt, + input: messages, + text: { verbosity: options?.textVerbosity || "medium" }, + include: ["reasoning.encrypted_content"], + prompt_cache_key: options?.sessionId, + tool_choice: "auto", + parallel_tool_calls: true, + }; + + if (options?.temperature !== undefined) { + body.temperature = options.temperature; + } + + if (context.tools) { + body.tools = convertResponsesTools(context.tools, { strict: null }); + } + + if (options?.reasoningEffort !== undefined) { + body.reasoning = { + effort: clampReasoningEffort(model.id, options.reasoningEffort), + summary: options.reasoningSummary ?? "auto", + }; + } + + return body; +} + +function clampReasoningEffort(modelId: string, effort: string): string { + const id = modelId.includes("/") ? modelId.split("/").pop()! : modelId; + if ((id.startsWith("gpt-5.2") || id.startsWith("gpt-5.3") || id.startsWith("gpt-5.4")) && effort === "minimal") + return "low"; + if (id === "gpt-5.1" && effort === "xhigh") return "high"; + if (id === "gpt-5.1-codex-mini") return effort === "high" || effort === "xhigh" ? "high" : "medium"; + return effort; +} + +function resolveCodexUrl(baseUrl?: string): string { + const raw = baseUrl && baseUrl.trim().length > 0 ? 
baseUrl : DEFAULT_CODEX_BASE_URL; + const normalized = raw.replace(/\/+$/, ""); + if (normalized.endsWith("/codex/responses")) return normalized; + if (normalized.endsWith("/codex")) return `${normalized}/responses`; + return `${normalized}/codex/responses`; +} + +function resolveCodexWebSocketUrl(baseUrl?: string): string { + const url = new URL(resolveCodexUrl(baseUrl)); + if (url.protocol === "https:") url.protocol = "wss:"; + if (url.protocol === "http:") url.protocol = "ws:"; + return url.toString(); +} + +// ============================================================================ +// Response Processing +// ============================================================================ + +async function processStream( + response: Response, + output: AssistantMessage, + stream: AssistantMessageEventStream, + model: Model<"openai-codex-responses">, +): Promise { + await processResponsesStream(mapCodexEvents(parseSSE(response)), output, stream, model); +} + +async function* mapCodexEvents(events: AsyncIterable>): AsyncGenerator { + for await (const event of events) { + const type = typeof event.type === "string" ? event.type : undefined; + if (!type) continue; + + if (type === "error") { + const code = (event as { code?: string }).code || ""; + const message = (event as { message?: string }).message || ""; + throw new Error(`Codex error: ${message || code || JSON.stringify(event)}`); + } + + if (type === "response.failed") { + const msg = (event as { response?: { error?: { message?: string } } }).response?.error?.message; + throw new Error(msg || "Codex response failed"); + } + + if (type === "response.done" || type === "response.completed") { + const response = (event as { response?: { status?: unknown } }).response; + const normalizedResponse = response + ? 
{ ...response, status: normalizeCodexStatus(response.status) } + : response; + yield { ...event, type: "response.completed", response: normalizedResponse } as ResponseStreamEvent; + continue; + } + + yield event as unknown as ResponseStreamEvent; + } +} + +function normalizeCodexStatus(status: unknown): CodexResponseStatus | undefined { + if (typeof status !== "string") return undefined; + return CODEX_RESPONSE_STATUSES.has(status as CodexResponseStatus) ? (status as CodexResponseStatus) : undefined; +} + +// ============================================================================ +// SSE Parsing +// ============================================================================ + +async function* parseSSE(response: Response): AsyncGenerator> { + if (!response.body) return; + + const reader = response.body.getReader(); + const decoder = new TextDecoder(); + let buffer = ""; + + while (true) { + const { done, value } = await reader.read(); + if (done) break; + buffer += decoder.decode(value, { stream: true }); + + let idx = buffer.indexOf("\n\n"); + while (idx !== -1) { + const chunk = buffer.slice(0, idx); + buffer = buffer.slice(idx + 2); + + const dataLines = chunk + .split("\n") + .filter((l) => l.startsWith("data:")) + .map((l) => l.slice(5).trim()); + if (dataLines.length > 0) { + const data = dataLines.join("\n").trim(); + if (data && data !== "[DONE]") { + try { + yield JSON.parse(data); + } catch {} + } + } + idx = buffer.indexOf("\n\n"); + } + } +} + +// ============================================================================ +// WebSocket Parsing +// ============================================================================ + +const OPENAI_BETA_RESPONSES_WEBSOCKETS = "responses_websockets=2026-02-06"; +const SESSION_WEBSOCKET_CACHE_TTL_MS = 5 * 60 * 1000; + +type WebSocketEventType = "open" | "message" | "error" | "close"; +type WebSocketListener = (event: unknown) => void; + +interface WebSocketLike { + close(code?: number, reason?: string): void; 
+ send(data: string): void; + addEventListener(type: WebSocketEventType, listener: WebSocketListener): void; + removeEventListener(type: WebSocketEventType, listener: WebSocketListener): void; +} + +interface CachedWebSocketConnection { + socket: WebSocketLike; + busy: boolean; + idleTimer?: ReturnType; +} + +const websocketSessionCache = new Map(); + +type WebSocketConstructor = new ( + url: string, + protocols?: string | string[] | { headers?: Record }, +) => WebSocketLike; + +function getWebSocketConstructor(): WebSocketConstructor | null { + const ctor = (globalThis as { WebSocket?: unknown }).WebSocket; + if (typeof ctor !== "function") return null; + return ctor as unknown as WebSocketConstructor; +} + +function headersToRecord(headers: Headers): Record { + const out: Record = {}; + for (const [key, value] of headers.entries()) { + out[key] = value; + } + return out; +} + +function getWebSocketReadyState(socket: WebSocketLike): number | undefined { + const readyState = (socket as { readyState?: unknown }).readyState; + return typeof readyState === "number" ? readyState : undefined; +} + +function isWebSocketReusable(socket: WebSocketLike): boolean { + const readyState = getWebSocketReadyState(socket); + // If readyState is unavailable, assume the runtime keeps it open/reusable. 
+ return readyState === undefined || readyState === 1; +} + +function closeWebSocketSilently(socket: WebSocketLike, code = 1000, reason = "done"): void { + try { + socket.close(code, reason); + } catch {} +} + +function scheduleSessionWebSocketExpiry(sessionId: string, entry: CachedWebSocketConnection): void { + if (entry.idleTimer) { + clearTimeout(entry.idleTimer); + } + entry.idleTimer = setTimeout(() => { + if (entry.busy) return; + closeWebSocketSilently(entry.socket, 1000, "idle_timeout"); + websocketSessionCache.delete(sessionId); + }, SESSION_WEBSOCKET_CACHE_TTL_MS); +} + +async function connectWebSocket(url: string, headers: Headers, signal?: AbortSignal): Promise { + const WebSocketCtor = getWebSocketConstructor(); + if (!WebSocketCtor) { + throw new Error("WebSocket transport is not available in this runtime"); + } + + const wsHeaders = headersToRecord(headers); + wsHeaders["OpenAI-Beta"] = OPENAI_BETA_RESPONSES_WEBSOCKETS; + + return new Promise((resolve, reject) => { + let settled = false; + let socket: WebSocketLike; + + try { + socket = new WebSocketCtor(url, { headers: wsHeaders }); + } catch (error) { + reject(error instanceof Error ? 
error : new Error(String(error))); + return; + } + + const onOpen: WebSocketListener = () => { + if (settled) return; + settled = true; + cleanup(); + resolve(socket); + }; + const onError: WebSocketListener = (event) => { + if (settled) return; + settled = true; + cleanup(); + reject(extractWebSocketError(event)); + }; + const onClose: WebSocketListener = (event) => { + if (settled) return; + settled = true; + cleanup(); + reject(extractWebSocketCloseError(event)); + }; + const onAbort = () => { + if (settled) return; + settled = true; + cleanup(); + socket.close(1000, "aborted"); + reject(new Error("Request was aborted")); + }; + + const cleanup = () => { + socket.removeEventListener("open", onOpen); + socket.removeEventListener("error", onError); + socket.removeEventListener("close", onClose); + signal?.removeEventListener("abort", onAbort); + }; + + socket.addEventListener("open", onOpen); + socket.addEventListener("error", onError); + socket.addEventListener("close", onClose); + signal?.addEventListener("abort", onAbort); + }); +} + +async function acquireWebSocket( + url: string, + headers: Headers, + sessionId: string | undefined, + signal?: AbortSignal, +): Promise<{ socket: WebSocketLike; release: (options?: { keep?: boolean }) => void }> { + if (!sessionId) { + const socket = await connectWebSocket(url, headers, signal); + return { + socket, + release: ({ keep } = {}) => { + if (keep === false) { + closeWebSocketSilently(socket); + return; + } + closeWebSocketSilently(socket); + }, + }; + } + + const cached = websocketSessionCache.get(sessionId); + if (cached) { + if (cached.idleTimer) { + clearTimeout(cached.idleTimer); + cached.idleTimer = undefined; + } + if (!cached.busy && isWebSocketReusable(cached.socket)) { + cached.busy = true; + return { + socket: cached.socket, + release: ({ keep } = {}) => { + if (!keep || !isWebSocketReusable(cached.socket)) { + closeWebSocketSilently(cached.socket); + websocketSessionCache.delete(sessionId); + return; + } + 
cached.busy = false; + scheduleSessionWebSocketExpiry(sessionId, cached); + }, + }; + } + if (cached.busy) { + const socket = await connectWebSocket(url, headers, signal); + return { + socket, + release: () => { + closeWebSocketSilently(socket); + }, + }; + } + if (!isWebSocketReusable(cached.socket)) { + closeWebSocketSilently(cached.socket); + websocketSessionCache.delete(sessionId); + } + } + + const socket = await connectWebSocket(url, headers, signal); + const entry: CachedWebSocketConnection = { socket, busy: true }; + websocketSessionCache.set(sessionId, entry); + return { + socket, + release: ({ keep } = {}) => { + if (!keep || !isWebSocketReusable(entry.socket)) { + closeWebSocketSilently(entry.socket); + if (entry.idleTimer) clearTimeout(entry.idleTimer); + if (websocketSessionCache.get(sessionId) === entry) { + websocketSessionCache.delete(sessionId); + } + return; + } + entry.busy = false; + scheduleSessionWebSocketExpiry(sessionId, entry); + }, + }; +} + +function extractWebSocketError(event: unknown): Error { + if (event && typeof event === "object" && "message" in event) { + const message = (event as { message?: unknown }).message; + if (typeof message === "string" && message.length > 0) { + return new Error(message); + } + } + return new Error("WebSocket error"); +} + +function extractWebSocketCloseError(event: unknown): Error { + if (event && typeof event === "object") { + const code = "code" in event ? (event as { code?: unknown }).code : undefined; + const reason = "reason" in event ? (event as { reason?: unknown }).reason : undefined; + const codeText = typeof code === "number" ? ` ${code}` : ""; + const reasonText = typeof reason === "string" && reason.length > 0 ? 
` ${reason}` : ""; + return new Error(`WebSocket closed${codeText}${reasonText}`.trim()); + } + return new Error("WebSocket closed"); +} + +async function decodeWebSocketData(data: unknown): Promise { + if (typeof data === "string") return data; + if (data instanceof ArrayBuffer) { + return new TextDecoder().decode(new Uint8Array(data)); + } + if (ArrayBuffer.isView(data)) { + const view = data as ArrayBufferView; + return new TextDecoder().decode(new Uint8Array(view.buffer, view.byteOffset, view.byteLength)); + } + if (data && typeof data === "object" && "arrayBuffer" in data) { + const blobLike = data as { arrayBuffer: () => Promise }; + const arrayBuffer = await blobLike.arrayBuffer(); + return new TextDecoder().decode(new Uint8Array(arrayBuffer)); + } + return null; +} + +async function* parseWebSocket(socket: WebSocketLike, signal?: AbortSignal): AsyncGenerator> { + const queue: Record[] = []; + let pending: (() => void) | null = null; + let done = false; + let failed: Error | null = null; + let sawCompletion = false; + + const wake = () => { + if (!pending) return; + const resolve = pending; + pending = null; + resolve(); + }; + + const onMessage: WebSocketListener = (event) => { + void (async () => { + if (!event || typeof event !== "object" || !("data" in event)) return; + const text = await decodeWebSocketData((event as { data?: unknown }).data); + if (!text) return; + try { + const parsed = JSON.parse(text) as Record; + const type = typeof parsed.type === "string" ? 
parsed.type : ""; + if (type === "response.completed" || type === "response.done") { + sawCompletion = true; + done = true; + } + queue.push(parsed); + wake(); + } catch {} + })(); + }; + + const onError: WebSocketListener = (event) => { + failed = extractWebSocketError(event); + done = true; + wake(); + }; + + const onClose: WebSocketListener = (event) => { + if (sawCompletion) { + done = true; + wake(); + return; + } + if (!failed) { + failed = extractWebSocketCloseError(event); + } + done = true; + wake(); + }; + + const onAbort = () => { + failed = new Error("Request was aborted"); + done = true; + wake(); + }; + + socket.addEventListener("message", onMessage); + socket.addEventListener("error", onError); + socket.addEventListener("close", onClose); + signal?.addEventListener("abort", onAbort); + + try { + while (true) { + if (signal?.aborted) { + throw new Error("Request was aborted"); + } + if (queue.length > 0) { + yield queue.shift()!; + continue; + } + if (done) break; + await new Promise((resolve) => { + pending = resolve; + }); + } + + if (failed) { + throw failed; + } + if (!sawCompletion) { + throw new Error("WebSocket stream closed before response.completed"); + } + } finally { + socket.removeEventListener("message", onMessage); + socket.removeEventListener("error", onError); + socket.removeEventListener("close", onClose); + signal?.removeEventListener("abort", onAbort); + } +} + +async function processWebSocketStream( + url: string, + body: RequestBody, + headers: Headers, + output: AssistantMessage, + stream: AssistantMessageEventStream, + model: Model<"openai-codex-responses">, + onStart: () => void, + options?: OpenAICodexResponsesOptions, +): Promise { + const { socket, release } = await acquireWebSocket(url, headers, options?.sessionId, options?.signal); + let keepConnection = true; + try { + socket.send(JSON.stringify({ type: "response.create", ...body })); + onStart(); + stream.push({ type: "start", partial: output }); + await 
processResponsesStream(mapCodexEvents(parseWebSocket(socket, options?.signal)), output, stream, model); + if (options?.signal?.aborted) { + keepConnection = false; + } + } catch (error) { + keepConnection = false; + throw error; + } finally { + release({ keep: keepConnection }); + } +} + +// ============================================================================ +// Error Handling +// ============================================================================ + +async function parseErrorResponse(response: Response): Promise<{ message: string; friendlyMessage?: string }> { + const raw = await response.text(); + let message = raw || response.statusText || "Request failed"; + let friendlyMessage: string | undefined; + + try { + const parsed = JSON.parse(raw) as { + error?: { code?: string; type?: string; message?: string; plan_type?: string; resets_at?: number }; + }; + const err = parsed?.error; + if (err) { + const code = err.code || err.type || ""; + if (/usage_limit_reached|usage_not_included|rate_limit_exceeded/i.test(code) || response.status === 429) { + const plan = err.plan_type ? ` (${err.plan_type.toLowerCase()} plan)` : ""; + const mins = err.resets_at + ? Math.max(0, Math.round((err.resets_at * 1000 - Date.now()) / 60000)) + : undefined; + const when = mins !== undefined ? 
` Try again in ~${mins} min.` : ""; + friendlyMessage = `You have hit your ChatGPT usage limit${plan}.${when}`.trim(); + } + message = err.message || friendlyMessage || message; + } + } catch {} + + return { message, friendlyMessage }; +} + +// ============================================================================ +// Auth & Headers +// ============================================================================ + +function extractAccountId(token: string): string { + try { + const parts = token.split("."); + if (parts.length !== 3) throw new Error("Invalid token"); + const payload = JSON.parse(atob(parts[1])); + const accountId = payload?.[JWT_CLAIM_PATH]?.chatgpt_account_id; + if (!accountId) throw new Error("No account ID in token"); + return accountId; + } catch { + throw new Error("Failed to extract accountId from token"); + } +} + +function buildHeaders( + initHeaders: Record | undefined, + additionalHeaders: Record | undefined, + accountId: string, + token: string, + sessionId?: string, +): Headers { + const headers = new Headers(initHeaders); + headers.set("Authorization", `Bearer ${token}`); + headers.set("chatgpt-account-id", accountId); + headers.set("OpenAI-Beta", "responses=experimental"); + headers.set("originator", "pi"); + const userAgent = _os ? 
`pi (${_os.platform()} ${_os.release()}; ${_os.arch()})` : "pi (browser)"; + headers.set("User-Agent", userAgent); + headers.set("accept", "text/event-stream"); + headers.set("content-type", "application/json"); + for (const [key, value] of Object.entries(additionalHeaders || {})) { + headers.set(key, value); + } + + if (sessionId) { + headers.set("session_id", sessionId); + } + + return headers; +} diff --git a/packages/pi-ai/src/providers/openai-completions.ts b/packages/pi-ai/src/providers/openai-completions.ts new file mode 100644 index 000000000..2d28bf4e1 --- /dev/null +++ b/packages/pi-ai/src/providers/openai-completions.ts @@ -0,0 +1,820 @@ +import OpenAI from "openai"; +import type { + ChatCompletionAssistantMessageParam, + ChatCompletionChunk, + ChatCompletionContentPart, + ChatCompletionContentPartImage, + ChatCompletionContentPartText, + ChatCompletionMessageParam, + ChatCompletionToolMessageParam, +} from "openai/resources/chat/completions.js"; +import { getEnvApiKey } from "../env-api-keys.js"; +import { calculateCost, supportsXhigh } from "../models.js"; +import type { + AssistantMessage, + Context, + Message, + Model, + OpenAICompletionsCompat, + SimpleStreamOptions, + StopReason, + StreamFunction, + StreamOptions, + TextContent, + ThinkingContent, + Tool, + ToolCall, + ToolResultMessage, +} from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { parseStreamingJson } from "../utils/json-parse.js"; +import { sanitizeSurrogates } from "../utils/sanitize-unicode.js"; +import { buildCopilotDynamicHeaders, hasCopilotVisionInput } from "./github-copilot-headers.js"; +import { buildBaseOptions, clampReasoning } from "./simple-options.js"; +import { transformMessages } from "./transform-messages.js"; + +/** + * Check if conversation messages contain tool calls or tool results. 
+ * This is needed because Anthropic (via proxy) requires the tools param + * to be present when messages include tool_calls or tool role messages. + */ +function hasToolHistory(messages: Message[]): boolean { + for (const msg of messages) { + if (msg.role === "toolResult") { + return true; + } + if (msg.role === "assistant") { + if (msg.content.some((block) => block.type === "toolCall")) { + return true; + } + } + } + return false; +} + +export interface OpenAICompletionsOptions extends StreamOptions { + toolChoice?: "auto" | "none" | "required" | { type: "function"; function: { name: string } }; + reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh"; +} + +export const streamOpenAICompletions: StreamFunction<"openai-completions", OpenAICompletionsOptions> = ( + model: Model<"openai-completions">, + context: Context, + options?: OpenAICompletionsOptions, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + (async () => { + const output: AssistantMessage = { + role: "assistant", + content: [], + api: model.api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "stop", + timestamp: Date.now(), + }; + + try { + const apiKey = options?.apiKey || getEnvApiKey(model.provider) || ""; + const client = createClient(model, context, apiKey, options?.headers); + let params = buildParams(model, context, options); + const nextParams = await options?.onPayload?.(params, model); + if (nextParams !== undefined) { + params = nextParams as OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming; + } + const openaiStream = await client.chat.completions.create(params, { signal: options?.signal }); + stream.push({ type: "start", partial: output }); + + let currentBlock: TextContent | ThinkingContent | (ToolCall & { partialArgs?: string }) | null = null; + 
const blocks = output.content; + const blockIndex = () => blocks.length - 1; + const finishCurrentBlock = (block?: typeof currentBlock) => { + if (block) { + if (block.type === "text") { + stream.push({ + type: "text_end", + contentIndex: blockIndex(), + content: block.text, + partial: output, + }); + } else if (block.type === "thinking") { + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: block.thinking, + partial: output, + }); + } else if (block.type === "toolCall") { + block.arguments = parseStreamingJson(block.partialArgs); + delete block.partialArgs; + stream.push({ + type: "toolcall_end", + contentIndex: blockIndex(), + toolCall: block, + partial: output, + }); + } + } + }; + + for await (const chunk of openaiStream) { + if (chunk.usage) { + const cachedTokens = chunk.usage.prompt_tokens_details?.cached_tokens || 0; + const reasoningTokens = chunk.usage.completion_tokens_details?.reasoning_tokens || 0; + const input = (chunk.usage.prompt_tokens || 0) - cachedTokens; + const outputTokens = (chunk.usage.completion_tokens || 0) + reasoningTokens; + output.usage = { + // OpenAI includes cached tokens in prompt_tokens, so subtract to get non-cached input + input, + output: outputTokens, + cacheRead: cachedTokens, + cacheWrite: 0, + // Compute totalTokens ourselves since we add reasoning_tokens to output + // and some providers (e.g., Groq) don't include them in total_tokens + totalTokens: input + outputTokens + cachedTokens, + cost: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + total: 0, + }, + }; + calculateCost(model, output.usage); + } + + const choice = chunk.choices?.[0]; + if (!choice) continue; + + if (choice.finish_reason) { + output.stopReason = mapStopReason(choice.finish_reason); + } + + if (choice.delta) { + if ( + choice.delta.content !== null && + choice.delta.content !== undefined && + choice.delta.content.length > 0 + ) { + if (!currentBlock || currentBlock.type !== "text") { + 
finishCurrentBlock(currentBlock); + currentBlock = { type: "text", text: "" }; + output.content.push(currentBlock); + stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output }); + } + + if (currentBlock.type === "text") { + currentBlock.text += choice.delta.content; + stream.push({ + type: "text_delta", + contentIndex: blockIndex(), + delta: choice.delta.content, + partial: output, + }); + } + } + + // Some endpoints return reasoning in reasoning_content (llama.cpp), + // or reasoning (other openai compatible endpoints) + // Use the first non-empty reasoning field to avoid duplication + // (e.g., chutes.ai returns both reasoning_content and reasoning with same content) + const reasoningFields = ["reasoning_content", "reasoning", "reasoning_text"]; + let foundReasoningField: string | null = null; + for (const field of reasoningFields) { + if ( + (choice.delta as any)[field] !== null && + (choice.delta as any)[field] !== undefined && + (choice.delta as any)[field].length > 0 + ) { + if (!foundReasoningField) { + foundReasoningField = field; + break; + } + } + } + + if (foundReasoningField) { + if (!currentBlock || currentBlock.type !== "thinking") { + finishCurrentBlock(currentBlock); + currentBlock = { + type: "thinking", + thinking: "", + thinkingSignature: foundReasoningField, + }; + output.content.push(currentBlock); + stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output }); + } + + if (currentBlock.type === "thinking") { + const delta = (choice.delta as any)[foundReasoningField]; + currentBlock.thinking += delta; + stream.push({ + type: "thinking_delta", + contentIndex: blockIndex(), + delta, + partial: output, + }); + } + } + + if (choice?.delta?.tool_calls) { + for (const toolCall of choice.delta.tool_calls) { + if ( + !currentBlock || + currentBlock.type !== "toolCall" || + (toolCall.id && currentBlock.id !== toolCall.id) + ) { + finishCurrentBlock(currentBlock); + currentBlock = { + type: "toolCall", + id: 
toolCall.id || "", + name: toolCall.function?.name || "", + arguments: {}, + partialArgs: "", + }; + output.content.push(currentBlock); + stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output }); + } + + if (currentBlock.type === "toolCall") { + if (toolCall.id) currentBlock.id = toolCall.id; + if (toolCall.function?.name) currentBlock.name = toolCall.function.name; + let delta = ""; + if (toolCall.function?.arguments) { + delta = toolCall.function.arguments; + currentBlock.partialArgs += toolCall.function.arguments; + currentBlock.arguments = parseStreamingJson(currentBlock.partialArgs); + } + stream.push({ + type: "toolcall_delta", + contentIndex: blockIndex(), + delta, + partial: output, + }); + } + } + } + + const reasoningDetails = (choice.delta as any).reasoning_details; + if (reasoningDetails && Array.isArray(reasoningDetails)) { + for (const detail of reasoningDetails) { + if (detail.type === "reasoning.encrypted" && detail.id && detail.data) { + const matchingToolCall = output.content.find( + (b) => b.type === "toolCall" && b.id === detail.id, + ) as ToolCall | undefined; + if (matchingToolCall) { + matchingToolCall.thoughtSignature = JSON.stringify(detail); + } + } + } + } + } + } + + finishCurrentBlock(currentBlock); + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (output.stopReason === "aborted" || output.stopReason === "error") { + throw new Error("An unknown error occurred"); + } + + stream.push({ type: "done", reason: output.stopReason, message: output }); + stream.end(); + } catch (error) { + for (const block of output.content) delete (block as any).index; + output.stopReason = options?.signal?.aborted ? "aborted" : "error"; + output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error); + // Some providers via OpenRouter give additional information in this field. 
+ const rawMetadata = (error as any)?.error?.metadata?.raw; + if (rawMetadata) output.errorMessage += `\n${rawMetadata}`; + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +export const streamSimpleOpenAICompletions: StreamFunction<"openai-completions", SimpleStreamOptions> = ( + model: Model<"openai-completions">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const apiKey = options?.apiKey || getEnvApiKey(model.provider); + if (!apiKey) { + throw new Error(`No API key for provider: ${model.provider}`); + } + + const base = buildBaseOptions(model, options, apiKey); + const reasoningEffort = supportsXhigh(model) ? options?.reasoning : clampReasoning(options?.reasoning); + const toolChoice = (options as OpenAICompletionsOptions | undefined)?.toolChoice; + + return streamOpenAICompletions(model, context, { + ...base, + reasoningEffort, + toolChoice, + } satisfies OpenAICompletionsOptions); +}; + +function createClient( + model: Model<"openai-completions">, + context: Context, + apiKey?: string, + optionsHeaders?: Record, +) { + if (!apiKey) { + if (!process.env.OPENAI_API_KEY) { + throw new Error( + "OpenAI API key is required. 
Set OPENAI_API_KEY environment variable or pass it as an argument.", + ); + } + apiKey = process.env.OPENAI_API_KEY; + } + + const headers = { ...model.headers }; + if (model.provider === "github-copilot") { + const hasImages = hasCopilotVisionInput(context.messages); + const copilotHeaders = buildCopilotDynamicHeaders({ + messages: context.messages, + hasImages, + }); + Object.assign(headers, copilotHeaders); + } + + // Merge options headers last so they can override defaults + if (optionsHeaders) { + Object.assign(headers, optionsHeaders); + } + + return new OpenAI({ + apiKey, + baseURL: model.baseUrl, + dangerouslyAllowBrowser: true, + defaultHeaders: headers, + }); +} + +function buildParams(model: Model<"openai-completions">, context: Context, options?: OpenAICompletionsOptions) { + const compat = getCompat(model); + const messages = convertMessages(model, context, compat); + maybeAddOpenRouterAnthropicCacheControl(model, messages); + + const params: OpenAI.Chat.Completions.ChatCompletionCreateParamsStreaming = { + model: model.id, + messages, + stream: true, + }; + + if (compat.supportsUsageInStreaming !== false) { + (params as any).stream_options = { include_usage: true }; + } + + if (compat.supportsStore) { + params.store = false; + } + + if (options?.maxTokens) { + if (compat.maxTokensField === "max_tokens") { + (params as any).max_tokens = options.maxTokens; + } else { + params.max_completion_tokens = options.maxTokens; + } + } + + if (options?.temperature !== undefined) { + params.temperature = options.temperature; + } + + if (context.tools) { + params.tools = convertTools(context.tools, compat); + } else if (hasToolHistory(context.messages)) { + // Anthropic (via LiteLLM/proxy) requires tools param when conversation has tool_calls/tool_results + params.tools = []; + } + + if (options?.toolChoice) { + params.tool_choice = options.toolChoice; + } + + if ((compat.thinkingFormat === "zai" || compat.thinkingFormat === "qwen") && model.reasoning) { + // Both 
Z.ai and Qwen use enable_thinking: boolean + (params as any).enable_thinking = !!options?.reasoningEffort; + } else if (options?.reasoningEffort && model.reasoning && compat.supportsReasoningEffort) { + // OpenAI-style reasoning_effort + (params as any).reasoning_effort = mapReasoningEffort(options.reasoningEffort, compat.reasoningEffortMap); + } + + // OpenRouter provider routing preferences + if (model.baseUrl.includes("openrouter.ai") && model.compat?.openRouterRouting) { + (params as any).provider = model.compat.openRouterRouting; + } + + // Vercel AI Gateway provider routing preferences + if (model.baseUrl.includes("ai-gateway.vercel.sh") && model.compat?.vercelGatewayRouting) { + const routing = model.compat.vercelGatewayRouting; + if (routing.only || routing.order) { + const gatewayOptions: Record = {}; + if (routing.only) gatewayOptions.only = routing.only; + if (routing.order) gatewayOptions.order = routing.order; + (params as any).providerOptions = { gateway: gatewayOptions }; + } + } + + return params; +} + +function mapReasoningEffort( + effort: NonNullable, + reasoningEffortMap: Partial, string>>, +): string { + return reasoningEffortMap[effort] ?? effort; +} + +function maybeAddOpenRouterAnthropicCacheControl( + model: Model<"openai-completions">, + messages: ChatCompletionMessageParam[], +): void { + if (model.provider !== "openrouter" || !model.id.startsWith("anthropic/")) return; + + // Anthropic-style caching requires cache_control on a text part. Add a breakpoint + // on the last user/assistant message (walking backwards until we find text content). 
+ for (let i = messages.length - 1; i >= 0; i--) { + const msg = messages[i]; + if (msg.role !== "user" && msg.role !== "assistant") continue; + + const content = msg.content; + if (typeof content === "string") { + msg.content = [ + Object.assign({ type: "text" as const, text: content }, { cache_control: { type: "ephemeral" } }), + ]; + return; + } + + if (!Array.isArray(content)) continue; + + // Find last text part and add cache_control + for (let j = content.length - 1; j >= 0; j--) { + const part = content[j]; + if (part?.type === "text") { + Object.assign(part, { cache_control: { type: "ephemeral" } }); + return; + } + } + } +} + +export function convertMessages( + model: Model<"openai-completions">, + context: Context, + compat: Required, +): ChatCompletionMessageParam[] { + const params: ChatCompletionMessageParam[] = []; + + const normalizeToolCallId = (id: string): string => { + // Handle pipe-separated IDs from OpenAI Responses API + // Format: {call_id}|{id} where {id} can be 400+ chars with special chars (+, /, =) + // These come from providers like github-copilot, openai-codex, opencode + // Extract just the call_id part and normalize it + if (id.includes("|")) { + const [callId] = id.split("|"); + // Sanitize to allowed chars and truncate to 40 chars (OpenAI limit) + return callId.replace(/[^a-zA-Z0-9_-]/g, "_").slice(0, 40); + } + + if (model.provider === "openai") return id.length > 40 ? id.slice(0, 40) : id; + return id; + }; + + const transformedMessages = transformMessages(context.messages, model, (id) => normalizeToolCallId(id)); + + if (context.systemPrompt) { + const useDeveloperRole = model.reasoning && compat.supportsDeveloperRole; + const role = useDeveloperRole ? 
"developer" : "system"; + params.push({ role: role, content: sanitizeSurrogates(context.systemPrompt) }); + } + + let lastRole: string | null = null; + + for (let i = 0; i < transformedMessages.length; i++) { + const msg = transformedMessages[i]; + // Some providers don't allow user messages directly after tool results + // Insert a synthetic assistant message to bridge the gap + if (compat.requiresAssistantAfterToolResult && lastRole === "toolResult" && msg.role === "user") { + params.push({ + role: "assistant", + content: "I have processed the tool results.", + }); + } + + if (msg.role === "user") { + if (typeof msg.content === "string") { + params.push({ + role: "user", + content: sanitizeSurrogates(msg.content), + }); + } else { + const content: ChatCompletionContentPart[] = msg.content.map((item): ChatCompletionContentPart => { + if (item.type === "text") { + return { + type: "text", + text: sanitizeSurrogates(item.text), + } satisfies ChatCompletionContentPartText; + } else { + return { + type: "image_url", + image_url: { + url: `data:${item.mimeType};base64,${item.data}`, + }, + } satisfies ChatCompletionContentPartImage; + } + }); + const filteredContent = !model.input.includes("image") + ? content.filter((c) => c.type !== "image_url") + : content; + if (filteredContent.length === 0) continue; + params.push({ + role: "user", + content: filteredContent, + }); + } + } else if (msg.role === "assistant") { + // Some providers don't accept null content, use empty string instead + const assistantMsg: ChatCompletionAssistantMessageParam = { + role: "assistant", + content: compat.requiresAssistantAfterToolResult ? 
"" : null, + }; + + const textBlocks = msg.content.filter((b) => b.type === "text") as TextContent[]; + // Filter out empty text blocks to avoid API validation errors + const nonEmptyTextBlocks = textBlocks.filter((b) => b.text && b.text.trim().length > 0); + if (nonEmptyTextBlocks.length > 0) { + // GitHub Copilot requires assistant content as a string, not an array. + // Sending as array causes Claude models to re-answer all previous prompts. + if (model.provider === "github-copilot") { + assistantMsg.content = nonEmptyTextBlocks.map((b) => sanitizeSurrogates(b.text)).join(""); + } else { + assistantMsg.content = nonEmptyTextBlocks.map((b) => { + return { type: "text", text: sanitizeSurrogates(b.text) }; + }); + } + } + + // Handle thinking blocks + const thinkingBlocks = msg.content.filter((b) => b.type === "thinking") as ThinkingContent[]; + // Filter out empty thinking blocks to avoid API validation errors + const nonEmptyThinkingBlocks = thinkingBlocks.filter((b) => b.thinking && b.thinking.trim().length > 0); + if (nonEmptyThinkingBlocks.length > 0) { + if (compat.requiresThinkingAsText) { + // Convert thinking blocks to plain text (no tags to avoid model mimicking them) + const thinkingText = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n\n"); + const textContent = assistantMsg.content as Array<{ type: "text"; text: string }> | null; + if (textContent) { + textContent.unshift({ type: "text", text: thinkingText }); + } else { + assistantMsg.content = [{ type: "text", text: thinkingText }]; + } + } else { + // Use the signature from the first thinking block if available (for llama.cpp server + gpt-oss) + const signature = nonEmptyThinkingBlocks[0].thinkingSignature; + if (signature && signature.length > 0) { + (assistantMsg as any)[signature] = nonEmptyThinkingBlocks.map((b) => b.thinking).join("\n"); + } + } + } + + const toolCalls = msg.content.filter((b) => b.type === "toolCall") as ToolCall[]; + if (toolCalls.length > 0) { + 
assistantMsg.tool_calls = toolCalls.map((tc) => ({ + id: tc.id, + type: "function" as const, + function: { + name: tc.name, + arguments: JSON.stringify(tc.arguments), + }, + })); + const reasoningDetails = toolCalls + .filter((tc) => tc.thoughtSignature) + .map((tc) => { + try { + return JSON.parse(tc.thoughtSignature!); + } catch { + return null; + } + }) + .filter(Boolean); + if (reasoningDetails.length > 0) { + (assistantMsg as any).reasoning_details = reasoningDetails; + } + } + // Skip assistant messages that have no content and no tool calls. + // Some providers require "either content or tool_calls, but not none". + // Other providers also don't accept empty assistant messages. + // This handles aborted assistant responses that got no content. + const content = assistantMsg.content; + const hasContent = + content !== null && + content !== undefined && + (typeof content === "string" ? content.length > 0 : content.length > 0); + if (!hasContent && !assistantMsg.tool_calls) { + continue; + } + params.push(assistantMsg); + } else if (msg.role === "toolResult") { + const imageBlocks: Array<{ type: "image_url"; image_url: { url: string } }> = []; + let j = i; + + for (; j < transformedMessages.length && transformedMessages[j].role === "toolResult"; j++) { + const toolMsg = transformedMessages[j] as ToolResultMessage; + + // Extract text and image content + const textResult = toolMsg.content + .filter((c) => c.type === "text") + .map((c) => (c as any).text) + .join("\n"); + const hasImages = toolMsg.content.some((c) => c.type === "image"); + + // Always send tool result with text (or placeholder if only images) + const hasText = textResult.length > 0; + // Some providers require the 'name' field in tool results + const toolResultMsg: ChatCompletionToolMessageParam = { + role: "tool", + content: sanitizeSurrogates(hasText ? 
textResult : "(see attached image)"), + tool_call_id: toolMsg.toolCallId, + }; + if (compat.requiresToolResultName && toolMsg.toolName) { + (toolResultMsg as any).name = toolMsg.toolName; + } + params.push(toolResultMsg); + + if (hasImages && model.input.includes("image")) { + for (const block of toolMsg.content) { + if (block.type === "image") { + imageBlocks.push({ + type: "image_url", + image_url: { + url: `data:${(block as any).mimeType};base64,${(block as any).data}`, + }, + }); + } + } + } + } + + i = j - 1; + + if (imageBlocks.length > 0) { + if (compat.requiresAssistantAfterToolResult) { + params.push({ + role: "assistant", + content: "I have processed the tool results.", + }); + } + + params.push({ + role: "user", + content: [ + { + type: "text", + text: "Attached image(s) from tool result:", + }, + ...imageBlocks, + ], + }); + lastRole = "user"; + } else { + lastRole = "toolResult"; + } + continue; + } + + lastRole = msg.role; + } + + return params; +} + +function convertTools( + tools: Tool[], + compat: Required, +): OpenAI.Chat.Completions.ChatCompletionTool[] { + return tools.map((tool) => ({ + type: "function", + function: { + name: tool.name, + description: tool.description, + parameters: tool.parameters as any, // TypeBox already generates JSON Schema + // Only include strict if provider supports it. Some reject unknown fields. + ...(compat.supportsStrictMode !== false && { strict: false }), + }, + })); +} + +function mapStopReason(reason: ChatCompletionChunk.Choice["finish_reason"]): StopReason { + if (reason === null) return "stop"; + switch (reason) { + case "stop": + return "stop"; + case "length": + return "length"; + case "function_call": + case "tool_calls": + return "toolUse"; + case "content_filter": + return "error"; + default: { + const _exhaustive: never = reason; + throw new Error(`Unhandled stop reason: ${_exhaustive}`); + } + } +} + +/** + * Detect compatibility settings from provider and baseUrl for known providers. 
+ * Provider takes precedence over URL-based detection since it's explicitly configured. + * Returns a fully resolved OpenAICompletionsCompat object with all fields set. + */ +function detectCompat(model: Model<"openai-completions">): Required { + const provider = model.provider; + const baseUrl = model.baseUrl; + + const isZai = provider === "zai" || baseUrl.includes("api.z.ai"); + + const isNonStandard = + provider === "cerebras" || + baseUrl.includes("cerebras.ai") || + provider === "xai" || + baseUrl.includes("api.x.ai") || + baseUrl.includes("chutes.ai") || + baseUrl.includes("deepseek.com") || + isZai || + provider === "opencode" || + baseUrl.includes("opencode.ai"); + + const useMaxTokens = baseUrl.includes("chutes.ai"); + + const isGrok = provider === "xai" || baseUrl.includes("api.x.ai"); + const isGroq = provider === "groq" || baseUrl.includes("groq.com"); + + const reasoningEffortMap = + isGroq && model.id === "qwen/qwen3-32b" + ? { + minimal: "default", + low: "default", + medium: "default", + high: "default", + xhigh: "default", + } + : {}; + return { + supportsStore: !isNonStandard, + supportsDeveloperRole: !isNonStandard, + supportsReasoningEffort: !isGrok && !isZai, + reasoningEffortMap, + supportsUsageInStreaming: true, + maxTokensField: useMaxTokens ? "max_tokens" : "max_completion_tokens", + requiresToolResultName: false, + requiresAssistantAfterToolResult: false, + requiresThinkingAsText: false, + thinkingFormat: isZai ? "zai" : "openai", + openRouterRouting: {}, + vercelGatewayRouting: {}, + supportsStrictMode: true, + }; +} + +/** + * Get resolved compatibility settings for a model. + * Uses explicit model.compat if provided, otherwise auto-detects from provider/URL. + */ +function getCompat(model: Model<"openai-completions">): Required { + const detected = detectCompat(model); + if (!model.compat) return detected; + + return { + supportsStore: model.compat.supportsStore ?? 
detected.supportsStore, + supportsDeveloperRole: model.compat.supportsDeveloperRole ?? detected.supportsDeveloperRole, + supportsReasoningEffort: model.compat.supportsReasoningEffort ?? detected.supportsReasoningEffort, + reasoningEffortMap: model.compat.reasoningEffortMap ?? detected.reasoningEffortMap, + supportsUsageInStreaming: model.compat.supportsUsageInStreaming ?? detected.supportsUsageInStreaming, + maxTokensField: model.compat.maxTokensField ?? detected.maxTokensField, + requiresToolResultName: model.compat.requiresToolResultName ?? detected.requiresToolResultName, + requiresAssistantAfterToolResult: + model.compat.requiresAssistantAfterToolResult ?? detected.requiresAssistantAfterToolResult, + requiresThinkingAsText: model.compat.requiresThinkingAsText ?? detected.requiresThinkingAsText, + thinkingFormat: model.compat.thinkingFormat ?? detected.thinkingFormat, + openRouterRouting: model.compat.openRouterRouting ?? {}, + vercelGatewayRouting: model.compat.vercelGatewayRouting ?? detected.vercelGatewayRouting, + supportsStrictMode: model.compat.supportsStrictMode ?? 
detected.supportsStrictMode, + }; +} diff --git a/packages/pi-ai/src/providers/openai-responses-shared.ts b/packages/pi-ai/src/providers/openai-responses-shared.ts new file mode 100644 index 000000000..10ac5ee1b --- /dev/null +++ b/packages/pi-ai/src/providers/openai-responses-shared.ts @@ -0,0 +1,496 @@ +import type OpenAI from "openai"; +import type { + Tool as OpenAITool, + ResponseCreateParamsStreaming, + ResponseFunctionToolCall, + ResponseInput, + ResponseInputContent, + ResponseInputImage, + ResponseInputText, + ResponseOutputMessage, + ResponseReasoningItem, + ResponseStreamEvent, +} from "openai/resources/responses/responses.js"; +import { calculateCost } from "../models.js"; +import type { + Api, + AssistantMessage, + Context, + ImageContent, + Model, + StopReason, + TextContent, + TextSignatureV1, + ThinkingContent, + Tool, + ToolCall, + Usage, +} from "../types.js"; +import type { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { shortHash } from "../utils/hash.js"; +import { parseStreamingJson } from "../utils/json-parse.js"; +import { sanitizeSurrogates } from "../utils/sanitize-unicode.js"; +import { transformMessages } from "./transform-messages.js"; + +// ============================================================================= +// Utilities +// ============================================================================= + +function encodeTextSignatureV1(id: string, phase?: TextSignatureV1["phase"]): string { + const payload: TextSignatureV1 = { v: 1, id }; + if (phase) payload.phase = phase; + return JSON.stringify(payload); +} + +function parseTextSignature( + signature: string | undefined, +): { id: string; phase?: TextSignatureV1["phase"] } | undefined { + if (!signature) return undefined; + if (signature.startsWith("{")) { + try { + const parsed = JSON.parse(signature) as Partial; + if (parsed.v === 1 && typeof parsed.id === "string") { + if (parsed.phase === "commentary" || parsed.phase === "final_answer") { + 
return { id: parsed.id, phase: parsed.phase }; + } + return { id: parsed.id }; + } + } catch { + // Fall through to legacy plain-string handling. + } + } + return { id: signature }; +} + +export interface OpenAIResponsesStreamOptions { + serviceTier?: ResponseCreateParamsStreaming["service_tier"]; + applyServiceTierPricing?: ( + usage: Usage, + serviceTier: ResponseCreateParamsStreaming["service_tier"] | undefined, + ) => void; +} + +export interface ConvertResponsesMessagesOptions { + includeSystemPrompt?: boolean; +} + +export interface ConvertResponsesToolsOptions { + strict?: boolean | null; +} + +// ============================================================================= +// Message conversion +// ============================================================================= + +export function convertResponsesMessages( + model: Model, + context: Context, + allowedToolCallProviders: ReadonlySet, + options?: ConvertResponsesMessagesOptions, +): ResponseInput { + const messages: ResponseInput = []; + + const normalizeToolCallId = (id: string): string => { + if (!allowedToolCallProviders.has(model.provider)) return id; + if (!id.includes("|")) return id; + const [callId, itemId] = id.split("|"); + const sanitizedCallId = callId.replace(/[^a-zA-Z0-9_-]/g, "_"); + let sanitizedItemId = itemId.replace(/[^a-zA-Z0-9_-]/g, "_"); + // OpenAI Responses API requires item id to start with "fc" + if (!sanitizedItemId.startsWith("fc")) { + sanitizedItemId = `fc_${sanitizedItemId}`; + } + // Truncate to 64 chars and strip trailing underscores (OpenAI Codex rejects them) + let normalizedCallId = sanitizedCallId.length > 64 ? sanitizedCallId.slice(0, 64) : sanitizedCallId; + let normalizedItemId = sanitizedItemId.length > 64 ? 
sanitizedItemId.slice(0, 64) : sanitizedItemId; + normalizedCallId = normalizedCallId.replace(/_+$/, ""); + normalizedItemId = normalizedItemId.replace(/_+$/, ""); + return `${normalizedCallId}|${normalizedItemId}`; + }; + + const transformedMessages = transformMessages(context.messages, model, normalizeToolCallId); + + const includeSystemPrompt = options?.includeSystemPrompt ?? true; + if (includeSystemPrompt && context.systemPrompt) { + const role = model.reasoning ? "developer" : "system"; + messages.push({ + role, + content: sanitizeSurrogates(context.systemPrompt), + }); + } + + let msgIndex = 0; + for (const msg of transformedMessages) { + if (msg.role === "user") { + if (typeof msg.content === "string") { + messages.push({ + role: "user", + content: [{ type: "input_text", text: sanitizeSurrogates(msg.content) }], + }); + } else { + const content: ResponseInputContent[] = msg.content.map((item): ResponseInputContent => { + if (item.type === "text") { + return { + type: "input_text", + text: sanitizeSurrogates(item.text), + } satisfies ResponseInputText; + } + return { + type: "input_image", + detail: "auto", + image_url: `data:${item.mimeType};base64,${item.data}`, + } satisfies ResponseInputImage; + }); + const filteredContent = !model.input.includes("image") + ? 
content.filter((c) => c.type !== "input_image") + : content; + if (filteredContent.length === 0) continue; + messages.push({ + role: "user", + content: filteredContent, + }); + } + } else if (msg.role === "assistant") { + const output: ResponseInput = []; + const assistantMsg = msg as AssistantMessage; + const isDifferentModel = + assistantMsg.model !== model.id && + assistantMsg.provider === model.provider && + assistantMsg.api === model.api; + + for (const block of msg.content) { + if (block.type === "thinking") { + if (block.thinkingSignature) { + const reasoningItem = JSON.parse(block.thinkingSignature) as ResponseReasoningItem; + output.push(reasoningItem); + } + } else if (block.type === "text") { + const textBlock = block as TextContent; + const parsedSignature = parseTextSignature(textBlock.textSignature); + // OpenAI requires id to be max 64 characters + let msgId = parsedSignature?.id; + if (!msgId) { + msgId = `msg_${msgIndex}`; + } else if (msgId.length > 64) { + msgId = `msg_${shortHash(msgId)}`; + } + output.push({ + type: "message", + role: "assistant", + content: [{ type: "output_text", text: sanitizeSurrogates(textBlock.text), annotations: [] }], + status: "completed", + id: msgId, + phase: parsedSignature?.phase, + } satisfies ResponseOutputMessage); + } else if (block.type === "toolCall") { + const toolCall = block as ToolCall; + const [callId, itemIdRaw] = toolCall.id.split("|"); + let itemId: string | undefined = itemIdRaw; + + // For different-model messages, set id to undefined to avoid pairing validation. + // OpenAI tracks which fc_xxx IDs were paired with rs_xxx reasoning items. + // By omitting the id, we avoid triggering that validation (like cross-provider does). 
+ if (isDifferentModel && itemId?.startsWith("fc_")) { + itemId = undefined; + } + + output.push({ + type: "function_call", + id: itemId, + call_id: callId, + name: toolCall.name, + arguments: JSON.stringify(toolCall.arguments), + }); + } + } + if (output.length === 0) continue; + messages.push(...output); + } else if (msg.role === "toolResult") { + // Extract text and image content + const textResult = msg.content + .filter((c): c is TextContent => c.type === "text") + .map((c) => c.text) + .join("\n"); + const hasImages = msg.content.some((c): c is ImageContent => c.type === "image"); + + // Always send function_call_output with text (or placeholder if only images) + const hasText = textResult.length > 0; + const [callId] = msg.toolCallId.split("|"); + messages.push({ + type: "function_call_output", + call_id: callId, + output: sanitizeSurrogates(hasText ? textResult : "(see attached image)"), + }); + + // If there are images and model supports them, send a follow-up user message with images + if (hasImages && model.input.includes("image")) { + const contentParts: ResponseInputContent[] = []; + + // Add text prefix + contentParts.push({ + type: "input_text", + text: "Attached image(s) from tool result:", + } satisfies ResponseInputText); + + // Add images + for (const block of msg.content) { + if (block.type === "image") { + contentParts.push({ + type: "input_image", + detail: "auto", + image_url: `data:${block.mimeType};base64,${block.data}`, + } satisfies ResponseInputImage); + } + } + + messages.push({ + role: "user", + content: contentParts, + }); + } + } + msgIndex++; + } + + return messages; +} + +// ============================================================================= +// Tool conversion +// ============================================================================= + +export function convertResponsesTools(tools: Tool[], options?: ConvertResponsesToolsOptions): OpenAITool[] { + const strict = options?.strict === undefined ? 
false : options.strict; + return tools.map((tool) => ({ + type: "function", + name: tool.name, + description: tool.description, + parameters: tool.parameters as any, // TypeBox already generates JSON Schema + strict, + })); +} + +// ============================================================================= +// Stream processing +// ============================================================================= + +export async function processResponsesStream( + openaiStream: AsyncIterable, + output: AssistantMessage, + stream: AssistantMessageEventStream, + model: Model, + options?: OpenAIResponsesStreamOptions, +): Promise { + let currentItem: ResponseReasoningItem | ResponseOutputMessage | ResponseFunctionToolCall | null = null; + let currentBlock: ThinkingContent | TextContent | (ToolCall & { partialJson: string }) | null = null; + const blocks = output.content; + const blockIndex = () => blocks.length - 1; + + for await (const event of openaiStream) { + if (event.type === "response.output_item.added") { + const item = event.item; + if (item.type === "reasoning") { + currentItem = item; + currentBlock = { type: "thinking", thinking: "" }; + output.content.push(currentBlock); + stream.push({ type: "thinking_start", contentIndex: blockIndex(), partial: output }); + } else if (item.type === "message") { + currentItem = item; + currentBlock = { type: "text", text: "" }; + output.content.push(currentBlock); + stream.push({ type: "text_start", contentIndex: blockIndex(), partial: output }); + } else if (item.type === "function_call") { + currentItem = item; + currentBlock = { + type: "toolCall", + id: `${item.call_id}|${item.id}`, + name: item.name, + arguments: {}, + partialJson: item.arguments || "", + }; + output.content.push(currentBlock); + stream.push({ type: "toolcall_start", contentIndex: blockIndex(), partial: output }); + } + } else if (event.type === "response.reasoning_summary_part.added") { + if (currentItem && currentItem.type === "reasoning") { + 
currentItem.summary = currentItem.summary || []; + currentItem.summary.push(event.part); + } + } else if (event.type === "response.reasoning_summary_text.delta") { + if (currentItem?.type === "reasoning" && currentBlock?.type === "thinking") { + currentItem.summary = currentItem.summary || []; + const lastPart = currentItem.summary[currentItem.summary.length - 1]; + if (lastPart) { + currentBlock.thinking += event.delta; + lastPart.text += event.delta; + stream.push({ + type: "thinking_delta", + contentIndex: blockIndex(), + delta: event.delta, + partial: output, + }); + } + } + } else if (event.type === "response.reasoning_summary_part.done") { + if (currentItem?.type === "reasoning" && currentBlock?.type === "thinking") { + currentItem.summary = currentItem.summary || []; + const lastPart = currentItem.summary[currentItem.summary.length - 1]; + if (lastPart) { + currentBlock.thinking += "\n\n"; + lastPart.text += "\n\n"; + stream.push({ + type: "thinking_delta", + contentIndex: blockIndex(), + delta: "\n\n", + partial: output, + }); + } + } + } else if (event.type === "response.content_part.added") { + if (currentItem?.type === "message") { + currentItem.content = currentItem.content || []; + // Filter out ReasoningText, only accept output_text and refusal + if (event.part.type === "output_text" || event.part.type === "refusal") { + currentItem.content.push(event.part); + } + } + } else if (event.type === "response.output_text.delta") { + if (currentItem?.type === "message" && currentBlock?.type === "text") { + if (!currentItem.content || currentItem.content.length === 0) { + continue; + } + const lastPart = currentItem.content[currentItem.content.length - 1]; + if (lastPart?.type === "output_text") { + currentBlock.text += event.delta; + lastPart.text += event.delta; + stream.push({ + type: "text_delta", + contentIndex: blockIndex(), + delta: event.delta, + partial: output, + }); + } + } + } else if (event.type === "response.refusal.delta") { + if 
(currentItem?.type === "message" && currentBlock?.type === "text") { + if (!currentItem.content || currentItem.content.length === 0) { + continue; + } + const lastPart = currentItem.content[currentItem.content.length - 1]; + if (lastPart?.type === "refusal") { + currentBlock.text += event.delta; + lastPart.refusal += event.delta; + stream.push({ + type: "text_delta", + contentIndex: blockIndex(), + delta: event.delta, + partial: output, + }); + } + } + } else if (event.type === "response.function_call_arguments.delta") { + if (currentItem?.type === "function_call" && currentBlock?.type === "toolCall") { + currentBlock.partialJson += event.delta; + currentBlock.arguments = parseStreamingJson(currentBlock.partialJson); + stream.push({ + type: "toolcall_delta", + contentIndex: blockIndex(), + delta: event.delta, + partial: output, + }); + } + } else if (event.type === "response.function_call_arguments.done") { + if (currentItem?.type === "function_call" && currentBlock?.type === "toolCall") { + currentBlock.partialJson = event.arguments; + currentBlock.arguments = parseStreamingJson(currentBlock.partialJson); + } + } else if (event.type === "response.output_item.done") { + const item = event.item; + + if (item.type === "reasoning" && currentBlock?.type === "thinking") { + currentBlock.thinking = item.summary?.map((s) => s.text).join("\n\n") || ""; + currentBlock.thinkingSignature = JSON.stringify(item); + stream.push({ + type: "thinking_end", + contentIndex: blockIndex(), + content: currentBlock.thinking, + partial: output, + }); + currentBlock = null; + } else if (item.type === "message" && currentBlock?.type === "text") { + currentBlock.text = item.content.map((c) => (c.type === "output_text" ? c.text : c.refusal)).join(""); + currentBlock.textSignature = encodeTextSignatureV1(item.id, item.phase ?? 
undefined); + stream.push({ + type: "text_end", + contentIndex: blockIndex(), + content: currentBlock.text, + partial: output, + }); + currentBlock = null; + } else if (item.type === "function_call") { + const args = + currentBlock?.type === "toolCall" && currentBlock.partialJson + ? parseStreamingJson(currentBlock.partialJson) + : parseStreamingJson(item.arguments || "{}"); + const toolCall: ToolCall = { + type: "toolCall", + id: `${item.call_id}|${item.id}`, + name: item.name, + arguments: args, + }; + + currentBlock = null; + stream.push({ type: "toolcall_end", contentIndex: blockIndex(), toolCall, partial: output }); + } + } else if (event.type === "response.completed") { + const response = event.response; + if (response?.usage) { + const cachedTokens = response.usage.input_tokens_details?.cached_tokens || 0; + output.usage = { + // OpenAI includes cached tokens in input_tokens, so subtract to get non-cached input + input: (response.usage.input_tokens || 0) - cachedTokens, + output: response.usage.output_tokens || 0, + cacheRead: cachedTokens, + cacheWrite: 0, + totalTokens: response.usage.total_tokens || 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }; + } + calculateCost(model, output.usage); + if (options?.applyServiceTierPricing) { + const serviceTier = response?.service_tier ?? 
options.serviceTier; + options.applyServiceTierPricing(output.usage, serviceTier); + } + // Map status to stop reason + output.stopReason = mapStopReason(response?.status); + if (output.content.some((b) => b.type === "toolCall") && output.stopReason === "stop") { + output.stopReason = "toolUse"; + } + } else if (event.type === "error") { + throw new Error(`Error Code ${event.code}: ${event.message}` || "Unknown error"); + } else if (event.type === "response.failed") { + throw new Error("Unknown error"); + } + } +} + +function mapStopReason(status: OpenAI.Responses.ResponseStatus | undefined): StopReason { + if (!status) return "stop"; + switch (status) { + case "completed": + return "stop"; + case "incomplete": + return "length"; + case "failed": + case "cancelled": + return "error"; + // These two are wonky ... + case "in_progress": + case "queued": + return "stop"; + default: { + const _exhaustive: never = status; + throw new Error(`Unhandled stop reason: ${_exhaustive}`); + } + } +} diff --git a/packages/pi-ai/src/providers/openai-responses.ts b/packages/pi-ai/src/providers/openai-responses.ts new file mode 100644 index 000000000..a2e56404e --- /dev/null +++ b/packages/pi-ai/src/providers/openai-responses.ts @@ -0,0 +1,262 @@ +import OpenAI from "openai"; +import type { ResponseCreateParamsStreaming } from "openai/resources/responses/responses.js"; +import { getEnvApiKey } from "../env-api-keys.js"; +import { supportsXhigh } from "../models.js"; +import type { + Api, + AssistantMessage, + CacheRetention, + Context, + Model, + SimpleStreamOptions, + StreamFunction, + StreamOptions, + Usage, +} from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import { buildCopilotDynamicHeaders, hasCopilotVisionInput } from "./github-copilot-headers.js"; +import { convertResponsesMessages, convertResponsesTools, processResponsesStream } from "./openai-responses-shared.js"; +import { buildBaseOptions, clampReasoning } from 
"./simple-options.js"; + +const OPENAI_TOOL_CALL_PROVIDERS = new Set(["openai", "openai-codex", "opencode"]); + +/** + * Resolve cache retention preference. + * Defaults to "short" and uses PI_CACHE_RETENTION for backward compatibility. + */ +function resolveCacheRetention(cacheRetention?: CacheRetention): CacheRetention { + if (cacheRetention) { + return cacheRetention; + } + if (typeof process !== "undefined" && process.env.PI_CACHE_RETENTION === "long") { + return "long"; + } + return "short"; +} + +/** + * Get prompt cache retention based on cacheRetention and base URL. + * Only applies to direct OpenAI API calls (api.openai.com). + */ +function getPromptCacheRetention(baseUrl: string, cacheRetention: CacheRetention): "24h" | undefined { + if (cacheRetention !== "long") { + return undefined; + } + if (baseUrl.includes("api.openai.com")) { + return "24h"; + } + return undefined; +} + +// OpenAI Responses-specific options +export interface OpenAIResponsesOptions extends StreamOptions { + reasoningEffort?: "minimal" | "low" | "medium" | "high" | "xhigh"; + reasoningSummary?: "auto" | "detailed" | "concise" | null; + serviceTier?: ResponseCreateParamsStreaming["service_tier"]; +} + +/** + * Generate function for OpenAI Responses API + */ +export const streamOpenAIResponses: StreamFunction<"openai-responses", OpenAIResponsesOptions> = ( + model: Model<"openai-responses">, + context: Context, + options?: OpenAIResponsesOptions, +): AssistantMessageEventStream => { + const stream = new AssistantMessageEventStream(); + + // Start async processing + (async () => { + const output: AssistantMessage = { + role: "assistant", + content: [], + api: model.api as Api, + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "stop", + timestamp: Date.now(), + }; + + try { + // Create OpenAI client + const apiKey = 
options?.apiKey || getEnvApiKey(model.provider) || ""; + const client = createClient(model, context, apiKey, options?.headers); + let params = buildParams(model, context, options); + const nextParams = await options?.onPayload?.(params, model); + if (nextParams !== undefined) { + params = nextParams as ResponseCreateParamsStreaming; + } + const openaiStream = await client.responses.create( + params, + options?.signal ? { signal: options.signal } : undefined, + ); + stream.push({ type: "start", partial: output }); + + await processResponsesStream(openaiStream, output, stream, model, { + serviceTier: options?.serviceTier, + applyServiceTierPricing, + }); + + if (options?.signal?.aborted) { + throw new Error("Request was aborted"); + } + + if (output.stopReason === "aborted" || output.stopReason === "error") { + throw new Error("An unknown error occurred"); + } + + stream.push({ type: "done", reason: output.stopReason, message: output }); + stream.end(); + } catch (error) { + for (const block of output.content) delete (block as { index?: number }).index; + output.stopReason = options?.signal?.aborted ? "aborted" : "error"; + output.errorMessage = error instanceof Error ? error.message : JSON.stringify(error); + stream.push({ type: "error", reason: output.stopReason, error: output }); + stream.end(); + } + })(); + + return stream; +}; + +export const streamSimpleOpenAIResponses: StreamFunction<"openai-responses", SimpleStreamOptions> = ( + model: Model<"openai-responses">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream => { + const apiKey = options?.apiKey || getEnvApiKey(model.provider); + if (!apiKey) { + throw new Error(`No API key for provider: ${model.provider}`); + } + + const base = buildBaseOptions(model, options, apiKey); + const reasoningEffort = supportsXhigh(model) ? 
options?.reasoning : clampReasoning(options?.reasoning); + + return streamOpenAIResponses(model, context, { + ...base, + reasoningEffort, + } satisfies OpenAIResponsesOptions); +}; + +function createClient( + model: Model<"openai-responses">, + context: Context, + apiKey?: string, + optionsHeaders?: Record, +) { + if (!apiKey) { + if (!process.env.OPENAI_API_KEY) { + throw new Error( + "OpenAI API key is required. Set OPENAI_API_KEY environment variable or pass it as an argument.", + ); + } + apiKey = process.env.OPENAI_API_KEY; + } + + const headers = { ...model.headers }; + if (model.provider === "github-copilot") { + const hasImages = hasCopilotVisionInput(context.messages); + const copilotHeaders = buildCopilotDynamicHeaders({ + messages: context.messages, + hasImages, + }); + Object.assign(headers, copilotHeaders); + } + + // Merge options headers last so they can override defaults + if (optionsHeaders) { + Object.assign(headers, optionsHeaders); + } + + return new OpenAI({ + apiKey, + baseURL: model.baseUrl, + dangerouslyAllowBrowser: true, + defaultHeaders: headers, + }); +} + +function buildParams(model: Model<"openai-responses">, context: Context, options?: OpenAIResponsesOptions) { + const messages = convertResponsesMessages(model, context, OPENAI_TOOL_CALL_PROVIDERS); + + const cacheRetention = resolveCacheRetention(options?.cacheRetention); + const params: ResponseCreateParamsStreaming = { + model: model.id, + input: messages, + stream: true, + prompt_cache_key: cacheRetention === "none" ? 
undefined : options?.sessionId, + prompt_cache_retention: getPromptCacheRetention(model.baseUrl, cacheRetention), + store: false, + }; + + if (options?.maxTokens) { + params.max_output_tokens = options?.maxTokens; + } + + if (options?.temperature !== undefined) { + params.temperature = options?.temperature; + } + + if (options?.serviceTier !== undefined) { + params.service_tier = options.serviceTier; + } + + if (context.tools) { + params.tools = convertResponsesTools(context.tools); + } + + if (model.reasoning) { + if (options?.reasoningEffort || options?.reasoningSummary) { + params.reasoning = { + effort: options?.reasoningEffort || "medium", + summary: options?.reasoningSummary || "auto", + }; + params.include = ["reasoning.encrypted_content"]; + } else { + if (model.name.startsWith("gpt-5")) { + // Jesus Christ, see https://community.openai.com/t/need-reasoning-false-option-for-gpt-5/1351588/7 + messages.push({ + role: "developer", + content: [ + { + type: "input_text", + text: "# Juice: 0 !important", + }, + ], + }); + } + } + } + + return params; +} + +function getServiceTierCostMultiplier(serviceTier: ResponseCreateParamsStreaming["service_tier"] | undefined): number { + switch (serviceTier) { + case "flex": + return 0.5; + case "priority": + return 2; + default: + return 1; + } +} + +function applyServiceTierPricing(usage: Usage, serviceTier: ResponseCreateParamsStreaming["service_tier"] | undefined) { + const multiplier = getServiceTierCostMultiplier(serviceTier); + if (multiplier === 1) return; + + usage.cost.input *= multiplier; + usage.cost.output *= multiplier; + usage.cost.cacheRead *= multiplier; + usage.cost.cacheWrite *= multiplier; + usage.cost.total = usage.cost.input + usage.cost.output + usage.cost.cacheRead + usage.cost.cacheWrite; +} diff --git a/packages/pi-ai/src/providers/register-builtins.ts b/packages/pi-ai/src/providers/register-builtins.ts new file mode 100644 index 000000000..e75f84de3 --- /dev/null +++ 
b/packages/pi-ai/src/providers/register-builtins.ts @@ -0,0 +1,186 @@ +import { clearApiProviders, registerApiProvider } from "../api-registry.js"; +import type { AssistantMessage, AssistantMessageEvent, Context, Model, SimpleStreamOptions } from "../types.js"; +import { AssistantMessageEventStream } from "../utils/event-stream.js"; +import type { BedrockOptions } from "./amazon-bedrock.js"; +import { streamAnthropic, streamSimpleAnthropic } from "./anthropic.js"; +import { streamAzureOpenAIResponses, streamSimpleAzureOpenAIResponses } from "./azure-openai-responses.js"; +import { streamGoogle, streamSimpleGoogle } from "./google.js"; +import { streamGoogleGeminiCli, streamSimpleGoogleGeminiCli } from "./google-gemini-cli.js"; +import { streamGoogleVertex, streamSimpleGoogleVertex } from "./google-vertex.js"; +import { streamMistral, streamSimpleMistral } from "./mistral.js"; +import { streamOpenAICodexResponses, streamSimpleOpenAICodexResponses } from "./openai-codex-responses.js"; +import { streamOpenAICompletions, streamSimpleOpenAICompletions } from "./openai-completions.js"; +import { streamOpenAIResponses, streamSimpleOpenAIResponses } from "./openai-responses.js"; + +interface BedrockProviderModule { + streamBedrock: ( + model: Model<"bedrock-converse-stream">, + context: Context, + options?: BedrockOptions, + ) => AsyncIterable; + streamSimpleBedrock: ( + model: Model<"bedrock-converse-stream">, + context: Context, + options?: SimpleStreamOptions, + ) => AsyncIterable; +} + +type DynamicImport = (specifier: string) => Promise; + +const dynamicImport: DynamicImport = (specifier) => import(specifier); +const BEDROCK_PROVIDER_SPECIFIER = "./amazon-" + "bedrock.js"; + +let bedrockProviderModuleOverride: BedrockProviderModule | undefined; + +export function setBedrockProviderModule(module: BedrockProviderModule): void { + bedrockProviderModuleOverride = module; +} + +async function loadBedrockProviderModule(): Promise { + if (bedrockProviderModuleOverride) { + 
return bedrockProviderModuleOverride; + } + const module = await dynamicImport(BEDROCK_PROVIDER_SPECIFIER); + return module as BedrockProviderModule; +} + +function forwardStream(target: AssistantMessageEventStream, source: AsyncIterable): void { + (async () => { + for await (const event of source) { + target.push(event); + } + target.end(); + })(); +} + +function createLazyLoadErrorMessage(model: Model<"bedrock-converse-stream">, error: unknown): AssistantMessage { + return { + role: "assistant", + content: [], + api: "bedrock-converse-stream", + provider: model.provider, + model: model.id, + usage: { + input: 0, + output: 0, + cacheRead: 0, + cacheWrite: 0, + totalTokens: 0, + cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, total: 0 }, + }, + stopReason: "error", + errorMessage: error instanceof Error ? error.message : String(error), + timestamp: Date.now(), + }; +} + +function streamBedrockLazy( + model: Model<"bedrock-converse-stream">, + context: Context, + options?: BedrockOptions, +): AssistantMessageEventStream { + const outer = new AssistantMessageEventStream(); + + loadBedrockProviderModule() + .then((module) => { + const inner = module.streamBedrock(model, context, options); + forwardStream(outer, inner); + }) + .catch((error) => { + const message = createLazyLoadErrorMessage(model, error); + outer.push({ type: "error", reason: "error", error: message }); + outer.end(message); + }); + + return outer; +} + +function streamSimpleBedrockLazy( + model: Model<"bedrock-converse-stream">, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream { + const outer = new AssistantMessageEventStream(); + + loadBedrockProviderModule() + .then((module) => { + const inner = module.streamSimpleBedrock(model, context, options); + forwardStream(outer, inner); + }) + .catch((error) => { + const message = createLazyLoadErrorMessage(model, error); + outer.push({ type: "error", reason: "error", error: message }); + outer.end(message); + 
}); + + return outer; +} + +export function registerBuiltInApiProviders(): void { + registerApiProvider({ + api: "anthropic-messages", + stream: streamAnthropic, + streamSimple: streamSimpleAnthropic, + }); + + registerApiProvider({ + api: "openai-completions", + stream: streamOpenAICompletions, + streamSimple: streamSimpleOpenAICompletions, + }); + + registerApiProvider({ + api: "mistral-conversations", + stream: streamMistral, + streamSimple: streamSimpleMistral, + }); + + registerApiProvider({ + api: "openai-responses", + stream: streamOpenAIResponses, + streamSimple: streamSimpleOpenAIResponses, + }); + + registerApiProvider({ + api: "azure-openai-responses", + stream: streamAzureOpenAIResponses, + streamSimple: streamSimpleAzureOpenAIResponses, + }); + + registerApiProvider({ + api: "openai-codex-responses", + stream: streamOpenAICodexResponses, + streamSimple: streamSimpleOpenAICodexResponses, + }); + + registerApiProvider({ + api: "google-generative-ai", + stream: streamGoogle, + streamSimple: streamSimpleGoogle, + }); + + registerApiProvider({ + api: "google-gemini-cli", + stream: streamGoogleGeminiCli, + streamSimple: streamSimpleGoogleGeminiCli, + }); + + registerApiProvider({ + api: "google-vertex", + stream: streamGoogleVertex, + streamSimple: streamSimpleGoogleVertex, + }); + + registerApiProvider({ + api: "bedrock-converse-stream", + stream: streamBedrockLazy, + streamSimple: streamSimpleBedrockLazy, + }); +} + +export function resetApiProviders(): void { + clearApiProviders(); + registerBuiltInApiProviders(); +} + +registerBuiltInApiProviders(); diff --git a/packages/pi-ai/src/providers/simple-options.ts b/packages/pi-ai/src/providers/simple-options.ts new file mode 100644 index 000000000..71c15847b --- /dev/null +++ b/packages/pi-ai/src/providers/simple-options.ts @@ -0,0 +1,46 @@ +import type { Api, Model, SimpleStreamOptions, StreamOptions, ThinkingBudgets, ThinkingLevel } from "../types.js"; + +export function buildBaseOptions(model: Model, 
options?: SimpleStreamOptions, apiKey?: string): StreamOptions { + return { + temperature: options?.temperature, + maxTokens: options?.maxTokens || Math.min(model.maxTokens, 32000), + signal: options?.signal, + apiKey: apiKey || options?.apiKey, + cacheRetention: options?.cacheRetention, + sessionId: options?.sessionId, + headers: options?.headers, + onPayload: options?.onPayload, + maxRetryDelayMs: options?.maxRetryDelayMs, + metadata: options?.metadata, + }; +} + +export function clampReasoning(effort: ThinkingLevel | undefined): Exclude | undefined { + return effort === "xhigh" ? "high" : effort; +} + +export function adjustMaxTokensForThinking( + baseMaxTokens: number, + modelMaxTokens: number, + reasoningLevel: ThinkingLevel, + customBudgets?: ThinkingBudgets, +): { maxTokens: number; thinkingBudget: number } { + const defaultBudgets: ThinkingBudgets = { + minimal: 1024, + low: 2048, + medium: 8192, + high: 16384, + }; + const budgets = { ...defaultBudgets, ...customBudgets }; + + const minOutputTokens = 1024; + const level = clampReasoning(reasoningLevel)!; + let thinkingBudget = budgets[level]!; + const maxTokens = Math.min(baseMaxTokens + thinkingBudget, modelMaxTokens); + + if (maxTokens <= thinkingBudget) { + thinkingBudget = Math.max(0, maxTokens - minOutputTokens); + } + + return { maxTokens, thinkingBudget }; +} diff --git a/packages/pi-ai/src/providers/transform-messages.ts b/packages/pi-ai/src/providers/transform-messages.ts new file mode 100644 index 000000000..f61f08037 --- /dev/null +++ b/packages/pi-ai/src/providers/transform-messages.ts @@ -0,0 +1,172 @@ +import type { Api, AssistantMessage, Message, Model, ToolCall, ToolResultMessage } from "../types.js"; + +/** + * Normalize tool call ID for cross-provider compatibility. + * OpenAI Responses API generates IDs that are 450+ chars with special characters like `|`. + * Anthropic APIs require IDs matching ^[a-zA-Z0-9_-]+$ (max 64 chars). 
+ */ +export function transformMessages( + messages: Message[], + model: Model, + normalizeToolCallId?: (id: string, model: Model, source: AssistantMessage) => string, +): Message[] { + // Build a map of original tool call IDs to normalized IDs + const toolCallIdMap = new Map(); + + // First pass: transform messages (thinking blocks, tool call ID normalization) + const transformed = messages.map((msg) => { + // User messages pass through unchanged + if (msg.role === "user") { + return msg; + } + + // Handle toolResult messages - normalize toolCallId if we have a mapping + if (msg.role === "toolResult") { + const normalizedId = toolCallIdMap.get(msg.toolCallId); + if (normalizedId && normalizedId !== msg.toolCallId) { + return { ...msg, toolCallId: normalizedId }; + } + return msg; + } + + // Assistant messages need transformation check + if (msg.role === "assistant") { + const assistantMsg = msg as AssistantMessage; + const isSameModel = + assistantMsg.provider === model.provider && + assistantMsg.api === model.api && + assistantMsg.model === model.id; + + const transformedContent = assistantMsg.content.flatMap((block) => { + if (block.type === "thinking") { + // Redacted thinking is opaque encrypted content, only valid for the same model. + // Drop it for cross-model to avoid API errors. + if (block.redacted) { + return isSameModel ? 
block : []; + } + // For same model: keep thinking blocks with signatures (needed for replay) + // even if the thinking text is empty (OpenAI encrypted reasoning) + if (isSameModel && block.thinkingSignature) return block; + // Skip empty thinking blocks, convert others to plain text + if (!block.thinking || block.thinking.trim() === "") return []; + if (isSameModel) return block; + return { + type: "text" as const, + text: block.thinking, + }; + } + + if (block.type === "text") { + if (isSameModel) return block; + return { + type: "text" as const, + text: block.text, + }; + } + + if (block.type === "toolCall") { + const toolCall = block as ToolCall; + let normalizedToolCall: ToolCall = toolCall; + + if (!isSameModel && toolCall.thoughtSignature) { + normalizedToolCall = { ...toolCall }; + delete (normalizedToolCall as { thoughtSignature?: string }).thoughtSignature; + } + + if (!isSameModel && normalizeToolCallId) { + const normalizedId = normalizeToolCallId(toolCall.id, model, assistantMsg); + if (normalizedId !== toolCall.id) { + toolCallIdMap.set(toolCall.id, normalizedId); + normalizedToolCall = { ...normalizedToolCall, id: normalizedId }; + } + } + + return normalizedToolCall; + } + + return block; + }); + + return { + ...assistantMsg, + content: transformedContent, + }; + } + return msg; + }); + + // Second pass: insert synthetic empty tool results for orphaned tool calls + // This preserves thinking signatures and satisfies API requirements + const result: Message[] = []; + let pendingToolCalls: ToolCall[] = []; + let existingToolResultIds = new Set(); + + for (let i = 0; i < transformed.length; i++) { + const msg = transformed[i]; + + if (msg.role === "assistant") { + // If we have pending orphaned tool calls from a previous assistant, insert synthetic results now + if (pendingToolCalls.length > 0) { + for (const tc of pendingToolCalls) { + if (!existingToolResultIds.has(tc.id)) { + result.push({ + role: "toolResult", + toolCallId: tc.id, + toolName: 
tc.name, + content: [{ type: "text", text: "No result provided" }], + isError: true, + timestamp: Date.now(), + } as ToolResultMessage); + } + } + pendingToolCalls = []; + existingToolResultIds = new Set(); + } + + // Skip errored/aborted assistant messages entirely. + // These are incomplete turns that shouldn't be replayed: + // - May have partial content (reasoning without message, incomplete tool calls) + // - Replaying them can cause API errors (e.g., OpenAI "reasoning without following item") + // - The model should retry from the last valid state + const assistantMsg = msg as AssistantMessage; + if (assistantMsg.stopReason === "error" || assistantMsg.stopReason === "aborted") { + continue; + } + + // Track tool calls from this assistant message + const toolCalls = assistantMsg.content.filter((b) => b.type === "toolCall") as ToolCall[]; + if (toolCalls.length > 0) { + pendingToolCalls = toolCalls; + existingToolResultIds = new Set(); + } + + result.push(msg); + } else if (msg.role === "toolResult") { + existingToolResultIds.add(msg.toolCallId); + result.push(msg); + } else if (msg.role === "user") { + // User message interrupts tool flow - insert synthetic results for orphaned calls + if (pendingToolCalls.length > 0) { + for (const tc of pendingToolCalls) { + if (!existingToolResultIds.has(tc.id)) { + result.push({ + role: "toolResult", + toolCallId: tc.id, + toolName: tc.name, + content: [{ type: "text", text: "No result provided" }], + isError: true, + timestamp: Date.now(), + } as ToolResultMessage); + } + } + pendingToolCalls = []; + existingToolResultIds = new Set(); + } + result.push(msg); + } else { + result.push(msg); + } + } + + return result; +} diff --git a/packages/pi-ai/src/stream.ts b/packages/pi-ai/src/stream.ts new file mode 100644 index 000000000..e8a2e50eb --- /dev/null +++ b/packages/pi-ai/src/stream.ts @@ -0,0 +1,59 @@ +import "./providers/register-builtins.js"; + +import { getApiProvider } from "./api-registry.js"; +import type { + Api, + 
AssistantMessage, + AssistantMessageEventStream, + Context, + Model, + ProviderStreamOptions, + SimpleStreamOptions, + StreamOptions, +} from "./types.js"; + +export { getEnvApiKey } from "./env-api-keys.js"; + +function resolveApiProvider(api: Api) { + const provider = getApiProvider(api); + if (!provider) { + throw new Error(`No API provider registered for api: ${api}`); + } + return provider; +} + +export function stream( + model: Model, + context: Context, + options?: ProviderStreamOptions, +): AssistantMessageEventStream { + const provider = resolveApiProvider(model.api); + return provider.stream(model, context, options as StreamOptions); +} + +export async function complete( + model: Model, + context: Context, + options?: ProviderStreamOptions, +): Promise { + const s = stream(model, context, options); + return s.result(); +} + +export function streamSimple( + model: Model, + context: Context, + options?: SimpleStreamOptions, +): AssistantMessageEventStream { + const provider = resolveApiProvider(model.api); + return provider.streamSimple(model, context, options); +} + +export async function completeSimple( + model: Model, + context: Context, + options?: SimpleStreamOptions, +): Promise { + const s = streamSimple(model, context, options); + return s.result(); +} diff --git a/packages/pi-ai/src/types.ts b/packages/pi-ai/src/types.ts new file mode 100644 index 000000000..cc4a4309b --- /dev/null +++ b/packages/pi-ai/src/types.ts @@ -0,0 +1,321 @@ +import type { AssistantMessageEventStream } from "./utils/event-stream.js"; + +export type { AssistantMessageEventStream } from "./utils/event-stream.js"; + +export type KnownApi = + | "openai-completions" + | "mistral-conversations" + | "openai-responses" + | "azure-openai-responses" + | "openai-codex-responses" + | "anthropic-messages" + | "bedrock-converse-stream" + | "google-generative-ai" + | "google-gemini-cli" + | "google-vertex"; + +export type Api = KnownApi | (string & {}); + +export type KnownProvider = + | 
"amazon-bedrock" + | "anthropic" + | "google" + | "google-gemini-cli" + | "google-antigravity" + | "google-vertex" + | "openai" + | "azure-openai-responses" + | "openai-codex" + | "github-copilot" + | "xai" + | "groq" + | "cerebras" + | "openrouter" + | "vercel-ai-gateway" + | "zai" + | "mistral" + | "minimax" + | "minimax-cn" + | "huggingface" + | "opencode" + | "opencode-go" + | "kimi-coding"; +export type Provider = KnownProvider | string; + +export type ThinkingLevel = "minimal" | "low" | "medium" | "high" | "xhigh"; + +/** Token budgets for each thinking level (token-based providers only) */ +export interface ThinkingBudgets { + minimal?: number; + low?: number; + medium?: number; + high?: number; +} + +// Base options all providers share +export type CacheRetention = "none" | "short" | "long"; + +export type Transport = "sse" | "websocket" | "auto"; + +export interface StreamOptions { + temperature?: number; + maxTokens?: number; + signal?: AbortSignal; + apiKey?: string; + /** + * Preferred transport for providers that support multiple transports. + * Providers that do not support this option ignore it. + */ + transport?: Transport; + /** + * Prompt cache retention preference. Providers map this to their supported values. + * Default: "short". + */ + cacheRetention?: CacheRetention; + /** + * Optional session identifier for providers that support session-based caching. + * Providers can use this to enable prompt caching, request routing, or other + * session-aware features. Ignored by providers that don't support it. + */ + sessionId?: string; + /** + * Optional callback for inspecting or replacing provider payloads before sending. + * Return undefined to keep the payload unchanged. + */ + onPayload?: (payload: unknown, model: Model) => unknown | undefined | Promise; + /** + * Optional custom HTTP headers to include in API requests. + * Merged with provider defaults; can override default headers. 
+ * Not supported by all providers (e.g., AWS Bedrock uses SDK auth). + */ + headers?: Record; + /** + * Maximum delay in milliseconds to wait for a retry when the server requests a long wait. + * If the server's requested delay exceeds this value, the request fails immediately + * with an error containing the requested delay, allowing higher-level retry logic + * to handle it with user visibility. + * Default: 60000 (60 seconds). Set to 0 to disable the cap. + */ + maxRetryDelayMs?: number; + /** + * Optional metadata to include in API requests. + * Providers extract the fields they understand and ignore the rest. + * For example, Anthropic uses `user_id` for abuse tracking and rate limiting. + */ + metadata?: Record; +} + +export type ProviderStreamOptions = StreamOptions & Record; + +// Unified options with reasoning passed to streamSimple() and completeSimple() +export interface SimpleStreamOptions extends StreamOptions { + reasoning?: ThinkingLevel; + /** Custom token budgets for thinking levels (token-based providers only) */ + thinkingBudgets?: ThinkingBudgets; +} + +// Generic StreamFunction with typed options +export type StreamFunction = ( + model: Model, + context: Context, + options?: TOptions, +) => AssistantMessageEventStream; + +export interface TextSignatureV1 { + v: 1; + id: string; + phase?: "commentary" | "final_answer"; +} + +export interface TextContent { + type: "text"; + text: string; + textSignature?: string; // e.g., for OpenAI responses, message metadata (legacy id string or TextSignatureV1 JSON) +} + +export interface ThinkingContent { + type: "thinking"; + thinking: string; + thinkingSignature?: string; // e.g., for OpenAI responses, the reasoning item ID + /** When true, the thinking content was redacted by safety filters. The opaque + * encrypted payload is stored in `thinkingSignature` so it can be passed back + * to the API for multi-turn continuity. 
*/ + redacted?: boolean; +} + +export interface ImageContent { + type: "image"; + data: string; // base64 encoded image data + mimeType: string; // e.g., "image/jpeg", "image/png" +} + +export interface ToolCall { + type: "toolCall"; + id: string; + name: string; + arguments: Record; + thoughtSignature?: string; // Google-specific: opaque signature for reusing thought context +} + +export interface Usage { + input: number; + output: number; + cacheRead: number; + cacheWrite: number; + totalTokens: number; + cost: { + input: number; + output: number; + cacheRead: number; + cacheWrite: number; + total: number; + }; +} + +export type StopReason = "stop" | "length" | "toolUse" | "error" | "aborted"; + +export interface UserMessage { + role: "user"; + content: string | (TextContent | ImageContent)[]; + timestamp: number; // Unix timestamp in milliseconds +} + +export interface AssistantMessage { + role: "assistant"; + content: (TextContent | ThinkingContent | ToolCall)[]; + api: Api; + provider: Provider; + model: string; + usage: Usage; + stopReason: StopReason; + errorMessage?: string; + timestamp: number; // Unix timestamp in milliseconds +} + +export interface ToolResultMessage { + role: "toolResult"; + toolCallId: string; + toolName: string; + content: (TextContent | ImageContent)[]; // Supports text and images + details?: TDetails; + isError: boolean; + timestamp: number; // Unix timestamp in milliseconds +} + +export type Message = UserMessage | AssistantMessage | ToolResultMessage; + +import type { TSchema } from "@sinclair/typebox"; + +export interface Tool { + name: string; + description: string; + parameters: TParameters; +} + +export interface Context { + systemPrompt?: string; + messages: Message[]; + tools?: Tool[]; +} + +export type AssistantMessageEvent = + | { type: "start"; partial: AssistantMessage } + | { type: "text_start"; contentIndex: number; partial: AssistantMessage } + | { type: "text_delta"; contentIndex: number; delta: string; partial: 
AssistantMessage } + | { type: "text_end"; contentIndex: number; content: string; partial: AssistantMessage } + | { type: "thinking_start"; contentIndex: number; partial: AssistantMessage } + | { type: "thinking_delta"; contentIndex: number; delta: string; partial: AssistantMessage } + | { type: "thinking_end"; contentIndex: number; content: string; partial: AssistantMessage } + | { type: "toolcall_start"; contentIndex: number; partial: AssistantMessage } + | { type: "toolcall_delta"; contentIndex: number; delta: string; partial: AssistantMessage } + | { type: "toolcall_end"; contentIndex: number; toolCall: ToolCall; partial: AssistantMessage } + | { type: "done"; reason: Extract; message: AssistantMessage } + | { type: "error"; reason: Extract; error: AssistantMessage }; + +/** + * Compatibility settings for OpenAI-compatible completions APIs. + * Use this to override URL-based auto-detection for custom providers. + */ +export interface OpenAICompletionsCompat { + /** Whether the provider supports the `store` field. Default: auto-detected from URL. */ + supportsStore?: boolean; + /** Whether the provider supports the `developer` role (vs `system`). Default: auto-detected from URL. */ + supportsDeveloperRole?: boolean; + /** Whether the provider supports `reasoning_effort`. Default: auto-detected from URL. */ + supportsReasoningEffort?: boolean; + /** Optional mapping from pi-ai reasoning levels to provider/model-specific `reasoning_effort` values. */ + reasoningEffortMap?: Partial>; + /** Whether the provider supports `stream_options: { include_usage: true }` for token usage in streaming responses. Default: true. */ + supportsUsageInStreaming?: boolean; + /** Which field to use for max tokens. Default: auto-detected from URL. */ + maxTokensField?: "max_completion_tokens" | "max_tokens"; + /** Whether tool results require the `name` field. Default: auto-detected from URL. 
*/ + requiresToolResultName?: boolean; + /** Whether a user message after tool results requires an assistant message in between. Default: auto-detected from URL. */ + requiresAssistantAfterToolResult?: boolean; + /** Whether thinking blocks must be converted to text blocks with delimiters. Default: auto-detected from URL. */ + requiresThinkingAsText?: boolean; + /** Format for reasoning/thinking parameter. "openai" uses reasoning_effort, "zai" uses thinking: { type: "enabled" }, "qwen" uses enable_thinking: boolean. Default: "openai". */ + thinkingFormat?: "openai" | "zai" | "qwen"; + /** OpenRouter-specific routing preferences. Only used when baseUrl points to OpenRouter. */ + openRouterRouting?: OpenRouterRouting; + /** Vercel AI Gateway routing preferences. Only used when baseUrl points to Vercel AI Gateway. */ + vercelGatewayRouting?: VercelGatewayRouting; + /** Whether the provider supports the `strict` field in tool definitions. Default: true. */ + supportsStrictMode?: boolean; +} + +/** Compatibility settings for OpenAI Responses APIs. */ +export interface OpenAIResponsesCompat { + // Reserved for future use +} + +/** + * OpenRouter provider routing preferences. + * Controls which upstream providers OpenRouter routes requests to. + * @see https://openrouter.ai/docs/provider-routing + */ +export interface OpenRouterRouting { + /** List of provider slugs to exclusively use for this request (e.g., ["amazon-bedrock", "anthropic"]). */ + only?: string[]; + /** List of provider slugs to try in order (e.g., ["anthropic", "openai"]). */ + order?: string[]; +} + +/** + * Vercel AI Gateway routing preferences. + * Controls which upstream providers the gateway routes requests to. + * @see https://vercel.com/docs/ai-gateway/models-and-providers/provider-options + */ +export interface VercelGatewayRouting { + /** List of provider slugs to exclusively use for this request (e.g., ["bedrock", "anthropic"]). 
*/ + only?: string[]; + /** List of provider slugs to try in order (e.g., ["anthropic", "openai"]). */ + order?: string[]; +} + +// Model interface for the unified model system +export interface Model { + id: string; + name: string; + api: TApi; + provider: Provider; + baseUrl: string; + reasoning: boolean; + input: ("text" | "image")[]; + cost: { + input: number; // $/million tokens + output: number; // $/million tokens + cacheRead: number; // $/million tokens + cacheWrite: number; // $/million tokens + }; + contextWindow: number; + maxTokens: number; + headers?: Record; + /** Compatibility overrides for OpenAI-compatible APIs. If not set, auto-detected from baseUrl. */ + compat?: TApi extends "openai-completions" + ? OpenAICompletionsCompat + : TApi extends "openai-responses" + ? OpenAIResponsesCompat + : never; +} diff --git a/packages/pi-ai/src/utils/event-stream.ts b/packages/pi-ai/src/utils/event-stream.ts new file mode 100644 index 000000000..f4a7ceba8 --- /dev/null +++ b/packages/pi-ai/src/utils/event-stream.ts @@ -0,0 +1,87 @@ +import type { AssistantMessage, AssistantMessageEvent } from "../types.js"; + +// Generic event stream class for async iteration +export class EventStream implements AsyncIterable { + private queue: T[] = []; + private waiting: ((value: IteratorResult) => void)[] = []; + private done = false; + private finalResultPromise: Promise; + private resolveFinalResult!: (result: R) => void; + + constructor( + private isComplete: (event: T) => boolean, + private extractResult: (event: T) => R, + ) { + this.finalResultPromise = new Promise((resolve) => { + this.resolveFinalResult = resolve; + }); + } + + push(event: T): void { + if (this.done) return; + + if (this.isComplete(event)) { + this.done = true; + this.resolveFinalResult(this.extractResult(event)); + } + + // Deliver to waiting consumer or queue it + const waiter = this.waiting.shift(); + if (waiter) { + waiter({ value: event, done: false }); + } else { + this.queue.push(event); + } + 
} + + end(result?: R): void { + this.done = true; + if (result !== undefined) { + this.resolveFinalResult(result); + } + // Notify all waiting consumers that we're done + while (this.waiting.length > 0) { + const waiter = this.waiting.shift()!; + waiter({ value: undefined as any, done: true }); + } + } + + async *[Symbol.asyncIterator](): AsyncIterator { + while (true) { + if (this.queue.length > 0) { + yield this.queue.shift()!; + } else if (this.done) { + return; + } else { + const result = await new Promise>((resolve) => this.waiting.push(resolve)); + if (result.done) return; + yield result.value; + } + } + } + + result(): Promise { + return this.finalResultPromise; + } +} + +export class AssistantMessageEventStream extends EventStream { + constructor() { + super( + (event) => event.type === "done" || event.type === "error", + (event) => { + if (event.type === "done") { + return event.message; + } else if (event.type === "error") { + return event.error; + } + throw new Error("Unexpected event type for final result"); + }, + ); + } +} + +/** Factory function for AssistantMessageEventStream (for use in extensions) */ +export function createAssistantMessageEventStream(): AssistantMessageEventStream { + return new AssistantMessageEventStream(); +} diff --git a/packages/pi-ai/src/utils/hash.ts b/packages/pi-ai/src/utils/hash.ts new file mode 100644 index 000000000..1ff55e8b4 --- /dev/null +++ b/packages/pi-ai/src/utils/hash.ts @@ -0,0 +1,13 @@ +/** Fast deterministic hash to shorten long strings */ +export function shortHash(str: string): string { + let h1 = 0xdeadbeef; + let h2 = 0x41c6ce57; + for (let i = 0; i < str.length; i++) { + const ch = str.charCodeAt(i); + h1 = Math.imul(h1 ^ ch, 2654435761); + h2 = Math.imul(h2 ^ ch, 1597334677); + } + h1 = Math.imul(h1 ^ (h1 >>> 16), 2246822507) ^ Math.imul(h2 ^ (h2 >>> 13), 3266489909); + h2 = Math.imul(h2 ^ (h2 >>> 16), 2246822507) ^ Math.imul(h1 ^ (h1 >>> 13), 3266489909); + return (h2 >>> 0).toString(36) + (h1 >>> 
0).toString(36); +} diff --git a/packages/pi-ai/src/utils/json-parse.ts b/packages/pi-ai/src/utils/json-parse.ts new file mode 100644 index 000000000..feeb32ad1 --- /dev/null +++ b/packages/pi-ai/src/utils/json-parse.ts @@ -0,0 +1,28 @@ +import { parse as partialParse } from "partial-json"; + +/** + * Attempts to parse potentially incomplete JSON during streaming. + * Always returns a valid object, even if the JSON is incomplete. + * + * @param partialJson The partial JSON string from streaming + * @returns Parsed object or empty object if parsing fails + */ +export function parseStreamingJson(partialJson: string | undefined): T { + if (!partialJson || partialJson.trim() === "") { + return {} as T; + } + + // Try standard parsing first (fastest for complete JSON) + try { + return JSON.parse(partialJson) as T; + } catch { + // Try partial-json for incomplete JSON + try { + const result = partialParse(partialJson); + return (result ?? {}) as T; + } catch { + // If all parsing fails, return empty object + return {} as T; + } + } +} diff --git a/packages/pi-ai/src/utils/oauth/anthropic.ts b/packages/pi-ai/src/utils/oauth/anthropic.ts new file mode 100644 index 000000000..5355df0d0 --- /dev/null +++ b/packages/pi-ai/src/utils/oauth/anthropic.ts @@ -0,0 +1,138 @@ +/** + * Anthropic OAuth flow (Claude Pro/Max) + */ + +import { generatePKCE } from "./pkce.js"; +import type { OAuthCredentials, OAuthLoginCallbacks, OAuthProviderInterface } from "./types.js"; + +const decode = (s: string) => atob(s); +const CLIENT_ID = decode("OWQxYzI1MGEtZTYxYi00NGQ5LTg4ZWQtNTk0NGQxOTYyZjVl"); +const AUTHORIZE_URL = "https://claude.ai/oauth/authorize"; +const TOKEN_URL = "https://console.anthropic.com/v1/oauth/token"; +const REDIRECT_URI = "https://console.anthropic.com/oauth/code/callback"; +const SCOPES = "org:create_api_key user:profile user:inference"; + +/** + * Login with Anthropic OAuth (device code flow) + * + * @param onAuthUrl - Callback to handle the authorization URL (e.g., open 
browser) + * @param onPromptCode - Callback to prompt user for the authorization code + */ +export async function loginAnthropic( + onAuthUrl: (url: string) => void, + onPromptCode: () => Promise, +): Promise { + const { verifier, challenge } = await generatePKCE(); + + // Build authorization URL + const authParams = new URLSearchParams({ + code: "true", + client_id: CLIENT_ID, + response_type: "code", + redirect_uri: REDIRECT_URI, + scope: SCOPES, + code_challenge: challenge, + code_challenge_method: "S256", + state: verifier, + }); + + const authUrl = `${AUTHORIZE_URL}?${authParams.toString()}`; + + // Notify caller with URL to open + onAuthUrl(authUrl); + + // Wait for user to paste authorization code (format: code#state) + const authCode = await onPromptCode(); + const splits = authCode.split("#"); + const code = splits[0]; + const state = splits[1]; + + // Exchange code for tokens + const tokenResponse = await fetch(TOKEN_URL, { + method: "POST", + headers: { + "Content-Type": "application/json", + }, + body: JSON.stringify({ + grant_type: "authorization_code", + client_id: CLIENT_ID, + code: code, + state: state, + redirect_uri: REDIRECT_URI, + code_verifier: verifier, + }), + }); + + if (!tokenResponse.ok) { + const error = await tokenResponse.text(); + throw new Error(`Token exchange failed: ${error}`); + } + + const tokenData = (await tokenResponse.json()) as { + access_token: string; + refresh_token: string; + expires_in: number; + }; + + // Calculate expiry time (current time + expires_in seconds - 5 min buffer) + const expiresAt = Date.now() + tokenData.expires_in * 1000 - 5 * 60 * 1000; + + // Save credentials + return { + refresh: tokenData.refresh_token, + access: tokenData.access_token, + expires: expiresAt, + }; +} + +/** + * Refresh Anthropic OAuth token + */ +export async function refreshAnthropicToken(refreshToken: string): Promise { + const response = await fetch(TOKEN_URL, { + method: "POST", + headers: { "Content-Type": "application/json" }, 
+ body: JSON.stringify({ + grant_type: "refresh_token", + client_id: CLIENT_ID, + refresh_token: refreshToken, + }), + }); + + if (!response.ok) { + const error = await response.text(); + throw new Error(`Anthropic token refresh failed: ${error}`); + } + + const data = (await response.json()) as { + access_token: string; + refresh_token: string; + expires_in: number; + }; + + return { + refresh: data.refresh_token, + access: data.access_token, + expires: Date.now() + data.expires_in * 1000 - 5 * 60 * 1000, + }; +} + +export const anthropicOAuthProvider: OAuthProviderInterface = { + id: "anthropic", + name: "Anthropic (Claude Pro/Max)", + + async login(callbacks: OAuthLoginCallbacks): Promise { + return loginAnthropic( + (url) => callbacks.onAuth({ url }), + () => callbacks.onPrompt({ message: "Paste the authorization code:" }), + ); + }, + + async refreshToken(credentials: OAuthCredentials): Promise { + return refreshAnthropicToken(credentials.refresh); + }, + + getApiKey(credentials: OAuthCredentials): string { + return credentials.access; + }, +}; diff --git a/packages/pi-ai/src/utils/oauth/github-copilot.ts b/packages/pi-ai/src/utils/oauth/github-copilot.ts new file mode 100644 index 000000000..1b0fe623e --- /dev/null +++ b/packages/pi-ai/src/utils/oauth/github-copilot.ts @@ -0,0 +1,381 @@ +/** + * GitHub Copilot OAuth flow + */ + +import { getModels } from "../../models.js"; +import type { Api, Model } from "../../types.js"; +import type { OAuthCredentials, OAuthLoginCallbacks, OAuthProviderInterface } from "./types.js"; + +type CopilotCredentials = OAuthCredentials & { + enterpriseUrl?: string; +}; + +const decode = (s: string) => atob(s); +const CLIENT_ID = decode("SXYxLmI1MDdhMDhjODdlY2ZlOTg="); + +const COPILOT_HEADERS = { + "User-Agent": "GitHubCopilotChat/0.35.0", + "Editor-Version": "vscode/1.107.0", + "Editor-Plugin-Version": "copilot-chat/0.35.0", + "Copilot-Integration-Id": "vscode-chat", +} as const; + +type DeviceCodeResponse = { + device_code: 
string; + user_code: string; + verification_uri: string; + interval: number; + expires_in: number; +}; + +type DeviceTokenSuccessResponse = { + access_token: string; + token_type?: string; + scope?: string; +}; + +type DeviceTokenErrorResponse = { + error: string; + error_description?: string; + interval?: number; +}; + +export function normalizeDomain(input: string): string | null { + const trimmed = input.trim(); + if (!trimmed) return null; + try { + const url = trimmed.includes("://") ? new URL(trimmed) : new URL(`https://${trimmed}`); + return url.hostname; + } catch { + return null; + } +} + +function getUrls(domain: string): { + deviceCodeUrl: string; + accessTokenUrl: string; + copilotTokenUrl: string; +} { + return { + deviceCodeUrl: `https://${domain}/login/device/code`, + accessTokenUrl: `https://${domain}/login/oauth/access_token`, + copilotTokenUrl: `https://api.${domain}/copilot_internal/v2/token`, + }; +} + +/** + * Parse the proxy-ep from a Copilot token and convert to API base URL. + * Token format: tid=...;exp=...;proxy-ep=proxy.individual.githubcopilot.com;... 
+ * Returns API URL like https://api.individual.githubcopilot.com + */ +function getBaseUrlFromToken(token: string): string | null { + const match = token.match(/proxy-ep=([^;]+)/); + if (!match) return null; + const proxyHost = match[1]; + // Convert proxy.xxx to api.xxx + const apiHost = proxyHost.replace(/^proxy\./, "api."); + return `https://${apiHost}`; +} + +export function getGitHubCopilotBaseUrl(token?: string, enterpriseDomain?: string): string { + // If we have a token, extract the base URL from proxy-ep + if (token) { + const urlFromToken = getBaseUrlFromToken(token); + if (urlFromToken) return urlFromToken; + } + // Fallback for enterprise or if token parsing fails + if (enterpriseDomain) return `https://copilot-api.${enterpriseDomain}`; + return "https://api.individual.githubcopilot.com"; +} + +async function fetchJson(url: string, init: RequestInit): Promise { + const response = await fetch(url, init); + if (!response.ok) { + const text = await response.text(); + throw new Error(`${response.status} ${response.statusText}: ${text}`); + } + return response.json(); +} + +async function startDeviceFlow(domain: string): Promise { + const urls = getUrls(domain); + const data = await fetchJson(urls.deviceCodeUrl, { + method: "POST", + headers: { + Accept: "application/json", + "Content-Type": "application/json", + "User-Agent": "GitHubCopilotChat/0.35.0", + }, + body: JSON.stringify({ + client_id: CLIENT_ID, + scope: "read:user", + }), + }); + + if (!data || typeof data !== "object") { + throw new Error("Invalid device code response"); + } + + const deviceCode = (data as Record).device_code; + const userCode = (data as Record).user_code; + const verificationUri = (data as Record).verification_uri; + const interval = (data as Record).interval; + const expiresIn = (data as Record).expires_in; + + if ( + typeof deviceCode !== "string" || + typeof userCode !== "string" || + typeof verificationUri !== "string" || + typeof interval !== "number" || + typeof 
expiresIn !== "number" + ) { + throw new Error("Invalid device code response fields"); + } + + return { + device_code: deviceCode, + user_code: userCode, + verification_uri: verificationUri, + interval, + expires_in: expiresIn, + }; +} + +/** + * Sleep that can be interrupted by an AbortSignal + */ +function abortableSleep(ms: number, signal?: AbortSignal): Promise { + return new Promise((resolve, reject) => { + if (signal?.aborted) { + reject(new Error("Login cancelled")); + return; + } + + const timeout = setTimeout(resolve, ms); + + signal?.addEventListener( + "abort", + () => { + clearTimeout(timeout); + reject(new Error("Login cancelled")); + }, + { once: true }, + ); + }); +} + +async function pollForGitHubAccessToken( + domain: string, + deviceCode: string, + intervalSeconds: number, + expiresIn: number, + signal?: AbortSignal, +) { + const urls = getUrls(domain); + const deadline = Date.now() + expiresIn * 1000; + let intervalMs = Math.max(1000, Math.floor(intervalSeconds * 1000)); + + while (Date.now() < deadline) { + if (signal?.aborted) { + throw new Error("Login cancelled"); + } + + const raw = await fetchJson(urls.accessTokenUrl, { + method: "POST", + headers: { + Accept: "application/json", + "Content-Type": "application/json", + "User-Agent": "GitHubCopilotChat/0.35.0", + }, + body: JSON.stringify({ + client_id: CLIENT_ID, + device_code: deviceCode, + grant_type: "urn:ietf:params:oauth:grant-type:device_code", + }), + }); + + if (raw && typeof raw === "object" && typeof (raw as DeviceTokenSuccessResponse).access_token === "string") { + return (raw as DeviceTokenSuccessResponse).access_token; + } + + if (raw && typeof raw === "object" && typeof (raw as DeviceTokenErrorResponse).error === "string") { + const err = (raw as DeviceTokenErrorResponse).error; + if (err === "authorization_pending") { + await abortableSleep(intervalMs, signal); + continue; + } + + if (err === "slow_down") { + intervalMs += 5000; + await abortableSleep(intervalMs, signal); + 
continue; + } + + throw new Error(`Device flow failed: ${err}`); + } + + await abortableSleep(intervalMs, signal); + } + + throw new Error("Device flow timed out"); +} + +/** + * Refresh GitHub Copilot token + */ +export async function refreshGitHubCopilotToken( + refreshToken: string, + enterpriseDomain?: string, +): Promise { + const domain = enterpriseDomain || "github.com"; + const urls = getUrls(domain); + + const raw = await fetchJson(urls.copilotTokenUrl, { + headers: { + Accept: "application/json", + Authorization: `Bearer ${refreshToken}`, + ...COPILOT_HEADERS, + }, + }); + + if (!raw || typeof raw !== "object") { + throw new Error("Invalid Copilot token response"); + } + + const token = (raw as Record).token; + const expiresAt = (raw as Record).expires_at; + + if (typeof token !== "string" || typeof expiresAt !== "number") { + throw new Error("Invalid Copilot token response fields"); + } + + return { + refresh: refreshToken, + access: token, + expires: expiresAt * 1000 - 5 * 60 * 1000, + enterpriseUrl: enterpriseDomain, + }; +} + +/** + * Enable a model for the user's GitHub Copilot account. + * This is required for some models (like Claude, Grok) before they can be used. + */ +async function enableGitHubCopilotModel(token: string, modelId: string, enterpriseDomain?: string): Promise { + const baseUrl = getGitHubCopilotBaseUrl(token, enterpriseDomain); + const url = `${baseUrl}/models/${modelId}/policy`; + + try { + const response = await fetch(url, { + method: "POST", + headers: { + "Content-Type": "application/json", + Authorization: `Bearer ${token}`, + ...COPILOT_HEADERS, + "openai-intent": "chat-policy", + "x-interaction-type": "chat-policy", + }, + body: JSON.stringify({ state: "enabled" }), + }); + return response.ok; + } catch { + return false; + } +} + +/** + * Enable all known GitHub Copilot models that may require policy acceptance. + * Called after successful login to ensure all models are available. 
+ */ +async function enableAllGitHubCopilotModels( + token: string, + enterpriseDomain?: string, + onProgress?: (model: string, success: boolean) => void, +): Promise { + const models = getModels("github-copilot"); + await Promise.all( + models.map(async (model) => { + const success = await enableGitHubCopilotModel(token, model.id, enterpriseDomain); + onProgress?.(model.id, success); + }), + ); +} + +/** + * Login with GitHub Copilot OAuth (device code flow) + * + * @param options.onAuth - Callback with URL and optional instructions (user code) + * @param options.onPrompt - Callback to prompt user for input + * @param options.onProgress - Optional progress callback + * @param options.signal - Optional AbortSignal for cancellation + */ +export async function loginGitHubCopilot(options: { + onAuth: (url: string, instructions?: string) => void; + onPrompt: (prompt: { message: string; placeholder?: string; allowEmpty?: boolean }) => Promise; + onProgress?: (message: string) => void; + signal?: AbortSignal; +}): Promise { + const input = await options.onPrompt({ + message: "GitHub Enterprise URL/domain (blank for github.com)", + placeholder: "company.ghe.com", + allowEmpty: true, + }); + + if (options.signal?.aborted) { + throw new Error("Login cancelled"); + } + + const trimmed = input.trim(); + const enterpriseDomain = normalizeDomain(input); + if (trimmed && !enterpriseDomain) { + throw new Error("Invalid GitHub Enterprise URL/domain"); + } + const domain = enterpriseDomain || "github.com"; + + const device = await startDeviceFlow(domain); + options.onAuth(device.verification_uri, `Enter code: ${device.user_code}`); + + const githubAccessToken = await pollForGitHubAccessToken( + domain, + device.device_code, + device.interval, + device.expires_in, + options.signal, + ); + const credentials = await refreshGitHubCopilotToken(githubAccessToken, enterpriseDomain ?? 
undefined); + + // Enable all models after successful login + options.onProgress?.("Enabling models..."); + await enableAllGitHubCopilotModels(credentials.access, enterpriseDomain ?? undefined); + return credentials; +} + +export const githubCopilotOAuthProvider: OAuthProviderInterface = { + id: "github-copilot", + name: "GitHub Copilot", + + async login(callbacks: OAuthLoginCallbacks): Promise { + return loginGitHubCopilot({ + onAuth: (url, instructions) => callbacks.onAuth({ url, instructions }), + onPrompt: callbacks.onPrompt, + onProgress: callbacks.onProgress, + signal: callbacks.signal, + }); + }, + + async refreshToken(credentials: OAuthCredentials): Promise { + const creds = credentials as CopilotCredentials; + return refreshGitHubCopilotToken(creds.refresh, creds.enterpriseUrl); + }, + + getApiKey(credentials: OAuthCredentials): string { + return credentials.access; + }, + + modifyModels(models: Model[], credentials: OAuthCredentials): Model[] { + const creds = credentials as CopilotCredentials; + const domain = creds.enterpriseUrl ? (normalizeDomain(creds.enterpriseUrl) ?? undefined) : undefined; + const baseUrl = getGitHubCopilotBaseUrl(creds.access, domain); + return models.map((m) => (m.provider === "github-copilot" ? { ...m, baseUrl } : m)); + }, +}; diff --git a/packages/pi-ai/src/utils/oauth/google-antigravity.ts b/packages/pi-ai/src/utils/oauth/google-antigravity.ts new file mode 100644 index 000000000..e9ef3c740 --- /dev/null +++ b/packages/pi-ai/src/utils/oauth/google-antigravity.ts @@ -0,0 +1,457 @@ +/** + * Antigravity OAuth flow (Gemini 3, Claude, GPT-OSS via Google Cloud) + * Uses different OAuth credentials than google-gemini-cli for access to additional models. + * + * NOTE: This module uses Node.js http.createServer for the OAuth callback. + * It is only intended for CLI use, not browser environments. 
+ */ + +import type { Server } from "node:http"; +import { generatePKCE } from "./pkce.js"; +import type { OAuthCredentials, OAuthLoginCallbacks, OAuthProviderInterface } from "./types.js"; + +type AntigravityCredentials = OAuthCredentials & { + projectId: string; +}; + +let _createServer: typeof import("node:http").createServer | null = null; +let _httpImportPromise: Promise | null = null; +if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) { + _httpImportPromise = import("node:http").then((m) => { + _createServer = m.createServer; + }); +} + +// Antigravity OAuth credentials (different from Gemini CLI) +const decode = (s: string) => atob(s); +const CLIENT_ID = decode( + "MTA3MTAwNjA2MDU5MS10bWhzc2luMmgyMWxjcmUyMzV2dG9sb2poNGc0MDNlcC5hcHBzLmdvb2dsZXVzZXJjb250ZW50LmNvbQ==", +); +const CLIENT_SECRET = decode("R09DU1BYLUs1OEZXUjQ4NkxkTEoxbUxCOHNYQzR6NnFEQWY="); +const REDIRECT_URI = "http://localhost:51121/oauth-callback"; + +// Antigravity requires additional scopes +const SCOPES = [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/userinfo.profile", + "https://www.googleapis.com/auth/cclog", + "https://www.googleapis.com/auth/experimentsandconfigs", +]; + +const AUTH_URL = "https://accounts.google.com/o/oauth2/v2/auth"; +const TOKEN_URL = "https://oauth2.googleapis.com/token"; + +// Fallback project ID when discovery fails +const DEFAULT_PROJECT_ID = "rising-fact-p41fc"; + +type CallbackServerInfo = { + server: Server; + cancelWait: () => void; + waitForCode: () => Promise<{ code: string; state: string } | null>; +}; + +/** + * Start a local HTTP server to receive the OAuth callback + */ +async function getNodeCreateServer(): Promise { + if (_createServer) return _createServer; + if (_httpImportPromise) { + await _httpImportPromise; + } + if (_createServer) return _createServer; + throw new Error("Antigravity OAuth is only available in 
Node.js environments"); +} + +async function startCallbackServer(): Promise { + const createServer = await getNodeCreateServer(); + + return new Promise((resolve, reject) => { + let result: { code: string; state: string } | null = null; + let cancelled = false; + + const server = createServer((req, res) => { + const url = new URL(req.url || "", `http://localhost:51121`); + + if (url.pathname === "/oauth-callback") { + const code = url.searchParams.get("code"); + const state = url.searchParams.get("state"); + const error = url.searchParams.get("error"); + + if (error) { + res.writeHead(400, { "Content-Type": "text/html" }); + res.end( + `

Authentication Failed

Error: ${error}

You can close this window.

`, + ); + return; + } + + if (code && state) { + res.writeHead(200, { "Content-Type": "text/html" }); + res.end( + `

Authentication Successful

You can close this window and return to the terminal.

`, + ); + result = { code, state }; + } else { + res.writeHead(400, { "Content-Type": "text/html" }); + res.end( + `

Authentication Failed

Missing code or state parameter.

`, + ); + } + } else { + res.writeHead(404); + res.end(); + } + }); + + server.on("error", (err) => { + reject(err); + }); + + server.listen(51121, "127.0.0.1", () => { + resolve({ + server, + cancelWait: () => { + cancelled = true; + }, + waitForCode: async () => { + const sleep = () => new Promise((r) => setTimeout(r, 100)); + while (!result && !cancelled) { + await sleep(); + } + return result; + }, + }); + }); + }); +} + +/** + * Parse redirect URL to extract code and state + */ +function parseRedirectUrl(input: string): { code?: string; state?: string } { + const value = input.trim(); + if (!value) return {}; + + try { + const url = new URL(value); + return { + code: url.searchParams.get("code") ?? undefined, + state: url.searchParams.get("state") ?? undefined, + }; + } catch { + // Not a URL, return empty + return {}; + } +} + +interface LoadCodeAssistPayload { + cloudaicompanionProject?: string | { id?: string }; + currentTier?: { id?: string }; + allowedTiers?: Array<{ id?: string; isDefault?: boolean }>; +} + +/** + * Discover or provision a project for the user + */ +async function discoverProject(accessToken: string, onProgress?: (message: string) => void): Promise { + const headers = { + Authorization: `Bearer ${accessToken}`, + "Content-Type": "application/json", + "User-Agent": "google-api-nodejs-client/9.15.1", + "X-Goog-Api-Client": "google-cloud-sdk vscode_cloudshelleditor/0.1", + "Client-Metadata": JSON.stringify({ + ideType: "IDE_UNSPECIFIED", + platform: "PLATFORM_UNSPECIFIED", + pluginType: "GEMINI", + }), + }; + + // Try endpoints in order: prod first, then sandbox + const endpoints = ["https://cloudcode-pa.googleapis.com", "https://daily-cloudcode-pa.sandbox.googleapis.com"]; + + onProgress?.("Checking for existing project..."); + + for (const endpoint of endpoints) { + try { + const loadResponse = await fetch(`${endpoint}/v1internal:loadCodeAssist`, { + method: "POST", + headers, + body: JSON.stringify({ + metadata: { + ideType: 
"IDE_UNSPECIFIED", + platform: "PLATFORM_UNSPECIFIED", + pluginType: "GEMINI", + }, + }), + }); + + if (loadResponse.ok) { + const data = (await loadResponse.json()) as LoadCodeAssistPayload; + + // Handle both string and object formats + if (typeof data.cloudaicompanionProject === "string" && data.cloudaicompanionProject) { + return data.cloudaicompanionProject; + } + if ( + data.cloudaicompanionProject && + typeof data.cloudaicompanionProject === "object" && + data.cloudaicompanionProject.id + ) { + return data.cloudaicompanionProject.id; + } + } + } catch { + // Try next endpoint + } + } + + // Use fallback project ID + onProgress?.("Using default project..."); + return DEFAULT_PROJECT_ID; +} + +/** + * Get user email from the access token + */ +async function getUserEmail(accessToken: string): Promise { + try { + const response = await fetch("https://www.googleapis.com/oauth2/v1/userinfo?alt=json", { + headers: { + Authorization: `Bearer ${accessToken}`, + }, + }); + + if (response.ok) { + const data = (await response.json()) as { email?: string }; + return data.email; + } + } catch { + // Ignore errors, email is optional + } + return undefined; +} + +/** + * Refresh Antigravity token + */ +export async function refreshAntigravityToken(refreshToken: string, projectId: string): Promise { + const response = await fetch(TOKEN_URL, { + method: "POST", + headers: { "Content-Type": "application/x-www-form-urlencoded" }, + body: new URLSearchParams({ + client_id: CLIENT_ID, + client_secret: CLIENT_SECRET, + refresh_token: refreshToken, + grant_type: "refresh_token", + }), + }); + + if (!response.ok) { + const error = await response.text(); + throw new Error(`Antigravity token refresh failed: ${error}`); + } + + const data = (await response.json()) as { + access_token: string; + expires_in: number; + refresh_token?: string; + }; + + return { + refresh: data.refresh_token || refreshToken, + access: data.access_token, + expires: Date.now() + data.expires_in * 1000 - 5 * 
60 * 1000, + projectId, + }; +} + +/** + * Login with Antigravity OAuth + * + * @param onAuth - Callback with URL and optional instructions + * @param onProgress - Optional progress callback + * @param onManualCodeInput - Optional promise that resolves with user-pasted redirect URL. + * Races with browser callback - whichever completes first wins. + */ +export async function loginAntigravity( + onAuth: (info: { url: string; instructions?: string }) => void, + onProgress?: (message: string) => void, + onManualCodeInput?: () => Promise, +): Promise { + const { verifier, challenge } = await generatePKCE(); + + // Start local server for callback + onProgress?.("Starting local server for OAuth callback..."); + const server = await startCallbackServer(); + + let code: string | undefined; + + try { + // Build authorization URL + const authParams = new URLSearchParams({ + client_id: CLIENT_ID, + response_type: "code", + redirect_uri: REDIRECT_URI, + scope: SCOPES.join(" "), + code_challenge: challenge, + code_challenge_method: "S256", + state: verifier, + access_type: "offline", + prompt: "consent", + }); + + const authUrl = `${AUTH_URL}?${authParams.toString()}`; + + // Notify caller with URL to open + onAuth({ + url: authUrl, + instructions: "Complete the sign-in in your browser.", + }); + + // Wait for the callback, racing with manual input if provided + onProgress?.("Waiting for OAuth callback..."); + + if (onManualCodeInput) { + // Race between browser callback and manual input + let manualInput: string | undefined; + let manualError: Error | undefined; + const manualPromise = onManualCodeInput() + .then((input) => { + manualInput = input; + server.cancelWait(); + }) + .catch((err) => { + manualError = err instanceof Error ? 
err : new Error(String(err)); + server.cancelWait(); + }); + + const result = await server.waitForCode(); + + // If manual input was cancelled, throw that error + if (manualError) { + throw manualError; + } + + if (result?.code) { + // Browser callback won - verify state + if (result.state !== verifier) { + throw new Error("OAuth state mismatch - possible CSRF attack"); + } + code = result.code; + } else if (manualInput) { + // Manual input won + const parsed = parseRedirectUrl(manualInput); + if (parsed.state && parsed.state !== verifier) { + throw new Error("OAuth state mismatch - possible CSRF attack"); + } + code = parsed.code; + } + + // If still no code, wait for manual promise and try that + if (!code) { + await manualPromise; + if (manualError) { + throw manualError; + } + if (manualInput) { + const parsed = parseRedirectUrl(manualInput); + if (parsed.state && parsed.state !== verifier) { + throw new Error("OAuth state mismatch - possible CSRF attack"); + } + code = parsed.code; + } + } + } else { + // Original flow: just wait for callback + const result = await server.waitForCode(); + if (result?.code) { + if (result.state !== verifier) { + throw new Error("OAuth state mismatch - possible CSRF attack"); + } + code = result.code; + } + } + + if (!code) { + throw new Error("No authorization code received"); + } + + // Exchange code for tokens + onProgress?.("Exchanging authorization code for tokens..."); + const tokenResponse = await fetch(TOKEN_URL, { + method: "POST", + headers: { + "Content-Type": "application/x-www-form-urlencoded", + }, + body: new URLSearchParams({ + client_id: CLIENT_ID, + client_secret: CLIENT_SECRET, + code, + grant_type: "authorization_code", + redirect_uri: REDIRECT_URI, + code_verifier: verifier, + }), + }); + + if (!tokenResponse.ok) { + const error = await tokenResponse.text(); + throw new Error(`Token exchange failed: ${error}`); + } + + const tokenData = (await tokenResponse.json()) as { + access_token: string; + 
refresh_token: string; + expires_in: number; + }; + + if (!tokenData.refresh_token) { + throw new Error("No refresh token received. Please try again."); + } + + // Get user email + onProgress?.("Getting user info..."); + const email = await getUserEmail(tokenData.access_token); + + // Discover project + const projectId = await discoverProject(tokenData.access_token, onProgress); + + // Calculate expiry time (current time + expires_in seconds - 5 min buffer) + const expiresAt = Date.now() + tokenData.expires_in * 1000 - 5 * 60 * 1000; + + const credentials: OAuthCredentials = { + refresh: tokenData.refresh_token, + access: tokenData.access_token, + expires: expiresAt, + projectId, + email, + }; + + return credentials; + } finally { + server.server.close(); + } +} + +export const antigravityOAuthProvider: OAuthProviderInterface = { + id: "google-antigravity", + name: "Antigravity (Gemini 3, Claude, GPT-OSS)", + usesCallbackServer: true, + + async login(callbacks: OAuthLoginCallbacks): Promise { + return loginAntigravity(callbacks.onAuth, callbacks.onProgress, callbacks.onManualCodeInput); + }, + + async refreshToken(credentials: OAuthCredentials): Promise { + const creds = credentials as AntigravityCredentials; + if (!creds.projectId) { + throw new Error("Antigravity credentials missing projectId"); + } + return refreshAntigravityToken(creds.refresh, creds.projectId); + }, + + getApiKey(credentials: OAuthCredentials): string { + const creds = credentials as AntigravityCredentials; + return JSON.stringify({ token: creds.access, projectId: creds.projectId }); + }, +}; diff --git a/packages/pi-ai/src/utils/oauth/google-gemini-cli.ts b/packages/pi-ai/src/utils/oauth/google-gemini-cli.ts new file mode 100644 index 000000000..a90524fea --- /dev/null +++ b/packages/pi-ai/src/utils/oauth/google-gemini-cli.ts @@ -0,0 +1,599 @@ +/** + * Gemini CLI OAuth flow (Google Cloud Code Assist) + * Standard Gemini models only (gemini-2.0-flash, gemini-2.5-*) + * + * NOTE: This module 
uses Node.js http.createServer for the OAuth callback. + * It is only intended for CLI use, not browser environments. + */ + +import type { Server } from "node:http"; +import { generatePKCE } from "./pkce.js"; +import type { OAuthCredentials, OAuthLoginCallbacks, OAuthProviderInterface } from "./types.js"; + +type GeminiCredentials = OAuthCredentials & { + projectId: string; +}; + +let _createServer: typeof import("node:http").createServer | null = null; +let _httpImportPromise: Promise | null = null; +if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) { + _httpImportPromise = import("node:http").then((m) => { + _createServer = m.createServer; + }); +} + +const decode = (s: string) => atob(s); +const CLIENT_ID = decode( + "NjgxMjU1ODA5Mzk1LW9vOGZ0Mm9wcmRybnA5ZTNhcWY2YXYzaG1kaWIxMzVqLmFwcHMuZ29vZ2xldXNlcmNvbnRlbnQuY29t", +); +const CLIENT_SECRET = decode("R09DU1BYLTR1SGdNUG0tMW83U2stZ2VWNkN1NWNsWEZzeGw="); +const REDIRECT_URI = "http://localhost:8085/oauth2callback"; +const SCOPES = [ + "https://www.googleapis.com/auth/cloud-platform", + "https://www.googleapis.com/auth/userinfo.email", + "https://www.googleapis.com/auth/userinfo.profile", +]; +const AUTH_URL = "https://accounts.google.com/o/oauth2/v2/auth"; +const TOKEN_URL = "https://oauth2.googleapis.com/token"; +const CODE_ASSIST_ENDPOINT = "https://cloudcode-pa.googleapis.com"; + +type CallbackServerInfo = { + server: Server; + cancelWait: () => void; + waitForCode: () => Promise<{ code: string; state: string } | null>; +}; + +/** + * Start a local HTTP server to receive the OAuth callback + */ +async function getNodeCreateServer(): Promise { + if (_createServer) return _createServer; + if (_httpImportPromise) { + await _httpImportPromise; + } + if (_createServer) return _createServer; + throw new Error("Gemini CLI OAuth is only available in Node.js environments"); +} + +async function startCallbackServer(): Promise { + const createServer = await getNodeCreateServer(); + + 
return new Promise((resolve, reject) => { + let result: { code: string; state: string } | null = null; + let cancelled = false; + + const server = createServer((req, res) => { + const url = new URL(req.url || "", `http://localhost:8085`); + + if (url.pathname === "/oauth2callback") { + const code = url.searchParams.get("code"); + const state = url.searchParams.get("state"); + const error = url.searchParams.get("error"); + + if (error) { + res.writeHead(400, { "Content-Type": "text/html" }); + res.end( + `

<h1>Authentication Failed</h1>

<p>Error: ${error}</p>

<p>You can close this window.</p>

`, + ); + return; + } + + if (code && state) { + res.writeHead(200, { "Content-Type": "text/html" }); + res.end( + `

<h1>Authentication Successful</h1>

<p>You can close this window and return to the terminal.</p>

`, + ); + result = { code, state }; + } else { + res.writeHead(400, { "Content-Type": "text/html" }); + res.end( + `

<h1>Authentication Failed</h1>

<p>Missing code or state parameter.</p>

`, + ); + } + } else { + res.writeHead(404); + res.end(); + } + }); + + server.on("error", (err) => { + reject(err); + }); + + server.listen(8085, "127.0.0.1", () => { + resolve({ + server, + cancelWait: () => { + cancelled = true; + }, + waitForCode: async () => { + const sleep = () => new Promise((r) => setTimeout(r, 100)); + while (!result && !cancelled) { + await sleep(); + } + return result; + }, + }); + }); + }); +} + +/** + * Parse redirect URL to extract code and state + */ +function parseRedirectUrl(input: string): { code?: string; state?: string } { + const value = input.trim(); + if (!value) return {}; + + try { + const url = new URL(value); + return { + code: url.searchParams.get("code") ?? undefined, + state: url.searchParams.get("state") ?? undefined, + }; + } catch { + // Not a URL, return empty + return {}; + } +} + +interface LoadCodeAssistPayload { + cloudaicompanionProject?: string; + currentTier?: { id?: string }; + allowedTiers?: Array<{ id?: string; isDefault?: boolean }>; +} + +/** + * Long-running operation response from onboardUser + */ +interface LongRunningOperationResponse { + name?: string; + done?: boolean; + response?: { + cloudaicompanionProject?: { id?: string }; + }; +} + +// Tier IDs as used by the Cloud Code API +const TIER_FREE = "free-tier"; +const TIER_LEGACY = "legacy-tier"; +const TIER_STANDARD = "standard-tier"; + +interface GoogleRpcErrorResponse { + error?: { + details?: Array<{ reason?: string }>; + }; +} + +/** + * Wait helper for onboarding retries + */ +function wait(ms: number): Promise { + return new Promise((resolve) => setTimeout(resolve, ms)); +} + +/** + * Get default tier from allowed tiers + */ +function getDefaultTier(allowedTiers?: Array<{ id?: string; isDefault?: boolean }>): { id?: string } { + if (!allowedTiers || allowedTiers.length === 0) return { id: TIER_LEGACY }; + const defaultTier = allowedTiers.find((t) => t.isDefault); + return defaultTier ?? 
{ id: TIER_LEGACY }; +} + +function isVpcScAffectedUser(payload: unknown): boolean { + if (!payload || typeof payload !== "object") return false; + if (!("error" in payload)) return false; + const error = (payload as GoogleRpcErrorResponse).error; + if (!error?.details || !Array.isArray(error.details)) return false; + return error.details.some((detail) => detail.reason === "SECURITY_POLICY_VIOLATED"); +} + +/** + * Poll a long-running operation until completion + */ +async function pollOperation( + operationName: string, + headers: Record, + onProgress?: (message: string) => void, +): Promise { + let attempt = 0; + while (true) { + if (attempt > 0) { + onProgress?.(`Waiting for project provisioning (attempt ${attempt + 1})...`); + await wait(5000); + } + + const response = await fetch(`${CODE_ASSIST_ENDPOINT}/v1internal/${operationName}`, { + method: "GET", + headers, + }); + + if (!response.ok) { + throw new Error(`Failed to poll operation: ${response.status} ${response.statusText}`); + } + + const data = (await response.json()) as LongRunningOperationResponse; + if (data.done) { + return data; + } + + attempt += 1; + } +} + +/** + * Discover or provision a Google Cloud project for the user + */ +async function discoverProject(accessToken: string, onProgress?: (message: string) => void): Promise { + // Check for user-provided project ID via environment variable + const envProjectId = process.env.GOOGLE_CLOUD_PROJECT || process.env.GOOGLE_CLOUD_PROJECT_ID; + + const headers = { + Authorization: `Bearer ${accessToken}`, + "Content-Type": "application/json", + "User-Agent": "google-api-nodejs-client/9.15.1", + "X-Goog-Api-Client": "gl-node/22.17.0", + }; + + // Try to load existing project via loadCodeAssist + onProgress?.("Checking for existing Cloud Code Assist project..."); + const loadResponse = await fetch(`${CODE_ASSIST_ENDPOINT}/v1internal:loadCodeAssist`, { + method: "POST", + headers, + body: JSON.stringify({ + cloudaicompanionProject: envProjectId, + 
metadata: { + ideType: "IDE_UNSPECIFIED", + platform: "PLATFORM_UNSPECIFIED", + pluginType: "GEMINI", + duetProject: envProjectId, + }, + }), + }); + + let data: LoadCodeAssistPayload; + + if (!loadResponse.ok) { + let errorPayload: unknown; + try { + errorPayload = await loadResponse.clone().json(); + } catch { + errorPayload = undefined; + } + + if (isVpcScAffectedUser(errorPayload)) { + data = { currentTier: { id: TIER_STANDARD } }; + } else { + const errorText = await loadResponse.text(); + throw new Error(`loadCodeAssist failed: ${loadResponse.status} ${loadResponse.statusText}: ${errorText}`); + } + } else { + data = (await loadResponse.json()) as LoadCodeAssistPayload; + } + + // If user already has a current tier and project, use it + if (data.currentTier) { + if (data.cloudaicompanionProject) { + return data.cloudaicompanionProject; + } + // User has a tier but no managed project - they need to provide one via env var + if (envProjectId) { + return envProjectId; + } + throw new Error( + "This account requires setting the GOOGLE_CLOUD_PROJECT or GOOGLE_CLOUD_PROJECT_ID environment variable. " + + "See https://goo.gle/gemini-cli-auth-docs#workspace-gca", + ); + } + + // User needs to be onboarded - get the default tier + const tier = getDefaultTier(data.allowedTiers); + const tierId = tier?.id ?? TIER_FREE; + + if (tierId !== TIER_FREE && !envProjectId) { + throw new Error( + "This account requires setting the GOOGLE_CLOUD_PROJECT or GOOGLE_CLOUD_PROJECT_ID environment variable. 
" + + "See https://goo.gle/gemini-cli-auth-docs#workspace-gca", + ); + } + + onProgress?.("Provisioning Cloud Code Assist project (this may take a moment)..."); + + // Build onboard request - for free tier, don't include project ID (Google provisions one) + // For other tiers, include the user's project ID if available + const onboardBody: Record = { + tierId, + metadata: { + ideType: "IDE_UNSPECIFIED", + platform: "PLATFORM_UNSPECIFIED", + pluginType: "GEMINI", + }, + }; + + if (tierId !== TIER_FREE && envProjectId) { + onboardBody.cloudaicompanionProject = envProjectId; + (onboardBody.metadata as Record).duetProject = envProjectId; + } + + // Start onboarding - this returns a long-running operation + const onboardResponse = await fetch(`${CODE_ASSIST_ENDPOINT}/v1internal:onboardUser`, { + method: "POST", + headers, + body: JSON.stringify(onboardBody), + }); + + if (!onboardResponse.ok) { + const errorText = await onboardResponse.text(); + throw new Error(`onboardUser failed: ${onboardResponse.status} ${onboardResponse.statusText}: ${errorText}`); + } + + let lroData = (await onboardResponse.json()) as LongRunningOperationResponse; + + // If the operation isn't done yet, poll until completion + if (!lroData.done && lroData.name) { + lroData = await pollOperation(lroData.name, headers, onProgress); + } + + // Try to get project ID from the response + const projectId = lroData.response?.cloudaicompanionProject?.id; + if (projectId) { + return projectId; + } + + // If no project ID from onboarding, fall back to env var + if (envProjectId) { + return envProjectId; + } + + throw new Error( + "Could not discover or provision a Google Cloud project. " + + "Try setting the GOOGLE_CLOUD_PROJECT or GOOGLE_CLOUD_PROJECT_ID environment variable. 
" + + "See https://goo.gle/gemini-cli-auth-docs#workspace-gca", + ); +} + +/** + * Get user email from the access token + */ +async function getUserEmail(accessToken: string): Promise { + try { + const response = await fetch("https://www.googleapis.com/oauth2/v1/userinfo?alt=json", { + headers: { + Authorization: `Bearer ${accessToken}`, + }, + }); + + if (response.ok) { + const data = (await response.json()) as { email?: string }; + return data.email; + } + } catch { + // Ignore errors, email is optional + } + return undefined; +} + +/** + * Refresh Google Cloud Code Assist token + */ +export async function refreshGoogleCloudToken(refreshToken: string, projectId: string): Promise { + const response = await fetch(TOKEN_URL, { + method: "POST", + headers: { "Content-Type": "application/x-www-form-urlencoded" }, + body: new URLSearchParams({ + client_id: CLIENT_ID, + client_secret: CLIENT_SECRET, + refresh_token: refreshToken, + grant_type: "refresh_token", + }), + }); + + if (!response.ok) { + const error = await response.text(); + throw new Error(`Google Cloud token refresh failed: ${error}`); + } + + const data = (await response.json()) as { + access_token: string; + expires_in: number; + refresh_token?: string; + }; + + return { + refresh: data.refresh_token || refreshToken, + access: data.access_token, + expires: Date.now() + data.expires_in * 1000 - 5 * 60 * 1000, + projectId, + }; +} + +/** + * Login with Gemini CLI (Google Cloud Code Assist) OAuth + * + * @param onAuth - Callback with URL and optional instructions + * @param onProgress - Optional progress callback + * @param onManualCodeInput - Optional promise that resolves with user-pasted redirect URL. + * Races with browser callback - whichever completes first wins. 
+ */ +export async function loginGeminiCli( + onAuth: (info: { url: string; instructions?: string }) => void, + onProgress?: (message: string) => void, + onManualCodeInput?: () => Promise, +): Promise { + const { verifier, challenge } = await generatePKCE(); + + // Start local server for callback + onProgress?.("Starting local server for OAuth callback..."); + const server = await startCallbackServer(); + + let code: string | undefined; + + try { + // Build authorization URL + const authParams = new URLSearchParams({ + client_id: CLIENT_ID, + response_type: "code", + redirect_uri: REDIRECT_URI, + scope: SCOPES.join(" "), + code_challenge: challenge, + code_challenge_method: "S256", + state: verifier, + access_type: "offline", + prompt: "consent", + }); + + const authUrl = `${AUTH_URL}?${authParams.toString()}`; + + // Notify caller with URL to open + onAuth({ + url: authUrl, + instructions: "Complete the sign-in in your browser.", + }); + + // Wait for the callback, racing with manual input if provided + onProgress?.("Waiting for OAuth callback..."); + + if (onManualCodeInput) { + // Race between browser callback and manual input + let manualInput: string | undefined; + let manualError: Error | undefined; + const manualPromise = onManualCodeInput() + .then((input) => { + manualInput = input; + server.cancelWait(); + }) + .catch((err) => { + manualError = err instanceof Error ? 
err : new Error(String(err)); + server.cancelWait(); + }); + + const result = await server.waitForCode(); + + // If manual input was cancelled, throw that error + if (manualError) { + throw manualError; + } + + if (result?.code) { + // Browser callback won - verify state + if (result.state !== verifier) { + throw new Error("OAuth state mismatch - possible CSRF attack"); + } + code = result.code; + } else if (manualInput) { + // Manual input won + const parsed = parseRedirectUrl(manualInput); + if (parsed.state && parsed.state !== verifier) { + throw new Error("OAuth state mismatch - possible CSRF attack"); + } + code = parsed.code; + } + + // If still no code, wait for manual promise and try that + if (!code) { + await manualPromise; + if (manualError) { + throw manualError; + } + if (manualInput) { + const parsed = parseRedirectUrl(manualInput); + if (parsed.state && parsed.state !== verifier) { + throw new Error("OAuth state mismatch - possible CSRF attack"); + } + code = parsed.code; + } + } + } else { + // Original flow: just wait for callback + const result = await server.waitForCode(); + if (result?.code) { + if (result.state !== verifier) { + throw new Error("OAuth state mismatch - possible CSRF attack"); + } + code = result.code; + } + } + + if (!code) { + throw new Error("No authorization code received"); + } + + // Exchange code for tokens + onProgress?.("Exchanging authorization code for tokens..."); + const tokenResponse = await fetch(TOKEN_URL, { + method: "POST", + headers: { + "Content-Type": "application/x-www-form-urlencoded", + }, + body: new URLSearchParams({ + client_id: CLIENT_ID, + client_secret: CLIENT_SECRET, + code, + grant_type: "authorization_code", + redirect_uri: REDIRECT_URI, + code_verifier: verifier, + }), + }); + + if (!tokenResponse.ok) { + const error = await tokenResponse.text(); + throw new Error(`Token exchange failed: ${error}`); + } + + const tokenData = (await tokenResponse.json()) as { + access_token: string; + 
refresh_token: string; + expires_in: number; + }; + + if (!tokenData.refresh_token) { + throw new Error("No refresh token received. Please try again."); + } + + // Get user email + onProgress?.("Getting user info..."); + const email = await getUserEmail(tokenData.access_token); + + // Discover project + const projectId = await discoverProject(tokenData.access_token, onProgress); + + // Calculate expiry time (current time + expires_in seconds - 5 min buffer) + const expiresAt = Date.now() + tokenData.expires_in * 1000 - 5 * 60 * 1000; + + const credentials: OAuthCredentials = { + refresh: tokenData.refresh_token, + access: tokenData.access_token, + expires: expiresAt, + projectId, + email, + }; + + return credentials; + } finally { + server.server.close(); + } +} + +export const geminiCliOAuthProvider: OAuthProviderInterface = { + id: "google-gemini-cli", + name: "Google Cloud Code Assist (Gemini CLI)", + usesCallbackServer: true, + + async login(callbacks: OAuthLoginCallbacks): Promise { + return loginGeminiCli(callbacks.onAuth, callbacks.onProgress, callbacks.onManualCodeInput); + }, + + async refreshToken(credentials: OAuthCredentials): Promise { + const creds = credentials as GeminiCredentials; + if (!creds.projectId) { + throw new Error("Google Cloud credentials missing projectId"); + } + return refreshGoogleCloudToken(creds.refresh, creds.projectId); + }, + + getApiKey(credentials: OAuthCredentials): string { + const creds = credentials as GeminiCredentials; + return JSON.stringify({ token: creds.access, projectId: creds.projectId }); + }, +}; diff --git a/packages/pi-ai/src/utils/oauth/index.ts b/packages/pi-ai/src/utils/oauth/index.ts new file mode 100644 index 000000000..256562a85 --- /dev/null +++ b/packages/pi-ai/src/utils/oauth/index.ts @@ -0,0 +1,162 @@ +/** + * OAuth credential management for AI providers. 
+ * + * This module handles login, token refresh, and credential storage + * for OAuth-based providers: + * - Anthropic (Claude Pro/Max) + * - GitHub Copilot + * - Google Cloud Code Assist (Gemini CLI) + * - Antigravity (Gemini 3, Claude, GPT-OSS via Google Cloud) + */ + +// Anthropic +export { anthropicOAuthProvider, loginAnthropic, refreshAnthropicToken } from "./anthropic.js"; +// GitHub Copilot +export { + getGitHubCopilotBaseUrl, + githubCopilotOAuthProvider, + loginGitHubCopilot, + normalizeDomain, + refreshGitHubCopilotToken, +} from "./github-copilot.js"; +// Google Antigravity +export { antigravityOAuthProvider, loginAntigravity, refreshAntigravityToken } from "./google-antigravity.js"; +// Google Gemini CLI +export { geminiCliOAuthProvider, loginGeminiCli, refreshGoogleCloudToken } from "./google-gemini-cli.js"; +// OpenAI Codex (ChatGPT OAuth) +export { loginOpenAICodex, openaiCodexOAuthProvider, refreshOpenAICodexToken } from "./openai-codex.js"; + +export * from "./types.js"; + +// ============================================================================ +// Provider Registry +// ============================================================================ + +import { anthropicOAuthProvider } from "./anthropic.js"; +import { githubCopilotOAuthProvider } from "./github-copilot.js"; +import { antigravityOAuthProvider } from "./google-antigravity.js"; +import { geminiCliOAuthProvider } from "./google-gemini-cli.js"; +import { openaiCodexOAuthProvider } from "./openai-codex.js"; +import type { OAuthCredentials, OAuthProviderId, OAuthProviderInfo, OAuthProviderInterface } from "./types.js"; + +const BUILT_IN_OAUTH_PROVIDERS: OAuthProviderInterface[] = [ + anthropicOAuthProvider, + githubCopilotOAuthProvider, + geminiCliOAuthProvider, + antigravityOAuthProvider, + openaiCodexOAuthProvider, +]; + +const oauthProviderRegistry = new Map( + BUILT_IN_OAUTH_PROVIDERS.map((provider) => [provider.id, provider]), +); + +/** + * Get an OAuth provider by ID + */ 
+export function getOAuthProvider(id: OAuthProviderId): OAuthProviderInterface | undefined { + return oauthProviderRegistry.get(id); +} + +/** + * Register a custom OAuth provider + */ +export function registerOAuthProvider(provider: OAuthProviderInterface): void { + oauthProviderRegistry.set(provider.id, provider); +} + +/** + * Unregister an OAuth provider. + * + * If the provider is built-in, restores the built-in implementation. + * Custom providers are removed completely. + */ +export function unregisterOAuthProvider(id: string): void { + const builtInProvider = BUILT_IN_OAUTH_PROVIDERS.find((provider) => provider.id === id); + if (builtInProvider) { + oauthProviderRegistry.set(id, builtInProvider); + return; + } + oauthProviderRegistry.delete(id); +} + +/** + * Reset OAuth providers to built-ins. + */ +export function resetOAuthProviders(): void { + oauthProviderRegistry.clear(); + for (const provider of BUILT_IN_OAUTH_PROVIDERS) { + oauthProviderRegistry.set(provider.id, provider); + } +} + +/** + * Get all registered OAuth providers + */ +export function getOAuthProviders(): OAuthProviderInterface[] { + return Array.from(oauthProviderRegistry.values()); +} + +/** + * @deprecated Use getOAuthProviders() which returns OAuthProviderInterface[] + */ +export function getOAuthProviderInfoList(): OAuthProviderInfo[] { + return getOAuthProviders().map((p) => ({ + id: p.id, + name: p.name, + available: true, + })); +} + +// ============================================================================ +// High-level API (uses provider registry) +// ============================================================================ + +/** + * Refresh token for any OAuth provider. 
+ * @deprecated Use getOAuthProvider(id).refreshToken() instead + */ +export async function refreshOAuthToken( + providerId: OAuthProviderId, + credentials: OAuthCredentials, +): Promise { + const provider = getOAuthProvider(providerId); + if (!provider) { + throw new Error(`Unknown OAuth provider: ${providerId}`); + } + return provider.refreshToken(credentials); +} + +/** + * Get API key for a provider from OAuth credentials. + * Automatically refreshes expired tokens. + * + * @returns API key string and updated credentials, or null if no credentials + * @throws Error if refresh fails + */ +export async function getOAuthApiKey( + providerId: OAuthProviderId, + credentials: Record, +): Promise<{ newCredentials: OAuthCredentials; apiKey: string } | null> { + const provider = getOAuthProvider(providerId); + if (!provider) { + throw new Error(`Unknown OAuth provider: ${providerId}`); + } + + let creds = credentials[providerId]; + if (!creds) { + return null; + } + + // Refresh if expired + if (Date.now() >= creds.expires) { + try { + creds = await provider.refreshToken(creds); + } catch (_error) { + throw new Error(`Failed to refresh OAuth token for ${providerId}`); + } + } + + const apiKey = provider.getApiKey(creds); + return { newCredentials: creds, apiKey }; +} diff --git a/packages/pi-ai/src/utils/oauth/openai-codex.ts b/packages/pi-ai/src/utils/oauth/openai-codex.ts new file mode 100644 index 000000000..820168d91 --- /dev/null +++ b/packages/pi-ai/src/utils/oauth/openai-codex.ts @@ -0,0 +1,455 @@ +/** + * OpenAI Codex (ChatGPT OAuth) flow + * + * NOTE: This module uses Node.js crypto and http for the OAuth callback. + * It is only intended for CLI use, not browser environments. 
+ */ + +// NEVER convert to top-level imports - breaks browser/Vite builds (web-ui) +let _randomBytes: typeof import("node:crypto").randomBytes | null = null; +let _http: typeof import("node:http") | null = null; +if (typeof process !== "undefined" && (process.versions?.node || process.versions?.bun)) { + import("node:crypto").then((m) => { + _randomBytes = m.randomBytes; + }); + import("node:http").then((m) => { + _http = m; + }); +} + +import { generatePKCE } from "./pkce.js"; +import type { OAuthCredentials, OAuthLoginCallbacks, OAuthPrompt, OAuthProviderInterface } from "./types.js"; + +const CLIENT_ID = "app_EMoamEEZ73f0CkXaXp7hrann"; +const AUTHORIZE_URL = "https://auth.openai.com/oauth/authorize"; +const TOKEN_URL = "https://auth.openai.com/oauth/token"; +const REDIRECT_URI = "http://localhost:1455/auth/callback"; +const SCOPE = "openid profile email offline_access"; +const JWT_CLAIM_PATH = "https://api.openai.com/auth"; + +const SUCCESS_HTML = ` + + + + + Authentication successful + + +

<p>Authentication successful. Return to your terminal to continue.</p>

+ +`; + +type TokenSuccess = { type: "success"; access: string; refresh: string; expires: number }; +type TokenFailure = { type: "failed" }; +type TokenResult = TokenSuccess | TokenFailure; + +type JwtPayload = { + [JWT_CLAIM_PATH]?: { + chatgpt_account_id?: string; + }; + [key: string]: unknown; +}; + +function createState(): string { + if (!_randomBytes) { + throw new Error("OpenAI Codex OAuth is only available in Node.js environments"); + } + return _randomBytes(16).toString("hex"); +} + +function parseAuthorizationInput(input: string): { code?: string; state?: string } { + const value = input.trim(); + if (!value) return {}; + + try { + const url = new URL(value); + return { + code: url.searchParams.get("code") ?? undefined, + state: url.searchParams.get("state") ?? undefined, + }; + } catch { + // not a URL + } + + if (value.includes("#")) { + const [code, state] = value.split("#", 2); + return { code, state }; + } + + if (value.includes("code=")) { + const params = new URLSearchParams(value); + return { + code: params.get("code") ?? undefined, + state: params.get("state") ?? undefined, + }; + } + + return { code: value }; +} + +function decodeJwt(token: string): JwtPayload | null { + try { + const parts = token.split("."); + if (parts.length !== 3) return null; + const payload = parts[1] ?? 
""; + const decoded = atob(payload); + return JSON.parse(decoded) as JwtPayload; + } catch { + return null; + } +} + +async function exchangeAuthorizationCode( + code: string, + verifier: string, + redirectUri: string = REDIRECT_URI, +): Promise { + const response = await fetch(TOKEN_URL, { + method: "POST", + headers: { "Content-Type": "application/x-www-form-urlencoded" }, + body: new URLSearchParams({ + grant_type: "authorization_code", + client_id: CLIENT_ID, + code, + code_verifier: verifier, + redirect_uri: redirectUri, + }), + }); + + if (!response.ok) { + const text = await response.text().catch(() => ""); + console.error("[openai-codex] code->token failed:", response.status, text); + return { type: "failed" }; + } + + const json = (await response.json()) as { + access_token?: string; + refresh_token?: string; + expires_in?: number; + }; + + if (!json.access_token || !json.refresh_token || typeof json.expires_in !== "number") { + console.error("[openai-codex] token response missing fields:", json); + return { type: "failed" }; + } + + return { + type: "success", + access: json.access_token, + refresh: json.refresh_token, + expires: Date.now() + json.expires_in * 1000, + }; +} + +async function refreshAccessToken(refreshToken: string): Promise { + try { + const response = await fetch(TOKEN_URL, { + method: "POST", + headers: { "Content-Type": "application/x-www-form-urlencoded" }, + body: new URLSearchParams({ + grant_type: "refresh_token", + refresh_token: refreshToken, + client_id: CLIENT_ID, + }), + }); + + if (!response.ok) { + const text = await response.text().catch(() => ""); + console.error("[openai-codex] Token refresh failed:", response.status, text); + return { type: "failed" }; + } + + const json = (await response.json()) as { + access_token?: string; + refresh_token?: string; + expires_in?: number; + }; + + if (!json.access_token || !json.refresh_token || typeof json.expires_in !== "number") { + console.error("[openai-codex] Token refresh 
response missing fields:", json); + return { type: "failed" }; + } + + return { + type: "success", + access: json.access_token, + refresh: json.refresh_token, + expires: Date.now() + json.expires_in * 1000, + }; + } catch (error) { + console.error("[openai-codex] Token refresh error:", error); + return { type: "failed" }; + } +} + +async function createAuthorizationFlow( + originator: string = "pi", +): Promise<{ verifier: string; state: string; url: string }> { + const { verifier, challenge } = await generatePKCE(); + const state = createState(); + + const url = new URL(AUTHORIZE_URL); + url.searchParams.set("response_type", "code"); + url.searchParams.set("client_id", CLIENT_ID); + url.searchParams.set("redirect_uri", REDIRECT_URI); + url.searchParams.set("scope", SCOPE); + url.searchParams.set("code_challenge", challenge); + url.searchParams.set("code_challenge_method", "S256"); + url.searchParams.set("state", state); + url.searchParams.set("id_token_add_organizations", "true"); + url.searchParams.set("codex_cli_simplified_flow", "true"); + url.searchParams.set("originator", originator); + + return { verifier, state, url: url.toString() }; +} + +type OAuthServerInfo = { + close: () => void; + cancelWait: () => void; + waitForCode: () => Promise<{ code: string } | null>; +}; + +function startLocalOAuthServer(state: string): Promise { + if (!_http) { + throw new Error("OpenAI Codex OAuth is only available in Node.js environments"); + } + let lastCode: string | null = null; + let cancelled = false; + const server = _http.createServer((req, res) => { + try { + const url = new URL(req.url || "", "http://localhost"); + if (url.pathname !== "/auth/callback") { + res.statusCode = 404; + res.end("Not found"); + return; + } + if (url.searchParams.get("state") !== state) { + res.statusCode = 400; + res.end("State mismatch"); + return; + } + const code = url.searchParams.get("code"); + if (!code) { + res.statusCode = 400; + res.end("Missing authorization code"); + return; + 
} + res.statusCode = 200; + res.setHeader("Content-Type", "text/html; charset=utf-8"); + res.end(SUCCESS_HTML); + lastCode = code; + } catch { + res.statusCode = 500; + res.end("Internal error"); + } + }); + + return new Promise((resolve) => { + server + .listen(1455, "127.0.0.1", () => { + resolve({ + close: () => server.close(), + cancelWait: () => { + cancelled = true; + }, + waitForCode: async () => { + const sleep = () => new Promise((r) => setTimeout(r, 100)); + for (let i = 0; i < 600; i += 1) { + if (lastCode) return { code: lastCode }; + if (cancelled) return null; + await sleep(); + } + return null; + }, + }); + }) + .on("error", (err: NodeJS.ErrnoException) => { + console.error( + "[openai-codex] Failed to bind http://127.0.0.1:1455 (", + err.code, + ") Falling back to manual paste.", + ); + resolve({ + close: () => { + try { + server.close(); + } catch { + // ignore + } + }, + cancelWait: () => {}, + waitForCode: async () => null, + }); + }); + }); +} + +function getAccountId(accessToken: string): string | null { + const payload = decodeJwt(accessToken); + const auth = payload?.[JWT_CLAIM_PATH]; + const accountId = auth?.chatgpt_account_id; + return typeof accountId === "string" && accountId.length > 0 ? accountId : null; +} + +/** + * Login with OpenAI Codex OAuth + * + * @param options.onAuth - Called with URL and instructions when auth starts + * @param options.onPrompt - Called to prompt user for manual code paste (fallback if no onManualCodeInput) + * @param options.onProgress - Optional progress messages + * @param options.onManualCodeInput - Optional promise that resolves with user-pasted code. + * Races with browser callback - whichever completes first wins. + * Useful for showing paste input immediately alongside browser flow. 
+ * @param options.originator - OAuth originator parameter (defaults to "pi") + */ +export async function loginOpenAICodex(options: { + onAuth: (info: { url: string; instructions?: string }) => void; + onPrompt: (prompt: OAuthPrompt) => Promise; + onProgress?: (message: string) => void; + onManualCodeInput?: () => Promise; + originator?: string; +}): Promise { + const { verifier, state, url } = await createAuthorizationFlow(options.originator); + const server = await startLocalOAuthServer(state); + + options.onAuth({ url, instructions: "A browser window should open. Complete login to finish." }); + + let code: string | undefined; + try { + if (options.onManualCodeInput) { + // Race between browser callback and manual input + let manualCode: string | undefined; + let manualError: Error | undefined; + const manualPromise = options + .onManualCodeInput() + .then((input) => { + manualCode = input; + server.cancelWait(); + }) + .catch((err) => { + manualError = err instanceof Error ? err : new Error(String(err)); + server.cancelWait(); + }); + + const result = await server.waitForCode(); + + // If manual input was cancelled, throw that error + if (manualError) { + throw manualError; + } + + if (result?.code) { + // Browser callback won + code = result.code; + } else if (manualCode) { + // Manual input won (or callback timed out and user had entered code) + const parsed = parseAuthorizationInput(manualCode); + if (parsed.state && parsed.state !== state) { + throw new Error("State mismatch"); + } + code = parsed.code; + } + + // If still no code, wait for manual promise to complete and try that + if (!code) { + await manualPromise; + if (manualError) { + throw manualError; + } + if (manualCode) { + const parsed = parseAuthorizationInput(manualCode); + if (parsed.state && parsed.state !== state) { + throw new Error("State mismatch"); + } + code = parsed.code; + } + } + } else { + // Original flow: wait for callback, then prompt if needed + const result = await 
server.waitForCode(); + if (result?.code) { + code = result.code; + } + } + + // Fallback to onPrompt if still no code + if (!code) { + const input = await options.onPrompt({ + message: "Paste the authorization code (or full redirect URL):", + }); + const parsed = parseAuthorizationInput(input); + if (parsed.state && parsed.state !== state) { + throw new Error("State mismatch"); + } + code = parsed.code; + } + + if (!code) { + throw new Error("Missing authorization code"); + } + + const tokenResult = await exchangeAuthorizationCode(code, verifier); + if (tokenResult.type !== "success") { + throw new Error("Token exchange failed"); + } + + const accountId = getAccountId(tokenResult.access); + if (!accountId) { + throw new Error("Failed to extract accountId from token"); + } + + return { + access: tokenResult.access, + refresh: tokenResult.refresh, + expires: tokenResult.expires, + accountId, + }; + } finally { + server.close(); + } +} + +/** + * Refresh OpenAI Codex OAuth token + */ +export async function refreshOpenAICodexToken(refreshToken: string): Promise { + const result = await refreshAccessToken(refreshToken); + if (result.type !== "success") { + throw new Error("Failed to refresh OpenAI Codex token"); + } + + const accountId = getAccountId(result.access); + if (!accountId) { + throw new Error("Failed to extract accountId from token"); + } + + return { + access: result.access, + refresh: result.refresh, + expires: result.expires, + accountId, + }; +} + +export const openaiCodexOAuthProvider: OAuthProviderInterface = { + id: "openai-codex", + name: "ChatGPT Plus/Pro (Codex Subscription)", + usesCallbackServer: true, + + async login(callbacks: OAuthLoginCallbacks): Promise { + return loginOpenAICodex({ + onAuth: callbacks.onAuth, + onPrompt: callbacks.onPrompt, + onProgress: callbacks.onProgress, + onManualCodeInput: callbacks.onManualCodeInput, + }); + }, + + async refreshToken(credentials: OAuthCredentials): Promise { + return 
refreshOpenAICodexToken(credentials.refresh); + }, + + getApiKey(credentials: OAuthCredentials): string { + return credentials.access; + }, +}; diff --git a/packages/pi-ai/src/utils/oauth/pkce.ts b/packages/pi-ai/src/utils/oauth/pkce.ts new file mode 100644 index 000000000..bf7ac7d58 --- /dev/null +++ b/packages/pi-ai/src/utils/oauth/pkce.ts @@ -0,0 +1,34 @@ +/** + * PKCE utilities using Web Crypto API. + * Works in both Node.js 20+ and browsers. + */ + +/** + * Encode bytes as base64url string. + */ +function base64urlEncode(bytes: Uint8Array): string { + let binary = ""; + for (const byte of bytes) { + binary += String.fromCharCode(byte); + } + return btoa(binary).replace(/\+/g, "-").replace(/\//g, "_").replace(/=/g, ""); +} + +/** + * Generate PKCE code verifier and challenge. + * Uses Web Crypto API for cross-platform compatibility. + */ +export async function generatePKCE(): Promise<{ verifier: string; challenge: string }> { + // Generate random verifier + const verifierBytes = new Uint8Array(32); + crypto.getRandomValues(verifierBytes); + const verifier = base64urlEncode(verifierBytes); + + // Compute SHA-256 challenge + const encoder = new TextEncoder(); + const data = encoder.encode(verifier); + const hashBuffer = await crypto.subtle.digest("SHA-256", data); + const challenge = base64urlEncode(new Uint8Array(hashBuffer)); + + return { verifier, challenge }; +} diff --git a/packages/pi-ai/src/utils/oauth/types.ts b/packages/pi-ai/src/utils/oauth/types.ts new file mode 100644 index 000000000..e3520342d --- /dev/null +++ b/packages/pi-ai/src/utils/oauth/types.ts @@ -0,0 +1,59 @@ +import type { Api, Model } from "../../types.js"; + +export type OAuthCredentials = { + refresh: string; + access: string; + expires: number; + [key: string]: unknown; +}; + +export type OAuthProviderId = string; + +/** @deprecated Use OAuthProviderId instead */ +export type OAuthProvider = OAuthProviderId; + +export type OAuthPrompt = { + message: string; + placeholder?: string; + 
allowEmpty?: boolean; +}; + +export type OAuthAuthInfo = { + url: string; + instructions?: string; +}; + +export interface OAuthLoginCallbacks { + onAuth: (info: OAuthAuthInfo) => void; + onPrompt: (prompt: OAuthPrompt) => Promise; + onProgress?: (message: string) => void; + onManualCodeInput?: () => Promise; + signal?: AbortSignal; +} + +export interface OAuthProviderInterface { + readonly id: OAuthProviderId; + readonly name: string; + + /** Run the login flow, return credentials to persist */ + login(callbacks: OAuthLoginCallbacks): Promise; + + /** Whether login uses a local callback server and supports manual code input. */ + usesCallbackServer?: boolean; + + /** Refresh expired credentials, return updated credentials to persist */ + refreshToken(credentials: OAuthCredentials): Promise; + + /** Convert credentials to API key string for the provider */ + getApiKey(credentials: OAuthCredentials): string; + + /** Optional: modify models for this provider (e.g., update baseUrl) */ + modifyModels?(models: Model[], credentials: OAuthCredentials): Model[]; +} + +/** @deprecated Use OAuthProviderInterface instead */ +export interface OAuthProviderInfo { + id: OAuthProviderId; + name: string; + available: boolean; +} diff --git a/packages/pi-ai/src/utils/overflow.ts b/packages/pi-ai/src/utils/overflow.ts new file mode 100644 index 000000000..c6f4696e7 --- /dev/null +++ b/packages/pi-ai/src/utils/overflow.ts @@ -0,0 +1,123 @@ +import type { AssistantMessage } from "../types.js"; + +/** + * Regex patterns to detect context overflow errors from different providers. + * + * These patterns match error messages returned when the input exceeds + * the model's context window. 
+ * + * Provider-specific patterns (with example error messages): + * + * - Anthropic: "prompt is too long: 213462 tokens > 200000 maximum" + * - OpenAI: "Your input exceeds the context window of this model" + * - Google: "The input token count (1196265) exceeds the maximum number of tokens allowed (1048575)" + * - xAI: "This model's maximum prompt length is 131072 but the request contains 537812 tokens" + * - Groq: "Please reduce the length of the messages or completion" + * - OpenRouter: "This endpoint's maximum context length is X tokens. However, you requested about Y tokens" + * - llama.cpp: "the request exceeds the available context size, try increasing it" + * - LM Studio: "tokens to keep from the initial prompt is greater than the context length" + * - GitHub Copilot: "prompt token count of X exceeds the limit of Y" + * - MiniMax: "invalid params, context window exceeds limit" + * - Kimi For Coding: "Your request exceeded model token limit: X (requested: Y)" + * - Cerebras: Returns "400/413 status code (no body)" - handled separately below + * - Mistral: "Prompt contains X tokens ... 
too large for model with Y maximum context length" + * - z.ai: Does NOT error, accepts overflow silently - handled via usage.input > contextWindow + * - Ollama: Silently truncates input - not detectable via error message + */ +const OVERFLOW_PATTERNS = [ + /prompt is too long/i, // Anthropic + /input is too long for requested model/i, // Amazon Bedrock + /exceeds the context window/i, // OpenAI (Completions & Responses API) + /input token count.*exceeds the maximum/i, // Google (Gemini) + /maximum prompt length is \d+/i, // xAI (Grok) + /reduce the length of the messages/i, // Groq + /maximum context length is \d+ tokens/i, // OpenRouter (all backends) + /exceeds the limit of \d+/i, // GitHub Copilot + /exceeds the available context size/i, // llama.cpp server + /greater than the context length/i, // LM Studio + /context window exceeds limit/i, // MiniMax + /exceeded model token limit/i, // Kimi For Coding + /too large for model with \d+ maximum context length/i, // Mistral + /model_context_window_exceeded/i, // z.ai non-standard finish_reason surfaced as error text + /context[_ ]length[_ ]exceeded/i, // Generic fallback + /too many tokens/i, // Generic fallback + /token limit exceeded/i, // Generic fallback +]; + +/** + * Check if an assistant message represents a context overflow error. + * + * This handles two cases: + * 1. Error-based overflow: Most providers return stopReason "error" with a + * specific error message pattern. + * 2. Silent overflow: Some providers accept overflow requests and return + * successfully. For these, we check if usage.input exceeds the context window. 
+ * + * ## Reliability by Provider + * + * **Reliable detection (returns error with detectable message):** + * - Anthropic: "prompt is too long: X tokens > Y maximum" + * - OpenAI (Completions & Responses): "exceeds the context window" + * - Google Gemini: "input token count exceeds the maximum" + * - xAI (Grok): "maximum prompt length is X but request contains Y" + * - Groq: "reduce the length of the messages" + * - Cerebras: 400/413 status code (no body) + * - Mistral: "Prompt contains X tokens ... too large for model with Y maximum context length" + * - OpenRouter (all backends): "maximum context length is X tokens" + * - llama.cpp: "exceeds the available context size" + * - LM Studio: "greater than the context length" + * - Kimi For Coding: "exceeded model token limit: X (requested: Y)" + * + * **Unreliable detection:** + * - z.ai: Sometimes accepts overflow silently (detectable via usage.input > contextWindow), + * sometimes returns rate limit errors. Pass contextWindow param to detect silent overflow. + * - Ollama: Silently truncates input without error. Cannot be detected via this function. + * The response will have usage.input < expected, but we don't know the expected value. + * + * ## Custom Providers + * + * If you've added custom models via settings.json, this function may not detect + * overflow errors from those providers. To add support: + * + * 1. Send a request that exceeds the model's context window + * 2. Check the errorMessage in the response + * 3. Create a regex pattern that matches the error + * 4. 
The pattern should be added to OVERFLOW_PATTERNS in this file, or + * check the errorMessage yourself before calling this function + * + * @param message - The assistant message to check + * @param contextWindow - Optional context window size for detecting silent overflow (z.ai) + * @returns true if the message indicates a context overflow + */ +export function isContextOverflow(message: AssistantMessage, contextWindow?: number): boolean { + // Case 1: Check error message patterns + if (message.stopReason === "error" && message.errorMessage) { + // Check known patterns + if (OVERFLOW_PATTERNS.some((p) => p.test(message.errorMessage!))) { + return true; + } + + // Cerebras returns 400/413 with no body for context overflow + // Note: 429 is rate limiting (requests/tokens per time), NOT context overflow + if (/^4(00|13)\s*(status code)?\s*\(no body\)/i.test(message.errorMessage)) { + return true; + } + } + + // Case 2: Silent overflow (z.ai style) - successful but usage exceeds context + if (contextWindow && message.stopReason === "stop") { + const inputTokens = message.usage.input + message.usage.cacheRead; + if (inputTokens > contextWindow) { + return true; + } + } + + return false; +} + +/** + * Get the overflow patterns for testing purposes. + */ +export function getOverflowPatterns(): RegExp[] { + return [...OVERFLOW_PATTERNS]; +} diff --git a/packages/pi-ai/src/utils/sanitize-unicode.ts b/packages/pi-ai/src/utils/sanitize-unicode.ts new file mode 100644 index 000000000..d869ee9dc --- /dev/null +++ b/packages/pi-ai/src/utils/sanitize-unicode.ts @@ -0,0 +1,25 @@ +/** + * Removes unpaired Unicode surrogate characters from a string. + * + * Unpaired surrogates (high surrogates 0xD800-0xDBFF without matching low surrogates 0xDC00-0xDFFF, + * or vice versa) cause JSON serialization errors in many API providers. + * + * Valid emoji and other characters outside the Basic Multilingual Plane use properly paired + * surrogates and will NOT be affected by this function. 
+ * + * @param text - The text to sanitize + * @returns The sanitized text with unpaired surrogates removed + * + * @example + * // Valid emoji (properly paired surrogates) are preserved + * sanitizeSurrogates("Hello 🙈 World") // => "Hello 🙈 World" + * + * // Unpaired high surrogate is removed + * const unpaired = String.fromCharCode(0xD83D); // high surrogate without low + * sanitizeSurrogates(`Text ${unpaired} here`) // => "Text here" + */ +export function sanitizeSurrogates(text: string): string { + // Replace unpaired high surrogates (0xD800-0xDBFF not followed by low surrogate) + // Replace unpaired low surrogates (0xDC00-0xDFFF not preceded by high surrogate) + return text.replace(/[\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?; // "add" | "subtract" | "multiply" | "divide" + */ +export function StringEnum( + values: T, + options?: { description?: string; default?: T[number] }, +): TUnsafe { + return Type.Unsafe({ + type: "string", + enum: values as any, + ...(options?.description && { description: options.description }), + ...(options?.default && { default: options.default }), + }); +} diff --git a/packages/pi-ai/src/utils/validation.ts b/packages/pi-ai/src/utils/validation.ts new file mode 100644 index 000000000..b5f48faa9 --- /dev/null +++ b/packages/pi-ai/src/utils/validation.ts @@ -0,0 +1,84 @@ +import AjvModule from "ajv"; +import addFormatsModule from "ajv-formats"; + +// Handle both default and named exports +const Ajv = (AjvModule as any).default || AjvModule; +const addFormats = (addFormatsModule as any).default || addFormatsModule; + +import type { Tool, ToolCall } from "../types.js"; + +// Detect if we're in a browser extension environment with strict CSP +// Chrome extensions with Manifest V3 don't allow eval/Function constructor +const isBrowserExtension = typeof globalThis !== "undefined" && (globalThis as any).chrome?.runtime?.id !== undefined; + +// Create a singleton AJV instance with formats (only if not in browser extension) +// AJV requires 
'unsafe-eval' CSP which is not allowed in Manifest V3 +let ajv: any = null; +if (!isBrowserExtension) { + try { + ajv = new Ajv({ + allErrors: true, + strict: false, + coerceTypes: true, + }); + addFormats(ajv); + } catch (_e) { + // AJV initialization failed (likely CSP restriction) + console.warn("AJV validation disabled due to CSP restrictions"); + } +} + +/** + * Finds a tool by name and validates the tool call arguments against its TypeBox schema + * @param tools Array of tool definitions + * @param toolCall The tool call from the LLM + * @returns The validated arguments + * @throws Error if tool is not found or validation fails + */ +export function validateToolCall(tools: Tool[], toolCall: ToolCall): any { + const tool = tools.find((t) => t.name === toolCall.name); + if (!tool) { + throw new Error(`Tool "${toolCall.name}" not found`); + } + return validateToolArguments(tool, toolCall); +} + +/** + * Validates tool call arguments against the tool's TypeBox schema + * @param tool The tool definition with TypeBox schema + * @param toolCall The tool call from the LLM + * @returns The validated (and potentially coerced) arguments + * @throws Error with formatted message if validation fails + */ +export function validateToolArguments(tool: Tool, toolCall: ToolCall): any { + // Skip validation in browser extension environment (CSP restrictions prevent AJV from working) + if (!ajv || isBrowserExtension) { + // Trust the LLM's output without validation + // Browser extensions can't use AJV due to Manifest V3 CSP restrictions + return toolCall.arguments; + } + + // Compile the schema + const validate = ajv.compile(tool.parameters); + + // Clone arguments so AJV can safely mutate for type coercion + const args = structuredClone(toolCall.arguments); + + // Validate the arguments (AJV mutates args in-place for type coercion) + if (validate(args)) { + return args; + } + + // Format validation errors nicely + const errors = + validate.errors + ?.map((err: any) => { + const 
path = err.instancePath ? err.instancePath.substring(1) : err.params.missingProperty || "root"; + return ` - ${path}: ${err.message}`; + }) + .join("\n") || "Unknown validation error"; + + const errorMessage = `Validation failed for tool "${toolCall.name}":\n${errors}\n\nReceived arguments:\n${JSON.stringify(toolCall.arguments, null, 2)}`; + + throw new Error(errorMessage); +} diff --git a/packages/pi-ai/tsconfig.json b/packages/pi-ai/tsconfig.json new file mode 100644 index 000000000..6f6331d49 --- /dev/null +++ b/packages/pi-ai/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + "target": "ES2024", + "module": "Node16", + "lib": ["ES2024"], + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "inlineSources": true, + "inlineSourceMap": false, + "moduleResolution": "Node16", + "resolveJsonModule": true, + "allowImportingTsExtensions": false, + "experimentalDecorators": true, + "emitDecoratorMetadata": true, + "useDefineForClassFields": false, + "types": ["node"], + "outDir": "./dist", + "rootDir": "./src" + }, + "include": ["src/**/*.ts"], + "exclude": ["node_modules", "dist", "**/*.d.ts", "src/**/*.d.ts"] +} diff --git a/packages/pi-coding-agent/package.json b/packages/pi-coding-agent/package.json new file mode 100644 index 000000000..9c33bbfac --- /dev/null +++ b/packages/pi-coding-agent/package.json @@ -0,0 +1,55 @@ +{ + "name": "@gsd/pi-coding-agent", + "version": "0.57.1", + "description": "Coding agent CLI (vendored from pi-mono)", + "type": "module", + "piConfig": { + "name": "pi", + "configDir": ".pi" + }, + "main": "./dist/index.js", + "types": "./dist/index.d.ts", + "exports": { + ".": { + "types": "./dist/index.d.ts", + "import": "./dist/index.js" + }, + "./hooks": { + "types": "./dist/core/hooks/index.d.ts", + "import": "./dist/core/hooks/index.js" + } + }, + "scripts": { + "build": "tsc -p tsconfig.json && npm run 
copy-assets", + "copy-assets": "node -e \"const{mkdirSync,cpSync}=require('fs');mkdirSync('dist/modes/interactive/theme',{recursive:true});cpSync('src/modes/interactive/theme','dist/modes/interactive/theme',{recursive:true,filter:(s)=>!s.endsWith('.ts')});mkdirSync('dist/core/export-html/vendor',{recursive:true});cpSync('src/core/export-html/template.html','dist/core/export-html/template.html');cpSync('src/core/export-html/template.css','dist/core/export-html/template.css');cpSync('src/core/export-html/template.js','dist/core/export-html/template.js');cpSync('src/core/export-html/vendor','dist/core/export-html/vendor',{recursive:true,filter:(s)=>!s.endsWith('.ts')})\"" + }, + "dependencies": { + "@gsd/pi-agent-core": "*", + "@gsd/pi-ai": "*", + "@gsd/pi-tui": "*", + "@mariozechner/jiti": "^2.6.2", + "@silvia-odwyer/photon-node": "^0.3.4", + "chalk": "^5.5.0", + "cli-highlight": "^2.1.11", + "diff": "^8.0.2", + "extract-zip": "^2.0.1", + "file-type": "^21.1.1", + "glob": "^13.0.1", + "hosted-git-info": "^9.0.2", + "ignore": "^7.0.5", + "marked": "^15.0.12", + "minimatch": "^10.2.3", + "proper-lockfile": "^4.1.2", + "strip-ansi": "^7.1.0", + "undici": "^7.19.1", + "yaml": "^2.8.2" + }, + "optionalDependencies": { + "@mariozechner/clipboard": "^0.3.2" + }, + "devDependencies": { + "@types/diff": "^7.0.2", + "@types/hosted-git-info": "^3.0.5", + "@types/proper-lockfile": "^4.1.4" + } +} diff --git a/packages/pi-coding-agent/src/cli.ts b/packages/pi-coding-agent/src/cli.ts new file mode 100644 index 000000000..0876299a3 --- /dev/null +++ b/packages/pi-coding-agent/src/cli.ts @@ -0,0 +1,18 @@ +#!/usr/bin/env node +/** + * CLI entry point for the refactored coding agent. + * Uses main.ts with AgentSession and new mode modules. + * + * Test with: npx tsx src/cli-new.ts [args...] 
+ */ +process.title = "pi"; + +import { setBedrockProviderModule } from "@gsd/pi-ai"; +import { bedrockProviderModule } from "@gsd/pi-ai/bedrock-provider"; +import { EnvHttpProxyAgent, setGlobalDispatcher } from "undici"; +import { main } from "./main.js"; + +setGlobalDispatcher(new EnvHttpProxyAgent()); +setBedrockProviderModule(bedrockProviderModule); + +main(process.argv.slice(2)); diff --git a/packages/pi-coding-agent/src/cli/args.ts b/packages/pi-coding-agent/src/cli/args.ts new file mode 100644 index 000000000..61683d5f4 --- /dev/null +++ b/packages/pi-coding-agent/src/cli/args.ts @@ -0,0 +1,316 @@ +/** + * CLI argument parsing and help display + */ + +import type { ThinkingLevel } from "@gsd/pi-agent-core"; +import chalk from "chalk"; +import { APP_NAME, CONFIG_DIR_NAME, ENV_AGENT_DIR } from "../config.js"; +import { allTools, type ToolName } from "../core/tools/index.js"; + +export type Mode = "text" | "json" | "rpc"; + +export interface Args { + provider?: string; + model?: string; + apiKey?: string; + systemPrompt?: string; + appendSystemPrompt?: string; + thinking?: ThinkingLevel; + continue?: boolean; + resume?: boolean; + help?: boolean; + version?: boolean; + mode?: Mode; + noSession?: boolean; + session?: string; + sessionDir?: string; + models?: string[]; + tools?: ToolName[]; + noTools?: boolean; + extensions?: string[]; + noExtensions?: boolean; + print?: boolean; + export?: string; + noSkills?: boolean; + skills?: string[]; + promptTemplates?: string[]; + noPromptTemplates?: boolean; + themes?: string[]; + noThemes?: boolean; + listModels?: string | true; + offline?: boolean; + verbose?: boolean; + messages: string[]; + fileArgs: string[]; + /** Unknown flags (potentially extension flags) - map of flag name to value */ + unknownFlags: Map; +} + +const VALID_THINKING_LEVELS = ["off", "minimal", "low", "medium", "high", "xhigh"] as const; + +export function isValidThinkingLevel(level: string): level is ThinkingLevel { + return 
VALID_THINKING_LEVELS.includes(level as ThinkingLevel); +} + +export function parseArgs(args: string[], extensionFlags?: Map): Args { + const result: Args = { + messages: [], + fileArgs: [], + unknownFlags: new Map(), + }; + + for (let i = 0; i < args.length; i++) { + const arg = args[i]; + + if (arg === "--help" || arg === "-h") { + result.help = true; + } else if (arg === "--version" || arg === "-v") { + result.version = true; + } else if (arg === "--mode" && i + 1 < args.length) { + const mode = args[++i]; + if (mode === "text" || mode === "json" || mode === "rpc") { + result.mode = mode; + } + } else if (arg === "--continue" || arg === "-c") { + result.continue = true; + } else if (arg === "--resume" || arg === "-r") { + result.resume = true; + } else if (arg === "--provider" && i + 1 < args.length) { + result.provider = args[++i]; + } else if (arg === "--model" && i + 1 < args.length) { + result.model = args[++i]; + } else if (arg === "--api-key" && i + 1 < args.length) { + result.apiKey = args[++i]; + } else if (arg === "--system-prompt" && i + 1 < args.length) { + result.systemPrompt = args[++i]; + } else if (arg === "--append-system-prompt" && i + 1 < args.length) { + result.appendSystemPrompt = args[++i]; + } else if (arg === "--no-session") { + result.noSession = true; + } else if (arg === "--session" && i + 1 < args.length) { + result.session = args[++i]; + } else if (arg === "--session-dir" && i + 1 < args.length) { + result.sessionDir = args[++i]; + } else if (arg === "--models" && i + 1 < args.length) { + result.models = args[++i].split(",").map((s) => s.trim()); + } else if (arg === "--no-tools") { + result.noTools = true; + } else if (arg === "--tools" && i + 1 < args.length) { + const toolNames = args[++i].split(",").map((s) => s.trim()); + const validTools: ToolName[] = []; + for (const name of toolNames) { + if (name in allTools) { + validTools.push(name as ToolName); + } else { + console.error( + chalk.yellow(`Warning: Unknown tool "${name}". 
Valid tools: ${Object.keys(allTools).join(", ")}`), + ); + } + } + result.tools = validTools; + } else if (arg === "--thinking" && i + 1 < args.length) { + const level = args[++i]; + if (isValidThinkingLevel(level)) { + result.thinking = level; + } else { + console.error( + chalk.yellow( + `Warning: Invalid thinking level "${level}". Valid values: ${VALID_THINKING_LEVELS.join(", ")}`, + ), + ); + } + } else if (arg === "--print" || arg === "-p") { + result.print = true; + } else if (arg === "--export" && i + 1 < args.length) { + result.export = args[++i]; + } else if ((arg === "--extension" || arg === "-e") && i + 1 < args.length) { + result.extensions = result.extensions ?? []; + result.extensions.push(args[++i]); + } else if (arg === "--no-extensions" || arg === "-ne") { + result.noExtensions = true; + } else if (arg === "--skill" && i + 1 < args.length) { + result.skills = result.skills ?? []; + result.skills.push(args[++i]); + } else if (arg === "--prompt-template" && i + 1 < args.length) { + result.promptTemplates = result.promptTemplates ?? []; + result.promptTemplates.push(args[++i]); + } else if (arg === "--theme" && i + 1 < args.length) { + result.themes = result.themes ?? 
[]; + result.themes.push(args[++i]); + } else if (arg === "--no-skills" || arg === "-ns") { + result.noSkills = true; + } else if (arg === "--no-prompt-templates" || arg === "-np") { + result.noPromptTemplates = true; + } else if (arg === "--no-themes") { + result.noThemes = true; + } else if (arg === "--list-models") { + // Check if next arg is a search pattern (not a flag or file arg) + if (i + 1 < args.length && !args[i + 1].startsWith("-") && !args[i + 1].startsWith("@")) { + result.listModels = args[++i]; + } else { + result.listModels = true; + } + } else if (arg === "--verbose") { + result.verbose = true; + } else if (arg === "--offline") { + result.offline = true; + } else if (arg.startsWith("@")) { + result.fileArgs.push(arg.slice(1)); // Remove @ prefix + } else if (arg.startsWith("--") && extensionFlags) { + // Check if it's an extension-registered flag + const flagName = arg.slice(2); + const extFlag = extensionFlags.get(flagName); + if (extFlag) { + if (extFlag.type === "boolean") { + result.unknownFlags.set(flagName, true); + } else if (extFlag.type === "string" && i + 1 < args.length) { + result.unknownFlags.set(flagName, args[++i]); + } + } + // Unknown flags without extensionFlags are silently ignored (first pass) + } else if (!arg.startsWith("-")) { + result.messages.push(arg); + } + } + + return result; +} + +export function printHelp(): void { + console.log(`${chalk.bold(APP_NAME)} - AI coding assistant with read, bash, edit, write tools + +${chalk.bold("Usage:")} + ${APP_NAME} [options] [@files...] [messages...] 
+ +${chalk.bold("Commands:")} + ${APP_NAME} install [-l] Install extension source and add to settings + ${APP_NAME} remove [-l] Remove extension source from settings + ${APP_NAME} update [source] Update installed extensions (skips pinned sources) + ${APP_NAME} list List installed extensions from settings + ${APP_NAME} config Open TUI to enable/disable package resources + ${APP_NAME} --help Show help for install/remove/update/list + +${chalk.bold("Options:")} + --provider Provider name (default: google) + --model Model pattern or ID (supports "provider/id" and optional ":") + --api-key API key (defaults to env vars) + --system-prompt System prompt (default: coding assistant prompt) + --append-system-prompt Append text or file contents to the system prompt + --mode Output mode: text (default), json, or rpc + --print, -p Non-interactive mode: process prompt and exit + --continue, -c Continue previous session + --resume, -r Select a session to resume + --session Use specific session file + --session-dir Directory for session storage and lookup + --no-session Don't save session (ephemeral) + --models Comma-separated model patterns for Ctrl+P cycling + Supports globs (anthropic/*, *sonnet*) and fuzzy matching + --no-tools Disable all built-in tools + --tools Comma-separated list of tools to enable (default: read,bash,edit,write) + Available: read, bash, edit, write, grep, find, ls + --thinking Set thinking level: off, minimal, low, medium, high, xhigh + --extension, -e Load an extension file (can be used multiple times) + --no-extensions, -ne Disable extension discovery (explicit -e paths still work) + --skill Load a skill file or directory (can be used multiple times) + --no-skills, -ns Disable skills discovery and loading + --prompt-template Load a prompt template file or directory (can be used multiple times) + --no-prompt-templates, -np Disable prompt template discovery and loading + --theme Load a theme file or directory (can be used multiple times) + --no-themes 
Disable theme discovery and loading + --export Export session file to HTML and exit + --list-models [search] List available models (with optional fuzzy search) + --verbose Force verbose startup (overrides quietStartup setting) + --offline Disable startup network operations (same as PI_OFFLINE=1) + --help, -h Show this help + --version, -v Show version number + +Extensions can register additional flags (e.g., --plan from plan-mode extension). + +${chalk.bold("Examples:")} + # Interactive mode + ${APP_NAME} + + # Interactive mode with initial prompt + ${APP_NAME} "List all .ts files in src/" + + # Include files in initial message + ${APP_NAME} @prompt.md @image.png "What color is the sky?" + + # Non-interactive mode (process and exit) + ${APP_NAME} -p "List all .ts files in src/" + + # Multiple messages (interactive) + ${APP_NAME} "Read package.json" "What dependencies do we have?" + + # Continue previous session + ${APP_NAME} --continue "What did we discuss?" + + # Use different model + ${APP_NAME} --provider openai --model gpt-4o-mini "Help me refactor this code" + + # Use model with provider prefix (no --provider needed) + ${APP_NAME} --model openai/gpt-4o "Help me refactor this code" + + # Use model with thinking level shorthand + ${APP_NAME} --model sonnet:high "Solve this complex problem" + + # Limit model cycling to specific models + ${APP_NAME} --models claude-sonnet,claude-haiku,gpt-4o + + # Limit to a specific provider with glob pattern + ${APP_NAME} --models "github-copilot/*" + + # Cycle models with fixed thinking levels + ${APP_NAME} --models sonnet:high,haiku:low + + # Start with a specific thinking level + ${APP_NAME} --thinking high "Solve this complex problem" + + # Read-only mode (no file modifications possible) + ${APP_NAME} --tools read,grep,find,ls -p "Review the code in src/" + + # Export a session file to HTML + ${APP_NAME} --export ~/${CONFIG_DIR_NAME}/agent/sessions/--path--/session.jsonl + ${APP_NAME} --export session.jsonl output.html + 
+${chalk.bold("Environment Variables:")} + ANTHROPIC_API_KEY - Anthropic Claude API key + ANTHROPIC_OAUTH_TOKEN - Anthropic OAuth token (alternative to API key) + OPENAI_API_KEY - OpenAI GPT API key + AZURE_OPENAI_API_KEY - Azure OpenAI API key + AZURE_OPENAI_BASE_URL - Azure OpenAI base URL (https://{resource}.openai.azure.com/openai/v1) + AZURE_OPENAI_RESOURCE_NAME - Azure OpenAI resource name (alternative to base URL) + AZURE_OPENAI_API_VERSION - Azure OpenAI API version (default: v1) + AZURE_OPENAI_DEPLOYMENT_NAME_MAP - Azure OpenAI model=deployment map (comma-separated) + GEMINI_API_KEY - Google Gemini API key + GROQ_API_KEY - Groq API key + CEREBRAS_API_KEY - Cerebras API key + XAI_API_KEY - xAI Grok API key + OPENROUTER_API_KEY - OpenRouter API key + AI_GATEWAY_API_KEY - Vercel AI Gateway API key + ZAI_API_KEY - ZAI API key + MISTRAL_API_KEY - Mistral API key + MINIMAX_API_KEY - MiniMax API key + OPENCODE_API_KEY - OpenCode Zen/OpenCode Go API key + KIMI_API_KEY - Kimi For Coding API key + AWS_PROFILE - AWS profile for Amazon Bedrock + AWS_ACCESS_KEY_ID - AWS access key for Amazon Bedrock + AWS_SECRET_ACCESS_KEY - AWS secret key for Amazon Bedrock + AWS_BEARER_TOKEN_BEDROCK - Bedrock API key (bearer token) + AWS_REGION - AWS region for Amazon Bedrock (e.g., us-east-1) + ${ENV_AGENT_DIR.padEnd(32)} - Session storage directory (default: ~/${CONFIG_DIR_NAME}/agent) + PI_PACKAGE_DIR - Override package directory (for Nix/Guix store paths) + PI_OFFLINE - Disable startup network operations when set to 1/true/yes + PI_SHARE_VIEWER_URL - Base URL for /share command (default: https://pi.dev/session/) + PI_AI_ANTIGRAVITY_VERSION - Override Antigravity User-Agent version (e.g., 1.23.0) + +${chalk.bold("Available Tools (default: read, bash, edit, write):")} + read - Read file contents + bash - Execute bash commands + edit - Edit files with find/replace + write - Write files (creates/overwrites) + grep - Search file contents (read-only, off by default) + find - Find files 
by glob pattern (read-only, off by default) + ls - List directory contents (read-only, off by default) +`); +} diff --git a/packages/pi-coding-agent/src/cli/config-selector.ts b/packages/pi-coding-agent/src/cli/config-selector.ts new file mode 100644 index 000000000..6d4e5d6c0 --- /dev/null +++ b/packages/pi-coding-agent/src/cli/config-selector.ts @@ -0,0 +1,52 @@ +/** + * TUI config selector for `pi config` command + */ + +import { ProcessTerminal, TUI } from "@gsd/pi-tui"; +import type { ResolvedPaths } from "../core/package-manager.js"; +import type { SettingsManager } from "../core/settings-manager.js"; +import { ConfigSelectorComponent } from "../modes/interactive/components/config-selector.js"; +import { initTheme, stopThemeWatcher } from "../modes/interactive/theme/theme.js"; + +export interface ConfigSelectorOptions { + resolvedPaths: ResolvedPaths; + settingsManager: SettingsManager; + cwd: string; + agentDir: string; +} + +/** Show TUI config selector and return when closed */ +export async function selectConfig(options: ConfigSelectorOptions): Promise { + // Initialize theme before showing TUI + initTheme(options.settingsManager.getTheme(), true); + + return new Promise((resolve) => { + const ui = new TUI(new ProcessTerminal()); + let resolved = false; + + const selector = new ConfigSelectorComponent( + options.resolvedPaths, + options.settingsManager, + options.cwd, + options.agentDir, + () => { + if (!resolved) { + resolved = true; + ui.stop(); + stopThemeWatcher(); + resolve(); + } + }, + () => { + ui.stop(); + stopThemeWatcher(); + process.exit(0); + }, + () => ui.requestRender(), + ); + + ui.addChild(selector); + ui.setFocus(selector.getResourceList()); + ui.start(); + }); +} diff --git a/packages/pi-coding-agent/src/cli/file-processor.ts b/packages/pi-coding-agent/src/cli/file-processor.ts new file mode 100644 index 000000000..1f5a5e7fb --- /dev/null +++ b/packages/pi-coding-agent/src/cli/file-processor.ts @@ -0,0 +1,96 @@ +/** + * Process @file 
CLI arguments into text content and image attachments + */ + +import { access, readFile, stat } from "node:fs/promises"; +import type { ImageContent } from "@gsd/pi-ai"; +import chalk from "chalk"; +import { resolve } from "path"; +import { resolveReadPath } from "../core/tools/path-utils.js"; +import { formatDimensionNote, resizeImage } from "../utils/image-resize.js"; +import { detectSupportedImageMimeTypeFromFile } from "../utils/mime.js"; + +export interface ProcessedFiles { + text: string; + images: ImageContent[]; +} + +export interface ProcessFileOptions { + /** Whether to auto-resize images to 2000x2000 max. Default: true */ + autoResizeImages?: boolean; +} + +/** Process @file arguments into text content and image attachments */ +export async function processFileArguments(fileArgs: string[], options?: ProcessFileOptions): Promise { + const autoResizeImages = options?.autoResizeImages ?? true; + let text = ""; + const images: ImageContent[] = []; + + for (const fileArg of fileArgs) { + // Expand and resolve path (handles ~ expansion and macOS screenshot Unicode spaces) + const absolutePath = resolve(resolveReadPath(fileArg, process.cwd())); + + // Check if file exists + try { + await access(absolutePath); + } catch { + console.error(chalk.red(`Error: File not found: ${absolutePath}`)); + process.exit(1); + } + + // Check if file is empty + const stats = await stat(absolutePath); + if (stats.size === 0) { + // Skip empty files + continue; + } + + const mimeType = await detectSupportedImageMimeTypeFromFile(absolutePath); + + if (mimeType) { + // Handle image file + const content = await readFile(absolutePath); + const base64Content = content.toString("base64"); + + let attachment: ImageContent; + let dimensionNote: string | undefined; + + if (autoResizeImages) { + const resized = await resizeImage({ type: "image", data: base64Content, mimeType }); + dimensionNote = formatDimensionNote(resized); + attachment = { + type: "image", + mimeType: resized.mimeType, + 
data: resized.data, + }; + } else { + attachment = { + type: "image", + mimeType, + data: base64Content, + }; + } + + images.push(attachment); + + // Add text reference to image with optional dimension note + if (dimensionNote) { + text += `${dimensionNote}\n`; + } else { + text += `\n`; + } + } else { + // Handle text file + try { + const content = await readFile(absolutePath, "utf-8"); + text += `\n${content}\n\n`; + } catch (error: unknown) { + const message = error instanceof Error ? error.message : String(error); + console.error(chalk.red(`Error: Could not read file ${absolutePath}: ${message}`)); + process.exit(1); + } + } + } + + return { text, images }; +} diff --git a/packages/pi-coding-agent/src/cli/list-models.ts b/packages/pi-coding-agent/src/cli/list-models.ts new file mode 100644 index 000000000..8a7feafd6 --- /dev/null +++ b/packages/pi-coding-agent/src/cli/list-models.ts @@ -0,0 +1,104 @@ +/** + * List available models with optional fuzzy search + */ + +import type { Api, Model } from "@gsd/pi-ai"; +import { fuzzyFilter } from "@gsd/pi-tui"; +import type { ModelRegistry } from "../core/model-registry.js"; + +/** + * Format a number as human-readable (e.g., 200000 -> "200K", 1000000 -> "1M") + */ +function formatTokenCount(count: number): string { + if (count >= 1_000_000) { + const millions = count / 1_000_000; + return millions % 1 === 0 ? `${millions}M` : `${millions.toFixed(1)}M`; + } + if (count >= 1_000) { + const thousands = count / 1_000; + return thousands % 1 === 0 ? `${thousands}K` : `${thousands.toFixed(1)}K`; + } + return count.toString(); +} + +/** + * List available models, optionally filtered by search pattern + */ +export async function listModels(modelRegistry: ModelRegistry, searchPattern?: string): Promise { + const models = modelRegistry.getAvailable(); + + if (models.length === 0) { + console.log("No models available. 
Set API keys in environment variables."); + return; + } + + // Apply fuzzy filter if search pattern provided + let filteredModels: Model[] = models; + if (searchPattern) { + filteredModels = fuzzyFilter(models, searchPattern, (m) => `${m.provider} ${m.id}`); + } + + if (filteredModels.length === 0) { + console.log(`No models matching "${searchPattern}"`); + return; + } + + // Sort by provider, then by model id + filteredModels.sort((a, b) => { + const providerCmp = a.provider.localeCompare(b.provider); + if (providerCmp !== 0) return providerCmp; + return a.id.localeCompare(b.id); + }); + + // Calculate column widths + const rows = filteredModels.map((m) => ({ + provider: m.provider, + model: m.id, + context: formatTokenCount(m.contextWindow), + maxOut: formatTokenCount(m.maxTokens), + thinking: m.reasoning ? "yes" : "no", + images: m.input.includes("image") ? "yes" : "no", + })); + + const headers = { + provider: "provider", + model: "model", + context: "context", + maxOut: "max-out", + thinking: "thinking", + images: "images", + }; + + const widths = { + provider: Math.max(headers.provider.length, ...rows.map((r) => r.provider.length)), + model: Math.max(headers.model.length, ...rows.map((r) => r.model.length)), + context: Math.max(headers.context.length, ...rows.map((r) => r.context.length)), + maxOut: Math.max(headers.maxOut.length, ...rows.map((r) => r.maxOut.length)), + thinking: Math.max(headers.thinking.length, ...rows.map((r) => r.thinking.length)), + images: Math.max(headers.images.length, ...rows.map((r) => r.images.length)), + }; + + // Print header + const headerLine = [ + headers.provider.padEnd(widths.provider), + headers.model.padEnd(widths.model), + headers.context.padEnd(widths.context), + headers.maxOut.padEnd(widths.maxOut), + headers.thinking.padEnd(widths.thinking), + headers.images.padEnd(widths.images), + ].join(" "); + console.log(headerLine); + + // Print rows + for (const row of rows) { + const line = [ + 
row.provider.padEnd(widths.provider), + row.model.padEnd(widths.model), + row.context.padEnd(widths.context), + row.maxOut.padEnd(widths.maxOut), + row.thinking.padEnd(widths.thinking), + row.images.padEnd(widths.images), + ].join(" "); + console.log(line); + } +} diff --git a/packages/pi-coding-agent/src/cli/session-picker.ts b/packages/pi-coding-agent/src/cli/session-picker.ts new file mode 100644 index 000000000..ee06c0b96 --- /dev/null +++ b/packages/pi-coding-agent/src/cli/session-picker.ts @@ -0,0 +1,51 @@ +/** + * TUI session selector for --resume flag + */ + +import { ProcessTerminal, TUI } from "@gsd/pi-tui"; +import { KeybindingsManager } from "../core/keybindings.js"; +import type { SessionInfo, SessionListProgress } from "../core/session-manager.js"; +import { SessionSelectorComponent } from "../modes/interactive/components/session-selector.js"; + +type SessionsLoader = (onProgress?: SessionListProgress) => Promise; + +/** Show TUI session selector and return selected session path or null if cancelled */ +export async function selectSession( + currentSessionsLoader: SessionsLoader, + allSessionsLoader: SessionsLoader, +): Promise { + return new Promise((resolve) => { + const ui = new TUI(new ProcessTerminal()); + const keybindings = KeybindingsManager.create(); + let resolved = false; + + const selector = new SessionSelectorComponent( + currentSessionsLoader, + allSessionsLoader, + (path: string) => { + if (!resolved) { + resolved = true; + ui.stop(); + resolve(path); + } + }, + () => { + if (!resolved) { + resolved = true; + ui.stop(); + resolve(null); + } + }, + () => { + ui.stop(); + process.exit(0); + }, + () => ui.requestRender(), + { showRenameHint: false, keybindings }, + ); + + ui.addChild(selector); + ui.setFocus(selector.getSessionList()); + ui.start(); + }); +} diff --git a/packages/pi-coding-agent/src/config.ts b/packages/pi-coding-agent/src/config.ts new file mode 100644 index 000000000..5b9e48271 --- /dev/null +++ 
b/packages/pi-coding-agent/src/config.ts @@ -0,0 +1,241 @@ +import { existsSync, readFileSync } from "fs"; +import { homedir } from "os"; +import { dirname, join, resolve } from "path"; +import { fileURLToPath } from "url"; + +// ============================================================================= +// Package Detection +// ============================================================================= + +const __filename = fileURLToPath(import.meta.url); +const __dirname = dirname(__filename); + +/** + * Detect if we're running as a Bun compiled binary. + * Bun binaries have import.meta.url containing "$bunfs", "~BUN", or "%7EBUN" (Bun's virtual filesystem path) + */ +export const isBunBinary = + import.meta.url.includes("$bunfs") || import.meta.url.includes("~BUN") || import.meta.url.includes("%7EBUN"); + +/** Detect if Bun is the runtime (compiled binary or bun run) */ +export const isBunRuntime = !!process.versions.bun; + +// ============================================================================= +// Install Method Detection +// ============================================================================= + +export type InstallMethod = "bun-binary" | "npm" | "pnpm" | "yarn" | "bun" | "unknown"; + +export function detectInstallMethod(): InstallMethod { + if (isBunBinary) { + return "bun-binary"; + } + + const resolvedPath = `${__dirname}\0${process.execPath || ""}`.toLowerCase(); + + if (resolvedPath.includes("/pnpm/") || resolvedPath.includes("/.pnpm/") || resolvedPath.includes("\\pnpm\\")) { + return "pnpm"; + } + if (resolvedPath.includes("/yarn/") || resolvedPath.includes("/.yarn/") || resolvedPath.includes("\\yarn\\")) { + return "yarn"; + } + if (isBunRuntime) { + return "bun"; + } + if (resolvedPath.includes("/npm/") || resolvedPath.includes("/node_modules/") || resolvedPath.includes("\\npm\\")) { + return "npm"; + } + + return "unknown"; +} + +export function getUpdateInstruction(packageName: string): string { + const method = 
detectInstallMethod(); + switch (method) { + case "bun-binary": + return `Download from: https://github.com/badlogic/pi-mono/releases/latest`; + case "pnpm": + return `Run: pnpm install -g ${packageName}`; + case "yarn": + return `Run: yarn global add ${packageName}`; + case "bun": + return `Run: bun install -g ${packageName}`; + case "npm": + return `Run: npm install -g ${packageName}`; + default: + return `Run: npm install -g ${packageName}`; + } +} + +// ============================================================================= +// Package Asset Paths (shipped with executable) +// ============================================================================= + +/** + * Get the base directory for resolving package assets (themes, package.json, README.md, CHANGELOG.md). + * - For Bun binary: returns the directory containing the executable + * - For Node.js (dist/): returns __dirname (the dist/ directory) + * - For tsx (src/): returns parent directory (the package root) + */ +export function getPackageDir(): string { + // Allow override via environment variable (useful for Nix/Guix where store paths tokenize poorly) + const envDir = process.env.PI_PACKAGE_DIR; + if (envDir) { + if (envDir === "~") return homedir(); + if (envDir.startsWith("~/")) return homedir() + envDir.slice(1); + return envDir; + } + + if (isBunBinary) { + // Bun binary: process.execPath points to the compiled executable + return dirname(process.execPath); + } + // Node.js: walk up from __dirname until we find package.json + let dir = __dirname; + while (dir !== dirname(dir)) { + if (existsSync(join(dir, "package.json"))) { + return dir; + } + dir = dirname(dir); + } + // Fallback (shouldn't happen) + return __dirname; +} + +/** + * Get path to built-in themes directory (shipped with package) + * - For Bun binary: theme/ next to executable + * - For Node.js (dist/): dist/modes/interactive/theme/ + * - For tsx (src/): src/modes/interactive/theme/ + */ +export function getThemesDir(): string { + 
if (isBunBinary) { + return join(dirname(process.execPath), "theme"); + } + // Theme is in modes/interactive/theme/ relative to src/ or dist/ + const packageDir = getPackageDir(); + const srcOrDist = existsSync(join(packageDir, "src")) ? "src" : "dist"; + return join(packageDir, srcOrDist, "modes", "interactive", "theme"); +} + +/** + * Get path to HTML export template directory (shipped with package) + * - For Bun binary: export-html/ next to executable + * - For Node.js (dist/): dist/core/export-html/ + * - For tsx (src/): src/core/export-html/ + */ +export function getExportTemplateDir(): string { + if (isBunBinary) { + return join(dirname(process.execPath), "export-html"); + } + const packageDir = getPackageDir(); + const srcOrDist = existsSync(join(packageDir, "src")) ? "src" : "dist"; + return join(packageDir, srcOrDist, "core", "export-html"); +} + +/** Get path to package.json */ +export function getPackageJsonPath(): string { + return join(getPackageDir(), "package.json"); +} + +/** Get path to README.md */ +export function getReadmePath(): string { + return resolve(join(getPackageDir(), "README.md")); +} + +/** Get path to docs directory */ +export function getDocsPath(): string { + return resolve(join(getPackageDir(), "docs")); +} + +/** Get path to examples directory */ +export function getExamplesPath(): string { + return resolve(join(getPackageDir(), "examples")); +} + +/** Get path to CHANGELOG.md */ +export function getChangelogPath(): string { + return resolve(join(getPackageDir(), "CHANGELOG.md")); +} + +// ============================================================================= +// App Config (from package.json piConfig) +// ============================================================================= + +const pkg = JSON.parse(readFileSync(getPackageJsonPath(), "utf-8")); + +export const APP_NAME: string = pkg.piConfig?.name || "pi"; +export const CONFIG_DIR_NAME: string = pkg.piConfig?.configDir || ".pi"; +export const VERSION: string = 
pkg.version; + +// e.g., PI_CODING_AGENT_DIR or TAU_CODING_AGENT_DIR +export const ENV_AGENT_DIR = `${APP_NAME.toUpperCase()}_CODING_AGENT_DIR`; + +const DEFAULT_SHARE_VIEWER_URL = "https://pi.dev/session/"; + +/** Get the share viewer URL for a gist ID */ +export function getShareViewerUrl(gistId: string): string { + const baseUrl = process.env.PI_SHARE_VIEWER_URL || DEFAULT_SHARE_VIEWER_URL; + return `${baseUrl}#${gistId}`; +} + +// ============================================================================= +// User Config Paths (~/.pi/agent/*) +// ============================================================================= + +/** Get the agent config directory (e.g., ~/.pi/agent/) */ +export function getAgentDir(): string { + const envDir = process.env[ENV_AGENT_DIR]; + if (envDir) { + // Expand tilde to home directory + if (envDir === "~") return homedir(); + if (envDir.startsWith("~/")) return homedir() + envDir.slice(1); + return envDir; + } + return join(homedir(), CONFIG_DIR_NAME, "agent"); +} + +/** Get path to user's custom themes directory */ +export function getCustomThemesDir(): string { + return join(getAgentDir(), "themes"); +} + +/** Get path to models.json */ +export function getModelsPath(): string { + return join(getAgentDir(), "models.json"); +} + +/** Get path to auth.json */ +export function getAuthPath(): string { + return join(getAgentDir(), "auth.json"); +} + +/** Get path to settings.json */ +export function getSettingsPath(): string { + return join(getAgentDir(), "settings.json"); +} + +/** Get path to tools directory */ +export function getToolsDir(): string { + return join(getAgentDir(), "tools"); +} + +/** Get path to managed binaries directory (fd, rg) */ +export function getBinDir(): string { + return join(getAgentDir(), "bin"); +} + +/** Get path to prompt templates directory */ +export function getPromptsDir(): string { + return join(getAgentDir(), "prompts"); +} + +/** Get path to sessions directory */ +export function 
getSessionsDir(): string { + return join(getAgentDir(), "sessions"); +} + +/** Get path to debug log file */ +export function getDebugLogPath(): string { + return join(getAgentDir(), `${APP_NAME}-debug.log`); +} diff --git a/packages/pi-coding-agent/src/core/agent-session.ts b/packages/pi-coding-agent/src/core/agent-session.ts new file mode 100644 index 000000000..69e7d6680 --- /dev/null +++ b/packages/pi-coding-agent/src/core/agent-session.ts @@ -0,0 +1,3050 @@ +/** + * AgentSession - Core abstraction for agent lifecycle and session management. + * + * This class is shared between all run modes (interactive, print, rpc). + * It encapsulates: + * - Agent state access + * - Event subscription with automatic session persistence + * - Model and thinking level management + * - Compaction (manual and auto) + * - Bash execution + * - Session switching and branching + * + * Modes use this class and add their own I/O layer on top. + */ + +import { readFileSync } from "node:fs"; +import { basename, dirname, join } from "node:path"; +import type { + Agent, + AgentEvent, + AgentMessage, + AgentState, + AgentTool, + ThinkingLevel, +} from "@gsd/pi-agent-core"; +import type { AssistantMessage, ImageContent, Message, Model, TextContent } from "@gsd/pi-ai"; +import { isContextOverflow, modelsAreEqual, resetApiProviders, supportsXhigh } from "@gsd/pi-ai"; +import { getDocsPath } from "../config.js"; +import { theme } from "../modes/interactive/theme/theme.js"; +import { stripFrontmatter } from "../utils/frontmatter.js"; +import { sleep } from "../utils/sleep.js"; +import { type BashResult, executeBash as executeBashCommand, executeBashWithOperations } from "./bash-executor.js"; +import { + type CompactionResult, + calculateContextTokens, + collectEntriesForBranchSummary, + compact, + estimateContextTokens, + generateBranchSummary, + prepareCompaction, + shouldCompact, +} from "./compaction/index.js"; +import { DEFAULT_THINKING_LEVEL } from "./defaults.js"; +import { 
exportSessionToHtml, type ToolHtmlRenderer } from "./export-html/index.js"; +import { createToolHtmlRenderer } from "./export-html/tool-renderer.js"; +import { + type ContextUsage, + type ExtensionCommandContextActions, + type ExtensionErrorListener, + ExtensionRunner, + type ExtensionUIContext, + type InputSource, + type MessageEndEvent, + type MessageStartEvent, + type MessageUpdateEvent, + type SessionBeforeCompactResult, + type SessionBeforeForkResult, + type SessionBeforeSwitchResult, + type SessionBeforeTreeResult, + type ShutdownHandler, + type ToolDefinition, + type ToolExecutionEndEvent, + type ToolExecutionStartEvent, + type ToolExecutionUpdateEvent, + type ToolInfo, + type TreePreparation, + type TurnEndEvent, + type TurnStartEvent, + wrapRegisteredTools, + wrapToolsWithExtensions, +} from "./extensions/index.js"; +import type { BashExecutionMessage, CustomMessage } from "./messages.js"; +import type { ModelRegistry } from "./model-registry.js"; +import { expandPromptTemplate, type PromptTemplate } from "./prompt-templates.js"; +import type { ResourceExtensionPaths, ResourceLoader } from "./resource-loader.js"; +import type { BranchSummaryEntry, CompactionEntry, SessionManager } from "./session-manager.js"; +import { getLatestCompactionEntry } from "./session-manager.js"; +import type { SettingsManager } from "./settings-manager.js"; +import { BUILTIN_SLASH_COMMANDS, type SlashCommandInfo, type SlashCommandLocation } from "./slash-commands.js"; +import { buildSystemPrompt } from "./system-prompt.js"; +import type { BashOperations } from "./tools/bash.js"; +import { createAllTools } from "./tools/index.js"; + +// ============================================================================ +// Skill Block Parsing +// ============================================================================ + +/** Parsed skill block from a user message */ +export interface ParsedSkillBlock { + name: string; + location: string; + content: string; + userMessage: string | 
undefined; +} + +/** + * Parse a skill block from message text. + * Returns null if the text doesn't contain a skill block. + */ +export function parseSkillBlock(text: string): ParsedSkillBlock | null { + const match = text.match(/^\n([\s\S]*?)\n<\/skill>(?:\n\n([\s\S]+))?$/); + if (!match) return null; + return { + name: match[1], + location: match[2], + content: match[3], + userMessage: match[4]?.trim() || undefined, + }; +} + +/** Session-specific events that extend the core AgentEvent */ +export type AgentSessionEvent = + | AgentEvent + | { type: "auto_compaction_start"; reason: "threshold" | "overflow" } + | { + type: "auto_compaction_end"; + result: CompactionResult | undefined; + aborted: boolean; + willRetry: boolean; + errorMessage?: string; + } + | { type: "auto_retry_start"; attempt: number; maxAttempts: number; delayMs: number; errorMessage: string } + | { type: "auto_retry_end"; success: boolean; attempt: number; finalError?: string }; + +/** Listener function for agent session events */ +export type AgentSessionEventListener = (event: AgentSessionEvent) => void; + +// ============================================================================ +// Types +// ============================================================================ + +export interface AgentSessionConfig { + agent: Agent; + sessionManager: SessionManager; + settingsManager: SettingsManager; + cwd: string; + /** Models to cycle through with Ctrl+P (from --models flag) */ + scopedModels?: Array<{ model: Model; thinkingLevel?: ThinkingLevel }>; + /** Resource loader for skills, prompts, themes, context files, system prompt */ + resourceLoader: ResourceLoader; + /** SDK custom tools registered outside extensions */ + customTools?: ToolDefinition[]; + /** Model registry for API key resolution and model discovery */ + modelRegistry: ModelRegistry; + /** Initial active built-in tool names. 
Default: [read, bash, edit, write] */ + initialActiveToolNames?: string[]; + /** Override base tools (useful for custom runtimes). */ + baseToolsOverride?: Record; + /** Mutable ref used by Agent to access the current ExtensionRunner */ + extensionRunnerRef?: { current?: ExtensionRunner }; +} + +export interface ExtensionBindings { + uiContext?: ExtensionUIContext; + commandContextActions?: ExtensionCommandContextActions; + shutdownHandler?: ShutdownHandler; + onError?: ExtensionErrorListener; +} + +/** Options for AgentSession.prompt() */ +export interface PromptOptions { + /** Whether to expand file-based prompt templates (default: true) */ + expandPromptTemplates?: boolean; + /** Image attachments */ + images?: ImageContent[]; + /** When streaming, how to queue the message: "steer" (interrupt) or "followUp" (wait). Required if streaming. */ + streamingBehavior?: "steer" | "followUp"; + /** Source of input for extension input event handlers. Defaults to "interactive". */ + source?: InputSource; +} + +/** Result from cycleModel() */ +export interface ModelCycleResult { + model: Model; + thinkingLevel: ThinkingLevel; + /** Whether cycling through scoped models (--models flag) or all available */ + isScoped: boolean; +} + +/** Session statistics for /session command */ +export interface SessionStats { + sessionFile: string | undefined; + sessionId: string; + userMessages: number; + assistantMessages: number; + toolCalls: number; + toolResults: number; + totalMessages: number; + tokens: { + input: number; + output: number; + cacheRead: number; + cacheWrite: number; + total: number; + }; + cost: number; +} + +// ============================================================================ +// Constants +// ============================================================================ + +/** Standard thinking levels */ +const THINKING_LEVELS: ThinkingLevel[] = ["off", "minimal", "low", "medium", "high"]; + +/** Thinking levels including xhigh (for supported models) */ 
+const THINKING_LEVELS_WITH_XHIGH: ThinkingLevel[] = ["off", "minimal", "low", "medium", "high", "xhigh"]; + +// ============================================================================ +// AgentSession Class +// ============================================================================ + +export class AgentSession { + readonly agent: Agent; + readonly sessionManager: SessionManager; + readonly settingsManager: SettingsManager; + + private _scopedModels: Array<{ model: Model; thinkingLevel?: ThinkingLevel }>; + + // Event subscription state + private _unsubscribeAgent?: () => void; + private _eventListeners: AgentSessionEventListener[] = []; + private _agentEventQueue: Promise = Promise.resolve(); + + /** Tracks pending steering messages for UI display. Removed when delivered. */ + private _steeringMessages: string[] = []; + /** Tracks pending follow-up messages for UI display. Removed when delivered. */ + private _followUpMessages: string[] = []; + /** Messages queued to be included with the next user prompt as context ("asides"). 
*/ + private _pendingNextTurnMessages: CustomMessage[] = []; + + // Compaction state + private _compactionAbortController: AbortController | undefined = undefined; + private _autoCompactionAbortController: AbortController | undefined = undefined; + private _overflowRecoveryAttempted = false; + + // Branch summarization state + private _branchSummaryAbortController: AbortController | undefined = undefined; + + // Retry state + private _retryAbortController: AbortController | undefined = undefined; + private _retryAttempt = 0; + private _retryPromise: Promise | undefined = undefined; + private _retryResolve: (() => void) | undefined = undefined; + + // Bash execution state + private _bashAbortController: AbortController | undefined = undefined; + private _pendingBashMessages: BashExecutionMessage[] = []; + + // Extension system + private _extensionRunner: ExtensionRunner | undefined = undefined; + private _turnIndex = 0; + + private _resourceLoader: ResourceLoader; + private _customTools: ToolDefinition[]; + private _baseToolRegistry: Map = new Map(); + private _cwd: string; + private _extensionRunnerRef?: { current?: ExtensionRunner }; + private _initialActiveToolNames?: string[]; + private _baseToolsOverride?: Record; + private _extensionUIContext?: ExtensionUIContext; + private _extensionCommandContextActions?: ExtensionCommandContextActions; + private _extensionShutdownHandler?: ShutdownHandler; + private _extensionErrorListener?: ExtensionErrorListener; + private _extensionErrorUnsubscriber?: () => void; + + // Model registry for API key resolution + private _modelRegistry: ModelRegistry; + + // Tool registry for extension getTools/setTools + private _toolRegistry: Map = new Map(); + private _toolPromptSnippets: Map = new Map(); + private _toolPromptGuidelines: Map = new Map(); + + // Base system prompt (without extension appends) - used to apply fresh appends each turn + private _baseSystemPrompt = ""; + + constructor(config: AgentSessionConfig) { + this.agent 
= config.agent; + this.sessionManager = config.sessionManager; + this.settingsManager = config.settingsManager; + this._scopedModels = config.scopedModels ?? []; + this._resourceLoader = config.resourceLoader; + this._customTools = config.customTools ?? []; + this._cwd = config.cwd; + this._modelRegistry = config.modelRegistry; + this._extensionRunnerRef = config.extensionRunnerRef; + this._initialActiveToolNames = config.initialActiveToolNames; + this._baseToolsOverride = config.baseToolsOverride; + + // Always subscribe to agent events for internal handling + // (session persistence, extensions, auto-compaction, retry logic) + this._unsubscribeAgent = this.agent.subscribe(this._handleAgentEvent); + + this._buildRuntime({ + activeToolNames: this._initialActiveToolNames, + includeAllExtensionTools: true, + }); + } + + /** Model registry for API key resolution and model discovery */ + get modelRegistry(): ModelRegistry { + return this._modelRegistry; + } + + // ========================================================================= + // Event Subscription + // ========================================================================= + + /** Emit an event to all listeners */ + private _emit(event: AgentSessionEvent): void { + for (const l of this._eventListeners) { + l(event); + } + } + + // Track last assistant message for auto-compaction check + private _lastAssistantMessage: AssistantMessage | undefined = undefined; + + /** Internal handler for agent events - shared by subscribe and reconnect */ + private _handleAgentEvent = (event: AgentEvent): void => { + // Create retry promise synchronously before queueing async processing. + // Agent.emit() calls this handler synchronously, and prompt() calls waitForRetry() + // as soon as agent.prompt() resolves. If _retryPromise is created only inside + // _processAgentEvent, slow earlier queued events can delay agent_end processing + // and waitForRetry() can miss the in-flight retry. 
+ this._createRetryPromiseForAgentEnd(event); + + this._agentEventQueue = this._agentEventQueue.then( + () => this._processAgentEvent(event), + () => this._processAgentEvent(event), + ); + + // Keep queue alive if an event handler fails + this._agentEventQueue.catch(() => {}); + }; + + private _createRetryPromiseForAgentEnd(event: AgentEvent): void { + if (event.type !== "agent_end" || this._retryPromise) { + return; + } + + const settings = this.settingsManager.getRetrySettings(); + if (!settings.enabled) { + return; + } + + const lastAssistant = this._findLastAssistantInMessages(event.messages); + if (!lastAssistant || !this._isRetryableError(lastAssistant)) { + return; + } + + this._retryPromise = new Promise((resolve) => { + this._retryResolve = resolve; + }); + } + + private _findLastAssistantInMessages(messages: AgentMessage[]): AssistantMessage | undefined { + for (let i = messages.length - 1; i >= 0; i--) { + const message = messages[i]; + if (message.role === "assistant") { + return message as AssistantMessage; + } + } + return undefined; + } + + private async _processAgentEvent(event: AgentEvent): Promise { + // When a user message starts, check if it's from either queue and remove it BEFORE emitting + // This ensures the UI sees the updated queue state + if (event.type === "message_start" && event.message.role === "user") { + this._overflowRecoveryAttempted = false; + const messageText = this._getUserMessageText(event.message); + if (messageText) { + // Check steering queue first + const steeringIndex = this._steeringMessages.indexOf(messageText); + if (steeringIndex !== -1) { + this._steeringMessages.splice(steeringIndex, 1); + } else { + // Check follow-up queue + const followUpIndex = this._followUpMessages.indexOf(messageText); + if (followUpIndex !== -1) { + this._followUpMessages.splice(followUpIndex, 1); + } + } + } + } + + // Emit to extensions first + await this._emitExtensionEvent(event); + + // Notify all listeners + this._emit(event); + + // 
Handle session persistence + if (event.type === "message_end") { + // Check if this is a custom message from extensions + if (event.message.role === "custom") { + // Persist as CustomMessageEntry + this.sessionManager.appendCustomMessageEntry( + event.message.customType, + event.message.content, + event.message.display, + event.message.details, + ); + } else if ( + event.message.role === "user" || + event.message.role === "assistant" || + event.message.role === "toolResult" + ) { + // Regular LLM message - persist as SessionMessageEntry + this.sessionManager.appendMessage(event.message); + } + // Other message types (bashExecution, compactionSummary, branchSummary) are persisted elsewhere + + // Track assistant message for auto-compaction (checked on agent_end) + if (event.message.role === "assistant") { + this._lastAssistantMessage = event.message; + + const assistantMsg = event.message as AssistantMessage; + if (assistantMsg.stopReason !== "error") { + this._overflowRecoveryAttempted = false; + } + + // Reset retry counter immediately on successful assistant response + // This prevents accumulation across multiple LLM calls within a turn + if (assistantMsg.stopReason !== "error" && this._retryAttempt > 0) { + this._emit({ + type: "auto_retry_end", + success: true, + attempt: this._retryAttempt, + }); + this._retryAttempt = 0; + this._resolveRetry(); + } + } + } + + // Check auto-retry and auto-compaction after agent completes + if (event.type === "agent_end" && this._lastAssistantMessage) { + const msg = this._lastAssistantMessage; + this._lastAssistantMessage = undefined; + + // Check for retryable errors first (overloaded, rate limit, server errors) + if (this._isRetryableError(msg)) { + const didRetry = await this._handleRetryableError(msg); + if (didRetry) return; // Retry was initiated, don't proceed to compaction + } + + await this._checkCompaction(msg); + } + } + + /** Resolve the pending retry promise */ + private _resolveRetry(): void { + if 
(this._retryResolve) { + this._retryResolve(); + this._retryResolve = undefined; + this._retryPromise = undefined; + } + } + + /** Extract text content from a message */ + private _getUserMessageText(message: Message): string { + if (message.role !== "user") return ""; + const content = message.content; + if (typeof content === "string") return content; + const textBlocks = content.filter((c) => c.type === "text"); + return textBlocks.map((c) => (c as TextContent).text).join(""); + } + + /** Find the last assistant message in agent state (including aborted ones) */ + private _findLastAssistantMessage(): AssistantMessage | undefined { + const messages = this.agent.state.messages; + for (let i = messages.length - 1; i >= 0; i--) { + const msg = messages[i]; + if (msg.role === "assistant") { + return msg as AssistantMessage; + } + } + return undefined; + } + + /** Emit extension events based on agent events */ + private async _emitExtensionEvent(event: AgentEvent): Promise { + if (!this._extensionRunner) return; + + if (event.type === "agent_start") { + this._turnIndex = 0; + await this._extensionRunner.emit({ type: "agent_start" }); + } else if (event.type === "agent_end") { + await this._extensionRunner.emit({ type: "agent_end", messages: event.messages }); + } else if (event.type === "turn_start") { + const extensionEvent: TurnStartEvent = { + type: "turn_start", + turnIndex: this._turnIndex, + timestamp: Date.now(), + }; + await this._extensionRunner.emit(extensionEvent); + } else if (event.type === "turn_end") { + const extensionEvent: TurnEndEvent = { + type: "turn_end", + turnIndex: this._turnIndex, + message: event.message, + toolResults: event.toolResults, + }; + await this._extensionRunner.emit(extensionEvent); + this._turnIndex++; + } else if (event.type === "message_start") { + const extensionEvent: MessageStartEvent = { + type: "message_start", + message: event.message, + }; + await this._extensionRunner.emit(extensionEvent); + } else if (event.type === 
"message_update") { + const extensionEvent: MessageUpdateEvent = { + type: "message_update", + message: event.message, + assistantMessageEvent: event.assistantMessageEvent, + }; + await this._extensionRunner.emit(extensionEvent); + } else if (event.type === "message_end") { + const extensionEvent: MessageEndEvent = { + type: "message_end", + message: event.message, + }; + await this._extensionRunner.emit(extensionEvent); + } else if (event.type === "tool_execution_start") { + const extensionEvent: ToolExecutionStartEvent = { + type: "tool_execution_start", + toolCallId: event.toolCallId, + toolName: event.toolName, + args: event.args, + }; + await this._extensionRunner.emit(extensionEvent); + } else if (event.type === "tool_execution_update") { + const extensionEvent: ToolExecutionUpdateEvent = { + type: "tool_execution_update", + toolCallId: event.toolCallId, + toolName: event.toolName, + args: event.args, + partialResult: event.partialResult, + }; + await this._extensionRunner.emit(extensionEvent); + } else if (event.type === "tool_execution_end") { + const extensionEvent: ToolExecutionEndEvent = { + type: "tool_execution_end", + toolCallId: event.toolCallId, + toolName: event.toolName, + result: event.result, + isError: event.isError, + }; + await this._extensionRunner.emit(extensionEvent); + } + } + + /** + * Subscribe to agent events. + * Session persistence is handled internally (saves messages on message_end). + * Multiple listeners can be added. Returns unsubscribe function for this listener. + */ + subscribe(listener: AgentSessionEventListener): () => void { + this._eventListeners.push(listener); + + // Return unsubscribe function for this specific listener + return () => { + const index = this._eventListeners.indexOf(listener); + if (index !== -1) { + this._eventListeners.splice(index, 1); + } + }; + } + + /** + * Temporarily disconnect from agent events. + * User listeners are preserved and will receive events again after resubscribe(). 
+ * Used internally during operations that need to pause event processing. + */ + private _disconnectFromAgent(): void { + if (this._unsubscribeAgent) { + this._unsubscribeAgent(); + this._unsubscribeAgent = undefined; + } + } + + /** + * Reconnect to agent events after _disconnectFromAgent(). + * Preserves all existing listeners. + */ + private _reconnectToAgent(): void { + if (this._unsubscribeAgent) return; // Already connected + this._unsubscribeAgent = this.agent.subscribe(this._handleAgentEvent); + } + + /** + * Remove all listeners and disconnect from agent. + * Call this when completely done with the session. + */ + dispose(): void { + this._disconnectFromAgent(); + this._eventListeners = []; + } + + // ========================================================================= + // Read-only State Access + // ========================================================================= + + /** Full agent state */ + get state(): AgentState { + return this.agent.state; + } + + /** Current model (may be undefined if not yet selected) */ + get model(): Model | undefined { + return this.agent.state.model; + } + + /** Current thinking level */ + get thinkingLevel(): ThinkingLevel { + return this.agent.state.thinkingLevel; + } + + /** Whether agent is currently streaming a response */ + get isStreaming(): boolean { + return this.agent.state.isStreaming; + } + + /** Current effective system prompt (includes any per-turn extension modifications) */ + get systemPrompt(): string { + return this.agent.state.systemPrompt; + } + + /** Current retry attempt (0 if not retrying) */ + get retryAttempt(): number { + return this._retryAttempt; + } + + /** + * Get the names of currently active tools. + * Returns the names of tools currently set on the agent. + */ + getActiveToolNames(): string[] { + return this.agent.state.tools.map((t) => t.name); + } + + /** + * Get all configured tools with name, description, and parameter schema. 
+ */ + getAllTools(): ToolInfo[] { + return Array.from(this._toolRegistry.values()).map((t) => ({ + name: t.name, + description: t.description, + parameters: t.parameters, + })); + } + + /** + * Set active tools by name. + * Only tools in the registry can be enabled. Unknown tool names are ignored. + * Also rebuilds the system prompt to reflect the new tool set. + * Changes take effect on the next agent turn. + */ + setActiveToolsByName(toolNames: string[]): void { + const tools: AgentTool[] = []; + const validToolNames: string[] = []; + for (const name of toolNames) { + const tool = this._toolRegistry.get(name); + if (tool) { + tools.push(tool); + validToolNames.push(name); + } + } + this.agent.setTools(tools); + + // Rebuild base system prompt with new tool set + this._baseSystemPrompt = this._rebuildSystemPrompt(validToolNames); + this.agent.setSystemPrompt(this._baseSystemPrompt); + } + + /** Whether compaction or branch summarization is currently running */ + get isCompacting(): boolean { + return ( + this._autoCompactionAbortController !== undefined || + this._compactionAbortController !== undefined || + this._branchSummaryAbortController !== undefined + ); + } + + /** All messages including custom types like BashExecutionMessage */ + get messages(): AgentMessage[] { + return this.agent.state.messages; + } + + /** Current steering mode */ + get steeringMode(): "all" | "one-at-a-time" { + return this.agent.getSteeringMode(); + } + + /** Current follow-up mode */ + get followUpMode(): "all" | "one-at-a-time" { + return this.agent.getFollowUpMode(); + } + + /** Current session file path, or undefined if sessions are disabled */ + get sessionFile(): string | undefined { + return this.sessionManager.getSessionFile(); + } + + /** Current session ID */ + get sessionId(): string { + return this.sessionManager.getSessionId(); + } + + /** Current session display name, if set */ + get sessionName(): string | undefined { + return this.sessionManager.getSessionName(); + } 
+ + /** Scoped models for cycling (from --models flag) */ + get scopedModels(): ReadonlyArray<{ model: Model; thinkingLevel?: ThinkingLevel }> { + return this._scopedModels; + } + + /** Update scoped models for cycling */ + setScopedModels(scopedModels: Array<{ model: Model; thinkingLevel?: ThinkingLevel }>): void { + this._scopedModels = scopedModels; + } + + /** File-based prompt templates */ + get promptTemplates(): ReadonlyArray { + return this._resourceLoader.getPrompts().prompts; + } + + private _normalizePromptSnippet(text: string | undefined): string | undefined { + if (!text) return undefined; + const oneLine = text + .replace(/[\r\n]+/g, " ") + .replace(/\s+/g, " ") + .trim(); + return oneLine.length > 0 ? oneLine : undefined; + } + + private _normalizePromptGuidelines(guidelines: string[] | undefined): string[] { + if (!guidelines || guidelines.length === 0) { + return []; + } + + const unique = new Set(); + for (const guideline of guidelines) { + const normalized = guideline.trim(); + if (normalized.length > 0) { + unique.add(normalized); + } + } + return Array.from(unique); + } + + private _rebuildSystemPrompt(toolNames: string[]): string { + const validToolNames = toolNames.filter((name) => this._toolRegistry.has(name)); + const toolSnippets: Record = {}; + const promptGuidelines: string[] = []; + for (const name of validToolNames) { + const snippet = this._toolPromptSnippets.get(name); + if (snippet) { + toolSnippets[name] = snippet; + } + + const toolGuidelines = this._toolPromptGuidelines.get(name); + if (toolGuidelines) { + promptGuidelines.push(...toolGuidelines); + } + } + + const loaderSystemPrompt = this._resourceLoader.getSystemPrompt(); + const loaderAppendSystemPrompt = this._resourceLoader.getAppendSystemPrompt(); + const appendSystemPrompt = + loaderAppendSystemPrompt.length > 0 ? 
loaderAppendSystemPrompt.join("\n\n") : undefined; + const loadedSkills = this._resourceLoader.getSkills().skills; + const loadedContextFiles = this._resourceLoader.getAgentsFiles().agentsFiles; + + return buildSystemPrompt({ + cwd: this._cwd, + skills: loadedSkills, + contextFiles: loadedContextFiles, + customPrompt: loaderSystemPrompt, + appendSystemPrompt, + selectedTools: validToolNames, + toolSnippets, + promptGuidelines, + }); + } + + // ========================================================================= + // Prompting + // ========================================================================= + + /** + * Send a prompt to the agent. + * - Handles extension commands (registered via pi.registerCommand) immediately, even during streaming + * - Expands file-based prompt templates by default + * - During streaming, queues via steer() or followUp() based on streamingBehavior option + * - Validates model and API key before sending (when not streaming) + * @throws Error if streaming and no streamingBehavior specified + * @throws Error if no model selected or no API key available (when not streaming) + */ + async prompt(text: string, options?: PromptOptions): Promise { + const expandPromptTemplates = options?.expandPromptTemplates ?? true; + + // Handle extension commands first (execute immediately, even during streaming) + // Extension commands manage their own LLM interaction via pi.sendMessage() + if (expandPromptTemplates && text.startsWith("/")) { + const handled = await this._tryExecuteExtensionCommand(text); + if (handled) { + // Extension command executed, no prompt to send + return; + } + } + + // Emit input event for extension interception (before skill/template expansion) + let currentText = text; + let currentImages = options?.images; + if (this._extensionRunner?.hasHandlers("input")) { + const inputResult = await this._extensionRunner.emitInput( + currentText, + currentImages, + options?.source ?? 
"interactive", + ); + if (inputResult.action === "handled") { + return; + } + if (inputResult.action === "transform") { + currentText = inputResult.text; + currentImages = inputResult.images ?? currentImages; + } + } + + // Expand skill commands (/skill:name args) and prompt templates (/template args) + let expandedText = currentText; + if (expandPromptTemplates) { + expandedText = this._expandSkillCommand(expandedText); + expandedText = expandPromptTemplate(expandedText, [...this.promptTemplates]); + } + + // If streaming, queue via steer() or followUp() based on option + if (this.isStreaming) { + if (!options?.streamingBehavior) { + throw new Error( + "Agent is already processing. Specify streamingBehavior ('steer' or 'followUp') to queue the message.", + ); + } + if (options.streamingBehavior === "followUp") { + await this._queueFollowUp(expandedText, currentImages); + } else { + await this._queueSteer(expandedText, currentImages); + } + return; + } + + // Flush any pending bash messages before the new prompt + this._flushPendingBashMessages(); + + // Validate model + if (!this.model) { + throw new Error( + "No model selected.\n\n" + + `Use /login or set an API key environment variable. See ${join(getDocsPath(), "providers.md")}\n\n` + + "Then use /model to select a model.", + ); + } + + // Validate API key + const apiKey = await this._modelRegistry.getApiKey(this.model); + if (!apiKey) { + const isOAuth = this._modelRegistry.isUsingOAuth(this.model); + if (isOAuth) { + throw new Error( + `Authentication failed for "${this.model.provider}". ` + + `Credentials may have expired or network is unavailable. ` + + `Run '/login ${this.model.provider}' to re-authenticate.`, + ); + } + throw new Error( + `No API key found for ${this.model.provider}.\n\n` + + `Use /login or set an API key environment variable. 
See ${join(getDocsPath(), "providers.md")}`, + ); + } + + // Check if we need to compact before sending (catches aborted responses) + const lastAssistant = this._findLastAssistantMessage(); + if (lastAssistant) { + await this._checkCompaction(lastAssistant, false); + } + + // Build messages array (custom message if any, then user message) + const messages: AgentMessage[] = []; + + // Add user message + const userContent: (TextContent | ImageContent)[] = [{ type: "text", text: expandedText }]; + if (currentImages) { + userContent.push(...currentImages); + } + messages.push({ + role: "user", + content: userContent, + timestamp: Date.now(), + }); + + // Inject any pending "nextTurn" messages as context alongside the user message + for (const msg of this._pendingNextTurnMessages) { + messages.push(msg); + } + this._pendingNextTurnMessages = []; + + // Emit before_agent_start extension event + if (this._extensionRunner) { + const result = await this._extensionRunner.emitBeforeAgentStart( + expandedText, + currentImages, + this._baseSystemPrompt, + ); + // Add all custom messages from extensions + if (result?.messages) { + for (const msg of result.messages) { + messages.push({ + role: "custom", + customType: msg.customType, + content: msg.content, + display: msg.display, + details: msg.details, + timestamp: Date.now(), + }); + } + } + // Apply extension-modified system prompt, or reset to base + if (result?.systemPrompt) { + this.agent.setSystemPrompt(result.systemPrompt); + } else { + // Ensure we're using the base prompt (in case previous turn had modifications) + this.agent.setSystemPrompt(this._baseSystemPrompt); + } + } + + await this.agent.prompt(messages); + await this.waitForRetry(); + } + + /** + * Try to execute an extension command. Returns true if command was found and executed. 
+ */ + private async _tryExecuteExtensionCommand(text: string): Promise { + if (!this._extensionRunner) return false; + + // Parse command name and args + const spaceIndex = text.indexOf(" "); + const commandName = spaceIndex === -1 ? text.slice(1) : text.slice(1, spaceIndex); + const args = spaceIndex === -1 ? "" : text.slice(spaceIndex + 1); + + const command = this._extensionRunner.getCommand(commandName); + if (!command) return false; + + // Get command context from extension runner (includes session control methods) + const ctx = this._extensionRunner.createCommandContext(); + + try { + await command.handler(args, ctx); + return true; + } catch (err) { + // Emit error via extension runner + this._extensionRunner.emitError({ + extensionPath: `command:${commandName}`, + event: "command", + error: err instanceof Error ? err.message : String(err), + }); + return true; + } + } + + /** + * Expand skill commands (/skill:name args) to their full content. + * Returns the expanded text, or the original text if not a skill command or skill not found. + * Emits errors via extension runner if file read fails. + */ + private _expandSkillCommand(text: string): string { + if (!text.startsWith("/skill:")) return text; + + const spaceIndex = text.indexOf(" "); + const skillName = spaceIndex === -1 ? text.slice(7) : text.slice(7, spaceIndex); + const args = spaceIndex === -1 ? "" : text.slice(spaceIndex + 1).trim(); + + const skill = this.resourceLoader.getSkills().skills.find((s) => s.name === skillName); + if (!skill) return text; // Unknown skill, pass through + + try { + const content = readFileSync(skill.filePath, "utf-8"); + const body = stripFrontmatter(content).trim(); + const skillBlock = `\nReferences are relative to ${skill.baseDir}.\n\n${body}\n`; + return args ? 
`${skillBlock}\n\n${args}` : skillBlock; + } catch (err) { + // Emit error like extension commands do + this._extensionRunner?.emitError({ + extensionPath: skill.filePath, + event: "skill_expansion", + error: err instanceof Error ? err.message : String(err), + }); + return text; // Return original on error + } + } + + /** + * Queue a steering message to interrupt the agent mid-run. + * Delivered after current tool execution, skips remaining tools. + * Expands skill commands and prompt templates. Errors on extension commands. + * @param images Optional image attachments to include with the message + * @throws Error if text is an extension command + */ + async steer(text: string, images?: ImageContent[]): Promise { + // Check for extension commands (cannot be queued) + if (text.startsWith("/")) { + this._throwIfExtensionCommand(text); + } + + // Expand skill commands and prompt templates + let expandedText = this._expandSkillCommand(text); + expandedText = expandPromptTemplate(expandedText, [...this.promptTemplates]); + + await this._queueSteer(expandedText, images); + } + + /** + * Queue a follow-up message to be processed after the agent finishes. + * Delivered only when agent has no more tool calls or steering messages. + * Expands skill commands and prompt templates. Errors on extension commands. + * @param images Optional image attachments to include with the message + * @throws Error if text is an extension command + */ + async followUp(text: string, images?: ImageContent[]): Promise { + // Check for extension commands (cannot be queued) + if (text.startsWith("/")) { + this._throwIfExtensionCommand(text); + } + + // Expand skill commands and prompt templates + let expandedText = this._expandSkillCommand(text); + expandedText = expandPromptTemplate(expandedText, [...this.promptTemplates]); + + await this._queueFollowUp(expandedText, images); + } + + /** + * Internal: Queue a steering message (already expanded, no extension command check). 
+ */ + private async _queueSteer(text: string, images?: ImageContent[]): Promise { + this._steeringMessages.push(text); + const content: (TextContent | ImageContent)[] = [{ type: "text", text }]; + if (images) { + content.push(...images); + } + this.agent.steer({ + role: "user", + content, + timestamp: Date.now(), + }); + } + + /** + * Internal: Queue a follow-up message (already expanded, no extension command check). + */ + private async _queueFollowUp(text: string, images?: ImageContent[]): Promise { + this._followUpMessages.push(text); + const content: (TextContent | ImageContent)[] = [{ type: "text", text }]; + if (images) { + content.push(...images); + } + this.agent.followUp({ + role: "user", + content, + timestamp: Date.now(), + }); + } + + /** + * Throw an error if the text is an extension command. + */ + private _throwIfExtensionCommand(text: string): void { + if (!this._extensionRunner) return; + + const spaceIndex = text.indexOf(" "); + const commandName = spaceIndex === -1 ? text.slice(1) : text.slice(1, spaceIndex); + const command = this._extensionRunner.getCommand(commandName); + + if (command) { + throw new Error( + `Extension command "/${commandName}" cannot be queued. Use prompt() or execute the command when not streaming.`, + ); + } + } + + /** + * Send a custom message to the session. Creates a CustomMessageEntry. 
+ * + * Handles three cases: + * - Streaming: queues message, processed when loop pulls from queue + * - Not streaming + triggerTurn: appends to state/session, starts new turn + * - Not streaming + no trigger: appends to state/session, no turn + * + * @param message Custom message with customType, content, display, details + * @param options.triggerTurn If true and not streaming, triggers a new LLM turn + * @param options.deliverAs Delivery mode: "steer", "followUp", or "nextTurn" + */ + async sendCustomMessage( + message: Pick, "customType" | "content" | "display" | "details">, + options?: { triggerTurn?: boolean; deliverAs?: "steer" | "followUp" | "nextTurn" }, + ): Promise { + const appMessage = { + role: "custom" as const, + customType: message.customType, + content: message.content, + display: message.display, + details: message.details, + timestamp: Date.now(), + } satisfies CustomMessage; + if (options?.deliverAs === "nextTurn") { + this._pendingNextTurnMessages.push(appMessage); + } else if (this.isStreaming) { + if (options?.deliverAs === "followUp") { + this.agent.followUp(appMessage); + } else { + this.agent.steer(appMessage); + } + } else if (options?.triggerTurn) { + await this.agent.prompt(appMessage); + } else { + this.agent.appendMessage(appMessage); + this.sessionManager.appendCustomMessageEntry( + message.customType, + message.content, + message.display, + message.details, + ); + this._emit({ type: "message_start", message: appMessage }); + this._emit({ type: "message_end", message: appMessage }); + } + } + + /** + * Send a user message to the agent. Always triggers a turn. + * When the agent is streaming, use deliverAs to specify how to queue the message. 
+ * + * @param content User message content (string or content array) + * @param options.deliverAs Delivery mode when streaming: "steer" or "followUp" + */ + async sendUserMessage( + content: string | (TextContent | ImageContent)[], + options?: { deliverAs?: "steer" | "followUp" }, + ): Promise { + // Normalize content to text string + optional images + let text: string; + let images: ImageContent[] | undefined; + + if (typeof content === "string") { + text = content; + } else { + const textParts: string[] = []; + images = []; + for (const part of content) { + if (part.type === "text") { + textParts.push(part.text); + } else { + images.push(part); + } + } + text = textParts.join("\n"); + if (images.length === 0) images = undefined; + } + + // Use prompt() with expandPromptTemplates: false to skip command handling and template expansion + await this.prompt(text, { + expandPromptTemplates: false, + streamingBehavior: options?.deliverAs, + images, + source: "extension", + }); + } + + /** + * Clear all queued messages and return them. + * Useful for restoring to editor when user aborts. 
+ * @returns Object with steering and followUp arrays + */ + clearQueue(): { steering: string[]; followUp: string[] } { + const steering = [...this._steeringMessages]; + const followUp = [...this._followUpMessages]; + this._steeringMessages = []; + this._followUpMessages = []; + this.agent.clearAllQueues(); + return { steering, followUp }; + } + + /** Number of pending messages (includes both steering and follow-up) */ + get pendingMessageCount(): number { + return this._steeringMessages.length + this._followUpMessages.length; + } + + /** Get pending steering messages (read-only) */ + getSteeringMessages(): readonly string[] { + return this._steeringMessages; + } + + /** Get pending follow-up messages (read-only) */ + getFollowUpMessages(): readonly string[] { + return this._followUpMessages; + } + + get resourceLoader(): ResourceLoader { + return this._resourceLoader; + } + + /** + * Abort current operation and wait for agent to become idle. + */ + async abort(): Promise { + this.abortRetry(); + this.agent.abort(); + await this.agent.waitForIdle(); + } + + /** + * Start a new session, optionally with initial messages and parent tracking. + * Clears all messages and starts a new session. + * Listeners are preserved and will continue receiving events. 
+ * @param options.parentSession - Optional parent session path for tracking + * @param options.setup - Optional callback to initialize session (e.g., append messages) + * @returns true if completed, false if cancelled by extension + */ + async newSession(options?: { + parentSession?: string; + setup?: (sessionManager: SessionManager) => Promise; + }): Promise { + const previousSessionFile = this.sessionFile; + + // Emit session_before_switch event with reason "new" (can be cancelled) + if (this._extensionRunner?.hasHandlers("session_before_switch")) { + const result = (await this._extensionRunner.emit({ + type: "session_before_switch", + reason: "new", + })) as SessionBeforeSwitchResult | undefined; + + if (result?.cancel) { + return false; + } + } + + this._disconnectFromAgent(); + await this.abort(); + this.agent.reset(); + this.sessionManager.newSession({ parentSession: options?.parentSession }); + this.agent.sessionId = this.sessionManager.getSessionId(); + this._steeringMessages = []; + this._followUpMessages = []; + this._pendingNextTurnMessages = []; + + this.sessionManager.appendThinkingLevelChange(this.thinkingLevel); + + // Run setup callback if provided (e.g., to append initial messages) + if (options?.setup) { + await options.setup(this.sessionManager); + // Sync agent state with session manager after setup + const sessionContext = this.sessionManager.buildSessionContext(); + this.agent.replaceMessages(sessionContext.messages); + } + + this._reconnectToAgent(); + + // Emit session_switch event with reason "new" to extensions + if (this._extensionRunner) { + await this._extensionRunner.emit({ + type: "session_switch", + reason: "new", + previousSessionFile, + }); + } + + // Emit session event to custom tools + return true; + } + + // ========================================================================= + // Model Management + // ========================================================================= + + private async _emitModelSelect( + nextModel: 
Model, + previousModel: Model | undefined, + source: "set" | "cycle" | "restore", + ): Promise { + if (!this._extensionRunner) return; + if (modelsAreEqual(previousModel, nextModel)) return; + await this._extensionRunner.emit({ + type: "model_select", + model: nextModel, + previousModel, + source, + }); + } + + /** + * Set model directly. + * Validates API key, saves to session and settings. + * @throws Error if no API key available for the model + */ + async setModel(model: Model, options?: { persist?: boolean }): Promise { + const apiKey = await this._modelRegistry.getApiKey(model); + if (!apiKey) { + throw new Error(`No API key for ${model.provider}/${model.id}`); + } + + const previousModel = this.model; + const thinkingLevel = this._getThinkingLevelForModelSwitch(); + this.agent.setModel(model); + this.sessionManager.appendModelChange(model.provider, model.id); + if (options?.persist !== false) { + this.settingsManager.setDefaultModelAndProvider(model.provider, model.id); + } + + // Re-clamp thinking level for new model's capabilities + this.setThinkingLevel(thinkingLevel); + + await this._emitModelSelect(model, previousModel, "set"); + } + + /** + * Cycle to next/previous model. + * Uses scoped models (from --models flag) if available, otherwise all available models. 
+ * @param direction - "forward" (default) or "backward" + * @returns The new model info, or undefined if only one model available + */ + async cycleModel(direction: "forward" | "backward" = "forward", options?: { persist?: boolean }): Promise { + if (this._scopedModels.length > 0) { + return this._cycleScopedModel(direction, options); + } + return this._cycleAvailableModel(direction, options); + } + + private async _getScopedModelsWithApiKey(): Promise; thinkingLevel?: ThinkingLevel }>> { + const apiKeysByProvider = new Map(); + const result: Array<{ model: Model; thinkingLevel?: ThinkingLevel }> = []; + + for (const scoped of this._scopedModels) { + const provider = scoped.model.provider; + let apiKey: string | undefined; + if (apiKeysByProvider.has(provider)) { + apiKey = apiKeysByProvider.get(provider); + } else { + apiKey = await this._modelRegistry.getApiKeyForProvider(provider); + apiKeysByProvider.set(provider, apiKey); + } + + if (apiKey) { + result.push(scoped); + } + } + + return result; + } + + private async _cycleScopedModel(direction: "forward" | "backward", options?: { persist?: boolean }): Promise { + const scopedModels = await this._getScopedModelsWithApiKey(); + if (scopedModels.length <= 1) return undefined; + + const currentModel = this.model; + let currentIndex = scopedModels.findIndex((sm) => modelsAreEqual(sm.model, currentModel)); + + if (currentIndex === -1) currentIndex = 0; + const len = scopedModels.length; + const nextIndex = direction === "forward" ? (currentIndex + 1) % len : (currentIndex - 1 + len) % len; + const next = scopedModels[nextIndex]; + const thinkingLevel = this._getThinkingLevelForModelSwitch(next.thinkingLevel); + + // Apply model + this.agent.setModel(next.model); + this.sessionManager.appendModelChange(next.model.provider, next.model.id); + if (options?.persist !== false) { + this.settingsManager.setDefaultModelAndProvider(next.model.provider, next.model.id); + } + + // Apply thinking level. 
+ // - Explicit scoped model thinking level overrides current session level + // - Undefined scoped model thinking level inherits the current session preference + // setThinkingLevel clamps to model capabilities. + this.setThinkingLevel(thinkingLevel); + + await this._emitModelSelect(next.model, currentModel, "cycle"); + + return { model: next.model, thinkingLevel: this.thinkingLevel, isScoped: true }; + } + + private async _cycleAvailableModel(direction: "forward" | "backward", options?: { persist?: boolean }): Promise { + const availableModels = await this._modelRegistry.getAvailable(); + if (availableModels.length <= 1) return undefined; + + const currentModel = this.model; + let currentIndex = availableModels.findIndex((m) => modelsAreEqual(m, currentModel)); + + if (currentIndex === -1) currentIndex = 0; + const len = availableModels.length; + const nextIndex = direction === "forward" ? (currentIndex + 1) % len : (currentIndex - 1 + len) % len; + const nextModel = availableModels[nextIndex]; + + const apiKey = await this._modelRegistry.getApiKey(nextModel); + if (!apiKey) { + throw new Error(`No API key for ${nextModel.provider}/${nextModel.id}`); + } + + const thinkingLevel = this._getThinkingLevelForModelSwitch(); + this.agent.setModel(nextModel); + this.sessionManager.appendModelChange(nextModel.provider, nextModel.id); + if (options?.persist !== false) { + this.settingsManager.setDefaultModelAndProvider(nextModel.provider, nextModel.id); + } + + // Re-clamp thinking level for new model's capabilities + this.setThinkingLevel(thinkingLevel); + + await this._emitModelSelect(nextModel, currentModel, "cycle"); + + return { model: nextModel, thinkingLevel: this.thinkingLevel, isScoped: false }; + } + + // ========================================================================= + // Thinking Level Management + // ========================================================================= + + /** + * Set thinking level. 
+ * Clamps to model capabilities based on available thinking levels. + * Saves to session and settings only if the level actually changes. + */ + setThinkingLevel(level: ThinkingLevel): void { + const availableLevels = this.getAvailableThinkingLevels(); + const effectiveLevel = availableLevels.includes(level) ? level : this._clampThinkingLevel(level, availableLevels); + + // Only persist if actually changing + const isChanging = effectiveLevel !== this.agent.state.thinkingLevel; + + this.agent.setThinkingLevel(effectiveLevel); + + if (isChanging) { + this.sessionManager.appendThinkingLevelChange(effectiveLevel); + if (this.supportsThinking() || effectiveLevel !== "off") { + this.settingsManager.setDefaultThinkingLevel(effectiveLevel); + } + } + } + + /** + * Cycle to next thinking level. + * @returns New level, or undefined if model doesn't support thinking + */ + cycleThinkingLevel(): ThinkingLevel | undefined { + if (!this.supportsThinking()) return undefined; + + const levels = this.getAvailableThinkingLevels(); + const currentIndex = levels.indexOf(this.thinkingLevel); + const nextIndex = (currentIndex + 1) % levels.length; + const nextLevel = levels[nextIndex]; + + this.setThinkingLevel(nextLevel); + return nextLevel; + } + + /** + * Get available thinking levels for current model. + * The provider will clamp to what the specific model supports internally. + */ + getAvailableThinkingLevels(): ThinkingLevel[] { + if (!this.supportsThinking()) return ["off"]; + return this.supportsXhighThinking() ? THINKING_LEVELS_WITH_XHIGH : THINKING_LEVELS; + } + + /** + * Check if current model supports xhigh thinking level. + */ + supportsXhighThinking(): boolean { + return this.model ? supportsXhigh(this.model) : false; + } + + /** + * Check if current model supports thinking/reasoning. 
+ */ + supportsThinking(): boolean { + return !!this.model?.reasoning; + } + + private _getThinkingLevelForModelSwitch(explicitLevel?: ThinkingLevel): ThinkingLevel { + if (explicitLevel !== undefined) { + return explicitLevel; + } + if (!this.supportsThinking()) { + return this.settingsManager.getDefaultThinkingLevel() ?? DEFAULT_THINKING_LEVEL; + } + return this.thinkingLevel; + } + + private _clampThinkingLevel(level: ThinkingLevel, availableLevels: ThinkingLevel[]): ThinkingLevel { + const ordered = THINKING_LEVELS_WITH_XHIGH; + const available = new Set(availableLevels); + const requestedIndex = ordered.indexOf(level); + if (requestedIndex === -1) { + return availableLevels[0] ?? "off"; + } + for (let i = requestedIndex; i < ordered.length; i++) { + const candidate = ordered[i]; + if (available.has(candidate)) return candidate; + } + for (let i = requestedIndex - 1; i >= 0; i--) { + const candidate = ordered[i]; + if (available.has(candidate)) return candidate; + } + return availableLevels[0] ?? "off"; + } + + // ========================================================================= + // Queue Mode Management + // ========================================================================= + + /** + * Set steering message mode. + * Saves to settings. + */ + setSteeringMode(mode: "all" | "one-at-a-time"): void { + this.agent.setSteeringMode(mode); + this.settingsManager.setSteeringMode(mode); + } + + /** + * Set follow-up message mode. + * Saves to settings. + */ + setFollowUpMode(mode: "all" | "one-at-a-time"): void { + this.agent.setFollowUpMode(mode); + this.settingsManager.setFollowUpMode(mode); + } + + // ========================================================================= + // Compaction + // ========================================================================= + + /** + * Manually compact the session context. + * Aborts current agent operation first. 
+ * @param customInstructions Optional instructions for the compaction summary + */ + async compact(customInstructions?: string): Promise { + this._disconnectFromAgent(); + await this.abort(); + this._compactionAbortController = new AbortController(); + + try { + if (!this.model) { + throw new Error("No model selected"); + } + + const apiKey = await this._modelRegistry.getApiKey(this.model); + if (!apiKey) { + throw new Error(`No API key for ${this.model.provider}`); + } + + const pathEntries = this.sessionManager.getBranch(); + const settings = this.settingsManager.getCompactionSettings(); + + const preparation = prepareCompaction(pathEntries, settings); + if (!preparation) { + // Check why we can't compact + const lastEntry = pathEntries[pathEntries.length - 1]; + if (lastEntry?.type === "compaction") { + throw new Error("Already compacted"); + } + throw new Error("Nothing to compact (session too small)"); + } + + let extensionCompaction: CompactionResult | undefined; + let fromExtension = false; + + if (this._extensionRunner?.hasHandlers("session_before_compact")) { + const result = (await this._extensionRunner.emit({ + type: "session_before_compact", + preparation, + branchEntries: pathEntries, + customInstructions, + signal: this._compactionAbortController.signal, + })) as SessionBeforeCompactResult | undefined; + + if (result?.cancel) { + throw new Error("Compaction cancelled"); + } + + if (result?.compaction) { + extensionCompaction = result.compaction; + fromExtension = true; + } + } + + let summary: string; + let firstKeptEntryId: string; + let tokensBefore: number; + let details: unknown; + + if (extensionCompaction) { + // Extension provided compaction content + summary = extensionCompaction.summary; + firstKeptEntryId = extensionCompaction.firstKeptEntryId; + tokensBefore = extensionCompaction.tokensBefore; + details = extensionCompaction.details; + } else { + // Generate compaction result + const result = await compact( + preparation, + this.model, + 
apiKey, + customInstructions, + this._compactionAbortController.signal, + ); + summary = result.summary; + firstKeptEntryId = result.firstKeptEntryId; + tokensBefore = result.tokensBefore; + details = result.details; + } + + if (this._compactionAbortController.signal.aborted) { + throw new Error("Compaction cancelled"); + } + + this.sessionManager.appendCompaction(summary, firstKeptEntryId, tokensBefore, details, fromExtension); + const newEntries = this.sessionManager.getEntries(); + const sessionContext = this.sessionManager.buildSessionContext(); + this.agent.replaceMessages(sessionContext.messages); + + // Get the saved compaction entry for the extension event + const savedCompactionEntry = newEntries.find((e) => e.type === "compaction" && e.summary === summary) as + | CompactionEntry + | undefined; + + if (this._extensionRunner && savedCompactionEntry) { + await this._extensionRunner.emit({ + type: "session_compact", + compactionEntry: savedCompactionEntry, + fromExtension, + }); + } + + return { + summary, + firstKeptEntryId, + tokensBefore, + details, + }; + } finally { + this._compactionAbortController = undefined; + this._reconnectToAgent(); + } + } + + /** + * Cancel in-progress compaction (manual or auto). + */ + abortCompaction(): void { + this._compactionAbortController?.abort(); + this._autoCompactionAbortController?.abort(); + } + + /** + * Cancel in-progress branch summarization. + */ + abortBranchSummary(): void { + this._branchSummaryAbortController?.abort(); + } + + /** + * Check if compaction is needed and run it. + * Called after agent_end and before prompt submission. + * + * Two cases: + * 1. Overflow: LLM returned context overflow error, remove error message from agent state, compact, auto-retry + * 2. Threshold: Context over threshold, compact, NO auto-retry (user continues manually) + * + * @param assistantMessage The assistant message to check + * @param skipAbortedCheck If false, include aborted messages (for pre-prompt check). 
Default: true + */ + private async _checkCompaction(assistantMessage: AssistantMessage, skipAbortedCheck = true): Promise { + const settings = this.settingsManager.getCompactionSettings(); + if (!settings.enabled) return; + + // Skip if message was aborted (user cancelled) - unless skipAbortedCheck is false + if (skipAbortedCheck && assistantMessage.stopReason === "aborted") return; + + const contextWindow = this.model?.contextWindow ?? 0; + + // Skip overflow check if the message came from a different model. + // This handles the case where user switched from a smaller-context model (e.g. opus) + // to a larger-context model (e.g. codex) - the overflow error from the old model + // shouldn't trigger compaction for the new model. + const sameModel = + this.model && assistantMessage.provider === this.model.provider && assistantMessage.model === this.model.id; + + // Skip compaction checks if this assistant message is older than the latest + // compaction boundary. This prevents a stale pre-compaction usage/error + // from retriggering compaction on the first prompt after compaction. + const compactionEntry = getLatestCompactionEntry(this.sessionManager.getBranch()); + const assistantIsFromBeforeCompaction = + compactionEntry !== null && assistantMessage.timestamp <= new Date(compactionEntry.timestamp).getTime(); + if (assistantIsFromBeforeCompaction) { + return; + } + + // Case 1: Overflow - LLM returned context overflow error + if (sameModel && isContextOverflow(assistantMessage, contextWindow)) { + if (this._overflowRecoveryAttempted) { + this._emit({ + type: "auto_compaction_end", + result: undefined, + aborted: false, + willRetry: false, + errorMessage: + "Context overflow recovery failed after one compact-and-retry attempt. 
Try reducing context or switching to a larger-context model.", + }); + return; + } + + this._overflowRecoveryAttempted = true; + // Remove the error message from agent state (it IS saved to session for history, + // but we don't want it in context for the retry) + const messages = this.agent.state.messages; + if (messages.length > 0 && messages[messages.length - 1].role === "assistant") { + this.agent.replaceMessages(messages.slice(0, -1)); + } + await this._runAutoCompaction("overflow", true); + return; + } + + // Case 2: Threshold - context is getting large + // For error messages (no usage data), estimate from last successful response. + // This ensures sessions that hit persistent API errors (e.g. 529) can still compact. + let contextTokens: number; + if (assistantMessage.stopReason === "error") { + const messages = this.agent.state.messages; + const estimate = estimateContextTokens(messages); + if (estimate.lastUsageIndex === null) return; // No usage data at all + // Verify the usage source is post-compaction. Kept pre-compaction messages + // have stale usage reflecting the old (larger) context and would falsely + // trigger compaction right after one just finished. + const usageMsg = messages[estimate.lastUsageIndex]; + if ( + compactionEntry && + usageMsg.role === "assistant" && + (usageMsg as AssistantMessage).timestamp <= new Date(compactionEntry.timestamp).getTime() + ) { + return; + } + contextTokens = estimate.tokens; + } else { + contextTokens = calculateContextTokens(assistantMessage.usage); + } + if (shouldCompact(contextTokens, contextWindow, settings)) { + await this._runAutoCompaction("threshold", false); + } + } + + /** + * Internal: Run auto-compaction with events. 
+ */ + private async _runAutoCompaction(reason: "overflow" | "threshold", willRetry: boolean): Promise { + const settings = this.settingsManager.getCompactionSettings(); + + this._emit({ type: "auto_compaction_start", reason }); + this._autoCompactionAbortController = new AbortController(); + + try { + if (!this.model) { + this._emit({ type: "auto_compaction_end", result: undefined, aborted: false, willRetry: false }); + return; + } + + const apiKey = await this._modelRegistry.getApiKey(this.model); + if (!apiKey) { + this._emit({ type: "auto_compaction_end", result: undefined, aborted: false, willRetry: false }); + return; + } + + const pathEntries = this.sessionManager.getBranch(); + + const preparation = prepareCompaction(pathEntries, settings); + if (!preparation) { + this._emit({ type: "auto_compaction_end", result: undefined, aborted: false, willRetry: false }); + return; + } + + let extensionCompaction: CompactionResult | undefined; + let fromExtension = false; + + if (this._extensionRunner?.hasHandlers("session_before_compact")) { + const extensionResult = (await this._extensionRunner.emit({ + type: "session_before_compact", + preparation, + branchEntries: pathEntries, + customInstructions: undefined, + signal: this._autoCompactionAbortController.signal, + })) as SessionBeforeCompactResult | undefined; + + if (extensionResult?.cancel) { + this._emit({ type: "auto_compaction_end", result: undefined, aborted: true, willRetry: false }); + return; + } + + if (extensionResult?.compaction) { + extensionCompaction = extensionResult.compaction; + fromExtension = true; + } + } + + let summary: string; + let firstKeptEntryId: string; + let tokensBefore: number; + let details: unknown; + + if (extensionCompaction) { + // Extension provided compaction content + summary = extensionCompaction.summary; + firstKeptEntryId = extensionCompaction.firstKeptEntryId; + tokensBefore = extensionCompaction.tokensBefore; + details = extensionCompaction.details; + } else { + // 
Generate compaction result + const compactResult = await compact( + preparation, + this.model, + apiKey, + undefined, + this._autoCompactionAbortController.signal, + ); + summary = compactResult.summary; + firstKeptEntryId = compactResult.firstKeptEntryId; + tokensBefore = compactResult.tokensBefore; + details = compactResult.details; + } + + if (this._autoCompactionAbortController.signal.aborted) { + this._emit({ type: "auto_compaction_end", result: undefined, aborted: true, willRetry: false }); + return; + } + + this.sessionManager.appendCompaction(summary, firstKeptEntryId, tokensBefore, details, fromExtension); + const newEntries = this.sessionManager.getEntries(); + const sessionContext = this.sessionManager.buildSessionContext(); + this.agent.replaceMessages(sessionContext.messages); + + // Get the saved compaction entry for the extension event + const savedCompactionEntry = newEntries.find((e) => e.type === "compaction" && e.summary === summary) as + | CompactionEntry + | undefined; + + if (this._extensionRunner && savedCompactionEntry) { + await this._extensionRunner.emit({ + type: "session_compact", + compactionEntry: savedCompactionEntry, + fromExtension, + }); + } + + const result: CompactionResult = { + summary, + firstKeptEntryId, + tokensBefore, + details, + }; + this._emit({ type: "auto_compaction_end", result, aborted: false, willRetry }); + + if (willRetry) { + const messages = this.agent.state.messages; + const lastMsg = messages[messages.length - 1]; + if (lastMsg?.role === "assistant" && (lastMsg as AssistantMessage).stopReason === "error") { + this.agent.replaceMessages(messages.slice(0, -1)); + } + + setTimeout(() => { + this.agent.continue().catch(() => {}); + }, 100); + } else if (this.agent.hasQueuedMessages()) { + // Auto-compaction can complete while follow-up/steering/custom messages are waiting. + // Kick the loop so queued messages are actually delivered. 
+ setTimeout(() => { + this.agent.continue().catch(() => {}); + }, 100); + } + } catch (error) { + const errorMessage = error instanceof Error ? error.message : "compaction failed"; + this._emit({ + type: "auto_compaction_end", + result: undefined, + aborted: false, + willRetry: false, + errorMessage: + reason === "overflow" + ? `Context overflow recovery failed: ${errorMessage}` + : `Auto-compaction failed: ${errorMessage}`, + }); + } finally { + this._autoCompactionAbortController = undefined; + } + } + + /** + * Toggle auto-compaction setting. + */ + setAutoCompactionEnabled(enabled: boolean): void { + this.settingsManager.setCompactionEnabled(enabled); + } + + /** Whether auto-compaction is enabled */ + get autoCompactionEnabled(): boolean { + return this.settingsManager.getCompactionEnabled(); + } + + async bindExtensions(bindings: ExtensionBindings): Promise { + if (bindings.uiContext !== undefined) { + this._extensionUIContext = bindings.uiContext; + } + if (bindings.commandContextActions !== undefined) { + this._extensionCommandContextActions = bindings.commandContextActions; + } + if (bindings.shutdownHandler !== undefined) { + this._extensionShutdownHandler = bindings.shutdownHandler; + } + if (bindings.onError !== undefined) { + this._extensionErrorListener = bindings.onError; + } + + if (this._extensionRunner) { + this._applyExtensionBindings(this._extensionRunner); + await this._extensionRunner.emit({ type: "session_start" }); + await this.extendResourcesFromExtensions("startup"); + } + } + + private async extendResourcesFromExtensions(reason: "startup" | "reload"): Promise { + if (!this._extensionRunner?.hasHandlers("resources_discover")) { + return; + } + + const { skillPaths, promptPaths, themePaths } = await this._extensionRunner.emitResourcesDiscover( + this._cwd, + reason, + ); + + if (skillPaths.length === 0 && promptPaths.length === 0 && themePaths.length === 0) { + return; + } + + const extensionPaths: ResourceExtensionPaths = { + skillPaths: 
this.buildExtensionResourcePaths(skillPaths), + promptPaths: this.buildExtensionResourcePaths(promptPaths), + themePaths: this.buildExtensionResourcePaths(themePaths), + }; + + this._resourceLoader.extendResources(extensionPaths); + this._baseSystemPrompt = this._rebuildSystemPrompt(this.getActiveToolNames()); + this.agent.setSystemPrompt(this._baseSystemPrompt); + } + + private buildExtensionResourcePaths(entries: Array<{ path: string; extensionPath: string }>): Array<{ + path: string; + metadata: { source: string; scope: "temporary"; origin: "top-level"; baseDir?: string }; + }> { + return entries.map((entry) => { + const source = this.getExtensionSourceLabel(entry.extensionPath); + const baseDir = entry.extensionPath.startsWith("<") ? undefined : dirname(entry.extensionPath); + return { + path: entry.path, + metadata: { + source, + scope: "temporary", + origin: "top-level", + baseDir, + }, + }; + }); + } + + private getExtensionSourceLabel(extensionPath: string): string { + if (extensionPath.startsWith("<")) { + return `extension:${extensionPath.replace(/[<>]/g, "")}`; + } + const base = basename(extensionPath); + const name = base.replace(/\.(ts|js)$/, ""); + return `extension:${name}`; + } + + private _applyExtensionBindings(runner: ExtensionRunner): void { + runner.setUIContext(this._extensionUIContext); + runner.bindCommandContext(this._extensionCommandContextActions); + + this._extensionErrorUnsubscriber?.(); + this._extensionErrorUnsubscriber = this._extensionErrorListener + ? 
runner.onError(this._extensionErrorListener) + : undefined; + } + + private _bindExtensionCore(runner: ExtensionRunner): void { + const normalizeLocation = (source: string): SlashCommandLocation | undefined => { + if (source === "user" || source === "project" || source === "path") { + return source; + } + return undefined; + }; + + const reservedBuiltins = new Set(BUILTIN_SLASH_COMMANDS.map((command) => command.name)); + + const getCommands = (): SlashCommandInfo[] => { + const extensionCommands: SlashCommandInfo[] = runner + .getRegisteredCommandsWithPaths() + .filter(({ command }) => !reservedBuiltins.has(command.name)) + .map(({ command, extensionPath }) => ({ + name: command.name, + description: command.description, + source: "extension", + path: extensionPath, + })); + + const templates: SlashCommandInfo[] = this.promptTemplates.map((template) => ({ + name: template.name, + description: template.description, + source: "prompt", + location: normalizeLocation(template.source), + path: template.filePath, + })); + + const skills: SlashCommandInfo[] = this._resourceLoader.getSkills().skills.map((skill) => ({ + name: `skill:${skill.name}`, + description: skill.description, + source: "skill", + location: normalizeLocation(skill.source), + path: skill.filePath, + })); + + return [...extensionCommands, ...templates, ...skills]; + }; + + runner.bindCore( + { + sendMessage: (message, options) => { + this.sendCustomMessage(message, options).catch((err) => { + runner.emitError({ + extensionPath: "", + event: "send_message", + error: err instanceof Error ? err.message : String(err), + }); + }); + }, + sendUserMessage: (content, options) => { + this.sendUserMessage(content, options).catch((err) => { + runner.emitError({ + extensionPath: "", + event: "send_user_message", + error: err instanceof Error ? 
err.message : String(err), + }); + }); + }, + appendEntry: (customType, data) => { + this.sessionManager.appendCustomEntry(customType, data); + }, + setSessionName: (name) => { + this.sessionManager.appendSessionInfo(name); + }, + getSessionName: () => { + return this.sessionManager.getSessionName(); + }, + setLabel: (entryId, label) => { + this.sessionManager.appendLabelChange(entryId, label); + }, + getActiveTools: () => this.getActiveToolNames(), + getAllTools: () => this.getAllTools(), + setActiveTools: (toolNames) => this.setActiveToolsByName(toolNames), + refreshTools: () => this._refreshToolRegistry(), + getCommands, + setModel: async (model, options) => { + const key = await this.modelRegistry.getApiKey(model); + if (!key) return false; + await this.setModel(model, options); + return true; + }, + getThinkingLevel: () => this.thinkingLevel, + setThinkingLevel: (level) => this.setThinkingLevel(level), + }, + { + getModel: () => this.model, + isIdle: () => !this.isStreaming, + abort: () => this.abort(), + hasPendingMessages: () => this.pendingMessageCount > 0, + shutdown: () => { + this._extensionShutdownHandler?.(); + }, + getContextUsage: () => this.getContextUsage(), + compact: (options) => { + void (async () => { + try { + const result = await this.compact(options?.customInstructions); + options?.onComplete?.(result); + } catch (error) { + const err = error instanceof Error ? error : new Error(String(error)); + options?.onError?.(err); + } + })(); + }, + getSystemPrompt: () => this.systemPrompt, + }, + ); + } + + private _refreshToolRegistry(options?: { activeToolNames?: string[]; includeAllExtensionTools?: boolean }): void { + const previousRegistryNames = new Set(this._toolRegistry.keys()); + const previousActiveToolNames = this.getActiveToolNames(); + + const registeredTools = this._extensionRunner?.getAllRegisteredTools() ?? 
[]; + const allCustomTools = [ + ...registeredTools, + ...this._customTools.map((def) => ({ definition: def, extensionPath: "" })), + ]; + this._toolPromptSnippets = new Map( + allCustomTools + .map((registeredTool) => { + const snippet = this._normalizePromptSnippet( + registeredTool.definition.promptSnippet ?? registeredTool.definition.description, + ); + return snippet ? ([registeredTool.definition.name, snippet] as const) : undefined; + }) + .filter((entry): entry is readonly [string, string] => entry !== undefined), + ); + this._toolPromptGuidelines = new Map( + allCustomTools + .map((registeredTool) => { + const guidelines = this._normalizePromptGuidelines(registeredTool.definition.promptGuidelines); + return guidelines.length > 0 ? ([registeredTool.definition.name, guidelines] as const) : undefined; + }) + .filter((entry): entry is readonly [string, string[]] => entry !== undefined), + ); + const wrappedExtensionTools = this._extensionRunner + ? wrapRegisteredTools(allCustomTools, this._extensionRunner) + : []; + + const toolRegistry = new Map(this._baseToolRegistry); + for (const tool of wrappedExtensionTools as AgentTool[]) { + toolRegistry.set(tool.name, tool); + } + + if (this._extensionRunner) { + const wrappedAllTools = wrapToolsWithExtensions(Array.from(toolRegistry.values()), this._extensionRunner); + this._toolRegistry = new Map(wrappedAllTools.map((tool) => [tool.name, tool])); + } else { + this._toolRegistry = toolRegistry; + } + + const nextActiveToolNames = options?.activeToolNames + ? 
[...options.activeToolNames] + : [...previousActiveToolNames]; + + if (options?.includeAllExtensionTools) { + for (const tool of wrappedExtensionTools) { + nextActiveToolNames.push(tool.name); + } + } else if (!options?.activeToolNames) { + for (const toolName of this._toolRegistry.keys()) { + if (!previousRegistryNames.has(toolName)) { + nextActiveToolNames.push(toolName); + } + } + } + + this.setActiveToolsByName([...new Set(nextActiveToolNames)]); + } + + private _buildRuntime(options: { + activeToolNames?: string[]; + flagValues?: Map; + includeAllExtensionTools?: boolean; + }): void { + const autoResizeImages = this.settingsManager.getImageAutoResize(); + const shellCommandPrefix = this.settingsManager.getShellCommandPrefix(); + const baseTools = this._baseToolsOverride + ? this._baseToolsOverride + : createAllTools(this._cwd, { + read: { autoResizeImages }, + bash: { commandPrefix: shellCommandPrefix }, + }); + + this._baseToolRegistry = new Map(Object.entries(baseTools).map(([name, tool]) => [name, tool as AgentTool])); + + const extensionsResult = this._resourceLoader.getExtensions(); + if (options.flagValues) { + for (const [name, value] of options.flagValues) { + extensionsResult.runtime.flagValues.set(name, value); + } + } + + const hasExtensions = extensionsResult.extensions.length > 0; + const hasCustomTools = this._customTools.length > 0; + this._extensionRunner = + hasExtensions || hasCustomTools + ? new ExtensionRunner( + extensionsResult.extensions, + extensionsResult.runtime, + this._cwd, + this.sessionManager, + this._modelRegistry, + ) + : undefined; + if (this._extensionRunnerRef) { + this._extensionRunnerRef.current = this._extensionRunner; + } + if (this._extensionRunner) { + this._bindExtensionCore(this._extensionRunner); + this._applyExtensionBindings(this._extensionRunner); + } + + const defaultActiveToolNames = this._baseToolsOverride + ? 
Object.keys(this._baseToolsOverride) + : ["read", "bash", "edit", "write"]; + const baseActiveToolNames = options.activeToolNames ?? defaultActiveToolNames; + this._refreshToolRegistry({ + activeToolNames: baseActiveToolNames, + includeAllExtensionTools: options.includeAllExtensionTools, + }); + } + + async reload(): Promise { + const previousFlagValues = this._extensionRunner?.getFlagValues(); + await this._extensionRunner?.emit({ type: "session_shutdown" }); + this.settingsManager.reload(); + resetApiProviders(); + await this._resourceLoader.reload(); + this._buildRuntime({ + activeToolNames: this.getActiveToolNames(), + flagValues: previousFlagValues, + includeAllExtensionTools: true, + }); + + const hasBindings = + this._extensionUIContext || + this._extensionCommandContextActions || + this._extensionShutdownHandler || + this._extensionErrorListener; + if (this._extensionRunner && hasBindings) { + await this._extensionRunner.emit({ type: "session_start" }); + await this.extendResourcesFromExtensions("reload"); + } + } + + // ========================================================================= + // Auto-Retry + // ========================================================================= + + /** + * Check if an error is retryable (overloaded, rate limit, server errors). + * Context overflow errors are NOT retryable (handled by compaction instead). + */ + private _isRetryableError(message: AssistantMessage): boolean { + if (message.stopReason !== "error" || !message.errorMessage) return false; + + // Context overflow is handled by compaction, not retry + const contextWindow = this.model?.contextWindow ?? 
0; + if (isContextOverflow(message, contextWindow)) return false; + + const err = message.errorMessage; + // Match: overloaded_error, rate limit, 429, 500, 502, 503, 504, service unavailable, connection errors, fetch failed, terminated, retry delay exceeded + return /overloaded|rate.?limit|too many requests|429|500|502|503|504|service.?unavailable|server error|internal error|connection.?error|connection.?refused|other side closed|fetch failed|upstream.?connect|reset before headers|terminated|retry delay/i.test( + err, + ); + } + + /** + * Handle retryable errors with exponential backoff. + * @returns true if retry was initiated, false if max retries exceeded or disabled + */ + private async _handleRetryableError(message: AssistantMessage): Promise { + const settings = this.settingsManager.getRetrySettings(); + if (!settings.enabled) { + this._resolveRetry(); + return false; + } + + // Retry promise is created synchronously in _handleAgentEvent for agent_end. + // Keep a defensive fallback here in case a future refactor bypasses that path. 
+ if (!this._retryPromise) { + this._retryPromise = new Promise((resolve) => { + this._retryResolve = resolve; + }); + } + + this._retryAttempt++; + + if (this._retryAttempt > settings.maxRetries) { + // Max retries exceeded, emit final failure and reset + this._emit({ + type: "auto_retry_end", + success: false, + attempt: this._retryAttempt - 1, + finalError: message.errorMessage, + }); + this._retryAttempt = 0; + this._resolveRetry(); // Resolve so waitForRetry() completes + return false; + } + + const delayMs = settings.baseDelayMs * 2 ** (this._retryAttempt - 1); + + this._emit({ + type: "auto_retry_start", + attempt: this._retryAttempt, + maxAttempts: settings.maxRetries, + delayMs, + errorMessage: message.errorMessage || "Unknown error", + }); + + // Remove error message from agent state (keep in session for history) + const messages = this.agent.state.messages; + if (messages.length > 0 && messages[messages.length - 1].role === "assistant") { + this.agent.replaceMessages(messages.slice(0, -1)); + } + + // Wait with exponential backoff (abortable) + this._retryAbortController = new AbortController(); + try { + await sleep(delayMs, this._retryAbortController.signal); + } catch { + // Aborted during sleep - emit end event so UI can clean up + const attempt = this._retryAttempt; + this._retryAttempt = 0; + this._retryAbortController = undefined; + this._emit({ + type: "auto_retry_end", + success: false, + attempt, + finalError: "Retry cancelled", + }); + this._resolveRetry(); + return false; + } + this._retryAbortController = undefined; + + // Retry via continue() - use setTimeout to break out of event handler chain + setTimeout(() => { + this.agent.continue().catch(() => { + // Retry failed - will be caught by next agent_end + }); + }, 0); + + return true; + } + + /** + * Cancel in-progress retry. 
+ */ + abortRetry(): void { + this._retryAbortController?.abort(); + // Note: _retryAttempt is reset in the catch block of _handleRetryableError + this._resolveRetry(); + } + + /** + * Wait for any in-progress retry to complete. + * Returns immediately if no retry is in progress. + */ + private async waitForRetry(): Promise { + if (this._retryPromise) { + await this._retryPromise; + } + } + + /** Whether auto-retry is currently in progress */ + get isRetrying(): boolean { + return this._retryPromise !== undefined; + } + + /** Whether auto-retry is enabled */ + get autoRetryEnabled(): boolean { + return this.settingsManager.getRetryEnabled(); + } + + /** + * Toggle auto-retry setting. + */ + setAutoRetryEnabled(enabled: boolean): void { + this.settingsManager.setRetryEnabled(enabled); + } + + // ========================================================================= + // Bash Execution + // ========================================================================= + + /** + * Execute a bash command. + * Adds result to agent context and session. + * @param command The bash command to execute + * @param onChunk Optional streaming callback for output + * @param options.excludeFromContext If true, command output won't be sent to LLM (!! prefix) + * @param options.operations Custom BashOperations for remote execution + */ + async executeBash( + command: string, + onChunk?: (chunk: string) => void, + options?: { excludeFromContext?: boolean; operations?: BashOperations }, + ): Promise { + this._bashAbortController = new AbortController(); + + // Apply command prefix if configured (e.g., "shopt -s expand_aliases" for alias support) + const prefix = this.settingsManager.getShellCommandPrefix(); + const resolvedCommand = prefix ? `${prefix}\n${command}` : command; + + try { + const result = options?.operations + ? 
await executeBashWithOperations(resolvedCommand, process.cwd(), options.operations, { + onChunk, + signal: this._bashAbortController.signal, + }) + : await executeBashCommand(resolvedCommand, { + onChunk, + signal: this._bashAbortController.signal, + }); + + this.recordBashResult(command, result, options); + return result; + } finally { + this._bashAbortController = undefined; + } + } + + /** + * Record a bash execution result in session history. + * Used by executeBash and by extensions that handle bash execution themselves. + */ + recordBashResult(command: string, result: BashResult, options?: { excludeFromContext?: boolean }): void { + const bashMessage: BashExecutionMessage = { + role: "bashExecution", + command, + output: result.output, + exitCode: result.exitCode, + cancelled: result.cancelled, + truncated: result.truncated, + fullOutputPath: result.fullOutputPath, + timestamp: Date.now(), + excludeFromContext: options?.excludeFromContext, + }; + + // If agent is streaming, defer adding to avoid breaking tool_use/tool_result ordering + if (this.isStreaming) { + // Queue for later - will be flushed on agent_end + this._pendingBashMessages.push(bashMessage); + } else { + // Add to agent state immediately + this.agent.appendMessage(bashMessage); + + // Save to session + this.sessionManager.appendMessage(bashMessage); + } + } + + /** + * Cancel running bash command. + */ + abortBash(): void { + this._bashAbortController?.abort(); + } + + /** Whether a bash command is currently running */ + get isBashRunning(): boolean { + return this._bashAbortController !== undefined; + } + + /** Whether there are pending bash messages waiting to be flushed */ + get hasPendingBashMessages(): boolean { + return this._pendingBashMessages.length > 0; + } + + /** + * Flush pending bash messages to agent state and session. + * Called after agent turn completes to maintain proper message ordering. 
+ */ + private _flushPendingBashMessages(): void { + if (this._pendingBashMessages.length === 0) return; + + for (const bashMessage of this._pendingBashMessages) { + // Add to agent state + this.agent.appendMessage(bashMessage); + + // Save to session + this.sessionManager.appendMessage(bashMessage); + } + + this._pendingBashMessages = []; + } + + // ========================================================================= + // Session Management + // ========================================================================= + + /** + * Switch to a different session file. + * Aborts current operation, loads messages, restores model/thinking. + * Listeners are preserved and will continue receiving events. + * @returns true if switch completed, false if cancelled by extension + */ + async switchSession(sessionPath: string): Promise { + const previousSessionFile = this.sessionManager.getSessionFile(); + + // Emit session_before_switch event (can be cancelled) + if (this._extensionRunner?.hasHandlers("session_before_switch")) { + const result = (await this._extensionRunner.emit({ + type: "session_before_switch", + reason: "resume", + targetSessionFile: sessionPath, + })) as SessionBeforeSwitchResult | undefined; + + if (result?.cancel) { + return false; + } + } + + this._disconnectFromAgent(); + await this.abort(); + this._steeringMessages = []; + this._followUpMessages = []; + this._pendingNextTurnMessages = []; + + // Set new session + this.sessionManager.setSessionFile(sessionPath); + this.agent.sessionId = this.sessionManager.getSessionId(); + + // Reload messages + const sessionContext = this.sessionManager.buildSessionContext(); + + // Emit session_switch event to extensions + if (this._extensionRunner) { + await this._extensionRunner.emit({ + type: "session_switch", + reason: "resume", + previousSessionFile, + }); + } + + // Emit session event to custom tools + + this.agent.replaceMessages(sessionContext.messages); + + // Restore model if saved + if 
(sessionContext.model) { + const previousModel = this.model; + const availableModels = await this._modelRegistry.getAvailable(); + const match = availableModels.find( + (m) => m.provider === sessionContext.model!.provider && m.id === sessionContext.model!.modelId, + ); + if (match) { + this.agent.setModel(match); + await this._emitModelSelect(match, previousModel, "restore"); + } + } + + const hasThinkingEntry = this.sessionManager.getBranch().some((entry) => entry.type === "thinking_level_change"); + const defaultThinkingLevel = this.settingsManager.getDefaultThinkingLevel() ?? DEFAULT_THINKING_LEVEL; + + if (hasThinkingEntry) { + // Restore thinking level if saved (setThinkingLevel clamps to model capabilities) + this.setThinkingLevel(sessionContext.thinkingLevel as ThinkingLevel); + } else { + const availableLevels = this.getAvailableThinkingLevels(); + const effectiveLevel = availableLevels.includes(defaultThinkingLevel) + ? defaultThinkingLevel + : this._clampThinkingLevel(defaultThinkingLevel, availableLevels); + this.agent.setThinkingLevel(effectiveLevel); + this.sessionManager.appendThinkingLevelChange(effectiveLevel); + } + + this._reconnectToAgent(); + return true; + } + + /** + * Set a display name for the current session. + */ + setSessionName(name: string): void { + this.sessionManager.appendSessionInfo(name); + } + + /** + * Create a fork from a specific entry. + * Emits before_fork/fork session events to extensions. 
+ * + * @param entryId ID of the entry to fork from + * @returns Object with: + * - selectedText: The text of the selected user message (for editor pre-fill) + * - cancelled: True if an extension cancelled the fork + */ + async fork(entryId: string): Promise<{ selectedText: string; cancelled: boolean }> { + const previousSessionFile = this.sessionFile; + const selectedEntry = this.sessionManager.getEntry(entryId); + + if (!selectedEntry || selectedEntry.type !== "message" || selectedEntry.message.role !== "user") { + throw new Error("Invalid entry ID for forking"); + } + + const selectedText = this._extractUserMessageText(selectedEntry.message.content); + + let skipConversationRestore = false; + + // Emit session_before_fork event (can be cancelled) + if (this._extensionRunner?.hasHandlers("session_before_fork")) { + const result = (await this._extensionRunner.emit({ + type: "session_before_fork", + entryId, + })) as SessionBeforeForkResult | undefined; + + if (result?.cancel) { + return { selectedText, cancelled: true }; + } + skipConversationRestore = result?.skipConversationRestore ?? 
false; + } + + // Clear pending messages (bound to old session state) + this._pendingNextTurnMessages = []; + + if (!selectedEntry.parentId) { + this.sessionManager.newSession({ parentSession: previousSessionFile }); + } else { + this.sessionManager.createBranchedSession(selectedEntry.parentId); + } + this.agent.sessionId = this.sessionManager.getSessionId(); + + // Reload messages from entries (works for both file and in-memory mode) + const sessionContext = this.sessionManager.buildSessionContext(); + + // Emit session_fork event to extensions (after fork completes) + if (this._extensionRunner) { + await this._extensionRunner.emit({ + type: "session_fork", + previousSessionFile, + }); + } + + // Emit session event to custom tools (with reason "fork") + + if (!skipConversationRestore) { + this.agent.replaceMessages(sessionContext.messages); + } + + return { selectedText, cancelled: false }; + } + + // ========================================================================= + // Tree Navigation + // ========================================================================= + + /** + * Navigate to a different node in the session tree. + * Unlike fork() which creates a new session file, this stays in the same file. 
+ * + * @param targetId The entry ID to navigate to + * @param options.summarize Whether user wants to summarize abandoned branch + * @param options.customInstructions Custom instructions for summarizer + * @param options.replaceInstructions If true, customInstructions replaces the default prompt + * @param options.label Label to attach to the branch summary entry + * @returns Result with editorText (if user message) and cancelled status + */ + async navigateTree( + targetId: string, + options: { summarize?: boolean; customInstructions?: string; replaceInstructions?: boolean; label?: string } = {}, + ): Promise<{ editorText?: string; cancelled: boolean; aborted?: boolean; summaryEntry?: BranchSummaryEntry }> { + const oldLeafId = this.sessionManager.getLeafId(); + + // No-op if already at target + if (targetId === oldLeafId) { + return { cancelled: false }; + } + + // Model required for summarization + if (options.summarize && !this.model) { + throw new Error("No model available for summarization"); + } + + const targetEntry = this.sessionManager.getEntry(targetId); + if (!targetEntry) { + throw new Error(`Entry ${targetId} not found`); + } + + // Collect entries to summarize (from old leaf to common ancestor) + const { entries: entriesToSummarize, commonAncestorId } = collectEntriesForBranchSummary( + this.sessionManager, + oldLeafId, + targetId, + ); + + // Prepare event data - mutable so extensions can override + let customInstructions = options.customInstructions; + let replaceInstructions = options.replaceInstructions; + let label = options.label; + + const preparation: TreePreparation = { + targetId, + oldLeafId, + commonAncestorId, + entriesToSummarize, + userWantsSummary: options.summarize ?? 
false, + customInstructions, + replaceInstructions, + label, + }; + + // Set up abort controller for summarization + this._branchSummaryAbortController = new AbortController(); + let extensionSummary: { summary: string; details?: unknown } | undefined; + let fromExtension = false; + + // Emit session_before_tree event + if (this._extensionRunner?.hasHandlers("session_before_tree")) { + const result = (await this._extensionRunner.emit({ + type: "session_before_tree", + preparation, + signal: this._branchSummaryAbortController.signal, + })) as SessionBeforeTreeResult | undefined; + + if (result?.cancel) { + return { cancelled: true }; + } + + if (result?.summary && options.summarize) { + extensionSummary = result.summary; + fromExtension = true; + } + + // Allow extensions to override instructions and label + if (result?.customInstructions !== undefined) { + customInstructions = result.customInstructions; + } + if (result?.replaceInstructions !== undefined) { + replaceInstructions = result.replaceInstructions; + } + if (result?.label !== undefined) { + label = result.label; + } + } + + // Run default summarizer if needed + let summaryText: string | undefined; + let summaryDetails: unknown; + if (options.summarize && entriesToSummarize.length > 0 && !extensionSummary) { + const model = this.model!; + const apiKey = await this._modelRegistry.getApiKey(model); + if (!apiKey) { + throw new Error(`No API key for ${model.provider}`); + } + const branchSummarySettings = this.settingsManager.getBranchSummarySettings(); + const result = await generateBranchSummary(entriesToSummarize, { + model, + apiKey, + signal: this._branchSummaryAbortController.signal, + customInstructions, + replaceInstructions, + reserveTokens: branchSummarySettings.reserveTokens, + }); + this._branchSummaryAbortController = undefined; + if (result.aborted) { + return { cancelled: true, aborted: true }; + } + if (result.error) { + throw new Error(result.error); + } + summaryText = result.summary; + 
summaryDetails = { + readFiles: result.readFiles || [], + modifiedFiles: result.modifiedFiles || [], + }; + } else if (extensionSummary) { + summaryText = extensionSummary.summary; + summaryDetails = extensionSummary.details; + } + + // Determine the new leaf position based on target type + let newLeafId: string | null; + let editorText: string | undefined; + + if (targetEntry.type === "message" && targetEntry.message.role === "user") { + // User message: leaf = parent (null if root), text goes to editor + newLeafId = targetEntry.parentId; + editorText = this._extractUserMessageText(targetEntry.message.content); + } else if (targetEntry.type === "custom_message") { + // Custom message: leaf = parent (null if root), text goes to editor + newLeafId = targetEntry.parentId; + editorText = + typeof targetEntry.content === "string" + ? targetEntry.content + : targetEntry.content + .filter((c): c is { type: "text"; text: string } => c.type === "text") + .map((c) => c.text) + .join(""); + } else { + // Non-user message: leaf = selected node + newLeafId = targetId; + } + + // Switch leaf (with or without summary) + // Summary is attached at the navigation target position (newLeafId), not the old branch + let summaryEntry: BranchSummaryEntry | undefined; + if (summaryText) { + // Create summary at target position (can be null for root) + const summaryId = this.sessionManager.branchWithSummary(newLeafId, summaryText, summaryDetails, fromExtension); + summaryEntry = this.sessionManager.getEntry(summaryId) as BranchSummaryEntry; + + // Attach label to the summary entry + if (label) { + this.sessionManager.appendLabelChange(summaryId, label); + } + } else if (newLeafId === null) { + // No summary, navigating to root - reset leaf + this.sessionManager.resetLeaf(); + } else { + // No summary, navigating to non-root + this.sessionManager.branch(newLeafId); + } + + // Attach label to target entry when not summarizing (no summary entry to label) + if (label && !summaryText) { + 
this.sessionManager.appendLabelChange(targetId, label); + } + + // Update agent state + const sessionContext = this.sessionManager.buildSessionContext(); + this.agent.replaceMessages(sessionContext.messages); + + // Emit session_tree event + if (this._extensionRunner) { + await this._extensionRunner.emit({ + type: "session_tree", + newLeafId: this.sessionManager.getLeafId(), + oldLeafId, + summaryEntry, + fromExtension: summaryText ? fromExtension : undefined, + }); + } + + // Emit to custom tools + + this._branchSummaryAbortController = undefined; + return { editorText, cancelled: false, summaryEntry }; + } + + /** + * Get all user messages from session for fork selector. + */ + getUserMessagesForForking(): Array<{ entryId: string; text: string }> { + const entries = this.sessionManager.getEntries(); + const result: Array<{ entryId: string; text: string }> = []; + + for (const entry of entries) { + if (entry.type !== "message") continue; + if (entry.message.role !== "user") continue; + + const text = this._extractUserMessageText(entry.message.content); + if (text) { + result.push({ entryId: entry.id, text }); + } + } + + return result; + } + + private _extractUserMessageText(content: string | Array<{ type: string; text?: string }>): string { + if (typeof content === "string") return content; + if (Array.isArray(content)) { + return content + .filter((c): c is { type: "text"; text: string } => c.type === "text") + .map((c) => c.text) + .join(""); + } + return ""; + } + + /** + * Get session statistics. 
+ */ + getSessionStats(): SessionStats { + const state = this.state; + const userMessages = state.messages.filter((m) => m.role === "user").length; + const assistantMessages = state.messages.filter((m) => m.role === "assistant").length; + const toolResults = state.messages.filter((m) => m.role === "toolResult").length; + + let toolCalls = 0; + let totalInput = 0; + let totalOutput = 0; + let totalCacheRead = 0; + let totalCacheWrite = 0; + let totalCost = 0; + + for (const message of state.messages) { + if (message.role === "assistant") { + const assistantMsg = message as AssistantMessage; + toolCalls += assistantMsg.content.filter((c) => c.type === "toolCall").length; + totalInput += assistantMsg.usage.input; + totalOutput += assistantMsg.usage.output; + totalCacheRead += assistantMsg.usage.cacheRead; + totalCacheWrite += assistantMsg.usage.cacheWrite; + totalCost += assistantMsg.usage.cost.total; + } + } + + return { + sessionFile: this.sessionFile, + sessionId: this.sessionId, + userMessages, + assistantMessages, + toolCalls, + toolResults, + totalMessages: state.messages.length, + tokens: { + input: totalInput, + output: totalOutput, + cacheRead: totalCacheRead, + cacheWrite: totalCacheWrite, + total: totalInput + totalOutput + totalCacheRead + totalCacheWrite, + }, + cost: totalCost, + }; + } + + getContextUsage(): ContextUsage | undefined { + const model = this.model; + if (!model) return undefined; + + const contextWindow = model.contextWindow ?? 0; + if (contextWindow <= 0) return undefined; + + // After compaction, the last assistant usage reflects pre-compaction context size. + // We can only trust usage from an assistant that responded after the latest compaction. + // If no such assistant exists, context token count is unknown until the next LLM response. 
+ const branchEntries = this.sessionManager.getBranch(); + const latestCompaction = getLatestCompactionEntry(branchEntries); + + if (latestCompaction) { + // Check if there's a valid assistant usage after the compaction boundary + const compactionIndex = branchEntries.lastIndexOf(latestCompaction); + let hasPostCompactionUsage = false; + for (let i = branchEntries.length - 1; i > compactionIndex; i--) { + const entry = branchEntries[i]; + if (entry.type === "message" && entry.message.role === "assistant") { + const assistant = entry.message; + if (assistant.stopReason !== "aborted" && assistant.stopReason !== "error") { + const contextTokens = calculateContextTokens(assistant.usage); + if (contextTokens > 0) { + hasPostCompactionUsage = true; + } + break; + } + } + } + + if (!hasPostCompactionUsage) { + return { tokens: null, contextWindow, percent: null }; + } + } + + const estimate = estimateContextTokens(this.messages); + const percent = (estimate.tokens / contextWindow) * 100; + + return { + tokens: estimate.tokens, + contextWindow, + percent, + }; + } + + /** + * Export session to HTML. + * @param outputPath Optional output path (defaults to session directory) + * @returns Path to exported file + */ + async exportToHtml(outputPath?: string): Promise { + const themeName = this.settingsManager.getTheme(); + + // Create tool renderer if we have an extension runner (for custom tool HTML rendering) + let toolRenderer: ToolHtmlRenderer | undefined; + if (this._extensionRunner) { + toolRenderer = createToolHtmlRenderer({ + getToolDefinition: (name) => this._extensionRunner!.getToolDefinition(name), + theme, + }); + } + + return await exportSessionToHtml(this.sessionManager, this.state, { + outputPath, + themeName, + toolRenderer, + }); + } + + // ========================================================================= + // Utilities + // ========================================================================= + + /** + * Get text content of last assistant message. 
+ * Useful for /copy command. + * @returns Text content, or undefined if no assistant message exists + */ + getLastAssistantText(): string | undefined { + const lastAssistant = this.messages + .slice() + .reverse() + .find((m) => { + if (m.role !== "assistant") return false; + const msg = m as AssistantMessage; + // Skip aborted messages with no content + if (msg.stopReason === "aborted" && msg.content.length === 0) return false; + return true; + }); + + if (!lastAssistant) return undefined; + + let text = ""; + for (const content of (lastAssistant as AssistantMessage).content) { + if (content.type === "text") { + text += content.text; + } + } + + return text.trim() || undefined; + } + + // ========================================================================= + // Extension System + // ========================================================================= + + /** + * Check if extensions have handlers for a specific event type. + */ + hasExtensionHandlers(eventType: string): boolean { + return this._extensionRunner?.hasHandlers(eventType) ?? false; + } + + /** + * Get the extension runner (for setting UI context and error handlers). + */ + get extensionRunner(): ExtensionRunner | undefined { + return this._extensionRunner; + } +} diff --git a/packages/pi-coding-agent/src/core/auth-storage.ts b/packages/pi-coding-agent/src/core/auth-storage.ts new file mode 100644 index 000000000..f8eb23bc5 --- /dev/null +++ b/packages/pi-coding-agent/src/core/auth-storage.ts @@ -0,0 +1,489 @@ +/** + * Credential storage for API keys and OAuth tokens. + * Handles loading, saving, and refreshing credentials from auth.json. + * + * Uses file locking to prevent race conditions when multiple pi instances + * try to refresh tokens simultaneously. 
+ */ + +import { + getEnvApiKey, + type OAuthCredentials, + type OAuthLoginCallbacks, + type OAuthProviderId, +} from "@gsd/pi-ai"; +import { getOAuthApiKey, getOAuthProvider, getOAuthProviders } from "@gsd/pi-ai/oauth"; +import { chmodSync, existsSync, mkdirSync, readFileSync, writeFileSync } from "fs"; +import { dirname, join } from "path"; +import lockfile from "proper-lockfile"; +import { getAgentDir } from "../config.js"; +import { resolveConfigValue } from "./resolve-config-value.js"; + +export type ApiKeyCredential = { + type: "api_key"; + key: string; +}; + +export type OAuthCredential = { + type: "oauth"; +} & OAuthCredentials; + +export type AuthCredential = ApiKeyCredential | OAuthCredential; + +export type AuthStorageData = Record; + +type LockResult = { + result: T; + next?: string; +}; + +export interface AuthStorageBackend { + withLock(fn: (current: string | undefined) => LockResult): T; + withLockAsync(fn: (current: string | undefined) => Promise>): Promise; +} + +export class FileAuthStorageBackend implements AuthStorageBackend { + constructor(private authPath: string = join(getAgentDir(), "auth.json")) {} + + private ensureParentDir(): void { + const dir = dirname(this.authPath); + if (!existsSync(dir)) { + mkdirSync(dir, { recursive: true, mode: 0o700 }); + } + } + + private ensureFileExists(): void { + if (!existsSync(this.authPath)) { + writeFileSync(this.authPath, "{}", "utf-8"); + chmodSync(this.authPath, 0o600); + } + } + + private acquireLockSyncWithRetry(path: string): () => void { + const maxAttempts = 10; + const delayMs = 20; + let lastError: unknown; + + for (let attempt = 1; attempt <= maxAttempts; attempt++) { + try { + return lockfile.lockSync(path, { realpath: false }); + } catch (error) { + const code = + typeof error === "object" && error !== null && "code" in error + ? 
String((error as { code?: unknown }).code) + : undefined; + if (code !== "ELOCKED" || attempt === maxAttempts) { + throw error; + } + lastError = error; + const start = Date.now(); + while (Date.now() - start < delayMs) { + // Sleep synchronously to avoid changing callers to async. + } + } + } + + throw (lastError as Error) ?? new Error("Failed to acquire auth storage lock"); + } + + withLock(fn: (current: string | undefined) => LockResult): T { + this.ensureParentDir(); + this.ensureFileExists(); + + let release: (() => void) | undefined; + try { + release = this.acquireLockSyncWithRetry(this.authPath); + const current = existsSync(this.authPath) ? readFileSync(this.authPath, "utf-8") : undefined; + const { result, next } = fn(current); + if (next !== undefined) { + writeFileSync(this.authPath, next, "utf-8"); + chmodSync(this.authPath, 0o600); + } + return result; + } finally { + if (release) { + release(); + } + } + } + + async withLockAsync(fn: (current: string | undefined) => Promise>): Promise { + this.ensureParentDir(); + this.ensureFileExists(); + + let release: (() => Promise) | undefined; + let lockCompromised = false; + let lockCompromisedError: Error | undefined; + const throwIfCompromised = () => { + if (lockCompromised) { + throw lockCompromisedError ?? new Error("Auth storage lock was compromised"); + } + }; + + try { + release = await lockfile.lock(this.authPath, { + retries: { + retries: 10, + factor: 2, + minTimeout: 100, + maxTimeout: 10000, + randomize: true, + }, + stale: 30000, + onCompromised: (err) => { + lockCompromised = true; + lockCompromisedError = err; + }, + }); + + throwIfCompromised(); + const current = existsSync(this.authPath) ? 
readFileSync(this.authPath, "utf-8") : undefined; + const { result, next } = await fn(current); + throwIfCompromised(); + if (next !== undefined) { + writeFileSync(this.authPath, next, "utf-8"); + chmodSync(this.authPath, 0o600); + } + throwIfCompromised(); + return result; + } finally { + if (release) { + try { + await release(); + } catch { + // Ignore unlock errors when lock is compromised. + } + } + } + } +} + +export class InMemoryAuthStorageBackend implements AuthStorageBackend { + private value: string | undefined; + + withLock(fn: (current: string | undefined) => LockResult): T { + const { result, next } = fn(this.value); + if (next !== undefined) { + this.value = next; + } + return result; + } + + async withLockAsync(fn: (current: string | undefined) => Promise>): Promise { + const { result, next } = await fn(this.value); + if (next !== undefined) { + this.value = next; + } + return result; + } +} + +/** + * Credential storage backed by a JSON file. + */ +export class AuthStorage { + private data: AuthStorageData = {}; + private runtimeOverrides: Map = new Map(); + private fallbackResolver?: (provider: string) => string | undefined; + private loadError: Error | null = null; + private errors: Error[] = []; + + private constructor(private storage: AuthStorageBackend) { + this.reload(); + } + + static create(authPath?: string): AuthStorage { + return new AuthStorage(new FileAuthStorageBackend(authPath ?? join(getAgentDir(), "auth.json"))); + } + + static fromStorage(storage: AuthStorageBackend): AuthStorage { + return new AuthStorage(storage); + } + + static inMemory(data: AuthStorageData = {}): AuthStorage { + const storage = new InMemoryAuthStorageBackend(); + storage.withLock(() => ({ result: undefined, next: JSON.stringify(data, null, 2) })); + return AuthStorage.fromStorage(storage); + } + + /** + * Set a runtime API key override (not persisted to disk). + * Used for CLI --api-key flag. 
+ */ + setRuntimeApiKey(provider: string, apiKey: string): void { + this.runtimeOverrides.set(provider, apiKey); + } + + /** + * Remove a runtime API key override. + */ + removeRuntimeApiKey(provider: string): void { + this.runtimeOverrides.delete(provider); + } + + /** + * Set a fallback resolver for API keys not found in auth.json or env vars. + * Used for custom provider keys from models.json. + */ + setFallbackResolver(resolver: (provider: string) => string | undefined): void { + this.fallbackResolver = resolver; + } + + private recordError(error: unknown): void { + const normalizedError = error instanceof Error ? error : new Error(String(error)); + this.errors.push(normalizedError); + } + + private parseStorageData(content: string | undefined): AuthStorageData { + if (!content) { + return {}; + } + return JSON.parse(content) as AuthStorageData; + } + + /** + * Reload credentials from storage. + */ + reload(): void { + let content: string | undefined; + try { + this.storage.withLock((current) => { + content = current; + return { result: undefined }; + }); + this.data = this.parseStorageData(content); + this.loadError = null; + } catch (error) { + this.loadError = error as Error; + this.recordError(error); + } + } + + private persistProviderChange(provider: string, credential: AuthCredential | undefined): void { + if (this.loadError) { + return; + } + + try { + this.storage.withLock((current) => { + const currentData = this.parseStorageData(current); + const merged: AuthStorageData = { ...currentData }; + if (credential) { + merged[provider] = credential; + } else { + delete merged[provider]; + } + return { result: undefined, next: JSON.stringify(merged, null, 2) }; + }); + } catch (error) { + this.recordError(error); + } + } + + /** + * Get credential for a provider. + */ + get(provider: string): AuthCredential | undefined { + return this.data[provider] ?? undefined; + } + + /** + * Set credential for a provider. 
+ */ + set(provider: string, credential: AuthCredential): void { + this.data[provider] = credential; + this.persistProviderChange(provider, credential); + } + + /** + * Remove credential for a provider. + */ + remove(provider: string): void { + delete this.data[provider]; + this.persistProviderChange(provider, undefined); + } + + /** + * List all providers with credentials. + */ + list(): string[] { + return Object.keys(this.data); + } + + /** + * Check if credentials exist for a provider in auth.json. + */ + has(provider: string): boolean { + return provider in this.data; + } + + /** + * Check if any form of auth is configured for a provider. + * Unlike getApiKey(), this doesn't refresh OAuth tokens. + */ + hasAuth(provider: string): boolean { + if (this.runtimeOverrides.has(provider)) return true; + if (this.data[provider]) return true; + if (getEnvApiKey(provider)) return true; + if (this.fallbackResolver?.(provider)) return true; + return false; + } + + /** + * Get all credentials (for passing to getOAuthApiKey). + */ + getAll(): AuthStorageData { + return { ...this.data }; + } + + drainErrors(): Error[] { + const drained = [...this.errors]; + this.errors = []; + return drained; + } + + /** + * Login to an OAuth provider. + */ + async login(providerId: OAuthProviderId, callbacks: OAuthLoginCallbacks): Promise { + const provider = getOAuthProvider(providerId); + if (!provider) { + throw new Error(`Unknown OAuth provider: ${providerId}`); + } + + const credentials = await provider.login(callbacks); + this.set(providerId, { type: "oauth", ...credentials }); + } + + /** + * Logout from a provider. + */ + logout(provider: string): void { + this.remove(provider); + } + + /** + * Refresh OAuth token with backend locking to prevent race conditions. + * Multiple pi instances may try to refresh simultaneously when tokens expire. 
+ */ + private async refreshOAuthTokenWithLock( + providerId: OAuthProviderId, + ): Promise<{ apiKey: string; newCredentials: OAuthCredentials } | null> { + const provider = getOAuthProvider(providerId); + if (!provider) { + return null; + } + + const result = await this.storage.withLockAsync(async (current) => { + const currentData = this.parseStorageData(current); + this.data = currentData; + this.loadError = null; + + const cred = currentData[providerId]; + if (cred?.type !== "oauth") { + return { result: null }; + } + + if (Date.now() < cred.expires) { + return { result: { apiKey: provider.getApiKey(cred), newCredentials: cred } }; + } + + const oauthCreds: Record = {}; + for (const [key, value] of Object.entries(currentData)) { + if (value.type === "oauth") { + oauthCreds[key] = value; + } + } + + const refreshed = await getOAuthApiKey(providerId, oauthCreds); + if (!refreshed) { + return { result: null }; + } + + const merged: AuthStorageData = { + ...currentData, + [providerId]: { type: "oauth", ...refreshed.newCredentials }, + }; + this.data = merged; + this.loadError = null; + return { result: refreshed, next: JSON.stringify(merged, null, 2) }; + }); + + return result; + } + + /** + * Get API key for a provider. + * Priority: + * 1. Runtime override (CLI --api-key) + * 2. API key from auth.json + * 3. OAuth token from auth.json (auto-refreshed with locking) + * 4. Environment variable + * 5. 
Fallback resolver (models.json custom providers) + */ + async getApiKey(providerId: string): Promise { + // Runtime override takes highest priority + const runtimeKey = this.runtimeOverrides.get(providerId); + if (runtimeKey) { + return runtimeKey; + } + + const cred = this.data[providerId]; + + if (cred?.type === "api_key") { + return resolveConfigValue(cred.key); + } + + if (cred?.type === "oauth") { + const provider = getOAuthProvider(providerId); + if (!provider) { + // Unknown OAuth provider, can't get API key + return undefined; + } + + // Check if token needs refresh + const needsRefresh = Date.now() >= cred.expires; + + if (needsRefresh) { + // Use locked refresh to prevent race conditions + try { + const result = await this.refreshOAuthTokenWithLock(providerId); + if (result) { + return result.apiKey; + } + } catch (error) { + this.recordError(error); + // Refresh failed - re-read file to check if another instance succeeded + this.reload(); + const updatedCred = this.data[providerId]; + + if (updatedCred?.type === "oauth" && Date.now() < updatedCred.expires) { + // Another instance refreshed successfully, use those credentials + return provider.getApiKey(updatedCred); + } + + // Refresh truly failed - return undefined so model discovery skips this provider + // User can /login to re-authenticate (credentials preserved for retry) + return undefined; + } + } else { + // Token not expired, use current access token + return provider.getApiKey(cred); + } + } + + // Fall back to environment variable + const envKey = getEnvApiKey(providerId); + if (envKey) return envKey; + + // Fall back to custom resolver (e.g., models.json custom providers) + return this.fallbackResolver?.(providerId) ?? 
undefined; + } + + /** + * Get all registered OAuth providers + */ + getOAuthProviders() { + return getOAuthProviders(); + } +} diff --git a/packages/pi-coding-agent/src/core/bash-executor.ts b/packages/pi-coding-agent/src/core/bash-executor.ts new file mode 100644 index 000000000..b24982186 --- /dev/null +++ b/packages/pi-coding-agent/src/core/bash-executor.ts @@ -0,0 +1,278 @@ +/** + * Bash command execution with streaming support and cancellation. + * + * This module provides a unified bash execution implementation used by: + * - AgentSession.executeBash() for interactive and RPC modes + * - Direct calls from modes that need bash execution + */ + +import { randomBytes } from "node:crypto"; +import { createWriteStream, type WriteStream } from "node:fs"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import { type ChildProcess, spawn } from "child_process"; +import stripAnsi from "strip-ansi"; +import { getShellConfig, getShellEnv, killProcessTree, sanitizeBinaryOutput } from "../utils/shell.js"; +import type { BashOperations } from "./tools/bash.js"; +import { DEFAULT_MAX_BYTES, truncateTail } from "./tools/truncate.js"; + +// ============================================================================ +// Types +// ============================================================================ + +export interface BashExecutorOptions { + /** Callback for streaming output chunks (already sanitized) */ + onChunk?: (chunk: string) => void; + /** AbortSignal for cancellation */ + signal?: AbortSignal; +} + +export interface BashResult { + /** Combined stdout + stderr output (sanitized, possibly truncated) */ + output: string; + /** Process exit code (undefined if killed/cancelled) */ + exitCode: number | undefined; + /** Whether the command was cancelled via signal */ + cancelled: boolean; + /** Whether the output was truncated */ + truncated: boolean; + /** Path to temp file containing full output (if output exceeded truncation threshold) */ + 
fullOutputPath?: string; +} + +// ============================================================================ +// Implementation +// ============================================================================ + +/** + * Execute a bash command with optional streaming and cancellation support. + * + * Features: + * - Streams sanitized output via onChunk callback + * - Writes large output to temp file for later retrieval + * - Supports cancellation via AbortSignal + * - Sanitizes output (strips ANSI, removes binary garbage, normalizes newlines) + * - Truncates output if it exceeds the default max bytes + * + * @param command - The bash command to execute + * @param options - Optional streaming callback and abort signal + * @returns Promise resolving to execution result + */ +export function executeBash(command: string, options?: BashExecutorOptions): Promise { + return new Promise((resolve, reject) => { + const { shell, args } = getShellConfig(); + const child: ChildProcess = spawn(shell, [...args, command], { + detached: true, + env: getShellEnv(), + stdio: ["ignore", "pipe", "pipe"], + }); + + // Track sanitized output for truncation + const outputChunks: string[] = []; + let outputBytes = 0; + const maxOutputBytes = DEFAULT_MAX_BYTES * 2; + + // Temp file for large output + let tempFilePath: string | undefined; + let tempFileStream: WriteStream | undefined; + let totalBytes = 0; + + // Handle abort signal + const abortHandler = () => { + if (child.pid) { + killProcessTree(child.pid); + } + }; + + if (options?.signal) { + if (options.signal.aborted) { + // Already aborted, don't even start + child.kill(); + resolve({ + output: "", + exitCode: undefined, + cancelled: true, + truncated: false, + }); + return; + } + options.signal.addEventListener("abort", abortHandler, { once: true }); + } + + const decoder = new TextDecoder(); + + const handleData = (data: Buffer) => { + totalBytes += data.length; + + // Sanitize once at the source: strip ANSI, replace binary 
garbage, normalize newlines + const text = sanitizeBinaryOutput(stripAnsi(decoder.decode(data, { stream: true }))).replace(/\r/g, ""); + + // Start writing to temp file if exceeds threshold + if (totalBytes > DEFAULT_MAX_BYTES && !tempFilePath) { + const id = randomBytes(8).toString("hex"); + tempFilePath = join(tmpdir(), `pi-bash-${id}.log`); + tempFileStream = createWriteStream(tempFilePath); + // Write already-buffered chunks to temp file + for (const chunk of outputChunks) { + tempFileStream.write(chunk); + } + } + + if (tempFileStream) { + tempFileStream.write(text); + } + + // Keep rolling buffer of sanitized text + outputChunks.push(text); + outputBytes += text.length; + while (outputBytes > maxOutputBytes && outputChunks.length > 1) { + const removed = outputChunks.shift()!; + outputBytes -= removed.length; + } + + // Stream to callback if provided + if (options?.onChunk) { + options.onChunk(text); + } + }; + + child.stdout?.on("data", handleData); + child.stderr?.on("data", handleData); + + child.on("close", (code) => { + // Clean up abort listener + if (options?.signal) { + options.signal.removeEventListener("abort", abortHandler); + } + + if (tempFileStream) { + tempFileStream.end(); + } + + // Combine buffered chunks for truncation (already sanitized) + const fullOutput = outputChunks.join(""); + const truncationResult = truncateTail(fullOutput); + + // code === null means killed (cancelled) + const cancelled = code === null; + + resolve({ + output: truncationResult.truncated ? truncationResult.content : fullOutput, + exitCode: cancelled ? undefined : code, + cancelled, + truncated: truncationResult.truncated, + fullOutputPath: tempFilePath, + }); + }); + + child.on("error", (err) => { + // Clean up abort listener + if (options?.signal) { + options.signal.removeEventListener("abort", abortHandler); + } + + if (tempFileStream) { + tempFileStream.end(); + } + + reject(err); + }); + }); +} + +/** + * Execute a bash command using custom BashOperations. 
+ * Used for remote execution (SSH, containers, etc.). + */ +export async function executeBashWithOperations( + command: string, + cwd: string, + operations: BashOperations, + options?: BashExecutorOptions, +): Promise { + const outputChunks: string[] = []; + let outputBytes = 0; + const maxOutputBytes = DEFAULT_MAX_BYTES * 2; + + let tempFilePath: string | undefined; + let tempFileStream: WriteStream | undefined; + let totalBytes = 0; + + const decoder = new TextDecoder(); + + const onData = (data: Buffer) => { + totalBytes += data.length; + + // Sanitize: strip ANSI, replace binary garbage, normalize newlines + const text = sanitizeBinaryOutput(stripAnsi(decoder.decode(data, { stream: true }))).replace(/\r/g, ""); + + // Start writing to temp file if exceeds threshold + if (totalBytes > DEFAULT_MAX_BYTES && !tempFilePath) { + const id = randomBytes(8).toString("hex"); + tempFilePath = join(tmpdir(), `pi-bash-${id}.log`); + tempFileStream = createWriteStream(tempFilePath); + for (const chunk of outputChunks) { + tempFileStream.write(chunk); + } + } + + if (tempFileStream) { + tempFileStream.write(text); + } + + // Keep rolling buffer + outputChunks.push(text); + outputBytes += text.length; + while (outputBytes > maxOutputBytes && outputChunks.length > 1) { + const removed = outputChunks.shift()!; + outputBytes -= removed.length; + } + + // Stream to callback + if (options?.onChunk) { + options.onChunk(text); + } + }; + + try { + const result = await operations.exec(command, cwd, { + onData, + signal: options?.signal, + }); + + if (tempFileStream) { + tempFileStream.end(); + } + + const fullOutput = outputChunks.join(""); + const truncationResult = truncateTail(fullOutput); + const cancelled = options?.signal?.aborted ?? false; + + return { + output: truncationResult.truncated ? truncationResult.content : fullOutput, + exitCode: cancelled ? undefined : (result.exitCode ?? 
undefined), + cancelled, + truncated: truncationResult.truncated, + fullOutputPath: tempFilePath, + }; + } catch (err) { + if (tempFileStream) { + tempFileStream.end(); + } + + // Check if it was an abort + if (options?.signal?.aborted) { + const fullOutput = outputChunks.join(""); + const truncationResult = truncateTail(fullOutput); + return { + output: truncationResult.truncated ? truncationResult.content : fullOutput, + exitCode: undefined, + cancelled: true, + truncated: truncationResult.truncated, + fullOutputPath: tempFilePath, + }; + } + + throw err; + } +} diff --git a/packages/pi-coding-agent/src/core/compaction/branch-summarization.ts b/packages/pi-coding-agent/src/core/compaction/branch-summarization.ts new file mode 100644 index 000000000..ce2077ac0 --- /dev/null +++ b/packages/pi-coding-agent/src/core/compaction/branch-summarization.ts @@ -0,0 +1,352 @@ +/** + * Branch summarization for tree navigation. + * + * When navigating to a different point in the session tree, this generates + * a summary of the branch being left so context isn't lost. 
+ */ + +import type { AgentMessage } from "@gsd/pi-agent-core"; +import type { Model } from "@gsd/pi-ai"; +import { completeSimple } from "@gsd/pi-ai"; +import { + convertToLlm, + createBranchSummaryMessage, + createCompactionSummaryMessage, + createCustomMessage, +} from "../messages.js"; +import type { ReadonlySessionManager, SessionEntry } from "../session-manager.js"; +import { estimateTokens } from "./compaction.js"; +import { + computeFileLists, + createFileOps, + extractFileOpsFromMessage, + type FileOperations, + formatFileOperations, + SUMMARIZATION_SYSTEM_PROMPT, + serializeConversation, +} from "./utils.js"; + +// ============================================================================ +// Types +// ============================================================================ + +export interface BranchSummaryResult { + summary?: string; + readFiles?: string[]; + modifiedFiles?: string[]; + aborted?: boolean; + error?: string; +} + +/** Details stored in BranchSummaryEntry.details for file tracking */ +export interface BranchSummaryDetails { + readFiles: string[]; + modifiedFiles: string[]; +} + +export type { FileOperations } from "./utils.js"; + +export interface BranchPreparation { + /** Messages extracted for summarization, in chronological order */ + messages: AgentMessage[]; + /** File operations extracted from tool calls */ + fileOps: FileOperations; + /** Total estimated tokens in messages */ + totalTokens: number; +} + +export interface CollectEntriesResult { + /** Entries to summarize, in chronological order */ + entries: SessionEntry[]; + /** Common ancestor between old and new position, if any */ + commonAncestorId: string | null; +} + +export interface GenerateBranchSummaryOptions { + /** Model to use for summarization */ + model: Model; + /** API key for the model */ + apiKey: string; + /** Abort signal for cancellation */ + signal: AbortSignal; + /** Optional custom instructions for summarization */ + customInstructions?: string; + /** 
If true, customInstructions replaces the default prompt instead of being appended */ + replaceInstructions?: boolean; + /** Tokens reserved for prompt + LLM response (default 16384) */ + reserveTokens?: number; +} + +// ============================================================================ +// Entry Collection +// ============================================================================ + +/** + * Collect entries that should be summarized when navigating from one position to another. + * + * Walks from oldLeafId back to the common ancestor with targetId, collecting entries + * along the way. Does NOT stop at compaction boundaries - those are included and their + * summaries become context. + * + * @param session - Session manager (read-only access) + * @param oldLeafId - Current position (where we're navigating from) + * @param targetId - Target position (where we're navigating to) + * @returns Entries to summarize and the common ancestor + */ +export function collectEntriesForBranchSummary( + session: ReadonlySessionManager, + oldLeafId: string | null, + targetId: string, +): CollectEntriesResult { + // If no old position, nothing to summarize + if (!oldLeafId) { + return { entries: [], commonAncestorId: null }; + } + + // Find common ancestor (deepest node that's on both paths) + const oldPath = new Set(session.getBranch(oldLeafId).map((e) => e.id)); + const targetPath = session.getBranch(targetId); + + // targetPath is root-first, so iterate backwards to find deepest common ancestor + let commonAncestorId: string | null = null; + for (let i = targetPath.length - 1; i >= 0; i--) { + if (oldPath.has(targetPath[i].id)) { + commonAncestorId = targetPath[i].id; + break; + } + } + + // Collect entries from old leaf back to common ancestor + const entries: SessionEntry[] = []; + let current: string | null = oldLeafId; + + while (current && current !== commonAncestorId) { + const entry = session.getEntry(current); + if (!entry) break; + entries.push(entry); + 
current = entry.parentId; + } + + // Reverse to get chronological order + entries.reverse(); + + return { entries, commonAncestorId }; +} + +// ============================================================================ +// Entry to Message Conversion +// ============================================================================ + +/** + * Extract AgentMessage from a session entry. + * Similar to getMessageFromEntry in compaction.ts but also handles compaction entries. + */ +function getMessageFromEntry(entry: SessionEntry): AgentMessage | undefined { + switch (entry.type) { + case "message": + // Skip tool results - context is in assistant's tool call + if (entry.message.role === "toolResult") return undefined; + return entry.message; + + case "custom_message": + return createCustomMessage(entry.customType, entry.content, entry.display, entry.details, entry.timestamp); + + case "branch_summary": + return createBranchSummaryMessage(entry.summary, entry.fromId, entry.timestamp); + + case "compaction": + return createCompactionSummaryMessage(entry.summary, entry.tokensBefore, entry.timestamp); + + // These don't contribute to conversation content + case "thinking_level_change": + case "model_change": + case "custom": + case "label": + return undefined; + } +} + +/** + * Prepare entries for summarization with token budget. + * + * Walks entries from NEWEST to OLDEST, adding messages until we hit the token budget. + * This ensures we keep the most recent context when the branch is too long. 
+ * + * Also collects file operations from: + * - Tool calls in assistant messages + * - Existing branch_summary entries' details (for cumulative tracking) + * + * @param entries - Entries in chronological order + * @param tokenBudget - Maximum tokens to include (0 = no limit) + */ +export function prepareBranchEntries(entries: SessionEntry[], tokenBudget: number = 0): BranchPreparation { + const messages: AgentMessage[] = []; + const fileOps = createFileOps(); + let totalTokens = 0; + + // First pass: collect file ops from ALL entries (even if they don't fit in token budget) + // This ensures we capture cumulative file tracking from nested branch summaries + // Only extract from pi-generated summaries (fromHook !== true), not extension-generated ones + for (const entry of entries) { + if (entry.type === "branch_summary" && !entry.fromHook && entry.details) { + const details = entry.details as BranchSummaryDetails; + if (Array.isArray(details.readFiles)) { + for (const f of details.readFiles) fileOps.read.add(f); + } + if (Array.isArray(details.modifiedFiles)) { + // Modified files go into both edited and written for proper deduplication + for (const f of details.modifiedFiles) { + fileOps.edited.add(f); + } + } + } + } + + // Second pass: walk from newest to oldest, adding messages until token budget + for (let i = entries.length - 1; i >= 0; i--) { + const entry = entries[i]; + const message = getMessageFromEntry(entry); + if (!message) continue; + + // Extract file ops from assistant messages (tool calls) + extractFileOpsFromMessage(message, fileOps); + + const tokens = estimateTokens(message); + + // Check budget before adding + if (tokenBudget > 0 && totalTokens + tokens > tokenBudget) { + // If this is a summary entry, try to fit it anyway as it's important context + if (entry.type === "compaction" || entry.type === "branch_summary") { + if (totalTokens < tokenBudget * 0.9) { + messages.unshift(message); + totalTokens += tokens; + } + } + // Stop - we've hit 
the budget + break; + } + + messages.unshift(message); + totalTokens += tokens; + } + + return { messages, fileOps, totalTokens }; +} + +// ============================================================================ +// Summary Generation +// ============================================================================ + +const BRANCH_SUMMARY_PREAMBLE = `The user explored a different conversation branch before returning here. +Summary of that exploration: + +`; + +const BRANCH_SUMMARY_PROMPT = `Create a structured summary of this conversation branch for context when returning later. + +Use this EXACT format: + +## Goal +[What was the user trying to accomplish in this branch?] + +## Constraints & Preferences +- [Any constraints, preferences, or requirements mentioned] +- [Or "(none)" if none were mentioned] + +## Progress +### Done +- [x] [Completed tasks/changes] + +### In Progress +- [ ] [Work that was started but not finished] + +### Blocked +- [Issues preventing progress, if any] + +## Key Decisions +- **[Decision]**: [Brief rationale] + +## Next Steps +1. [What should happen next to continue this work] + +Keep each section concise. Preserve exact file paths, function names, and error messages.`; + +/** + * Generate a summary of abandoned branch entries. 
+ * + * @param entries - Session entries to summarize (chronological order) + * @param options - Generation options + */ +export async function generateBranchSummary( + entries: SessionEntry[], + options: GenerateBranchSummaryOptions, +): Promise { + const { model, apiKey, signal, customInstructions, replaceInstructions, reserveTokens = 16384 } = options; + + // Token budget = context window minus reserved space for prompt + response + const contextWindow = model.contextWindow || 128000; + const tokenBudget = contextWindow - reserveTokens; + + const { messages, fileOps } = prepareBranchEntries(entries, tokenBudget); + + if (messages.length === 0) { + return { summary: "No content to summarize" }; + } + + // Transform to LLM-compatible messages, then serialize to text + // Serialization prevents the model from treating it as a conversation to continue + const llmMessages = convertToLlm(messages); + const conversationText = serializeConversation(llmMessages); + + // Build prompt + let instructions: string; + if (replaceInstructions && customInstructions) { + instructions = customInstructions; + } else if (customInstructions) { + instructions = `${BRANCH_SUMMARY_PROMPT}\n\nAdditional focus: ${customInstructions}`; + } else { + instructions = BRANCH_SUMMARY_PROMPT; + } + const promptText = `\n${conversationText}\n\n\n${instructions}`; + + const summarizationMessages = [ + { + role: "user" as const, + content: [{ type: "text" as const, text: promptText }], + timestamp: Date.now(), + }, + ]; + + // Call LLM for summarization + const response = await completeSimple( + model, + { systemPrompt: SUMMARIZATION_SYSTEM_PROMPT, messages: summarizationMessages }, + { apiKey, signal, maxTokens: 2048 }, + ); + + // Check if aborted or errored + if (response.stopReason === "aborted") { + return { aborted: true }; + } + if (response.stopReason === "error") { + return { error: response.errorMessage || "Summarization failed" }; + } + + let summary = response.content + .filter((c): c is 
{ type: "text"; text: string } => c.type === "text") + .map((c) => c.text) + .join("\n"); + + // Prepend preamble to provide context about the branch summary + summary = BRANCH_SUMMARY_PREAMBLE + summary; + + // Compute file lists and append to summary + const { readFiles, modifiedFiles } = computeFileLists(fileOps); + summary += formatFileOperations(readFiles, modifiedFiles); + + return { + summary: summary || "No summary generated", + readFiles, + modifiedFiles, + }; +} diff --git a/packages/pi-coding-agent/src/core/compaction/compaction.ts b/packages/pi-coding-agent/src/core/compaction/compaction.ts new file mode 100644 index 000000000..cca57276d --- /dev/null +++ b/packages/pi-coding-agent/src/core/compaction/compaction.ts @@ -0,0 +1,813 @@ +/** + * Context compaction for long sessions. + * + * Pure functions for compaction logic. The session manager handles I/O, + * and after compaction the session is reloaded. + */ + +import type { AgentMessage } from "@gsd/pi-agent-core"; +import type { AssistantMessage, Model, Usage } from "@gsd/pi-ai"; +import { completeSimple } from "@gsd/pi-ai"; +import { + convertToLlm, + createBranchSummaryMessage, + createCompactionSummaryMessage, + createCustomMessage, +} from "../messages.js"; +import type { CompactionEntry, SessionEntry } from "../session-manager.js"; +import { + computeFileLists, + createFileOps, + extractFileOpsFromMessage, + type FileOperations, + formatFileOperations, + SUMMARIZATION_SYSTEM_PROMPT, + serializeConversation, +} from "./utils.js"; + +// ============================================================================ +// File Operation Tracking +// ============================================================================ + +/** Details stored in CompactionEntry.details for file tracking */ +export interface CompactionDetails { + readFiles: string[]; + modifiedFiles: string[]; +} + +/** + * Extract file operations from messages and previous compaction entries. 
+ */ +function extractFileOperations( + messages: AgentMessage[], + entries: SessionEntry[], + prevCompactionIndex: number, +): FileOperations { + const fileOps = createFileOps(); + + // Collect from previous compaction's details (if pi-generated) + if (prevCompactionIndex >= 0) { + const prevCompaction = entries[prevCompactionIndex] as CompactionEntry; + if (!prevCompaction.fromHook && prevCompaction.details) { + // fromHook field kept for session file compatibility + const details = prevCompaction.details as CompactionDetails; + if (Array.isArray(details.readFiles)) { + for (const f of details.readFiles) fileOps.read.add(f); + } + if (Array.isArray(details.modifiedFiles)) { + for (const f of details.modifiedFiles) fileOps.edited.add(f); + } + } + } + + // Extract from tool calls in messages + for (const msg of messages) { + extractFileOpsFromMessage(msg, fileOps); + } + + return fileOps; +} + +// ============================================================================ +// Message Extraction +// ============================================================================ + +/** + * Extract AgentMessage from an entry if it produces one. + * Returns undefined for entries that don't contribute to LLM context. 
+ */ +function getMessageFromEntry(entry: SessionEntry): AgentMessage | undefined { + if (entry.type === "message") { + return entry.message; + } + if (entry.type === "custom_message") { + return createCustomMessage(entry.customType, entry.content, entry.display, entry.details, entry.timestamp); + } + if (entry.type === "branch_summary") { + return createBranchSummaryMessage(entry.summary, entry.fromId, entry.timestamp); + } + if (entry.type === "compaction") { + return createCompactionSummaryMessage(entry.summary, entry.tokensBefore, entry.timestamp); + } + return undefined; +} + +/** Result from compact() - SessionManager adds uuid/parentUuid when saving */ +export interface CompactionResult { + summary: string; + firstKeptEntryId: string; + tokensBefore: number; + /** Extension-specific data (e.g., ArtifactIndex, version markers for structured compaction) */ + details?: T; +} + +// ============================================================================ +// Types +// ============================================================================ + +export interface CompactionSettings { + enabled: boolean; + reserveTokens: number; + keepRecentTokens: number; +} + +export const DEFAULT_COMPACTION_SETTINGS: CompactionSettings = { + enabled: true, + reserveTokens: 16384, + keepRecentTokens: 20000, +}; + +// ============================================================================ +// Token calculation +// ============================================================================ + +/** + * Calculate total context tokens from usage. + * Uses the native totalTokens field when available, falls back to computing from components. + */ +export function calculateContextTokens(usage: Usage): number { + return usage.totalTokens || usage.input + usage.output + usage.cacheRead + usage.cacheWrite; +} + +/** + * Get usage from an assistant message if available. + * Skips aborted and error messages as they don't have valid usage data. 
+ */ +function getAssistantUsage(msg: AgentMessage): Usage | undefined { + if (msg.role === "assistant" && "usage" in msg) { + const assistantMsg = msg as AssistantMessage; + if (assistantMsg.stopReason !== "aborted" && assistantMsg.stopReason !== "error" && assistantMsg.usage) { + return assistantMsg.usage; + } + } + return undefined; +} + +/** + * Find the last non-aborted assistant message usage from session entries. + */ +export function getLastAssistantUsage(entries: SessionEntry[]): Usage | undefined { + for (let i = entries.length - 1; i >= 0; i--) { + const entry = entries[i]; + if (entry.type === "message") { + const usage = getAssistantUsage(entry.message); + if (usage) return usage; + } + } + return undefined; +} + +export interface ContextUsageEstimate { + tokens: number; + usageTokens: number; + trailingTokens: number; + lastUsageIndex: number | null; +} + +function getLastAssistantUsageInfo(messages: AgentMessage[]): { usage: Usage; index: number } | undefined { + for (let i = messages.length - 1; i >= 0; i--) { + const usage = getAssistantUsage(messages[i]); + if (usage) return { usage, index: i }; + } + return undefined; +} + +/** + * Estimate context tokens from messages, using the last assistant usage when available. + * If there are messages after the last usage, estimate their tokens with estimateTokens. 
+ */ +export function estimateContextTokens(messages: AgentMessage[]): ContextUsageEstimate { + const usageInfo = getLastAssistantUsageInfo(messages); + + if (!usageInfo) { + let estimated = 0; + for (const message of messages) { + estimated += estimateTokens(message); + } + return { + tokens: estimated, + usageTokens: 0, + trailingTokens: estimated, + lastUsageIndex: null, + }; + } + + const usageTokens = calculateContextTokens(usageInfo.usage); + let trailingTokens = 0; + for (let i = usageInfo.index + 1; i < messages.length; i++) { + trailingTokens += estimateTokens(messages[i]); + } + + return { + tokens: usageTokens + trailingTokens, + usageTokens, + trailingTokens, + lastUsageIndex: usageInfo.index, + }; +} + +/** + * Check if compaction should trigger based on context usage. + */ +export function shouldCompact(contextTokens: number, contextWindow: number, settings: CompactionSettings): boolean { + if (!settings.enabled) return false; + return contextTokens > contextWindow - settings.reserveTokens; +} + +// ============================================================================ +// Cut point detection +// ============================================================================ + +/** + * Estimate token count for a message using chars/4 heuristic. + * This is conservative (overestimates tokens). 
+ */ +export function estimateTokens(message: AgentMessage): number { + let chars = 0; + + switch (message.role) { + case "user": { + const content = (message as { content: string | Array<{ type: string; text?: string }> }).content; + if (typeof content === "string") { + chars = content.length; + } else if (Array.isArray(content)) { + for (const block of content) { + if (block.type === "text" && block.text) { + chars += block.text.length; + } + } + } + return Math.ceil(chars / 4); + } + case "assistant": { + const assistant = message as AssistantMessage; + for (const block of assistant.content) { + if (block.type === "text") { + chars += block.text.length; + } else if (block.type === "thinking") { + chars += block.thinking.length; + } else if (block.type === "toolCall") { + chars += block.name.length + JSON.stringify(block.arguments).length; + } + } + return Math.ceil(chars / 4); + } + case "custom": + case "toolResult": { + if (typeof message.content === "string") { + chars = message.content.length; + } else { + for (const block of message.content) { + if (block.type === "text" && block.text) { + chars += block.text.length; + } + if (block.type === "image") { + chars += 4800; // Estimate images as 4000 chars, or 1200 tokens + } + } + } + return Math.ceil(chars / 4); + } + case "bashExecution": { + chars = message.command.length + message.output.length; + return Math.ceil(chars / 4); + } + case "branchSummary": + case "compactionSummary": { + chars = message.summary.length; + return Math.ceil(chars / 4); + } + } + + return 0; +} + +/** + * Find valid cut points: indices of user, assistant, custom, or bashExecution messages. + * Never cut at tool results (they must follow their tool call). + * When we cut at an assistant message with tool calls, its tool results follow it + * and will be kept. + * BashExecutionMessage is treated like a user message (user-initiated context). 
+ */ +function findValidCutPoints(entries: SessionEntry[], startIndex: number, endIndex: number): number[] { + const cutPoints: number[] = []; + for (let i = startIndex; i < endIndex; i++) { + const entry = entries[i]; + switch (entry.type) { + case "message": { + const role = entry.message.role; + switch (role) { + case "bashExecution": + case "custom": + case "branchSummary": + case "compactionSummary": + case "user": + case "assistant": + cutPoints.push(i); + break; + case "toolResult": + break; + } + break; + } + case "thinking_level_change": + case "model_change": + case "compaction": + case "branch_summary": + case "custom": + case "custom_message": + case "label": + } + // branch_summary and custom_message are user-role messages, valid cut points + if (entry.type === "branch_summary" || entry.type === "custom_message") { + cutPoints.push(i); + } + } + return cutPoints; +} + +/** + * Find the user message (or bashExecution) that starts the turn containing the given entry index. + * Returns -1 if no turn start found before the index. + * BashExecutionMessage is treated like a user message for turn boundaries. 
+ */ +export function findTurnStartIndex(entries: SessionEntry[], entryIndex: number, startIndex: number): number { + for (let i = entryIndex; i >= startIndex; i--) { + const entry = entries[i]; + // branch_summary and custom_message are user-role messages, can start a turn + if (entry.type === "branch_summary" || entry.type === "custom_message") { + return i; + } + if (entry.type === "message") { + const role = entry.message.role; + if (role === "user" || role === "bashExecution") { + return i; + } + } + } + return -1; +} + +export interface CutPointResult { + /** Index of first entry to keep */ + firstKeptEntryIndex: number; + /** Index of user message that starts the turn being split, or -1 if not splitting */ + turnStartIndex: number; + /** Whether this cut splits a turn (cut point is not a user message) */ + isSplitTurn: boolean; +} + +/** + * Find the cut point in session entries that keeps approximately `keepRecentTokens`. + * + * Algorithm: Walk backwards from newest, accumulating estimated message sizes. + * Stop when we've accumulated >= keepRecentTokens. Cut at that point. + * + * Can cut at user OR assistant messages (never tool results). When cutting at an + * assistant message with tool calls, its tool results come after and will be kept. + * + * Returns CutPointResult with: + * - firstKeptEntryIndex: the entry index to start keeping from + * - turnStartIndex: if cutting mid-turn, the user message that started that turn + * - isSplitTurn: whether we're cutting in the middle of a turn + * + * Only considers entries between `startIndex` and `endIndex` (exclusive). 
+ */ +export function findCutPoint( + entries: SessionEntry[], + startIndex: number, + endIndex: number, + keepRecentTokens: number, +): CutPointResult { + const cutPoints = findValidCutPoints(entries, startIndex, endIndex); + + if (cutPoints.length === 0) { + return { firstKeptEntryIndex: startIndex, turnStartIndex: -1, isSplitTurn: false }; + } + + // Walk backwards from newest, accumulating estimated message sizes + let accumulatedTokens = 0; + let cutIndex = cutPoints[0]; // Default: keep from first message (not header) + + for (let i = endIndex - 1; i >= startIndex; i--) { + const entry = entries[i]; + if (entry.type !== "message") continue; + + // Estimate this message's size + const messageTokens = estimateTokens(entry.message); + accumulatedTokens += messageTokens; + + // Check if we've exceeded the budget + if (accumulatedTokens >= keepRecentTokens) { + // Find the closest valid cut point at or after this entry + for (let c = 0; c < cutPoints.length; c++) { + if (cutPoints[c] >= i) { + cutIndex = cutPoints[c]; + break; + } + } + break; + } + } + + // Scan backwards from cutIndex to include any non-message entries (bash, settings, etc.) + while (cutIndex > startIndex) { + const prevEntry = entries[cutIndex - 1]; + // Stop at session header or compaction boundaries + if (prevEntry.type === "compaction") { + break; + } + if (prevEntry.type === "message") { + // Stop if we hit any message + break; + } + // Include this non-message entry (bash, settings change, etc.) + cutIndex--; + } + + // Determine if this is a split turn + const cutEntry = entries[cutIndex]; + const isUserMessage = cutEntry.type === "message" && cutEntry.message.role === "user"; + const turnStartIndex = isUserMessage ? 
-1 : findTurnStartIndex(entries, cutIndex, startIndex); + + return { + firstKeptEntryIndex: cutIndex, + turnStartIndex, + isSplitTurn: !isUserMessage && turnStartIndex !== -1, + }; +} + +// ============================================================================ +// Summarization +// ============================================================================ + +const SUMMARIZATION_PROMPT = `The messages above are a conversation to summarize. Create a structured context checkpoint summary that another LLM will use to continue the work. + +Use this EXACT format: + +## Goal +[What is the user trying to accomplish? Can be multiple items if the session covers different tasks.] + +## Constraints & Preferences +- [Any constraints, preferences, or requirements mentioned by user] +- [Or "(none)" if none were mentioned] + +## Progress +### Done +- [x] [Completed tasks/changes] + +### In Progress +- [ ] [Current work] + +### Blocked +- [Issues preventing progress, if any] + +## Key Decisions +- **[Decision]**: [Brief rationale] + +## Next Steps +1. [Ordered list of what should happen next] + +## Critical Context +- [Any data, examples, or references needed to continue] +- [Or "(none)" if not applicable] + +Keep each section concise. Preserve exact file paths, function names, and error messages.`; + +const UPDATE_SUMMARIZATION_PROMPT = `The messages above are NEW conversation messages to incorporate into the existing summary provided in tags. + +Update the existing structured summary with new information. 
RULES: +- PRESERVE all existing information from the previous summary +- ADD new progress, decisions, and context from the new messages +- UPDATE the Progress section: move items from "In Progress" to "Done" when completed +- UPDATE "Next Steps" based on what was accomplished +- PRESERVE exact file paths, function names, and error messages +- If something is no longer relevant, you may remove it + +Use this EXACT format: + +## Goal +[Preserve existing goals, add new ones if the task expanded] + +## Constraints & Preferences +- [Preserve existing, add new ones discovered] + +## Progress +### Done +- [x] [Include previously done items AND newly completed items] + +### In Progress +- [ ] [Current work - update based on progress] + +### Blocked +- [Current blockers - remove if resolved] + +## Key Decisions +- **[Decision]**: [Brief rationale] (preserve all previous, add new) + +## Next Steps +1. [Update based on current state] + +## Critical Context +- [Preserve important context, add new if needed] + +Keep each section concise. Preserve exact file paths, function names, and error messages.`; + +/** + * Generate a summary of the conversation using the LLM. + * If previousSummary is provided, uses the update prompt to merge. + */ +export async function generateSummary( + currentMessages: AgentMessage[], + model: Model, + reserveTokens: number, + apiKey: string, + signal?: AbortSignal, + customInstructions?: string, + previousSummary?: string, +): Promise { + const maxTokens = Math.floor(0.8 * reserveTokens); + + // Use update prompt if we have a previous summary, otherwise initial prompt + let basePrompt = previousSummary ? UPDATE_SUMMARIZATION_PROMPT : SUMMARIZATION_PROMPT; + if (customInstructions) { + basePrompt = `${basePrompt}\n\nAdditional focus: ${customInstructions}`; + } + + // Serialize conversation to text so model doesn't try to continue it + // Convert to LLM messages first (handles custom types like bashExecution, custom, etc.) 
+ const llmMessages = convertToLlm(currentMessages); + const conversationText = serializeConversation(llmMessages); + + // Build the prompt with conversation wrapped in tags + let promptText = `\n${conversationText}\n\n\n`; + if (previousSummary) { + promptText += `\n${previousSummary}\n\n\n`; + } + promptText += basePrompt; + + const summarizationMessages = [ + { + role: "user" as const, + content: [{ type: "text" as const, text: promptText }], + timestamp: Date.now(), + }, + ]; + + const completionOptions = model.reasoning + ? { maxTokens, signal, apiKey, reasoning: "high" as const } + : { maxTokens, signal, apiKey }; + + const response = await completeSimple( + model, + { systemPrompt: SUMMARIZATION_SYSTEM_PROMPT, messages: summarizationMessages }, + completionOptions, + ); + + if (response.stopReason === "error") { + throw new Error(`Summarization failed: ${response.errorMessage || "Unknown error"}`); + } + + const textContent = response.content + .filter((c): c is { type: "text"; text: string } => c.type === "text") + .map((c) => c.text) + .join("\n"); + + return textContent; +} + +// ============================================================================ +// Compaction Preparation (for extensions) +// ============================================================================ + +export interface CompactionPreparation { + /** UUID of first entry to keep */ + firstKeptEntryId: string; + /** Messages that will be summarized and discarded */ + messagesToSummarize: AgentMessage[]; + /** Messages that will be turned into turn prefix summary (if splitting) */ + turnPrefixMessages: AgentMessage[]; + /** Whether this is a split turn (cut point in middle of turn) */ + isSplitTurn: boolean; + tokensBefore: number; + /** Summary from previous compaction, for iterative update */ + previousSummary?: string; + /** File operations extracted from messagesToSummarize */ + fileOps: FileOperations; + /** Compaction settions from settings.jsonl */ + settings: 
CompactionSettings; +} + +export function prepareCompaction( + pathEntries: SessionEntry[], + settings: CompactionSettings, +): CompactionPreparation | undefined { + if (pathEntries.length > 0 && pathEntries[pathEntries.length - 1].type === "compaction") { + return undefined; + } + + let prevCompactionIndex = -1; + for (let i = pathEntries.length - 1; i >= 0; i--) { + if (pathEntries[i].type === "compaction") { + prevCompactionIndex = i; + break; + } + } + const boundaryStart = prevCompactionIndex + 1; + const boundaryEnd = pathEntries.length; + + const usageStart = prevCompactionIndex >= 0 ? prevCompactionIndex : 0; + const usageMessages: AgentMessage[] = []; + for (let i = usageStart; i < boundaryEnd; i++) { + const msg = getMessageFromEntry(pathEntries[i]); + if (msg) usageMessages.push(msg); + } + const tokensBefore = estimateContextTokens(usageMessages).tokens; + + const cutPoint = findCutPoint(pathEntries, boundaryStart, boundaryEnd, settings.keepRecentTokens); + + // Get UUID of first kept entry + const firstKeptEntry = pathEntries[cutPoint.firstKeptEntryIndex]; + if (!firstKeptEntry?.id) { + return undefined; // Session needs migration + } + const firstKeptEntryId = firstKeptEntry.id; + + const historyEnd = cutPoint.isSplitTurn ? 
cutPoint.turnStartIndex : cutPoint.firstKeptEntryIndex; + + // Messages to summarize (will be discarded after summary) + const messagesToSummarize: AgentMessage[] = []; + for (let i = boundaryStart; i < historyEnd; i++) { + const msg = getMessageFromEntry(pathEntries[i]); + if (msg) messagesToSummarize.push(msg); + } + + // Messages for turn prefix summary (if splitting a turn) + const turnPrefixMessages: AgentMessage[] = []; + if (cutPoint.isSplitTurn) { + for (let i = cutPoint.turnStartIndex; i < cutPoint.firstKeptEntryIndex; i++) { + const msg = getMessageFromEntry(pathEntries[i]); + if (msg) turnPrefixMessages.push(msg); + } + } + + // Get previous summary for iterative update + let previousSummary: string | undefined; + if (prevCompactionIndex >= 0) { + const prevCompaction = pathEntries[prevCompactionIndex] as CompactionEntry; + previousSummary = prevCompaction.summary; + } + + // Extract file operations from messages and previous compaction + const fileOps = extractFileOperations(messagesToSummarize, pathEntries, prevCompactionIndex); + + // Also extract file ops from turn prefix if splitting + if (cutPoint.isSplitTurn) { + for (const msg of turnPrefixMessages) { + extractFileOpsFromMessage(msg, fileOps); + } + } + + return { + firstKeptEntryId, + messagesToSummarize, + turnPrefixMessages, + isSplitTurn: cutPoint.isSplitTurn, + tokensBefore, + previousSummary, + fileOps, + settings, + }; +} + +// ============================================================================ +// Main compaction function +// ============================================================================ + +const TURN_PREFIX_SUMMARIZATION_PROMPT = `This is the PREFIX of a turn that was too large to keep. The SUFFIX (recent work) is retained. + +Summarize the prefix to provide context for the retained suffix: + +## Original Request +[What did the user ask for in this turn?] 
+ +## Early Progress +- [Key decisions and work done in the prefix] + +## Context for Suffix +- [Information needed to understand the retained recent work] + +Be concise. Focus on what's needed to understand the kept suffix.`; + +/** + * Generate summaries for compaction using prepared data. + * Returns CompactionResult - SessionManager adds uuid/parentUuid when saving. + * + * @param preparation - Pre-calculated preparation from prepareCompaction() + * @param customInstructions - Optional custom focus for the summary + */ +export async function compact( + preparation: CompactionPreparation, + model: Model, + apiKey: string, + customInstructions?: string, + signal?: AbortSignal, +): Promise { + const { + firstKeptEntryId, + messagesToSummarize, + turnPrefixMessages, + isSplitTurn, + tokensBefore, + previousSummary, + fileOps, + settings, + } = preparation; + + // Generate summaries (can be parallel if both needed) and merge into one + let summary: string; + + if (isSplitTurn && turnPrefixMessages.length > 0) { + // Generate both summaries in parallel + const [historyResult, turnPrefixResult] = await Promise.all([ + messagesToSummarize.length > 0 + ? 
generateSummary( + messagesToSummarize, + model, + settings.reserveTokens, + apiKey, + signal, + customInstructions, + previousSummary, + ) + : Promise.resolve("No prior history."), + generateTurnPrefixSummary(turnPrefixMessages, model, settings.reserveTokens, apiKey, signal), + ]); + // Merge into single summary + summary = `${historyResult}\n\n---\n\n**Turn Context (split turn):**\n\n${turnPrefixResult}`; + } else { + // Just generate history summary + summary = await generateSummary( + messagesToSummarize, + model, + settings.reserveTokens, + apiKey, + signal, + customInstructions, + previousSummary, + ); + } + + // Compute file lists and append to summary + const { readFiles, modifiedFiles } = computeFileLists(fileOps); + summary += formatFileOperations(readFiles, modifiedFiles); + + if (!firstKeptEntryId) { + throw new Error("First kept entry has no UUID - session may need migration"); + } + + return { + summary, + firstKeptEntryId, + tokensBefore, + details: { readFiles, modifiedFiles } as CompactionDetails, + }; +} + +/** + * Generate a summary for a turn prefix (when splitting a turn). 
+ */ +async function generateTurnPrefixSummary( + messages: AgentMessage[], + model: Model, + reserveTokens: number, + apiKey: string, + signal?: AbortSignal, +): Promise { + const maxTokens = Math.floor(0.5 * reserveTokens); // Smaller budget for turn prefix + const llmMessages = convertToLlm(messages); + const conversationText = serializeConversation(llmMessages); + const promptText = `\n${conversationText}\n\n\n${TURN_PREFIX_SUMMARIZATION_PROMPT}`; + const summarizationMessages = [ + { + role: "user" as const, + content: [{ type: "text" as const, text: promptText }], + timestamp: Date.now(), + }, + ]; + + const response = await completeSimple( + model, + { systemPrompt: SUMMARIZATION_SYSTEM_PROMPT, messages: summarizationMessages }, + { maxTokens, signal, apiKey }, + ); + + if (response.stopReason === "error") { + throw new Error(`Turn prefix summarization failed: ${response.errorMessage || "Unknown error"}`); + } + + return response.content + .filter((c): c is { type: "text"; text: string } => c.type === "text") + .map((c) => c.text) + .join("\n"); +} diff --git a/packages/pi-coding-agent/src/core/compaction/index.ts b/packages/pi-coding-agent/src/core/compaction/index.ts new file mode 100644 index 000000000..d8c92a67b --- /dev/null +++ b/packages/pi-coding-agent/src/core/compaction/index.ts @@ -0,0 +1,7 @@ +/** + * Compaction and summarization utilities. + */ + +export * from "./branch-summarization.js"; +export * from "./compaction.js"; +export * from "./utils.js"; diff --git a/packages/pi-coding-agent/src/core/compaction/utils.ts b/packages/pi-coding-agent/src/core/compaction/utils.ts new file mode 100644 index 000000000..45fd347e5 --- /dev/null +++ b/packages/pi-coding-agent/src/core/compaction/utils.ts @@ -0,0 +1,170 @@ +/** + * Shared utilities for compaction and branch summarization. 
+ */ + +import type { AgentMessage } from "@gsd/pi-agent-core"; +import type { Message } from "@gsd/pi-ai"; + +// ============================================================================ +// File Operation Tracking +// ============================================================================ + +export interface FileOperations { + read: Set; + written: Set; + edited: Set; +} + +export function createFileOps(): FileOperations { + return { + read: new Set(), + written: new Set(), + edited: new Set(), + }; +} + +/** + * Extract file operations from tool calls in an assistant message. + */ +export function extractFileOpsFromMessage(message: AgentMessage, fileOps: FileOperations): void { + if (message.role !== "assistant") return; + if (!("content" in message) || !Array.isArray(message.content)) return; + + for (const block of message.content) { + if (typeof block !== "object" || block === null) continue; + if (!("type" in block) || block.type !== "toolCall") continue; + if (!("arguments" in block) || !("name" in block)) continue; + + const args = block.arguments as Record | undefined; + if (!args) continue; + + const path = typeof args.path === "string" ? args.path : undefined; + if (!path) continue; + + switch (block.name) { + case "read": + fileOps.read.add(path); + break; + case "write": + fileOps.written.add(path); + break; + case "edit": + fileOps.edited.add(path); + break; + } + } +} + +/** + * Compute final file lists from file operations. + * Returns readFiles (files only read, not modified) and modifiedFiles. + */ +export function computeFileLists(fileOps: FileOperations): { readFiles: string[]; modifiedFiles: string[] } { + const modified = new Set([...fileOps.edited, ...fileOps.written]); + const readOnly = [...fileOps.read].filter((f) => !modified.has(f)).sort(); + const modifiedFiles = [...modified].sort(); + return { readFiles: readOnly, modifiedFiles }; +} + +/** + * Format file operations as XML tags for summary. 
+ */ +export function formatFileOperations(readFiles: string[], modifiedFiles: string[]): string { + const sections: string[] = []; + if (readFiles.length > 0) { + sections.push(`\n${readFiles.join("\n")}\n`); + } + if (modifiedFiles.length > 0) { + sections.push(`\n${modifiedFiles.join("\n")}\n`); + } + if (sections.length === 0) return ""; + return `\n\n${sections.join("\n\n")}`; +} + +// ============================================================================ +// Message Serialization +// ============================================================================ + +/** Maximum characters for a tool result in serialized summaries. */ +const TOOL_RESULT_MAX_CHARS = 2000; + +/** + * Truncate text to a maximum character length for summarization. + * Keeps the beginning and appends a truncation marker. + */ +function truncateForSummary(text: string, maxChars: number): string { + if (text.length <= maxChars) return text; + const truncatedChars = text.length - maxChars; + return `${text.slice(0, maxChars)}\n\n[... ${truncatedChars} more characters truncated]`; +} + +/** + * Serialize LLM messages to text for summarization. + * This prevents the model from treating it as a conversation to continue. + * Call convertToLlm() first to handle custom message types. + * + * Tool results are truncated to keep the summarization request within + * reasonable token budgets. Full content is not needed for summarization. + */ +export function serializeConversation(messages: Message[]): string { + const parts: string[] = []; + + for (const msg of messages) { + if (msg.role === "user") { + const content = + typeof msg.content === "string" + ? 
msg.content + : msg.content + .filter((c): c is { type: "text"; text: string } => c.type === "text") + .map((c) => c.text) + .join(""); + if (content) parts.push(`[User]: ${content}`); + } else if (msg.role === "assistant") { + const textParts: string[] = []; + const thinkingParts: string[] = []; + const toolCalls: string[] = []; + + for (const block of msg.content) { + if (block.type === "text") { + textParts.push(block.text); + } else if (block.type === "thinking") { + thinkingParts.push(block.thinking); + } else if (block.type === "toolCall") { + const args = block.arguments as Record; + const argsStr = Object.entries(args) + .map(([k, v]) => `${k}=${JSON.stringify(v)}`) + .join(", "); + toolCalls.push(`${block.name}(${argsStr})`); + } + } + + if (thinkingParts.length > 0) { + parts.push(`[Assistant thinking]: ${thinkingParts.join("\n")}`); + } + if (textParts.length > 0) { + parts.push(`[Assistant]: ${textParts.join("\n")}`); + } + if (toolCalls.length > 0) { + parts.push(`[Assistant tool calls]: ${toolCalls.join("; ")}`); + } + } else if (msg.role === "toolResult") { + const content = msg.content + .filter((c): c is { type: "text"; text: string } => c.type === "text") + .map((c) => c.text) + .join(""); + if (content) { + parts.push(`[Tool result]: ${truncateForSummary(content, TOOL_RESULT_MAX_CHARS)}`); + } + } + } + + return parts.join("\n\n"); +} + +// ============================================================================ +// Summarization System Prompt +// ============================================================================ + +export const SUMMARIZATION_SYSTEM_PROMPT = `You are a context summarization assistant. Your task is to read a conversation between a user and an AI coding assistant, then produce a structured summary following the exact format specified. + +Do NOT continue the conversation. Do NOT respond to any questions in the conversation. 
ONLY output the structured summary.`; diff --git a/packages/pi-coding-agent/src/core/defaults.ts b/packages/pi-coding-agent/src/core/defaults.ts new file mode 100644 index 000000000..61ee10dd3 --- /dev/null +++ b/packages/pi-coding-agent/src/core/defaults.ts @@ -0,0 +1,3 @@ +import type { ThinkingLevel } from "@gsd/pi-agent-core"; + +export const DEFAULT_THINKING_LEVEL: ThinkingLevel = "medium"; diff --git a/packages/pi-coding-agent/src/core/diagnostics.ts b/packages/pi-coding-agent/src/core/diagnostics.ts new file mode 100644 index 000000000..20fb80243 --- /dev/null +++ b/packages/pi-coding-agent/src/core/diagnostics.ts @@ -0,0 +1,15 @@ +export interface ResourceCollision { + resourceType: "extension" | "skill" | "prompt" | "theme"; + name: string; // skill name, command/tool/flag name, prompt name, theme name + winnerPath: string; + loserPath: string; + winnerSource?: string; // e.g., "npm:foo", "git:...", "local" + loserSource?: string; +} + +export interface ResourceDiagnostic { + type: "warning" | "error" | "collision"; + message: string; + path?: string; + collision?: ResourceCollision; +} diff --git a/packages/pi-coding-agent/src/core/event-bus.ts b/packages/pi-coding-agent/src/core/event-bus.ts new file mode 100644 index 000000000..a4c87b9f0 --- /dev/null +++ b/packages/pi-coding-agent/src/core/event-bus.ts @@ -0,0 +1,33 @@ +import { EventEmitter } from "node:events"; + +export interface EventBus { + emit(channel: string, data: unknown): void; + on(channel: string, handler: (data: unknown) => void): () => void; +} + +export interface EventBusController extends EventBus { + clear(): void; +} + +export function createEventBus(): EventBusController { + const emitter = new EventEmitter(); + return { + emit: (channel, data) => { + emitter.emit(channel, data); + }, + on: (channel, handler) => { + const safeHandler = async (data: unknown) => { + try { + await handler(data); + } catch (err) { + console.error(`Event handler error (${channel}):`, err); + } + }; + 
emitter.on(channel, safeHandler); + return () => emitter.off(channel, safeHandler); + }, + clear: () => { + emitter.removeAllListeners(); + }, + }; +} diff --git a/packages/pi-coding-agent/src/core/exec.ts b/packages/pi-coding-agent/src/core/exec.ts new file mode 100644 index 000000000..b7dd046c4 --- /dev/null +++ b/packages/pi-coding-agent/src/core/exec.ts @@ -0,0 +1,104 @@ +/** + * Shared command execution utilities for extensions and custom tools. + */ + +import { spawn } from "node:child_process"; + +/** + * Options for executing shell commands. + */ +export interface ExecOptions { + /** AbortSignal to cancel the command */ + signal?: AbortSignal; + /** Timeout in milliseconds */ + timeout?: number; + /** Working directory */ + cwd?: string; +} + +/** + * Result of executing a shell command. + */ +export interface ExecResult { + stdout: string; + stderr: string; + code: number; + killed: boolean; +} + +/** + * Execute a shell command and return stdout/stderr/code. + * Supports timeout and abort signal. 
+ */ +export async function execCommand( + command: string, + args: string[], + cwd: string, + options?: ExecOptions, +): Promise { + return new Promise((resolve) => { + const proc = spawn(command, args, { + cwd, + shell: false, + stdio: ["ignore", "pipe", "pipe"], + }); + + let stdout = ""; + let stderr = ""; + let killed = false; + let timeoutId: NodeJS.Timeout | undefined; + + const killProcess = () => { + if (!killed) { + killed = true; + proc.kill("SIGTERM"); + // Force kill after 5 seconds if SIGTERM doesn't work + setTimeout(() => { + if (!proc.killed) { + proc.kill("SIGKILL"); + } + }, 5000); + } + }; + + // Handle abort signal + if (options?.signal) { + if (options.signal.aborted) { + killProcess(); + } else { + options.signal.addEventListener("abort", killProcess, { once: true }); + } + } + + // Handle timeout + if (options?.timeout && options.timeout > 0) { + timeoutId = setTimeout(() => { + killProcess(); + }, options.timeout); + } + + proc.stdout?.on("data", (data) => { + stdout += data.toString(); + }); + + proc.stderr?.on("data", (data) => { + stderr += data.toString(); + }); + + proc.on("close", (code) => { + if (timeoutId) clearTimeout(timeoutId); + if (options?.signal) { + options.signal.removeEventListener("abort", killProcess); + } + resolve({ stdout, stderr, code: code ?? 0, killed }); + }); + + proc.on("error", (_err) => { + if (timeoutId) clearTimeout(timeoutId); + if (options?.signal) { + options.signal.removeEventListener("abort", killProcess); + } + resolve({ stdout, stderr, code: 1, killed }); + }); + }); +} diff --git a/packages/pi-coding-agent/src/core/export-html/ansi-to-html.ts b/packages/pi-coding-agent/src/core/export-html/ansi-to-html.ts new file mode 100644 index 000000000..92a3974e0 --- /dev/null +++ b/packages/pi-coding-agent/src/core/export-html/ansi-to-html.ts @@ -0,0 +1,258 @@ +/** + * ANSI escape code to HTML converter. + * + * Converts terminal ANSI color/style codes to HTML with inline styles. 
+ * Supports: + * - Standard foreground colors (30-37) and bright variants (90-97) + * - Standard background colors (40-47) and bright variants (100-107) + * - 256-color palette (38;5;N and 48;5;N) + * - RGB true color (38;2;R;G;B and 48;2;R;G;B) + * - Text styles: bold (1), dim (2), italic (3), underline (4) + * - Reset (0) + */ + +// Standard ANSI color palette (0-15) +const ANSI_COLORS = [ + "#000000", // 0: black + "#800000", // 1: red + "#008000", // 2: green + "#808000", // 3: yellow + "#000080", // 4: blue + "#800080", // 5: magenta + "#008080", // 6: cyan + "#c0c0c0", // 7: white + "#808080", // 8: bright black + "#ff0000", // 9: bright red + "#00ff00", // 10: bright green + "#ffff00", // 11: bright yellow + "#0000ff", // 12: bright blue + "#ff00ff", // 13: bright magenta + "#00ffff", // 14: bright cyan + "#ffffff", // 15: bright white +]; + +/** + * Convert 256-color index to hex. + */ +function color256ToHex(index: number): string { + // Standard colors (0-15) + if (index < 16) { + return ANSI_COLORS[index]; + } + + // Color cube (16-231): 6x6x6 = 216 colors + if (index < 232) { + const cubeIndex = index - 16; + const r = Math.floor(cubeIndex / 36); + const g = Math.floor((cubeIndex % 36) / 6); + const b = cubeIndex % 6; + const toComponent = (n: number) => (n === 0 ? 0 : 55 + n * 40); + const toHex = (n: number) => toComponent(n).toString(16).padStart(2, "0"); + return `#${toHex(r)}${toHex(g)}${toHex(b)}`; + } + + // Grayscale (232-255): 24 shades + const gray = 8 + (index - 232) * 10; + const grayHex = gray.toString(16).padStart(2, "0"); + return `#${grayHex}${grayHex}${grayHex}`; +} + +/** + * Escape HTML special characters. 
 + */ +function escapeHtml(text: string): string { + return text + .replace(/&/g, "&amp;") + .replace(/</g, "&lt;") + .replace(/>/g, "&gt;") + .replace(/"/g, "&quot;") + .replace(/'/g, "&#39;"); +} + +interface TextStyle { + fg: string | null; + bg: string | null; + bold: boolean; + dim: boolean; + italic: boolean; + underline: boolean; +} + +function createEmptyStyle(): TextStyle { + return { + fg: null, + bg: null, + bold: false, + dim: false, + italic: false, + underline: false, + }; +} + +function styleToInlineCSS(style: TextStyle): string { + const parts: string[] = []; + if (style.fg) parts.push(`color:${style.fg}`); + if (style.bg) parts.push(`background-color:${style.bg}`); + if (style.bold) parts.push("font-weight:bold"); + if (style.dim) parts.push("opacity:0.6"); + if (style.italic) parts.push("font-style:italic"); + if (style.underline) parts.push("text-decoration:underline"); + return parts.join(";"); +} + +function hasStyle(style: TextStyle): boolean { + return style.fg !== null || style.bg !== null || style.bold || style.dim || style.italic || style.underline; +} + +/** + * Parse ANSI SGR (Select Graphic Rendition) codes and update style. 
+ */ +function applySgrCode(params: number[], style: TextStyle): void { + let i = 0; + while (i < params.length) { + const code = params[i]; + + if (code === 0) { + // Reset all + style.fg = null; + style.bg = null; + style.bold = false; + style.dim = false; + style.italic = false; + style.underline = false; + } else if (code === 1) { + style.bold = true; + } else if (code === 2) { + style.dim = true; + } else if (code === 3) { + style.italic = true; + } else if (code === 4) { + style.underline = true; + } else if (code === 22) { + // Reset bold/dim + style.bold = false; + style.dim = false; + } else if (code === 23) { + style.italic = false; + } else if (code === 24) { + style.underline = false; + } else if (code >= 30 && code <= 37) { + // Standard foreground colors + style.fg = ANSI_COLORS[code - 30]; + } else if (code === 38) { + // Extended foreground color + if (params[i + 1] === 5 && params.length > i + 2) { + // 256-color: 38;5;N + style.fg = color256ToHex(params[i + 2]); + i += 2; + } else if (params[i + 1] === 2 && params.length > i + 4) { + // RGB: 38;2;R;G;B + const r = params[i + 2]; + const g = params[i + 3]; + const b = params[i + 4]; + style.fg = `rgb(${r},${g},${b})`; + i += 4; + } + } else if (code === 39) { + // Default foreground + style.fg = null; + } else if (code >= 40 && code <= 47) { + // Standard background colors + style.bg = ANSI_COLORS[code - 40]; + } else if (code === 48) { + // Extended background color + if (params[i + 1] === 5 && params.length > i + 2) { + // 256-color: 48;5;N + style.bg = color256ToHex(params[i + 2]); + i += 2; + } else if (params[i + 1] === 2 && params.length > i + 4) { + // RGB: 48;2;R;G;B + const r = params[i + 2]; + const g = params[i + 3]; + const b = params[i + 4]; + style.bg = `rgb(${r},${g},${b})`; + i += 4; + } + } else if (code === 49) { + // Default background + style.bg = null; + } else if (code >= 90 && code <= 97) { + // Bright foreground colors + style.fg = ANSI_COLORS[code - 90 + 8]; + } else if 
 (code >= 100 && code <= 107) { + // Bright background colors + style.bg = ANSI_COLORS[code - 100 + 8]; + } + // Ignore unrecognized codes + + i++; + } +} + +// Match ANSI escape sequences: ESC[ followed by params and ending with 'm' +const ANSI_REGEX = /\x1b\[([\d;]*)m/g; + +/** + * Convert ANSI-escaped text to HTML with inline styles. + */ +export function ansiToHtml(text: string): string { + const style = createEmptyStyle(); + let result = ""; + let lastIndex = 0; + let inSpan = false; + + // Reset regex state + ANSI_REGEX.lastIndex = 0; + + let match = ANSI_REGEX.exec(text); + while (match !== null) { + // Add text before this escape sequence + const beforeText = text.slice(lastIndex, match.index); + if (beforeText) { + result += escapeHtml(beforeText); + } + + // Parse SGR parameters + const paramStr = match[1]; + const params = paramStr ? paramStr.split(";").map((p) => parseInt(p, 10) || 0) : [0]; + + // Close existing span if we have one + if (inSpan) { + result += "</span>"; + inSpan = false; + } + + // Apply the codes + applySgrCode(params, style); + + // Open new span if we have any styling + if (hasStyle(style)) { + result += `<span style="${styleToInlineCSS(style)}">`; + inSpan = true; + } + + lastIndex = match.index + match[0].length; + match = ANSI_REGEX.exec(text); + } + + // Add remaining text + const remainingText = text.slice(lastIndex); + if (remainingText) { + result += escapeHtml(remainingText); + } + + // Close any open span + if (inSpan) { + result += "</span>"; + } + + return result; +} + +/** + * Convert array of ANSI-escaped lines to HTML. + * Each line is wrapped in a div element. + */ +export function ansiLinesToHtml(lines: string[]): string { + return lines.map((line) => `<div class="ansi-line">${ansiToHtml(line) || "&nbsp;"}</div>
`).join("\n"); +} diff --git a/packages/pi-coding-agent/src/core/export-html/index.ts b/packages/pi-coding-agent/src/core/export-html/index.ts new file mode 100644 index 000000000..4130d4e6b --- /dev/null +++ b/packages/pi-coding-agent/src/core/export-html/index.ts @@ -0,0 +1,306 @@ +import type { AgentState } from "@gsd/pi-agent-core"; +import { existsSync, readFileSync, writeFileSync } from "fs"; +import { basename, join } from "path"; +import { APP_NAME, getExportTemplateDir } from "../../config.js"; +import { getResolvedThemeColors, getThemeExportColors } from "../../modes/interactive/theme/theme.js"; +import type { ToolInfo } from "../extensions/types.js"; +import type { SessionEntry } from "../session-manager.js"; +import { SessionManager } from "../session-manager.js"; + +/** + * Interface for rendering custom tools to HTML. + * Used by agent-session to pre-render extension tool output. + */ +export interface ToolHtmlRenderer { + /** Render a tool call to HTML. Returns undefined if tool has no custom renderer. */ + renderCall(toolName: string, args: unknown): string | undefined; + /** Render a tool result to HTML. Returns collapsed/expanded or undefined if tool has no custom renderer. */ + renderResult( + toolName: string, + result: Array<{ type: string; text?: string; data?: string; mimeType?: string }>, + details: unknown, + isError: boolean, + ): { collapsed?: string; expanded?: string } | undefined; +} + +/** Pre-rendered HTML for a custom tool call and result */ +interface RenderedToolHtml { + callHtml?: string; + resultHtmlCollapsed?: string; + resultHtmlExpanded?: string; +} + +export interface ExportOptions { + outputPath?: string; + themeName?: string; + /** Optional tool renderer for custom tools */ + toolRenderer?: ToolHtmlRenderer; +} + +/** Parse a color string to RGB values. Supports hex (#RRGGBB) and rgb(r,g,b) formats. 
*/ +function parseColor(color: string): { r: number; g: number; b: number } | undefined { + const hexMatch = color.match(/^#([0-9a-fA-F]{2})([0-9a-fA-F]{2})([0-9a-fA-F]{2})$/); + if (hexMatch) { + return { + r: Number.parseInt(hexMatch[1], 16), + g: Number.parseInt(hexMatch[2], 16), + b: Number.parseInt(hexMatch[3], 16), + }; + } + const rgbMatch = color.match(/^rgb\s*\(\s*(\d+)\s*,\s*(\d+)\s*,\s*(\d+)\s*\)$/); + if (rgbMatch) { + return { + r: Number.parseInt(rgbMatch[1], 10), + g: Number.parseInt(rgbMatch[2], 10), + b: Number.parseInt(rgbMatch[3], 10), + }; + } + return undefined; +} + +/** Calculate relative luminance of a color (0-1, higher = lighter). */ +function getLuminance(r: number, g: number, b: number): number { + const toLinear = (c: number) => { + const s = c / 255; + return s <= 0.03928 ? s / 12.92 : ((s + 0.055) / 1.055) ** 2.4; + }; + return 0.2126 * toLinear(r) + 0.7152 * toLinear(g) + 0.0722 * toLinear(b); +} + +/** Adjust color brightness. Factor > 1 lightens, < 1 darkens. */ +function adjustBrightness(color: string, factor: number): string { + const parsed = parseColor(color); + if (!parsed) return color; + const adjust = (c: number) => Math.min(255, Math.max(0, Math.round(c * factor))); + return `rgb(${adjust(parsed.r)}, ${adjust(parsed.g)}, ${adjust(parsed.b)})`; +} + +/** Derive export background colors from a base color (e.g., userMessageBg). 
*/ +function deriveExportColors(baseColor: string): { pageBg: string; cardBg: string; infoBg: string } { + const parsed = parseColor(baseColor); + if (!parsed) { + return { + pageBg: "rgb(24, 24, 30)", + cardBg: "rgb(30, 30, 36)", + infoBg: "rgb(60, 55, 40)", + }; + } + + const luminance = getLuminance(parsed.r, parsed.g, parsed.b); + const isLight = luminance > 0.5; + + if (isLight) { + return { + pageBg: adjustBrightness(baseColor, 0.96), + cardBg: baseColor, + infoBg: `rgb(${Math.min(255, parsed.r + 10)}, ${Math.min(255, parsed.g + 5)}, ${Math.max(0, parsed.b - 20)})`, + }; + } + return { + pageBg: adjustBrightness(baseColor, 0.7), + cardBg: adjustBrightness(baseColor, 0.85), + infoBg: `rgb(${Math.min(255, parsed.r + 20)}, ${Math.min(255, parsed.g + 15)}, ${parsed.b})`, + }; +} + +/** + * Generate CSS custom property declarations from theme colors. + */ +function generateThemeVars(themeName?: string): string { + const colors = getResolvedThemeColors(themeName); + const lines: string[] = []; + for (const [key, value] of Object.entries(colors)) { + lines.push(`--${key}: ${value};`); + } + + // Use explicit theme export colors if available, otherwise derive from userMessageBg + const themeExport = getThemeExportColors(themeName); + const userMessageBg = colors.userMessageBg || "#343541"; + const derivedColors = deriveExportColors(userMessageBg); + + lines.push(`--exportPageBg: ${themeExport.pageBg ?? derivedColors.pageBg};`); + lines.push(`--exportCardBg: ${themeExport.cardBg ?? derivedColors.cardBg};`); + lines.push(`--exportInfoBg: ${themeExport.infoBg ?? derivedColors.infoBg};`); + + return lines.join("\n "); +} + +interface SessionData { + header: ReturnType; + entries: ReturnType; + leafId: string | null; + systemPrompt?: string; + tools?: ToolInfo[]; + /** Pre-rendered HTML for custom tool calls/results, keyed by tool call ID */ + renderedTools?: Record; +} + +/** + * Core HTML generation logic shared by both export functions. 
+ */ +function generateHtml(sessionData: SessionData, themeName?: string): string { + const templateDir = getExportTemplateDir(); + const template = readFileSync(join(templateDir, "template.html"), "utf-8"); + const templateCss = readFileSync(join(templateDir, "template.css"), "utf-8"); + const templateJs = readFileSync(join(templateDir, "template.js"), "utf-8"); + const markedJs = readFileSync(join(templateDir, "vendor", "marked.min.js"), "utf-8"); + const hljsJs = readFileSync(join(templateDir, "vendor", "highlight.min.js"), "utf-8"); + + const themeVars = generateThemeVars(themeName); + const colors = getResolvedThemeColors(themeName); + const exportColors = deriveExportColors(colors.userMessageBg || "#343541"); + const bodyBg = exportColors.pageBg; + const containerBg = exportColors.cardBg; + const infoBg = exportColors.infoBg; + + // Base64 encode session data to avoid escaping issues + const sessionDataBase64 = Buffer.from(JSON.stringify(sessionData)).toString("base64"); + + // Build the CSS with theme variables injected + const css = templateCss + .replace("{{THEME_VARS}}", themeVars) + .replace("{{BODY_BG}}", bodyBg) + .replace("{{CONTAINER_BG}}", containerBg) + .replace("{{INFO_BG}}", infoBg); + + return template + .replace("{{CSS}}", css) + .replace("{{JS}}", templateJs) + .replace("{{SESSION_DATA}}", sessionDataBase64) + .replace("{{MARKED_JS}}", markedJs) + .replace("{{HIGHLIGHT_JS}}", hljsJs); +} + +/** Built-in tool names that have custom rendering in template.js */ +const BUILTIN_TOOLS = new Set(["bash", "read", "write", "edit", "ls", "find", "grep"]); + +/** + * Pre-render custom tools to HTML using their TUI renderers. 
 + */ +function preRenderCustomTools( + entries: SessionEntry[], + toolRenderer: ToolHtmlRenderer, +): Record<string, RenderedToolHtml> { + const renderedTools: Record<string, RenderedToolHtml> = {}; + + for (const entry of entries) { + if (entry.type !== "message") continue; + const msg = entry.message; + + // Find tool calls in assistant messages + if (msg.role === "assistant" && Array.isArray(msg.content)) { + for (const block of msg.content) { + if (block.type === "toolCall" && !BUILTIN_TOOLS.has(block.name)) { + const callHtml = toolRenderer.renderCall(block.name, block.arguments); + if (callHtml) { + renderedTools[block.id] = { callHtml }; + } + } + } + } + + // Find tool results + if (msg.role === "toolResult" && msg.toolCallId) { + const toolName = msg.toolName || ""; + // Only render if we have a pre-rendered call OR it's not a built-in tool + const existing = renderedTools[msg.toolCallId]; + if (existing || !BUILTIN_TOOLS.has(toolName)) { + const rendered = toolRenderer.renderResult(toolName, msg.content, msg.details, msg.isError || false); + if (rendered) { + renderedTools[msg.toolCallId] = { + ...existing, + resultHtmlCollapsed: rendered.collapsed, + resultHtmlExpanded: rendered.expanded, + }; + } + } + } + } + + return renderedTools; +} + +/** + * Export session to HTML using SessionManager and AgentState. + * Used by TUI's /export command. + */ +export async function exportSessionToHtml( + sm: SessionManager, + state?: AgentState, + options?: ExportOptions | string, +): Promise<string> { + const opts: ExportOptions = typeof options === "string" ? 
{ outputPath: options } : options || {}; + + const sessionFile = sm.getSessionFile(); + if (!sessionFile) { + throw new Error("Cannot export in-memory session to HTML"); + } + if (!existsSync(sessionFile)) { + throw new Error("Nothing to export yet - start a conversation first"); + } + + const entries = sm.getEntries(); + + // Pre-render custom tools if a tool renderer is provided + let renderedTools: Record<string, RenderedToolHtml> | undefined; + if (opts.toolRenderer) { + renderedTools = preRenderCustomTools(entries, opts.toolRenderer); + // Only include if we actually rendered something + if (Object.keys(renderedTools).length === 0) { + renderedTools = undefined; + } + } + + const sessionData: SessionData = { + header: sm.getHeader(), + entries, + leafId: sm.getLeafId(), + systemPrompt: state?.systemPrompt, + tools: state?.tools?.map((t) => ({ name: t.name, description: t.description, parameters: t.parameters })), + renderedTools, + }; + + const html = generateHtml(sessionData, opts.themeName); + + let outputPath = opts.outputPath; + if (!outputPath) { + const sessionBasename = basename(sessionFile, ".jsonl"); + outputPath = `${APP_NAME}-session-${sessionBasename}.html`; + } + + writeFileSync(outputPath, html, "utf8"); + return outputPath; +} + +/** + * Export session file to HTML (standalone, without AgentState). + * Used by CLI for exporting arbitrary session files. + */ +export async function exportFromFile(inputPath: string, options?: ExportOptions | string): Promise<string> { + const opts: ExportOptions = typeof options === "string" ? 
{ outputPath: options } : options || {}; + + if (!existsSync(inputPath)) { + throw new Error(`File not found: ${inputPath}`); + } + + const sm = SessionManager.open(inputPath); + + const sessionData: SessionData = { + header: sm.getHeader(), + entries: sm.getEntries(), + leafId: sm.getLeafId(), + systemPrompt: undefined, + tools: undefined, + }; + + const html = generateHtml(sessionData, opts.themeName); + + let outputPath = opts.outputPath; + if (!outputPath) { + const inputBasename = basename(inputPath, ".jsonl"); + outputPath = `${APP_NAME}-session-${inputBasename}.html`; + } + + writeFileSync(outputPath, html, "utf8"); + return outputPath; +} diff --git a/packages/pi-coding-agent/src/core/export-html/template.css b/packages/pi-coding-agent/src/core/export-html/template.css new file mode 100644 index 000000000..6ef5d3976 --- /dev/null +++ b/packages/pi-coding-agent/src/core/export-html/template.css @@ -0,0 +1,971 @@ + :root { + {{THEME_VARS}} + --body-bg: {{BODY_BG}}; + --container-bg: {{CONTAINER_BG}}; + --info-bg: {{INFO_BG}}; + } + + * { margin: 0; padding: 0; box-sizing: border-box; } + + :root { + --line-height: 18px; /* 12px font * 1.5 */ + } + + body { + font-family: ui-monospace, 'Cascadia Code', 'Source Code Pro', Menlo, Consolas, 'DejaVu Sans Mono', monospace; + font-size: 12px; + line-height: var(--line-height); + color: var(--text); + background: var(--body-bg); + } + + #app { + display: flex; + min-height: 100vh; + } + + /* Sidebar */ + #sidebar { + width: 400px; + background: var(--container-bg); + flex-shrink: 0; + display: flex; + flex-direction: column; + position: sticky; + top: 0; + height: 100vh; + border-right: 1px solid var(--dim); + } + + .sidebar-header { + padding: 8px 12px; + flex-shrink: 0; + } + + .sidebar-controls { + padding: 8px 8px 4px 8px; + } + + .sidebar-search { + width: 100%; + box-sizing: border-box; + padding: 4px 8px; + font-size: 11px; + font-family: inherit; + background: var(--body-bg); + color: var(--text); + border: 
1px solid var(--dim); + border-radius: 3px; + } + + .sidebar-filters { + display: flex; + padding: 4px 8px 8px 8px; + gap: 4px; + align-items: center; + flex-wrap: wrap; + } + + .sidebar-search:focus { + outline: none; + border-color: var(--accent); + } + + .sidebar-search::placeholder { + color: var(--muted); + } + + .filter-btn { + padding: 3px 8px; + font-size: 10px; + font-family: inherit; + background: transparent; + color: var(--muted); + border: 1px solid var(--dim); + border-radius: 3px; + cursor: pointer; + } + + .filter-btn:hover { + color: var(--text); + border-color: var(--text); + } + + .filter-btn.active { + background: var(--accent); + color: var(--body-bg); + border-color: var(--accent); + } + + .sidebar-close { + display: none; + padding: 3px 8px; + font-size: 12px; + font-family: inherit; + background: transparent; + color: var(--muted); + border: 1px solid var(--dim); + border-radius: 3px; + cursor: pointer; + margin-left: auto; + } + + .sidebar-close:hover { + color: var(--text); + border-color: var(--text); + } + + .tree-container { + flex: 1; + overflow: auto; + padding: 4px 0; + } + + .tree-node { + padding: 0 8px; + cursor: pointer; + display: flex; + align-items: baseline; + font-size: 11px; + line-height: 13px; + white-space: nowrap; + } + + .tree-node:hover { + background: var(--selectedBg); + } + + .tree-node.active { + background: var(--selectedBg); + } + + .tree-node.active .tree-content { + font-weight: bold; + } + + .tree-node.in-path { + background: color-mix(in srgb, var(--accent) 10%, transparent); + } + + .tree-node:not(.in-path) { + opacity: 0.5; + } + + .tree-node:not(.in-path):hover { + opacity: 1; + } + + .tree-prefix { + color: var(--muted); + flex-shrink: 0; + font-family: monospace; + white-space: pre; + } + + .tree-marker { + color: var(--accent); + flex-shrink: 0; + } + + .tree-content { + color: var(--text); + } + + .tree-role-user { + color: var(--accent); + } + + .tree-role-assistant { + color: var(--success); + } + + 
.tree-role-tool { + color: var(--muted); + } + + .tree-muted { + color: var(--muted); + } + + .tree-error { + color: var(--error); + } + + .tree-compaction { + color: var(--borderAccent); + } + + .tree-branch-summary { + color: var(--warning); + } + + .tree-custom-message { + color: var(--customMessageLabel); + } + + .tree-status { + padding: 4px 12px; + font-size: 10px; + color: var(--muted); + flex-shrink: 0; + } + + /* Main content */ + #content { + flex: 1; + overflow-y: auto; + padding: var(--line-height) calc(var(--line-height) * 2); + display: flex; + flex-direction: column; + align-items: center; + } + + #content > * { + width: 100%; + max-width: 800px; + } + + /* Help bar */ + .help-bar { + font-size: 11px; + color: var(--warning); + margin-bottom: var(--line-height); + display: flex; + align-items: center; + gap: 12px; + } + + .download-json-btn { + font-size: 10px; + padding: 2px 8px; + background: var(--container-bg); + border: 1px solid var(--border); + border-radius: 3px; + color: var(--text); + cursor: pointer; + font-family: inherit; + } + + .download-json-btn:hover { + background: var(--hover); + border-color: var(--borderAccent); + } + + /* Header */ + .header { + background: var(--container-bg); + border-radius: 4px; + padding: var(--line-height); + margin-bottom: var(--line-height); + } + + .header h1 { + font-size: 12px; + font-weight: bold; + color: var(--borderAccent); + margin-bottom: var(--line-height); + } + + .header-info { + display: flex; + flex-direction: column; + gap: 0; + font-size: 11px; + } + + .info-item { + color: var(--dim); + display: flex; + align-items: baseline; + } + + .info-label { + font-weight: 600; + margin-right: 8px; + min-width: 100px; + } + + .info-value { + color: var(--text); + flex: 1; + } + + /* Messages */ + #messages { + display: flex; + flex-direction: column; + gap: var(--line-height); + } + + .message-timestamp { + font-size: 10px; + color: var(--dim); + opacity: 0.8; + } + + .user-message { + background: 
var(--userMessageBg); + color: var(--userMessageText); + padding: var(--line-height); + border-radius: 4px; + position: relative; + } + + .assistant-message { + padding: 0; + position: relative; + } + + /* Copy link button - appears on hover */ + .copy-link-btn { + position: absolute; + top: 8px; + right: 8px; + width: 28px; + height: 28px; + padding: 6px; + background: var(--container-bg); + border: 1px solid var(--dim); + border-radius: 4px; + color: var(--muted); + cursor: pointer; + opacity: 0; + transition: opacity 0.15s, background 0.15s, color 0.15s; + display: flex; + align-items: center; + justify-content: center; + z-index: 10; + } + + .user-message:hover .copy-link-btn, + .assistant-message:hover .copy-link-btn { + opacity: 1; + } + + .copy-link-btn:hover { + background: var(--accent); + color: var(--body-bg); + border-color: var(--accent); + } + + .copy-link-btn.copied { + background: var(--success, #22c55e); + color: white; + border-color: var(--success, #22c55e); + } + + /* Highlight effect for deep-linked messages */ + .user-message.highlight, + .assistant-message.highlight { + animation: highlight-pulse 2s ease-out; + } + + @keyframes highlight-pulse { + 0% { + box-shadow: 0 0 0 3px var(--accent); + } + 100% { + box-shadow: 0 0 0 0 transparent; + } + } + + .assistant-message > .message-timestamp { + padding-left: var(--line-height); + } + + .assistant-text { + padding: var(--line-height); + padding-bottom: 0; + } + + .message-timestamp + .assistant-text, + .message-timestamp + .thinking-block { + padding-top: 0; + } + + .thinking-block + .assistant-text { + padding-top: 0; + } + + .thinking-text { + padding: var(--line-height); + color: var(--thinkingText); + font-style: italic; + white-space: pre-wrap; + } + + .message-timestamp + .thinking-block .thinking-text, + .message-timestamp + .thinking-block .thinking-collapsed { + padding-top: 0; + } + + .thinking-collapsed { + display: none; + padding: var(--line-height); + color: var(--thinkingText); + 
font-style: italic; + } + + /* Tool execution */ + .tool-execution { + padding: var(--line-height); + border-radius: 4px; + } + + .tool-execution + .tool-execution { + margin-top: var(--line-height); + } + + .assistant-text + .tool-execution { + margin-top: var(--line-height); + } + + .tool-execution.pending { background: var(--toolPendingBg); } + .tool-execution.success { background: var(--toolSuccessBg); } + .tool-execution.error { background: var(--toolErrorBg); } + + .tool-header, .tool-name { + font-weight: bold; + } + + .tool-path { + color: var(--accent); + word-break: break-all; + } + + .line-numbers { + color: var(--warning); + } + + .line-count { + color: var(--dim); + } + + .tool-command { + font-weight: bold; + white-space: pre-wrap; + word-wrap: break-word; + overflow-wrap: break-word; + word-break: break-word; + } + + .tool-output { + margin-top: var(--line-height); + color: var(--toolOutput); + word-wrap: break-word; + overflow-wrap: break-word; + word-break: break-word; + font-family: inherit; + overflow-x: auto; + } + + .tool-output > div, + .output-preview, + .output-full { + margin: 0; + padding: 0; + line-height: var(--line-height); + } + + .tool-output pre { + margin: 0; + padding: 0; + font-family: inherit; + color: inherit; + white-space: pre-wrap; + word-wrap: break-word; + overflow-wrap: break-word; + } + + .tool-output code { + padding: 0; + background: none; + color: var(--text); + } + + .tool-output.expandable { + cursor: pointer; + } + + .tool-output.expandable:hover { + opacity: 0.9; + } + + .tool-output.expandable .output-full { + display: none; + } + + .tool-output.expandable.expanded .output-preview { + display: none; + } + + .tool-output.expandable.expanded .output-full { + display: block; + } + + .ansi-line { + white-space: pre-wrap; + } + + .tool-images { + } + + .tool-image { + max-width: 100%; + max-height: 500px; + border-radius: 4px; + margin: var(--line-height) 0; + } + + .expand-hint { + color: var(--toolOutput); + } + + /* 
Diff */ + .tool-diff { + font-size: 11px; + overflow-x: auto; + white-space: pre; + } + + .diff-added { color: var(--toolDiffAdded); } + .diff-removed { color: var(--toolDiffRemoved); } + .diff-context { color: var(--toolDiffContext); } + + /* Model change */ + .model-change { + padding: 0 var(--line-height); + color: var(--dim); + font-size: 11px; + } + + .model-name { + color: var(--borderAccent); + font-weight: bold; + } + + /* Compaction / Branch Summary - matches customMessage colors from TUI */ + .compaction { + background: var(--customMessageBg); + border-radius: 4px; + padding: var(--line-height); + cursor: pointer; + } + + .compaction-label { + color: var(--customMessageLabel); + font-weight: bold; + } + + .compaction-collapsed { + color: var(--customMessageText); + } + + .compaction-content { + display: none; + color: var(--customMessageText); + white-space: pre-wrap; + margin-top: var(--line-height); + } + + .compaction.expanded .compaction-collapsed { + display: none; + } + + .compaction.expanded .compaction-content { + display: block; + } + + /* System prompt */ + .system-prompt { + background: var(--customMessageBg); + padding: var(--line-height); + border-radius: 4px; + margin-bottom: var(--line-height); + } + + .system-prompt.expandable { + cursor: pointer; + } + + .system-prompt-header { + font-weight: bold; + color: var(--customMessageLabel); + } + + .system-prompt-preview { + color: var(--customMessageText); + white-space: pre-wrap; + word-wrap: break-word; + font-size: 11px; + margin-top: var(--line-height); + } + + .system-prompt-expand-hint { + color: var(--muted); + font-style: italic; + margin-top: 4px; + } + + .system-prompt-full { + display: none; + color: var(--customMessageText); + white-space: pre-wrap; + word-wrap: break-word; + font-size: 11px; + margin-top: var(--line-height); + } + + .system-prompt.expanded .system-prompt-preview, + .system-prompt.expanded .system-prompt-expand-hint { + display: none; + } + + .system-prompt.expanded 
.system-prompt-full { + display: block; + } + + .system-prompt.provider-prompt { + border-left: 3px solid var(--warning); + } + + .system-prompt-note { + font-size: 10px; + font-style: italic; + color: var(--muted); + margin-top: 4px; + } + + /* Tools list */ + .tools-list { + background: var(--customMessageBg); + padding: var(--line-height); + border-radius: 4px; + margin-bottom: var(--line-height); + } + + .tools-header { + font-weight: bold; + color: var(--customMessageLabel); + margin-bottom: var(--line-height); + } + + .tool-item { + font-size: 11px; + } + + .tool-item-name { + font-weight: bold; + color: var(--text); + } + + .tool-item-desc { + color: var(--dim); + } + + .tool-params-hint { + color: var(--muted); + font-style: italic; + } + + .tool-item:has(.tool-params-hint) { + cursor: pointer; + } + + .tool-params-hint::after { + content: '[click to show parameters]'; + } + + .tool-item.params-expanded .tool-params-hint::after { + content: '[hide parameters]'; + } + + .tool-params-content { + display: none; + margin-top: 4px; + margin-left: 12px; + padding-left: 8px; + border-left: 1px solid var(--dim); + } + + .tool-item.params-expanded .tool-params-content { + display: block; + } + + .tool-param { + margin-bottom: 4px; + font-size: 11px; + } + + .tool-param-name { + font-weight: bold; + color: var(--text); + } + + .tool-param-type { + color: var(--dim); + font-style: italic; + } + + .tool-param-required { + color: var(--warning, #e8a838); + font-size: 10px; + } + + .tool-param-optional { + color: var(--dim); + font-size: 10px; + } + + .tool-param-desc { + color: var(--dim); + margin-left: 8px; + } + + /* Hook/custom messages */ + .hook-message { + background: var(--customMessageBg); + color: var(--customMessageText); + padding: var(--line-height); + border-radius: 4px; + } + + .hook-type { + color: var(--customMessageLabel); + font-weight: bold; + } + + /* Branch summary */ + .branch-summary { + background: var(--customMessageBg); + padding: 
var(--line-height); + border-radius: 4px; + } + + .branch-summary-header { + font-weight: bold; + color: var(--borderAccent); + } + + /* Error */ + .error-text { + color: var(--error); + padding: 0 var(--line-height); + } + .tool-error { + color: var(--error); + } + + /* Images */ + .message-images { + margin-bottom: 12px; + } + + .message-image { + max-width: 100%; + max-height: 400px; + border-radius: 4px; + margin: var(--line-height) 0; + } + + /* Markdown content */ + .markdown-content h1, + .markdown-content h2, + .markdown-content h3, + .markdown-content h4, + .markdown-content h5, + .markdown-content h6 { + color: var(--mdHeading); + margin: var(--line-height) 0 0 0; + font-weight: bold; + } + + .markdown-content h1 { font-size: 1em; } + .markdown-content h2 { font-size: 1em; } + .markdown-content h3 { font-size: 1em; } + .markdown-content h4 { font-size: 1em; } + .markdown-content h5 { font-size: 1em; } + .markdown-content h6 { font-size: 1em; } + .markdown-content p { margin: 0; } + .markdown-content p + p { margin-top: var(--line-height); } + + .markdown-content a { + color: var(--mdLink); + text-decoration: underline; + } + + .markdown-content code { + background: rgba(128, 128, 128, 0.2); + color: var(--mdCode); + padding: 0 4px; + border-radius: 3px; + font-family: inherit; + } + + .markdown-content pre { + background: transparent; + margin: var(--line-height) 0; + overflow-x: auto; + } + + .markdown-content pre code { + display: block; + background: none; + color: var(--text); + } + + .markdown-content blockquote { + border-left: 3px solid var(--mdQuoteBorder); + padding-left: var(--line-height); + margin: var(--line-height) 0; + color: var(--mdQuote); + font-style: italic; + } + + .markdown-content ul, + .markdown-content ol { + margin: var(--line-height) 0; + padding-left: calc(var(--line-height) * 2); + } + + .markdown-content li { margin: 0; } + .markdown-content li::marker { color: var(--mdListBullet); } + + .markdown-content hr { + border: none; 
+ border-top: 1px solid var(--mdHr); + margin: var(--line-height) 0; + } + + .markdown-content table { + border-collapse: collapse; + margin: 0.5em 0; + width: 100%; + } + + .markdown-content th, + .markdown-content td { + border: 1px solid var(--mdCodeBlockBorder); + padding: 6px 10px; + text-align: left; + } + + .markdown-content th { + background: rgba(128, 128, 128, 0.1); + font-weight: bold; + } + + .markdown-content img { + max-width: 100%; + border-radius: 4px; + } + + /* Syntax highlighting */ + .hljs { background: transparent; color: var(--text); } + .hljs-comment, .hljs-quote { color: var(--syntaxComment); } + .hljs-keyword, .hljs-selector-tag { color: var(--syntaxKeyword); } + .hljs-number, .hljs-literal { color: var(--syntaxNumber); } + .hljs-string, .hljs-doctag { color: var(--syntaxString); } + /* Function names: hljs v11 uses .hljs-title.function_ compound class */ + .hljs-function, .hljs-title, .hljs-title.function_, .hljs-section, .hljs-name { color: var(--syntaxFunction); } + /* Types: hljs v11 uses .hljs-title.class_ for class names */ + .hljs-type, .hljs-class, .hljs-title.class_, .hljs-built_in { color: var(--syntaxType); } + .hljs-attr, .hljs-variable, .hljs-variable.language_, .hljs-params, .hljs-property { color: var(--syntaxVariable); } + .hljs-meta, .hljs-meta .hljs-keyword, .hljs-meta .hljs-string { color: var(--syntaxKeyword); } + .hljs-operator { color: var(--syntaxOperator); } + .hljs-punctuation { color: var(--syntaxPunctuation); } + .hljs-subst { color: var(--text); } + + /* Footer */ + .footer { + margin-top: 48px; + padding: 20px; + text-align: center; + color: var(--dim); + font-size: 10px; + } + + /* Mobile */ + #hamburger { + display: none; + position: fixed; + top: 10px; + left: 10px; + z-index: 100; + padding: 3px 8px; + font-size: 12px; + font-family: inherit; + background: transparent; + color: var(--muted); + border: 1px solid var(--dim); + border-radius: 3px; + cursor: pointer; + } + + #hamburger:hover { + color: 
var(--text); + border-color: var(--text); + } + + + + #sidebar-overlay { + display: none; + position: fixed; + top: 0; + left: 0; + right: 0; + bottom: 0; + background: rgba(0, 0, 0, 0.5); + z-index: 98; + } + + @media (max-width: 900px) { + #sidebar { + position: fixed; + left: -400px; + width: 400px; + top: 0; + bottom: 0; + height: 100vh; + z-index: 99; + transition: left 0.3s; + } + + #sidebar.open { + left: 0; + } + + #sidebar-overlay.open { + display: block; + } + + #hamburger { + display: block; + } + + .sidebar-close { + display: block; + } + + #content { + padding: var(--line-height) 16px; + } + + #content > * { + max-width: 100%; + } + } + + @media (max-width: 500px) { + #sidebar { + width: 100vw; + left: -100vw; + } + } + + @media print { + #sidebar, #sidebar-toggle { display: none !important; } + body { background: white; color: black; } + #content { max-width: none; } + } diff --git a/packages/pi-coding-agent/src/core/export-html/template.html b/packages/pi-coding-agent/src/core/export-html/template.html new file mode 100644 index 000000000..42f2a45b0 --- /dev/null +++ b/packages/pi-coding-agent/src/core/export-html/template.html @@ -0,0 +1,54 @@ + + + + + + Session Export + + + + + +
+ +
+
+
+
+
+ +
+
+ + + + + + + + + + + + + diff --git a/packages/pi-coding-agent/src/core/export-html/template.js b/packages/pi-coding-agent/src/core/export-html/template.js new file mode 100644 index 000000000..e170d7a6b --- /dev/null +++ b/packages/pi-coding-agent/src/core/export-html/template.js @@ -0,0 +1,1583 @@ + (function() { + 'use strict'; + + // ============================================================ + // DATA LOADING + // ============================================================ + + const base64 = document.getElementById('session-data').textContent; + const binary = atob(base64); + const bytes = new Uint8Array(binary.length); + for (let i = 0; i < binary.length; i++) { + bytes[i] = binary.charCodeAt(i); + } + const data = JSON.parse(new TextDecoder('utf-8').decode(bytes)); + const { header, entries, leafId: defaultLeafId, systemPrompt, tools, renderedTools } = data; + + // ============================================================ + // URL PARAMETER HANDLING + // ============================================================ + + // Parse URL parameters for deep linking: leafId and targetId + // Check for injected params (when loaded in iframe via srcdoc) or use window.location + const injectedParams = document.querySelector('meta[name="pi-url-params"]'); + const searchString = injectedParams ? 
injectedParams.content : window.location.search.substring(1); + const urlParams = new URLSearchParams(searchString); + const urlLeafId = urlParams.get('leafId'); + const urlTargetId = urlParams.get('targetId'); + // Use URL leafId if provided, otherwise fall back to session default + const leafId = urlLeafId || defaultLeafId; + + // ============================================================ + // DATA STRUCTURES + // ============================================================ + + // Entry lookup by ID + const byId = new Map(); + for (const entry of entries) { + byId.set(entry.id, entry); + } + + // Tool call lookup (toolCallId -> {name, arguments}) + const toolCallMap = new Map(); + for (const entry of entries) { + if (entry.type === 'message' && entry.message.role === 'assistant') { + const content = entry.message.content; + if (Array.isArray(content)) { + for (const block of content) { + if (block.type === 'toolCall') { + toolCallMap.set(block.id, { name: block.name, arguments: block.arguments }); + } + } + } + } + } + + // Label lookup (entryId -> label string) + // Labels are stored in 'label' entries that reference their target via targetId + const labelMap = new Map(); + for (const entry of entries) { + if (entry.type === 'label' && entry.targetId && entry.label) { + labelMap.set(entry.targetId, entry.label); + } + } + + // ============================================================ + // TREE DATA PREPARATION (no DOM, pure data) + // ============================================================ + + /** + * Build tree structure from flat entries. + * Returns array of root nodes, each with { entry, children, label }. 
+ */ + function buildTree() { + const nodeMap = new Map(); + const roots = []; + + // Create nodes + for (const entry of entries) { + nodeMap.set(entry.id, { + entry, + children: [], + label: labelMap.get(entry.id) + }); + } + + // Build parent-child relationships + for (const entry of entries) { + const node = nodeMap.get(entry.id); + if (entry.parentId === null || entry.parentId === undefined || entry.parentId === entry.id) { + roots.push(node); + } else { + const parent = nodeMap.get(entry.parentId); + if (parent) { + parent.children.push(node); + } else { + roots.push(node); + } + } + } + + // Sort children by timestamp + function sortChildren(node) { + node.children.sort((a, b) => + new Date(a.entry.timestamp).getTime() - new Date(b.entry.timestamp).getTime() + ); + node.children.forEach(sortChildren); + } + roots.forEach(sortChildren); + + return roots; + } + + /** + * Build set of entry IDs on path from root to target. + */ + function buildActivePathIds(targetId) { + const ids = new Set(); + let current = byId.get(targetId); + while (current) { + ids.add(current.id); + // Stop if no parent or self-referencing (root) + if (!current.parentId || current.parentId === current.id) { + break; + } + current = byId.get(current.parentId); + } + return ids; + } + + /** + * Get array of entries from root to target (the conversation path). + */ + function getPath(targetId) { + const path = []; + let current = byId.get(targetId); + while (current) { + path.unshift(current); + // Stop if no parent or self-referencing (root) + if (!current.parentId || current.parentId === current.id) { + break; + } + current = byId.get(current.parentId); + } + return path; + } + + // Tree node lookup for finding leaves + let treeNodeMap = null; + + /** + * Find the newest leaf node reachable from a given node. + * This allows clicking any node in a branch to show the full branch. + * Children are sorted by timestamp, so the newest is always last. 
+ */ + function findNewestLeaf(nodeId) { + // Build tree node map lazily + if (!treeNodeMap) { + treeNodeMap = new Map(); + const tree = buildTree(); + function mapNodes(node) { + treeNodeMap.set(node.entry.id, node); + node.children.forEach(mapNodes); + } + tree.forEach(mapNodes); + } + + const node = treeNodeMap.get(nodeId); + if (!node) return nodeId; + + // Follow the newest (last) child at each level + let current = node; + while (current.children.length > 0) { + current = current.children[current.children.length - 1]; + } + return current.entry.id; + } + + /** + * Flatten tree into list with indentation and connector info. + * Returns array of { node, indent, showConnector, isLast, gutters, isVirtualRootChild, multipleRoots }. + * Matches tree-selector.ts logic exactly. + */ + function flattenTree(roots, activePathIds) { + const result = []; + const multipleRoots = roots.length > 1; + + // Mark which subtrees contain the active leaf + const containsActive = new Map(); + function markActive(node) { + let has = activePathIds.has(node.entry.id); + for (const child of node.children) { + if (markActive(child)) has = true; + } + containsActive.set(node, has); + return has; + } + roots.forEach(markActive); + + // Stack: [node, indent, justBranched, showConnector, isLast, gutters, isVirtualRootChild] + const stack = []; + + // Add roots (prioritize branch containing active leaf) + const orderedRoots = [...roots].sort((a, b) => + Number(containsActive.get(b)) - Number(containsActive.get(a)) + ); + for (let i = orderedRoots.length - 1; i >= 0; i--) { + const isLast = i === orderedRoots.length - 1; + stack.push([orderedRoots[i], multipleRoots ? 
1 : 0, multipleRoots, multipleRoots, isLast, [], multipleRoots]); + } + + while (stack.length > 0) { + const [node, indent, justBranched, showConnector, isLast, gutters, isVirtualRootChild] = stack.pop(); + + result.push({ node, indent, showConnector, isLast, gutters, isVirtualRootChild, multipleRoots }); + + const children = node.children; + const multipleChildren = children.length > 1; + + // Order children (active branch first) + const orderedChildren = [...children].sort((a, b) => + Number(containsActive.get(b)) - Number(containsActive.get(a)) + ); + + // Calculate child indent (matches tree-selector.ts) + let childIndent; + if (multipleChildren) { + // Parent branches: children get +1 + childIndent = indent + 1; + } else if (justBranched && indent > 0) { + // First generation after a branch: +1 for visual grouping + childIndent = indent + 1; + } else { + // Single-child chain: stay flat + childIndent = indent; + } + + // Build gutters for children + const connectorDisplayed = showConnector && !isVirtualRootChild; + const currentDisplayIndent = multipleRoots ? Math.max(0, indent - 1) : indent; + const connectorPosition = Math.max(0, currentDisplayIndent - 1); + const childGutters = connectorDisplayed + ? [...gutters, { position: connectorPosition, show: !isLast }] + : gutters; + + // Add children in reverse order for stack + for (let i = orderedChildren.length - 1; i >= 0; i--) { + const childIsLast = i === orderedChildren.length - 1; + stack.push([orderedChildren[i], childIndent, multipleChildren, multipleChildren, childIsLast, childGutters, false]); + } + } + + return result; + } + + /** + * Build ASCII prefix string for tree node. + */ + function buildTreePrefix(flatNode) { + const { indent, showConnector, isLast, gutters, isVirtualRootChild, multipleRoots } = flatNode; + const displayIndent = multipleRoots ? Math.max(0, indent - 1) : indent; + const connector = showConnector && !isVirtualRootChild ? (isLast ? 
'└─ ' : '├─ ') : ''; + const connectorPosition = connector ? displayIndent - 1 : -1; + + const totalChars = displayIndent * 3; + const prefixChars = []; + for (let i = 0; i < totalChars; i++) { + const level = Math.floor(i / 3); + const posInLevel = i % 3; + + const gutter = gutters.find(g => g.position === level); + if (gutter) { + prefixChars.push(posInLevel === 0 ? (gutter.show ? '│' : ' ') : ' '); + } else if (connector && level === connectorPosition) { + if (posInLevel === 0) { + prefixChars.push(isLast ? '└' : '├'); + } else if (posInLevel === 1) { + prefixChars.push('─'); + } else { + prefixChars.push(' '); + } + } else { + prefixChars.push(' '); + } + } + return prefixChars.join(''); + } + + // ============================================================ + // FILTERING (pure data) + // ============================================================ + + let filterMode = 'default'; + let searchQuery = ''; + + function hasTextContent(content) { + if (typeof content === 'string') return content.trim().length > 0; + if (Array.isArray(content)) { + for (const c of content) { + if (c.type === 'text' && c.text && c.text.trim().length > 0) return true; + } + } + return false; + } + + function extractContent(content) { + if (typeof content === 'string') return content; + if (Array.isArray(content)) { + return content + .filter(c => c.type === 'text' && c.text) + .map(c => c.text) + .join(''); + } + return ''; + } + + function getSearchableText(entry, label) { + const parts = []; + if (label) parts.push(label); + + switch (entry.type) { + case 'message': { + const msg = entry.message; + parts.push(msg.role); + if (msg.content) parts.push(extractContent(msg.content)); + if (msg.role === 'bashExecution' && msg.command) parts.push(msg.command); + break; + } + case 'custom_message': + parts.push(entry.customType); + parts.push(typeof entry.content === 'string' ? 
entry.content : extractContent(entry.content)); + break; + case 'compaction': + parts.push('compaction'); + break; + case 'branch_summary': + parts.push('branch summary', entry.summary); + break; + case 'model_change': + parts.push('model', entry.modelId); + break; + case 'thinking_level_change': + parts.push('thinking', entry.thinkingLevel); + break; + } + + return parts.join(' ').toLowerCase(); + } + + /** + * Filter flat nodes based on current filterMode and searchQuery. + */ + function filterNodes(flatNodes, currentLeafId) { + const searchTokens = searchQuery.toLowerCase().split(/\s+/).filter(Boolean); + + const filtered = flatNodes.filter(flatNode => { + const entry = flatNode.node.entry; + const label = flatNode.node.label; + const isCurrentLeaf = entry.id === currentLeafId; + + // Always show current leaf + if (isCurrentLeaf) return true; + + // Hide assistant messages with only tool calls (no text) unless error/aborted + if (entry.type === 'message' && entry.message.role === 'assistant') { + const msg = entry.message; + const hasText = hasTextContent(msg.content); + const isErrorOrAborted = msg.stopReason && msg.stopReason !== 'stop' && msg.stopReason !== 'toolUse'; + if (!hasText && !isErrorOrAborted) return false; + } + + // Apply filter mode + const isSettingsEntry = ['label', 'custom', 'model_change', 'thinking_level_change'].includes(entry.type); + let passesFilter = true; + + switch (filterMode) { + case 'user-only': + passesFilter = entry.type === 'message' && entry.message.role === 'user'; + break; + case 'no-tools': + passesFilter = !isSettingsEntry && !(entry.type === 'message' && entry.message.role === 'toolResult'); + break; + case 'labeled-only': + passesFilter = label !== undefined; + break; + case 'all': + passesFilter = true; + break; + default: // 'default' + passesFilter = !isSettingsEntry; + break; + } + + if (!passesFilter) return false; + + // Apply search filter + if (searchTokens.length > 0) { + const nodeText = 
getSearchableText(entry, label); + if (!searchTokens.every(t => nodeText.includes(t))) return false; + } + + return true; + }); + + // Recalculate visual structure based on visible tree + recalculateVisualStructure(filtered, flatNodes); + + return filtered; + } + + /** + * Recompute indentation/connectors for the filtered view + * + * Filtering can hide intermediate entries; descendants attach to the nearest visible ancestor. + * Keep indentation semantics aligned with flattenTree() so single-child chains don't drift right. + */ + function recalculateVisualStructure(filteredNodes, allFlatNodes) { + if (filteredNodes.length === 0) return; + + const visibleIds = new Set(filteredNodes.map(n => n.node.entry.id)); + + // Build entry map for parent lookup (using full tree) + const entryMap = new Map(); + for (const flatNode of allFlatNodes) { + entryMap.set(flatNode.node.entry.id, flatNode); + } + + // Find nearest visible ancestor for a node + function findVisibleAncestor(nodeId) { + let currentId = entryMap.get(nodeId)?.node.entry.parentId; + while (currentId != null) { + if (visibleIds.has(currentId)) { + return currentId; + } + currentId = entryMap.get(currentId)?.node.entry.parentId; + } + return null; + } + + // Build visible tree structure + const visibleParent = new Map(); + const visibleChildren = new Map(); + visibleChildren.set(null, []); // root-level nodes + + for (const flatNode of filteredNodes) { + const nodeId = flatNode.node.entry.id; + const ancestorId = findVisibleAncestor(nodeId); + visibleParent.set(nodeId, ancestorId); + + if (!visibleChildren.has(ancestorId)) { + visibleChildren.set(ancestorId, []); + } + visibleChildren.get(ancestorId).push(nodeId); + } + + // Update multipleRoots based on visible roots + const visibleRootIds = visibleChildren.get(null); + const multipleRoots = visibleRootIds.length > 1; + + // Build a map for quick lookup: nodeId → FlatNode + const filteredNodeMap = new Map(); + for (const flatNode of filteredNodes) { + 
filteredNodeMap.set(flatNode.node.entry.id, flatNode); + } + + // DFS traversal of visible tree, applying same indentation rules as flattenTree() + // Stack items: [nodeId, indent, justBranched, showConnector, isLast, gutters, isVirtualRootChild] + const stack = []; + + // Add visible roots in reverse order (to process in forward order via stack) + for (let i = visibleRootIds.length - 1; i >= 0; i--) { + const isLast = i === visibleRootIds.length - 1; + stack.push([ + visibleRootIds[i], + multipleRoots ? 1 : 0, + multipleRoots, + multipleRoots, + isLast, + [], + multipleRoots + ]); + } + + while (stack.length > 0) { + const [nodeId, indent, justBranched, showConnector, isLast, gutters, isVirtualRootChild] = stack.pop(); + + const flatNode = filteredNodeMap.get(nodeId); + if (!flatNode) continue; + + // Update this node's visual properties + flatNode.indent = indent; + flatNode.showConnector = showConnector; + flatNode.isLast = isLast; + flatNode.gutters = gutters; + flatNode.isVirtualRootChild = isVirtualRootChild; + flatNode.multipleRoots = multipleRoots; + + // Get visible children of this node + const children = visibleChildren.get(nodeId) || []; + const multipleChildren = children.length > 1; + + // Calculate child indent using same rules as flattenTree(): + // - Parent branches (multiple children): children get +1 + // - Just branched and indent > 0: children get +1 for visual grouping + // - Single-child chain: stay flat + let childIndent; + if (multipleChildren) { + childIndent = indent + 1; + } else if (justBranched && indent > 0) { + childIndent = indent + 1; + } else { + childIndent = indent; + } + + // Build gutters for children (same logic as flattenTree) + const connectorDisplayed = showConnector && !isVirtualRootChild; + const currentDisplayIndent = multipleRoots ? Math.max(0, indent - 1) : indent; + const connectorPosition = Math.max(0, currentDisplayIndent - 1); + const childGutters = connectorDisplayed + ? 
[...gutters, { position: connectorPosition, show: !isLast }] + : gutters; + + // Add children in reverse order (to process in forward order via stack) + for (let i = children.length - 1; i >= 0; i--) { + const childIsLast = i === children.length - 1; + stack.push([ + children[i], + childIndent, + multipleChildren, + multipleChildren, + childIsLast, + childGutters, + false + ]); + } + } + } + + // ============================================================ + // TREE DISPLAY TEXT (pure data -> string) + // ============================================================ + + function shortenPath(p) { + if (typeof p !== 'string') return ''; + if (p.startsWith('/Users/')) { + const parts = p.split('/'); + if (parts.length > 2) return '~' + p.slice(('/Users/' + parts[2]).length); + } + if (p.startsWith('/home/')) { + const parts = p.split('/'); + if (parts.length > 2) return '~' + p.slice(('/home/' + parts[2]).length); + } + return p; + } + + function formatToolCall(name, args) { + switch (name) { + case 'read': { + const path = shortenPath(String(args.path || args.file_path || '')); + const offset = args.offset; + const limit = args.limit; + let display = path; + if (offset !== undefined || limit !== undefined) { + const start = offset ?? 1; + const end = limit !== undefined ? start + limit - 1 : ''; + display += `:${start}${end ? `-${end}` : ''}`; + } + return `[read: ${display}]`; + } + case 'write': + return `[write: ${shortenPath(String(args.path || args.file_path || ''))}]`; + case 'edit': + return `[edit: ${shortenPath(String(args.path || args.file_path || ''))}]`; + case 'bash': { + const rawCmd = String(args.command || ''); + const cmd = rawCmd.replace(/[\n\t]/g, ' ').trim().slice(0, 50); + return `[bash: ${cmd}${rawCmd.length > 50 ? '...' 
: ''}]`; + } + case 'grep': + return `[grep: /${args.pattern || ''}/ in ${shortenPath(String(args.path || '.'))}]`; + case 'find': + return `[find: ${args.pattern || ''} in ${shortenPath(String(args.path || '.'))}]`; + case 'ls': + return `[ls: ${shortenPath(String(args.path || '.'))}]`; + default: { + const argsStr = JSON.stringify(args).slice(0, 40); + return `[${name}: ${argsStr}${JSON.stringify(args).length > 40 ? '...' : ''}]`; + } + } + } + + function escapeHtml(text) { + const div = document.createElement('div'); + div.textContent = text; + return div.innerHTML; + } + + /** + * Truncate string to maxLen chars, append "..." if truncated. + */ + function truncate(s, maxLen = 100) { + if (s.length <= maxLen) return s; + return s.slice(0, maxLen) + '...'; + } + + /** + * Get display text for tree node (returns HTML string). + */ + function getTreeNodeDisplayHtml(entry, label) { + const normalize = s => s.replace(/[\n\t]/g, ' ').trim(); + const labelHtml = label ? `[${escapeHtml(label)}] ` : ''; + + switch (entry.type) { + case 'message': { + const msg = entry.message; + if (msg.role === 'user') { + const content = truncate(normalize(extractContent(msg.content))); + return labelHtml + `user: ${escapeHtml(content)}`; + } + if (msg.role === 'assistant') { + const textContent = truncate(normalize(extractContent(msg.content))); + if (textContent) { + return labelHtml + `assistant: ${escapeHtml(textContent)}`; + } + if (msg.stopReason === 'aborted') { + return labelHtml + `assistant: (aborted)`; + } + if (msg.errorMessage) { + return labelHtml + `assistant: ${escapeHtml(truncate(msg.errorMessage))}`; + } + return labelHtml + `assistant: (no text)`; + } + if (msg.role === 'toolResult') { + const toolCall = msg.toolCallId ? 
toolCallMap.get(msg.toolCallId) : null; + if (toolCall) { + return labelHtml + `${escapeHtml(formatToolCall(toolCall.name, toolCall.arguments))}`; + } + return labelHtml + `[${msg.toolName || 'tool'}]`; + } + if (msg.role === 'bashExecution') { + const cmd = truncate(normalize(msg.command || '')); + return labelHtml + `[bash]: ${escapeHtml(cmd)}`; + } + return labelHtml + `[${msg.role}]`; + } + case 'compaction': + return labelHtml + `[compaction: ${Math.round(entry.tokensBefore/1000)}k tokens]`; + case 'branch_summary': { + const summary = truncate(normalize(entry.summary || '')); + return labelHtml + `[branch summary]: ${escapeHtml(summary)}`; + } + case 'custom_message': { + const content = typeof entry.content === 'string' ? entry.content : extractContent(entry.content); + return labelHtml + `[${escapeHtml(entry.customType)}]: ${escapeHtml(truncate(normalize(content)))}`; + } + case 'model_change': + return labelHtml + `[model: ${entry.modelId}]`; + case 'thinking_level_change': + return labelHtml + `[thinking: ${entry.thinkingLevel}]`; + default: + return labelHtml + `[${entry.type}]`; + } + } + + // ============================================================ + // TREE RENDERING (DOM manipulation) + // ============================================================ + + let currentLeafId = leafId; + let currentTargetId = urlTargetId || leafId; + let treeRendered = false; + + function renderTree() { + const tree = buildTree(); + const activePathIds = buildActivePathIds(currentLeafId); + const flatNodes = flattenTree(tree, activePathIds); + const filtered = filterNodes(flatNodes, currentLeafId); + const container = document.getElementById('tree-container'); + + // Full render only on first call or when filter/search changes + if (!treeRendered) { + container.innerHTML = ''; + + for (const flatNode of filtered) { + const entry = flatNode.node.entry; + const isOnPath = activePathIds.has(entry.id); + const isTarget = entry.id === currentTargetId; + + const div = 
document.createElement('div'); + div.className = 'tree-node'; + if (isOnPath) div.classList.add('in-path'); + if (isTarget) div.classList.add('active'); + div.dataset.id = entry.id; + + const prefix = buildTreePrefix(flatNode); + const prefixSpan = document.createElement('span'); + prefixSpan.className = 'tree-prefix'; + prefixSpan.textContent = prefix; + + const marker = document.createElement('span'); + marker.className = 'tree-marker'; + marker.textContent = isOnPath ? '•' : ' '; + + const content = document.createElement('span'); + content.className = 'tree-content'; + content.innerHTML = getTreeNodeDisplayHtml(entry, flatNode.node.label); + + div.appendChild(prefixSpan); + div.appendChild(marker); + div.appendChild(content); + // Navigate to the newest leaf through this node, but scroll to the clicked node + div.addEventListener('click', () => { + const leafId = findNewestLeaf(entry.id); + navigateTo(leafId, 'target', entry.id); + }); + + container.appendChild(div); + } + + treeRendered = true; + } else { + // Just update markers and classes + const nodes = container.querySelectorAll('.tree-node'); + for (const node of nodes) { + const id = node.dataset.id; + const isOnPath = activePathIds.has(id); + const isTarget = id === currentTargetId; + + node.classList.toggle('in-path', isOnPath); + node.classList.toggle('active', isTarget); + + const marker = node.querySelector('.tree-marker'); + if (marker) { + marker.textContent = isOnPath ? 
'•' : ' '; + } + } + } + + document.getElementById('tree-status').textContent = `${filtered.length} / ${flatNodes.length} entries`; + + // Scroll active node into view after layout + setTimeout(() => { + const activeNode = container.querySelector('.tree-node.active'); + if (activeNode) { + activeNode.scrollIntoView({ block: 'nearest' }); + } + }, 0); + } + + function forceTreeRerender() { + treeRendered = false; + renderTree(); + } + + // ============================================================ + // MESSAGE RENDERING + // ============================================================ + + function formatTokens(count) { + if (count < 1000) return count.toString(); + if (count < 10000) return (count / 1000).toFixed(1) + 'k'; + if (count < 1000000) return Math.round(count / 1000) + 'k'; + return (count / 1000000).toFixed(1) + 'M'; + } + + function formatTimestamp(ts) { + if (!ts) return ''; + const date = new Date(ts); + return date.toLocaleTimeString(undefined, { hour: '2-digit', minute: '2-digit', second: '2-digit' }); + } + + function replaceTabs(text) { + return text.replace(/\t/g, ' '); + } + + /** Safely coerce value to string for display. Returns null if invalid type. 
*/ + function str(value) { + if (typeof value === 'string') return value; + if (value == null) return ''; + return null; + } + + function getLanguageFromPath(filePath) { + const ext = filePath.split('.').pop()?.toLowerCase(); + const extToLang = { + ts: 'typescript', tsx: 'typescript', js: 'javascript', jsx: 'javascript', + py: 'python', rb: 'ruby', rs: 'rust', go: 'go', java: 'java', + c: 'c', cpp: 'cpp', h: 'c', hpp: 'cpp', cs: 'csharp', + php: 'php', sh: 'bash', bash: 'bash', zsh: 'bash', + sql: 'sql', html: 'html', css: 'css', scss: 'scss', + json: 'json', yaml: 'yaml', yml: 'yaml', xml: 'xml', + md: 'markdown', dockerfile: 'dockerfile' + }; + return extToLang[ext]; + } + + function findToolResult(toolCallId) { + for (const entry of entries) { + if (entry.type === 'message' && entry.message.role === 'toolResult') { + if (entry.message.toolCallId === toolCallId) { + return entry.message; + } + } + } + return null; + } + + function formatExpandableOutput(text, maxLines, lang) { + text = replaceTabs(text); + const lines = text.split('\n'); + const displayLines = lines.slice(0, maxLines); + const remaining = lines.length - maxLines; + + if (lang) { + let highlighted; + try { + highlighted = hljs.highlight(text, { language: lang }).value; + } catch { + highlighted = escapeHtml(text); + } + + if (remaining > 0) { + const previewCode = displayLines.join('\n'); + let previewHighlighted; + try { + previewHighlighted = hljs.highlight(previewCode, { language: lang }).value; + } catch { + previewHighlighted = escapeHtml(previewCode); + } + + return ``; + } + + return `
${highlighted}
`; + } + + // Plain text output + if (remaining > 0) { + let out = ''; + return out; + } + + let out = '
'; + for (const line of displayLines) { + out += `
${escapeHtml(replaceTabs(line))}
`; + } + out += '
'; + return out; + } + + function renderToolCall(call) { + const result = findToolResult(call.id); + const isError = result?.isError || false; + const statusClass = result ? (isError ? 'error' : 'success') : 'pending'; + + const getResultText = () => { + if (!result) return ''; + const textBlocks = result.content.filter(c => c.type === 'text'); + return textBlocks.map(c => c.text).join('\n'); + }; + + const getResultImages = () => { + if (!result) return []; + return result.content.filter(c => c.type === 'image'); + }; + + const renderResultImages = () => { + const images = getResultImages(); + if (images.length === 0) return ''; + return '
' + + images.map(img => ``).join('') + + '
'; + }; + + let html = `
`; + const args = call.arguments || {}; + const name = call.name; + + const invalidArg = '[invalid arg]'; + + switch (name) { + case 'bash': { + const command = str(args.command); + const cmdDisplay = command === null ? invalidArg : escapeHtml(command || '...'); + html += `
$ ${cmdDisplay}
`; + if (result) { + const output = getResultText().trim(); + if (output) html += formatExpandableOutput(output, 5); + } + break; + } + case 'read': { + const filePath = str(args.file_path ?? args.path); + const offset = args.offset; + const limit = args.limit; + + let pathHtml = filePath === null ? invalidArg : escapeHtml(shortenPath(filePath || '')); + if (filePath !== null && (offset !== undefined || limit !== undefined)) { + const startLine = offset ?? 1; + const endLine = limit !== undefined ? startLine + limit - 1 : ''; + pathHtml += `:${startLine}${endLine ? '-' + endLine : ''}`; + } + + html += `
read ${pathHtml}
`; + if (result) { + html += renderResultImages(); + const output = getResultText(); + const lang = filePath ? getLanguageFromPath(filePath) : null; + if (output) html += formatExpandableOutput(output, 10, lang); + } + break; + } + case 'write': { + const filePath = str(args.file_path ?? args.path); + const content = str(args.content); + + html += `
write ${filePath === null ? invalidArg : escapeHtml(shortenPath(filePath || ''))}`; + if (content !== null && content) { + const lines = content.split('\n'); + if (lines.length > 10) html += ` (${lines.length} lines)`; + } + html += '
'; + + if (content === null) { + html += `
[invalid content arg - expected string]
`; + } else if (content) { + const lang = filePath ? getLanguageFromPath(filePath) : null; + html += formatExpandableOutput(content, 10, lang); + } + if (result) { + const output = getResultText().trim(); + if (output) html += `
${escapeHtml(output)}
`; + } + break; + } + case 'edit': { + const filePath = str(args.file_path ?? args.path); + html += `
edit ${filePath === null ? invalidArg : escapeHtml(shortenPath(filePath || ''))}
`; + + if (result?.details?.diff) { + const diffLines = result.details.diff.split('\n'); + html += '
'; + for (const line of diffLines) { + const cls = line.match(/^\+/) ? 'diff-added' : line.match(/^-/) ? 'diff-removed' : 'diff-context'; + html += `
${escapeHtml(replaceTabs(line))}
`; + } + html += '
'; + } else if (result) { + const output = getResultText().trim(); + if (output) html += `
${escapeHtml(output)}
`; + } + break; + } + default: { + // Check for pre-rendered custom tool HTML + const rendered = renderedTools?.[call.id]; + if (rendered?.callHtml || rendered?.resultHtmlCollapsed || rendered?.resultHtmlExpanded) { + // Custom tool with pre-rendered HTML from TUI renderer + if (rendered.callHtml) { + html += `
${rendered.callHtml}
`; + } else { + html += `
${escapeHtml(name)}
`; + } + + if (rendered.resultHtmlCollapsed && rendered.resultHtmlExpanded && rendered.resultHtmlCollapsed !== rendered.resultHtmlExpanded) { + // Both collapsed and expanded differ - render expandable section + html += ``; + } else if (rendered.resultHtmlExpanded) { + // Only expanded exists (or collapsed is identical) - show directly + html += `
${rendered.resultHtmlExpanded}
`; + } else if (result) { + // No pre-rendered result HTML - fallback to JSON + const output = getResultText(); + if (output) html += formatExpandableOutput(output, 10); + } + } else { + // Fallback to JSON display (existing behavior) + html += `
${escapeHtml(name)}
`; + html += `
${escapeHtml(JSON.stringify(args, null, 2))}
`; + if (result) { + const output = getResultText(); + if (output) html += formatExpandableOutput(output, 10); + } + } + } + } + + html += '
'; + return html; + } + + /** + * Download the session data as a JSONL file. + * Reconstructs the original format: header line + entry lines. + */ + window.downloadSessionJson = function() { + // Build JSONL content: header first, then all entries + const lines = []; + if (header) { + lines.push(JSON.stringify({ type: 'header', ...header })); + } + for (const entry of entries) { + lines.push(JSON.stringify(entry)); + } + const jsonlContent = lines.join('\n'); + + // Create download + const blob = new Blob([jsonlContent], { type: 'application/x-ndjson' }); + const url = URL.createObjectURL(blob); + const a = document.createElement('a'); + a.href = url; + a.download = `${header?.id || 'session'}.jsonl`; + document.body.appendChild(a); + a.click(); + document.body.removeChild(a); + URL.revokeObjectURL(url); + } + + /** + * Build a shareable URL for a specific message. + * URL format: base?gistId&leafId=&targetId= + */ + function buildShareUrl(entryId) { + // Check for injected base URL (used when loaded in iframe via srcdoc) + const baseUrlMeta = document.querySelector('meta[name="pi-share-base-url"]'); + const baseUrl = baseUrlMeta ? baseUrlMeta.content : window.location.href.split('?')[0]; + + const url = new URL(window.location.href); + // Find the gist ID (first query param without value, e.g., ?abc123) + const gistId = Array.from(url.searchParams.keys()).find(k => !url.searchParams.get(k)); + + // Build the share URL + const params = new URLSearchParams(); + params.set('leafId', currentLeafId); + params.set('targetId', entryId); + + // If we have an injected base URL (iframe context), use it directly + if (baseUrlMeta) { + return `${baseUrl}&${params.toString()}`; + } + + // Otherwise build from current location (direct file access) + url.search = gistId ? `?${gistId}&${params.toString()}` : `?${params.toString()}`; + return url.toString(); + } + + /** + * Copy text to clipboard with visual feedback. 
+ * Uses navigator.clipboard with fallback to execCommand for HTTP contexts. + */ + async function copyToClipboard(text, button) { + let success = false; + try { + if (navigator.clipboard && navigator.clipboard.writeText) { + await navigator.clipboard.writeText(text); + success = true; + } + } catch (err) { + // Clipboard API failed, try fallback + } + + // Fallback for HTTP or when Clipboard API is unavailable + if (!success) { + try { + const textarea = document.createElement('textarea'); + textarea.value = text; + textarea.style.position = 'fixed'; + textarea.style.opacity = '0'; + document.body.appendChild(textarea); + textarea.select(); + success = document.execCommand('copy'); + document.body.removeChild(textarea); + } catch (err) { + console.error('Failed to copy:', err); + } + } + + if (success && button) { + const originalHtml = button.innerHTML; + button.innerHTML = '✓'; + button.classList.add('copied'); + setTimeout(() => { + button.innerHTML = originalHtml; + button.classList.remove('copied'); + }, 1500); + } + } + + /** + * Render the copy-link button HTML for a message. + */ + function renderCopyLinkButton(entryId) { + return ``; + } + + function renderEntry(entry) { + const ts = formatTimestamp(entry.timestamp); + const tsHtml = ts ? `
${ts}
` : ''; + const entryId = `entry-${entry.id}`; + const copyBtnHtml = renderCopyLinkButton(entry.id); + + if (entry.type === 'message') { + const msg = entry.message; + + if (msg.role === 'user') { + let html = `
${copyBtnHtml}${tsHtml}`; + const content = msg.content; + + if (Array.isArray(content)) { + const images = content.filter(c => c.type === 'image'); + if (images.length > 0) { + html += '
'; + for (const img of images) { + html += ``; + } + html += '
'; + } + } + + const text = typeof content === 'string' ? content : + content.filter(c => c.type === 'text').map(c => c.text).join('\n'); + if (text.trim()) { + html += `
${safeMarkedParse(text)}
`; + } + html += '
'; + return html; + } + + if (msg.role === 'assistant') { + let html = `
${copyBtnHtml}${tsHtml}`; + + for (const block of msg.content) { + if (block.type === 'text' && block.text.trim()) { + html += `
${safeMarkedParse(block.text)}
`; + } else if (block.type === 'thinking' && block.thinking.trim()) { + html += `
+
${escapeHtml(block.thinking)}
+
Thinking ...
+
`; + } + } + + for (const block of msg.content) { + if (block.type === 'toolCall') { + html += renderToolCall(block); + } + } + + if (msg.stopReason === 'aborted') { + html += '
Aborted
'; + } else if (msg.stopReason === 'error') { + html += `
Error: ${escapeHtml(msg.errorMessage || 'Unknown error')}
`; + } + + html += '
'; + return html; + } + + if (msg.role === 'bashExecution') { + const isError = msg.cancelled || (msg.exitCode !== 0 && msg.exitCode !== null); + let html = `
${tsHtml}`; + html += `
$ ${escapeHtml(msg.command)}
`; + if (msg.output) html += formatExpandableOutput(msg.output, 10); + if (msg.cancelled) { + html += '
(cancelled)
'; + } else if (msg.exitCode !== 0 && msg.exitCode !== null) { + html += `
(exit ${msg.exitCode})
`; + } + html += '
'; + return html; + } + + if (msg.role === 'toolResult') return ''; + } + + if (entry.type === 'model_change') { + return `
${tsHtml}Switched to model: ${escapeHtml(entry.provider)}/${escapeHtml(entry.modelId)}
`; + } + + if (entry.type === 'compaction') { + return `
+
[compaction]
+
Compacted from ${entry.tokensBefore.toLocaleString()} tokens
+
Compacted from ${entry.tokensBefore.toLocaleString()} tokens\n\n${escapeHtml(entry.summary)}
+
`; + } + + if (entry.type === 'branch_summary') { + return `
${tsHtml} +
Branch Summary
+
${safeMarkedParse(entry.summary)}
+
`; + } + + if (entry.type === 'custom_message' && entry.display) { + return `
${tsHtml} +
[${escapeHtml(entry.customType)}]
+
${safeMarkedParse(typeof entry.content === 'string' ? entry.content : JSON.stringify(entry.content))}
+
`; + } + + return ''; + } + + // ============================================================ + // HEADER / STATS + // ============================================================ + + function computeStats(entryList) { + let userMessages = 0, assistantMessages = 0, toolResults = 0; + let customMessages = 0, compactions = 0, branchSummaries = 0, toolCalls = 0; + const tokens = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; + const cost = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; + const models = new Set(); + + for (const entry of entryList) { + if (entry.type === 'message') { + const msg = entry.message; + if (msg.role === 'user') userMessages++; + if (msg.role === 'assistant') { + assistantMessages++; + if (msg.model) models.add(msg.provider ? `${msg.provider}/${msg.model}` : msg.model); + if (msg.usage) { + tokens.input += msg.usage.input || 0; + tokens.output += msg.usage.output || 0; + tokens.cacheRead += msg.usage.cacheRead || 0; + tokens.cacheWrite += msg.usage.cacheWrite || 0; + if (msg.usage.cost) { + cost.input += msg.usage.cost.input || 0; + cost.output += msg.usage.cost.output || 0; + cost.cacheRead += msg.usage.cost.cacheRead || 0; + cost.cacheWrite += msg.usage.cost.cacheWrite || 0; + } + } + toolCalls += msg.content.filter(c => c.type === 'toolCall').length; + } + if (msg.role === 'toolResult') toolResults++; + } else if (entry.type === 'compaction') { + compactions++; + } else if (entry.type === 'branch_summary') { + branchSummaries++; + } else if (entry.type === 'custom_message') { + customMessages++; + } + } + + return { userMessages, assistantMessages, toolResults, customMessages, compactions, branchSummaries, toolCalls, tokens, cost, models: Array.from(models) }; + } + + const globalStats = computeStats(entries); + + function renderHeader() { + const totalCost = globalStats.cost.input + globalStats.cost.output + globalStats.cost.cacheRead + globalStats.cost.cacheWrite; + + const tokenParts = []; + if (globalStats.tokens.input) 
tokenParts.push(`↑${formatTokens(globalStats.tokens.input)}`); + if (globalStats.tokens.output) tokenParts.push(`↓${formatTokens(globalStats.tokens.output)}`); + if (globalStats.tokens.cacheRead) tokenParts.push(`R${formatTokens(globalStats.tokens.cacheRead)}`); + if (globalStats.tokens.cacheWrite) tokenParts.push(`W${formatTokens(globalStats.tokens.cacheWrite)}`); + + const msgParts = []; + if (globalStats.userMessages) msgParts.push(`${globalStats.userMessages} user`); + if (globalStats.assistantMessages) msgParts.push(`${globalStats.assistantMessages} assistant`); + if (globalStats.toolResults) msgParts.push(`${globalStats.toolResults} tool results`); + if (globalStats.customMessages) msgParts.push(`${globalStats.customMessages} custom`); + if (globalStats.compactions) msgParts.push(`${globalStats.compactions} compactions`); + if (globalStats.branchSummaries) msgParts.push(`${globalStats.branchSummaries} branch summaries`); + + let html = ` +
+

Session: ${escapeHtml(header?.id || 'unknown')}

+
+ Ctrl+T toggle thinking · Ctrl+O toggle tools + +
+
+
Date:${header?.timestamp ? new Date(header.timestamp).toLocaleString() : 'unknown'}
+
Models:${globalStats.models.join(', ') || 'unknown'}
+
Messages:${msgParts.join(', ') || '0'}
+
Tool Calls:${globalStats.toolCalls}
+
Tokens:${tokenParts.join(' ') || '0'}
+
Cost:$${totalCost.toFixed(3)}
+
+
`; + + // Render system prompt (user's base prompt, applies to all providers) + if (systemPrompt) { + const lines = systemPrompt.split('\n'); + const previewLines = 10; + if (lines.length > previewLines) { + const preview = lines.slice(0, previewLines).join('\n'); + const remaining = lines.length - previewLines; + html += ``; + } else { + html += `
+
System Prompt
+
${escapeHtml(systemPrompt)}
+
`; + } + } + + if (tools && tools.length > 0) { + html += `
+
Available Tools
+
+ ${tools.map(t => { + const hasParams = t.parameters && typeof t.parameters === 'object' && t.parameters.properties && Object.keys(t.parameters.properties).length > 0; + if (!hasParams) { + return `
${escapeHtml(t.name)} - ${escapeHtml(t.description)}
`; + } + const params = t.parameters; + const properties = params.properties; + const required = params.required || []; + let paramsHtml = ''; + for (const [name, prop] of Object.entries(properties)) { + const isRequired = required.includes(name); + const typeStr = prop.type || 'any'; + const reqLabel = isRequired ? 'required' : 'optional'; + paramsHtml += `
${escapeHtml(name)} ${escapeHtml(typeStr)} ${reqLabel}`; + if (prop.description) { + paramsHtml += `
${escapeHtml(prop.description)}
`; + } + paramsHtml += `
`; + } + return `
${escapeHtml(t.name)} - ${escapeHtml(t.description)}
${paramsHtml}
`; + }).join('')} +
+
`; + } + + return html; + } + + // ============================================================ + // NAVIGATION + // ============================================================ + + // Cache for rendered entry DOM nodes + const entryCache = new Map(); + + function renderEntryToNode(entry) { + // Check cache first + if (entryCache.has(entry.id)) { + return entryCache.get(entry.id).cloneNode(true); + } + + // Render to HTML string, then parse to node + const html = renderEntry(entry); + if (!html) return null; + + const template = document.createElement('template'); + template.innerHTML = html; + const node = template.content.firstElementChild; + + // Cache the node + if (node) { + entryCache.set(entry.id, node.cloneNode(true)); + } + return node; + } + + function navigateTo(targetId, scrollMode = 'target', scrollToEntryId = null) { + currentLeafId = targetId; + currentTargetId = scrollToEntryId || targetId; + const path = getPath(targetId); + + renderTree(); + + document.getElementById('header-container').innerHTML = renderHeader(); + + // Build messages using cached DOM nodes + const messagesEl = document.getElementById('messages'); + const fragment = document.createDocumentFragment(); + + for (const entry of path) { + const node = renderEntryToNode(entry); + if (node) { + fragment.appendChild(node); + } + } + + messagesEl.innerHTML = ''; + messagesEl.appendChild(fragment); + + // Attach click handlers for copy-link buttons + messagesEl.querySelectorAll('.copy-link-btn').forEach(btn => { + btn.addEventListener('click', (e) => { + e.stopPropagation(); + const entryId = btn.dataset.entryId; + const shareUrl = buildShareUrl(entryId); + copyToClipboard(shareUrl, btn); + }); + }); + + // Use setTimeout(0) to ensure DOM is fully laid out before scrolling + setTimeout(() => { + const content = document.getElementById('content'); + if (scrollMode === 'bottom') { + content.scrollTop = content.scrollHeight; + } else if (scrollMode === 'target') { + // If scrollToEntryId is 
provided, scroll to that specific entry + const scrollTargetId = scrollToEntryId || targetId; + const targetEl = document.getElementById(`entry-${scrollTargetId}`); + if (targetEl) { + targetEl.scrollIntoView({ block: 'center' }); + // Briefly highlight the target message + if (scrollToEntryId) { + targetEl.classList.add('highlight'); + setTimeout(() => targetEl.classList.remove('highlight'), 2000); + } + } + } + }, 0); + } + + // ============================================================ + // INITIALIZATION + // ============================================================ + + // Escape HTML tags in text (but not code blocks) + function escapeHtmlTags(text) { + return text.replace(/<(?=[a-zA-Z\/])/g, '<'); + } + + // Configure marked with syntax highlighting and HTML escaping for text + marked.use({ + breaks: true, + gfm: true, + renderer: { + // Code blocks: syntax highlight, no HTML escaping + code(token) { + const code = token.text; + const lang = token.lang; + let highlighted; + if (lang && hljs.getLanguage(lang)) { + try { + highlighted = hljs.highlight(code, { language: lang }).value; + } catch { + highlighted = escapeHtml(code); + } + } else { + // Auto-detect language if not specified + try { + highlighted = hljs.highlightAuto(code).value; + } catch { + highlighted = escapeHtml(code); + } + } + return `
${highlighted}
`; + }, + // Text content: escape HTML tags + text(token) { + return escapeHtmlTags(escapeHtml(token.text)); + }, + // Inline code: escape HTML + codespan(token) { + return `${escapeHtml(token.text)}`; + } + } + }); + + // Simple marked parse (escaping handled in renderers) + function safeMarkedParse(text) { + return marked.parse(text); + } + + // Search input + const searchInput = document.getElementById('tree-search'); + searchInput.addEventListener('input', (e) => { + searchQuery = e.target.value; + forceTreeRerender(); + }); + + // Filter buttons + document.querySelectorAll('.filter-btn').forEach(btn => { + btn.addEventListener('click', () => { + document.querySelectorAll('.filter-btn').forEach(b => b.classList.remove('active')); + btn.classList.add('active'); + filterMode = btn.dataset.filter; + forceTreeRerender(); + }); + }); + + // Sidebar toggle + const sidebar = document.getElementById('sidebar'); + const overlay = document.getElementById('sidebar-overlay'); + const hamburger = document.getElementById('hamburger'); + + hamburger.addEventListener('click', () => { + sidebar.classList.add('open'); + overlay.classList.add('open'); + hamburger.style.display = 'none'; + }); + + const closeSidebar = () => { + sidebar.classList.remove('open'); + overlay.classList.remove('open'); + hamburger.style.display = ''; + }; + + overlay.addEventListener('click', closeSidebar); + document.getElementById('sidebar-close').addEventListener('click', closeSidebar); + + // Toggle states + let thinkingExpanded = true; + let toolOutputsExpanded = false; + + const toggleThinking = () => { + thinkingExpanded = !thinkingExpanded; + document.querySelectorAll('.thinking-text').forEach(el => { + el.style.display = thinkingExpanded ? '' : 'none'; + }); + document.querySelectorAll('.thinking-collapsed').forEach(el => { + el.style.display = thinkingExpanded ? 
'none' : 'block'; + }); + }; + + const toggleToolOutputs = () => { + toolOutputsExpanded = !toolOutputsExpanded; + document.querySelectorAll('.tool-output.expandable').forEach(el => { + el.classList.toggle('expanded', toolOutputsExpanded); + }); + document.querySelectorAll('.compaction').forEach(el => { + el.classList.toggle('expanded', toolOutputsExpanded); + }); + }; + + // Keyboard shortcuts + document.addEventListener('keydown', (e) => { + if (e.key === 'Escape') { + searchInput.value = ''; + searchQuery = ''; + navigateTo(leafId, 'bottom'); + } + if (e.ctrlKey && e.key === 't') { + e.preventDefault(); + toggleThinking(); + } + if (e.ctrlKey && e.key === 'o') { + e.preventDefault(); + toggleToolOutputs(); + } + }); + + // Initial render + // If URL has targetId, scroll to that specific message; otherwise stay at top + if (leafId) { + if (urlTargetId && byId.has(urlTargetId)) { + // Deep link: navigate to leaf and scroll to target message + navigateTo(leafId, 'target', urlTargetId); + } else { + navigateTo(leafId, 'none'); + } + } else if (entries.length > 0) { + // Fallback: use last entry if no leafId + navigateTo(entries[entries.length - 1].id, 'none'); + } + })(); diff --git a/packages/pi-coding-agent/src/core/export-html/tool-renderer.ts b/packages/pi-coding-agent/src/core/export-html/tool-renderer.ts new file mode 100644 index 000000000..c4b4fc099 --- /dev/null +++ b/packages/pi-coding-agent/src/core/export-html/tool-renderer.ts @@ -0,0 +1,114 @@ +/** + * Tool HTML renderer for custom tools in HTML export. + * + * Renders custom tool calls and results to HTML by invoking their TUI renderers + * and converting the ANSI output to HTML. 
+ */ + +import type { ImageContent, TextContent } from "@gsd/pi-ai"; +import type { Theme } from "../../modes/interactive/theme/theme.js"; +import type { ToolDefinition } from "../extensions/types.js"; +import { ansiLinesToHtml } from "./ansi-to-html.js"; + +export interface ToolHtmlRendererDeps { + /** Function to look up tool definition by name */ + getToolDefinition: (name: string) => ToolDefinition | undefined; + /** Theme for styling */ + theme: Theme; + /** Terminal width for rendering (default: 100) */ + width?: number; +} + +export interface ToolHtmlRenderer { + /** Render a tool call to HTML. Returns undefined if tool has no custom renderer. */ + renderCall(toolName: string, args: unknown): string | undefined; + /** Render a tool result to collapsed/expanded HTML. Returns undefined if tool has no custom renderer. */ + renderResult( + toolName: string, + result: Array<{ type: string; text?: string; data?: string; mimeType?: string }>, + details: unknown, + isError: boolean, + ): { collapsed?: string; expanded?: string } | undefined; +} + +/** + * Create a tool HTML renderer. + * + * The renderer looks up tool definitions and invokes their renderCall/renderResult + * methods, converting the resulting TUI Component output (ANSI) to HTML. 
+ */ +export function createToolHtmlRenderer(deps: ToolHtmlRendererDeps): ToolHtmlRenderer { + const { getToolDefinition, theme, width = 100 } = deps; + + return { + renderCall(toolName: string, args: unknown): string | undefined { + try { + const toolDef = getToolDefinition(toolName); + if (!toolDef?.renderCall) { + return undefined; + } + + const component = toolDef.renderCall(args, theme); + if (!component) { + return undefined; + } + const lines = component.render(width); + return ansiLinesToHtml(lines); + } catch { + // On error, return undefined to trigger JSON fallback + return undefined; + } + }, + + renderResult( + toolName: string, + result: Array<{ type: string; text?: string; data?: string; mimeType?: string }>, + details: unknown, + isError: boolean, + ): { collapsed?: string; expanded?: string } | undefined { + try { + const toolDef = getToolDefinition(toolName); + if (!toolDef?.renderResult) { + return undefined; + } + + // Build AgentToolResult from content array + // Cast content since session storage uses generic object types + const agentToolResult = { + content: result as (TextContent | ImageContent)[], + details, + isError, + }; + + // Render collapsed + const collapsedComponent = toolDef.renderResult( + agentToolResult, + { expanded: false, isPartial: false }, + theme, + ); + const collapsed = collapsedComponent ? ansiLinesToHtml(collapsedComponent.render(width)) : undefined; + + // Render expanded + const expandedComponent = toolDef.renderResult( + agentToolResult, + { expanded: true, isPartial: false }, + theme, + ); + const expanded = expandedComponent ? ansiLinesToHtml(expandedComponent.render(width)) : undefined; + + // Return collapsed only if it exists and differs from expanded + if (!expanded) { + return undefined; + } + + return { + ...(collapsed && collapsed !== expanded ? 
{ collapsed } : {}), + expanded, + }; + } catch { + // On error, return undefined to trigger JSON fallback + return undefined; + } + }, + }; +} diff --git a/packages/pi-coding-agent/src/core/extensions/index.ts b/packages/pi-coding-agent/src/core/extensions/index.ts new file mode 100644 index 000000000..39b4a66e4 --- /dev/null +++ b/packages/pi-coding-agent/src/core/extensions/index.ts @@ -0,0 +1,171 @@ +/** + * Extension system for lifecycle events and custom tools. + */ + +export type { SlashCommandInfo, SlashCommandLocation, SlashCommandSource } from "../slash-commands.js"; +export { + createExtensionRuntime, + discoverAndLoadExtensions, + loadExtensionFromFactory, + loadExtensions, +} from "./loader.js"; +export type { + ExtensionErrorListener, + ForkHandler, + NavigateTreeHandler, + NewSessionHandler, + ShutdownHandler, + SwitchSessionHandler, +} from "./runner.js"; +export { ExtensionRunner } from "./runner.js"; +export type { + AgentEndEvent, + AgentStartEvent, + // Re-exports + AgentToolResult, + AgentToolUpdateCallback, + // App keybindings (for custom editors) + AppAction, + AppendEntryHandler, + // Events - Tool (ToolCallEvent types) + BashToolCallEvent, + BashToolResultEvent, + BeforeAgentStartEvent, + BeforeAgentStartEventResult, + BeforeProviderRequestEvent, + BeforeProviderRequestEventResult, + // Context + CompactOptions, + // Events - Agent + ContextEvent, + // Event Results + ContextEventResult, + ContextUsage, + CustomToolCallEvent, + CustomToolResultEvent, + EditToolCallEvent, + EditToolResultEvent, + ExecOptions, + ExecResult, + Extension, + ExtensionActions, + // API + ExtensionAPI, + ExtensionCommandContext, + ExtensionCommandContextActions, + ExtensionContext, + ExtensionContextActions, + // Errors + ExtensionError, + ExtensionEvent, + ExtensionFactory, + ExtensionFlag, + ExtensionHandler, + // Runtime + ExtensionRuntime, + ExtensionShortcut, + ExtensionUIContext, + ExtensionUIDialogOptions, + ExtensionWidgetOptions, + FindToolCallEvent, + 
FindToolResultEvent, + GetActiveToolsHandler, + GetAllToolsHandler, + GetCommandsHandler, + GetThinkingLevelHandler, + GrepToolCallEvent, + GrepToolResultEvent, + // Events - Input + InputEvent, + InputEventResult, + InputSource, + KeybindingsManager, + LoadExtensionsResult, + LsToolCallEvent, + LsToolResultEvent, + // Events - Message + MessageEndEvent, + // Message Rendering + MessageRenderer, + MessageRenderOptions, + MessageStartEvent, + MessageUpdateEvent, + ModelSelectEvent, + ModelSelectSource, + // Provider Registration + ProviderConfig, + ProviderModelConfig, + ReadToolCallEvent, + ReadToolResultEvent, + // Commands + RegisteredCommand, + RegisteredTool, + // Events - Resources + ResourcesDiscoverEvent, + ResourcesDiscoverResult, + SendMessageHandler, + SendUserMessageHandler, + SessionBeforeCompactEvent, + SessionBeforeCompactResult, + SessionBeforeForkEvent, + SessionBeforeForkResult, + SessionBeforeSwitchEvent, + SessionBeforeSwitchResult, + SessionBeforeTreeEvent, + SessionBeforeTreeResult, + SessionCompactEvent, + SessionDirectoryEvent, + SessionDirectoryHandler, + SessionDirectoryResult, + SessionEvent, + SessionForkEvent, + SessionShutdownEvent, + // Events - Session + SessionStartEvent, + SessionSwitchEvent, + SessionTreeEvent, + SetActiveToolsHandler, + SetLabelHandler, + SetModelHandler, + SetThinkingLevelHandler, + TerminalInputHandler, + // Events - Tool + ToolCallEvent, + ToolCallEventResult, + // Tools + ToolDefinition, + // Events - Tool Execution + ToolExecutionEndEvent, + ToolExecutionStartEvent, + ToolExecutionUpdateEvent, + ToolInfo, + ToolRenderResultOptions, + ToolResultEvent, + ToolResultEventResult, + TreePreparation, + TurnEndEvent, + TurnStartEvent, + // Events - User Bash + UserBashEvent, + UserBashEventResult, + WidgetPlacement, + WriteToolCallEvent, + WriteToolResultEvent, +} from "./types.js"; +// Type guards +export { + isBashToolResult, + isEditToolResult, + isFindToolResult, + isGrepToolResult, + isLsToolResult, + 
isReadToolResult, + isToolCallEventType, + isWriteToolResult, +} from "./types.js"; +export { + wrapRegisteredTool, + wrapRegisteredTools, + wrapToolsWithExtensions, + wrapToolWithExtensions, +} from "./wrapper.js"; diff --git a/packages/pi-coding-agent/src/core/extensions/loader.ts b/packages/pi-coding-agent/src/core/extensions/loader.ts new file mode 100644 index 000000000..c3bb146c8 --- /dev/null +++ b/packages/pi-coding-agent/src/core/extensions/loader.ts @@ -0,0 +1,545 @@ +/** + * Extension loader - loads TypeScript extension modules using jiti. + * + * Uses @mariozechner/jiti fork with virtualModules support for compiled Bun binaries. + */ + +import * as fs from "node:fs"; +import { createRequire } from "node:module"; +import * as os from "node:os"; +import * as path from "node:path"; +import { fileURLToPath } from "node:url"; +import { createJiti } from "@mariozechner/jiti"; +import * as _bundledPiAgentCore from "@gsd/pi-agent-core"; +import * as _bundledPiAi from "@gsd/pi-ai"; +import * as _bundledPiAiOauth from "@gsd/pi-ai/oauth"; +import type { KeyId } from "@gsd/pi-tui"; +import * as _bundledPiTui from "@gsd/pi-tui"; +// Static imports of packages that extensions may use. +// These MUST be static so Bun bundles them into the compiled binary. +// The virtualModules option then makes them available to extensions. +import * as _bundledTypebox from "@sinclair/typebox"; +import { getAgentDir, isBunBinary } from "../../config.js"; +// NOTE: This import works because loader.ts exports are NOT re-exported from index.ts, +// avoiding a circular dependency. Extensions can import from @gsd/pi-coding-agent. 
import * as _bundledPiCodingAgent from "../../index.js";
import { createEventBus, type EventBus } from "../event-bus.js";
import type { ExecOptions } from "../exec.js";
import { execCommand } from "../exec.js";
import type {
	Extension,
	ExtensionAPI,
	ExtensionFactory,
	ExtensionRuntime,
	LoadExtensionsResult,
	MessageRenderer,
	ProviderConfig,
	RegisteredCommand,
	ToolDefinition,
} from "./types.js";

/** Modules available to extensions via virtualModules (for compiled Bun binary) */
const VIRTUAL_MODULES: Record<string, unknown> = {
	"@sinclair/typebox": _bundledTypebox,
	"@gsd/pi-agent-core": _bundledPiAgentCore,
	"@gsd/pi-tui": _bundledPiTui,
	"@gsd/pi-ai": _bundledPiAi,
	"@gsd/pi-ai/oauth": _bundledPiAiOauth,
	"@gsd/pi-coding-agent": _bundledPiCodingAgent,
};

const require = createRequire(import.meta.url);

/**
 * Get aliases for jiti (used in Node.js/development mode).
 * In Bun binary mode, virtualModules is used instead.
 *
 * Aliases map the @gsd/* specifiers an extension imports onto concrete files:
 * workspace dist/ output when it exists on disk, otherwise whatever
 * import.meta.resolve finds in node_modules. Computed once and memoized.
 */
let _aliases: Record<string, string> | null = null;
function getAliases(): Record<string, string> {
	if (_aliases) return _aliases;

	const __dirname = path.dirname(fileURLToPath(import.meta.url));
	// Entry point of this package, so extensions importing @gsd/pi-coding-agent
	// get the very module instance that loaded them.
	const packageIndex = path.resolve(__dirname, "../..", "index.js");

	// typebox must be aliased to its package root (not the CJS entry file) so
	// subpath imports like @sinclair/typebox/value still resolve.
	const typeboxEntry = require.resolve("@sinclair/typebox");
	const typeboxRoot = typeboxEntry.replace(/[\\/]build[\\/]cjs[\\/]index\.js$/, "");

	const packagesRoot = path.resolve(__dirname, "../../../../");
	// Prefer a sibling workspace build when present; fall back to normal
	// Node resolution of the published specifier.
	const resolveWorkspaceOrImport = (workspaceRelativePath: string, specifier: string): string => {
		const workspacePath = path.join(packagesRoot, workspaceRelativePath);
		if (fs.existsSync(workspacePath)) {
			return workspacePath;
		}
		return fileURLToPath(import.meta.resolve(specifier));
	};

	_aliases = {
		"@gsd/pi-coding-agent": packageIndex,
		"@gsd/pi-agent-core": resolveWorkspaceOrImport("agent/dist/index.js", "@gsd/pi-agent-core"),
		"@gsd/pi-tui": resolveWorkspaceOrImport("tui/dist/index.js", "@gsd/pi-tui"),
		"@gsd/pi-ai": resolveWorkspaceOrImport("ai/dist/index.js", "@gsd/pi-ai"),
		"@gsd/pi-ai/oauth": resolveWorkspaceOrImport("ai/dist/oauth.js", "@gsd/pi-ai/oauth"),
		"@sinclair/typebox": typeboxRoot,
	};

	return _aliases;
}

/** Non-ASCII space characters that commonly sneak into pasted paths. */
const UNICODE_SPACES = /[\u00A0\u2000-\u200A\u202F\u205F\u3000]/g;

/** Replace exotic Unicode spaces with plain ASCII spaces. */
function normalizeUnicodeSpaces(str: string): string {
	return str.replace(UNICODE_SPACES, " ");
}

/**
 * Expand a leading `~` to the current user's home directory.
 * NOTE(review): `~otheruser/...` is treated as `$HOME/otheruser/...` here,
 * not as the other user's home — confirm that is the intended behavior.
 */
function expandPath(p: string): string {
	const normalized = normalizeUnicodeSpaces(p);
	if (normalized.startsWith("~/")) {
		return path.join(os.homedir(), normalized.slice(2));
	}
	if (normalized.startsWith("~")) {
		return path.join(os.homedir(), normalized.slice(1));
	}
	return normalized;
}

/** Expand `~` and resolve a possibly-relative extension path against cwd. */
function resolvePath(extPath: string, cwd: string): string {
	const expanded = expandPath(extPath);
	if (path.isAbsolute(expanded)) {
		return expanded;
	}
	return path.resolve(cwd, expanded);
}

type HandlerFn = (...args: unknown[]) => Promise<unknown>;

/**
 * Create a runtime with throwing stubs for action methods.
 * Runner.bindCore() replaces these with real implementations.
 */
export function createExtensionRuntime(): ExtensionRuntime {
	const notInitialized = () => {
		throw new Error("Extension runtime not initialized. Action methods cannot be called during extension loading.");
	};

	const runtime: ExtensionRuntime = {
		sendMessage: notInitialized,
		sendUserMessage: notInitialized,
		appendEntry: notInitialized,
		setSessionName: notInitialized,
		getSessionName: notInitialized,
		setLabel: notInitialized,
		getActiveTools: notInitialized,
		getAllTools: notInitialized,
		setActiveTools: notInitialized,
		// registerTool() is valid during extension load; refresh is only needed post-bind.
+ refreshTools: () => {}, + getCommands: notInitialized, + setModel: () => Promise.reject(new Error("Extension runtime not initialized")), + getThinkingLevel: notInitialized, + setThinkingLevel: notInitialized, + flagValues: new Map(), + pendingProviderRegistrations: [], + // Pre-bind: queue registrations so bindCore() can flush them once the + // model registry is available. bindCore() replaces both with direct calls. + registerProvider: (name, config) => { + runtime.pendingProviderRegistrations.push({ name, config }); + }, + unregisterProvider: (name) => { + runtime.pendingProviderRegistrations = runtime.pendingProviderRegistrations.filter((r) => r.name !== name); + }, + }; + + return runtime; +} + +/** + * Create the ExtensionAPI for an extension. + * Registration methods write to the extension object. + * Action methods delegate to the shared runtime. + */ +function createExtensionAPI( + extension: Extension, + runtime: ExtensionRuntime, + cwd: string, + eventBus: EventBus, +): ExtensionAPI { + const api = { + // Registration methods - write to extension + on(event: string, handler: HandlerFn): void { + const list = extension.handlers.get(event) ?? 
[]; + list.push(handler); + extension.handlers.set(event, list); + }, + + registerTool(tool: ToolDefinition): void { + extension.tools.set(tool.name, { + definition: tool, + extensionPath: extension.path, + }); + runtime.refreshTools(); + }, + + registerCommand(name: string, options: Omit): void { + extension.commands.set(name, { name, ...options }); + }, + + registerShortcut( + shortcut: KeyId, + options: { + description?: string; + handler: (ctx: import("./types.js").ExtensionContext) => Promise | void; + }, + ): void { + extension.shortcuts.set(shortcut, { shortcut, extensionPath: extension.path, ...options }); + }, + + registerFlag( + name: string, + options: { description?: string; type: "boolean" | "string"; default?: boolean | string }, + ): void { + extension.flags.set(name, { name, extensionPath: extension.path, ...options }); + if (options.default !== undefined && !runtime.flagValues.has(name)) { + runtime.flagValues.set(name, options.default); + } + }, + + registerMessageRenderer(customType: string, renderer: MessageRenderer): void { + extension.messageRenderers.set(customType, renderer as MessageRenderer); + }, + + // Flag access - checks extension registered it, reads from runtime + getFlag(name: string): boolean | string | undefined { + if (!extension.flags.has(name)) return undefined; + return runtime.flagValues.get(name); + }, + + // Action methods - delegate to shared runtime + sendMessage(message, options): void { + runtime.sendMessage(message, options); + }, + + sendUserMessage(content, options): void { + runtime.sendUserMessage(content, options); + }, + + appendEntry(customType: string, data?: unknown): void { + runtime.appendEntry(customType, data); + }, + + setSessionName(name: string): void { + runtime.setSessionName(name); + }, + + getSessionName(): string | undefined { + return runtime.getSessionName(); + }, + + setLabel(entryId: string, label: string | undefined): void { + runtime.setLabel(entryId, label); + }, + + exec(command: string, 
args: string[], options?: ExecOptions) { + return execCommand(command, args, options?.cwd ?? cwd, options); + }, + + getActiveTools(): string[] { + return runtime.getActiveTools(); + }, + + getAllTools() { + return runtime.getAllTools(); + }, + + setActiveTools(toolNames: string[]): void { + runtime.setActiveTools(toolNames); + }, + + getCommands() { + return runtime.getCommands(); + }, + + setModel(model) { + return runtime.setModel(model); + }, + + getThinkingLevel() { + return runtime.getThinkingLevel(); + }, + + setThinkingLevel(level) { + runtime.setThinkingLevel(level); + }, + + registerProvider(name: string, config: ProviderConfig) { + runtime.registerProvider(name, config); + }, + + unregisterProvider(name: string) { + runtime.unregisterProvider(name); + }, + + events: eventBus, + } as ExtensionAPI; + + return api; +} + +async function loadExtensionModule(extensionPath: string) { + const jiti = createJiti(import.meta.url, { + moduleCache: false, + // In Bun binary: use virtualModules for bundled packages (no filesystem resolution) + // Also disable tryNative so jiti handles ALL imports (not just the entry point) + // In Node.js/dev: use aliases to resolve to node_modules paths + ...(isBunBinary ? { virtualModules: VIRTUAL_MODULES, tryNative: false } : { alias: getAliases() }), + }); + + const module = await jiti.import(extensionPath, { default: true }); + const factory = module as ExtensionFactory; + return typeof factory !== "function" ? undefined : factory; +} + +/** + * Create an Extension object with empty collections. 
+ */ +function createExtension(extensionPath: string, resolvedPath: string): Extension { + return { + path: extensionPath, + resolvedPath, + handlers: new Map(), + tools: new Map(), + messageRenderers: new Map(), + commands: new Map(), + flags: new Map(), + shortcuts: new Map(), + }; +} + +async function loadExtension( + extensionPath: string, + cwd: string, + eventBus: EventBus, + runtime: ExtensionRuntime, +): Promise<{ extension: Extension | null; error: string | null }> { + const resolvedPath = resolvePath(extensionPath, cwd); + + try { + const factory = await loadExtensionModule(resolvedPath); + if (!factory) { + return { extension: null, error: `Extension does not export a valid factory function: ${extensionPath}` }; + } + + const extension = createExtension(extensionPath, resolvedPath); + const api = createExtensionAPI(extension, runtime, cwd, eventBus); + await factory(api); + + return { extension, error: null }; + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + return { extension: null, error: `Failed to load extension: ${message}` }; + } +} + +/** + * Create an Extension from an inline factory function. + */ +export async function loadExtensionFromFactory( + factory: ExtensionFactory, + cwd: string, + eventBus: EventBus, + runtime: ExtensionRuntime, + extensionPath = "", +): Promise { + const extension = createExtension(extensionPath, extensionPath); + const api = createExtensionAPI(extension, runtime, cwd, eventBus); + await factory(api); + return extension; +} + +/** + * Load extensions from paths. + */ +export async function loadExtensions(paths: string[], cwd: string, eventBus?: EventBus): Promise { + const extensions: Extension[] = []; + const errors: Array<{ path: string; error: string }> = []; + const resolvedEventBus = eventBus ?? 
createEventBus(); + const runtime = createExtensionRuntime(); + + for (const extPath of paths) { + const { extension, error } = await loadExtension(extPath, cwd, resolvedEventBus, runtime); + + if (error) { + errors.push({ path: extPath, error }); + continue; + } + + if (extension) { + extensions.push(extension); + } + } + + return { + extensions, + errors, + runtime, + }; +} + +interface PiManifest { + extensions?: string[]; + themes?: string[]; + skills?: string[]; + prompts?: string[]; +} + +function readPiManifest(packageJsonPath: string): PiManifest | null { + try { + const content = fs.readFileSync(packageJsonPath, "utf-8"); + const pkg = JSON.parse(content); + if (pkg.pi && typeof pkg.pi === "object") { + return pkg.pi as PiManifest; + } + return null; + } catch { + return null; + } +} + +function isExtensionFile(name: string): boolean { + return name.endsWith(".ts") || name.endsWith(".js"); +} + +/** + * Resolve extension entry points from a directory. + * + * Checks for: + * 1. package.json with "pi.extensions" field -> returns declared paths + * 2. index.ts or index.js -> returns the index file + * + * Returns resolved paths or null if no entry points found. 
+ */ +function resolveExtensionEntries(dir: string): string[] | null { + // Check for package.json with "pi" field first + const packageJsonPath = path.join(dir, "package.json"); + if (fs.existsSync(packageJsonPath)) { + const manifest = readPiManifest(packageJsonPath); + if (manifest?.extensions?.length) { + const entries: string[] = []; + for (const extPath of manifest.extensions) { + const resolvedExtPath = path.resolve(dir, extPath); + if (fs.existsSync(resolvedExtPath)) { + entries.push(resolvedExtPath); + } + } + if (entries.length > 0) { + return entries; + } + } + } + + // Check for index.ts or index.js + const indexTs = path.join(dir, "index.ts"); + const indexJs = path.join(dir, "index.js"); + if (fs.existsSync(indexTs)) { + return [indexTs]; + } + if (fs.existsSync(indexJs)) { + return [indexJs]; + } + + return null; +} + +/** + * Discover extensions in a directory. + * + * Discovery rules: + * 1. Direct files: `extensions/*.ts` or `*.js` → load + * 2. Subdirectory with index: `extensions/* /index.ts` or `index.js` → load + * 3. Subdirectory with package.json: `extensions/* /package.json` with "pi" field → load what it declares + * + * No recursion beyond one level. Complex packages must use package.json manifest. + */ +function discoverExtensionsInDir(dir: string): string[] { + if (!fs.existsSync(dir)) { + return []; + } + + const discovered: string[] = []; + + try { + const entries = fs.readdirSync(dir, { withFileTypes: true }); + + for (const entry of entries) { + const entryPath = path.join(dir, entry.name); + + // 1. Direct files: *.ts or *.js + if ((entry.isFile() || entry.isSymbolicLink()) && isExtensionFile(entry.name)) { + discovered.push(entryPath); + continue; + } + + // 2 & 3. 
Subdirectories + if (entry.isDirectory() || entry.isSymbolicLink()) { + const entries = resolveExtensionEntries(entryPath); + if (entries) { + discovered.push(...entries); + } + } + } + } catch { + return []; + } + + return discovered; +} + +/** + * Discover and load extensions from standard locations. + */ +export async function discoverAndLoadExtensions( + configuredPaths: string[], + cwd: string, + agentDir: string = getAgentDir(), + eventBus?: EventBus, +): Promise { + const allPaths: string[] = []; + const seen = new Set(); + + const addPaths = (paths: string[]) => { + for (const p of paths) { + const resolved = path.resolve(p); + if (!seen.has(resolved)) { + seen.add(resolved); + allPaths.push(p); + } + } + }; + + // 1. Project-local extensions: cwd/.pi/extensions/ + const localExtDir = path.join(cwd, ".pi", "extensions"); + addPaths(discoverExtensionsInDir(localExtDir)); + + // 2. Global extensions: agentDir/extensions/ + const globalExtDir = path.join(agentDir, "extensions"); + addPaths(discoverExtensionsInDir(globalExtDir)); + + // 3. Explicitly configured paths + for (const p of configuredPaths) { + const resolved = resolvePath(p, cwd); + if (fs.existsSync(resolved) && fs.statSync(resolved).isDirectory()) { + // Check for package.json with pi manifest or index.ts + const entries = resolveExtensionEntries(resolved); + if (entries) { + addPaths(entries); + continue; + } + // No explicit entries - discover individual files in directory + addPaths(discoverExtensionsInDir(resolved)); + continue; + } + + addPaths([resolved]); + } + + return loadExtensions(allPaths, cwd, eventBus); +} diff --git a/packages/pi-coding-agent/src/core/extensions/runner.ts b/packages/pi-coding-agent/src/core/extensions/runner.ts new file mode 100644 index 000000000..42fd75e64 --- /dev/null +++ b/packages/pi-coding-agent/src/core/extensions/runner.ts @@ -0,0 +1,884 @@ +/** + * Extension runner - executes extensions and manages their lifecycle. 
+ */ + +import type { AgentMessage } from "@gsd/pi-agent-core"; +import type { ImageContent, Model } from "@gsd/pi-ai"; +import type { KeyId } from "@gsd/pi-tui"; +import { type Theme, theme } from "../../modes/interactive/theme/theme.js"; +import type { ResourceDiagnostic } from "../diagnostics.js"; +import type { KeyAction, KeybindingsConfig } from "../keybindings.js"; +import type { ModelRegistry } from "../model-registry.js"; +import type { SessionManager } from "../session-manager.js"; +import type { + BeforeAgentStartEvent, + BeforeAgentStartEventResult, + BeforeProviderRequestEvent, + CompactOptions, + ContextEvent, + ContextEventResult, + ContextUsage, + Extension, + ExtensionActions, + ExtensionCommandContext, + ExtensionCommandContextActions, + ExtensionContext, + ExtensionContextActions, + ExtensionError, + ExtensionEvent, + ExtensionFlag, + ExtensionRuntime, + ExtensionShortcut, + ExtensionUIContext, + InputEvent, + InputEventResult, + InputSource, + MessageRenderer, + RegisteredCommand, + RegisteredTool, + ResourcesDiscoverEvent, + ResourcesDiscoverResult, + SessionBeforeCompactResult, + SessionBeforeForkResult, + SessionBeforeSwitchResult, + SessionBeforeTreeResult, + ToolCallEvent, + ToolCallEventResult, + ToolResultEvent, + ToolResultEventResult, + UserBashEvent, + UserBashEventResult, +} from "./types.js"; + +// Keybindings for these actions cannot be overridden by extensions +const RESERVED_ACTIONS_FOR_EXTENSION_CONFLICTS: ReadonlyArray = [ + "interrupt", + "clear", + "exit", + "suspend", + "cycleThinkingLevel", + "cycleModelForward", + "cycleModelBackward", + "selectModel", + "expandTools", + "toggleThinking", + "externalEditor", + "followUp", + "submit", + "selectConfirm", + "selectCancel", + "copy", + "deleteToLineEnd", +]; + +type BuiltInKeyBindings = Partial>; + +const buildBuiltinKeybindings = (effectiveKeybindings: Required): BuiltInKeyBindings => { + const builtinKeybindings = {} as BuiltInKeyBindings; + for (const [action, keys] of 
Object.entries(effectiveKeybindings)) { + const keyAction = action as KeyAction; + const keyList = Array.isArray(keys) ? keys : [keys]; + const restrictOverride = RESERVED_ACTIONS_FOR_EXTENSION_CONFLICTS.includes(keyAction); + for (const key of keyList) { + const normalizedKey = key.toLowerCase() as KeyId; + builtinKeybindings[normalizedKey] = { + action: keyAction, + restrictOverride: restrictOverride, + }; + } + } + return builtinKeybindings; +}; + +/** Combined result from all before_agent_start handlers */ +interface BeforeAgentStartCombinedResult { + messages?: NonNullable[]; + systemPrompt?: string; +} + +/** + * Events handled by the generic emit() method. + * Events with dedicated emitXxx() methods are excluded for stronger type safety. + */ +type RunnerEmitEvent = Exclude< + ExtensionEvent, + | ToolCallEvent + | ToolResultEvent + | UserBashEvent + | ContextEvent + | BeforeProviderRequestEvent + | BeforeAgentStartEvent + | ResourcesDiscoverEvent + | InputEvent +>; + +type SessionBeforeEvent = Extract< + RunnerEmitEvent, + { type: "session_before_switch" | "session_before_fork" | "session_before_compact" | "session_before_tree" } +>; + +type SessionBeforeEventResult = + | SessionBeforeSwitchResult + | SessionBeforeForkResult + | SessionBeforeCompactResult + | SessionBeforeTreeResult; + +type RunnerEmitResult = TEvent extends { type: "session_before_switch" } + ? SessionBeforeSwitchResult | undefined + : TEvent extends { type: "session_before_fork" } + ? SessionBeforeForkResult | undefined + : TEvent extends { type: "session_before_compact" } + ? SessionBeforeCompactResult | undefined + : TEvent extends { type: "session_before_tree" } + ? 
SessionBeforeTreeResult | undefined + : undefined; + +export type ExtensionErrorListener = (error: ExtensionError) => void; + +export type NewSessionHandler = (options?: { + parentSession?: string; + setup?: (sessionManager: SessionManager) => Promise; +}) => Promise<{ cancelled: boolean }>; + +export type ForkHandler = (entryId: string) => Promise<{ cancelled: boolean }>; + +export type NavigateTreeHandler = ( + targetId: string, + options?: { summarize?: boolean; customInstructions?: string; replaceInstructions?: boolean; label?: string }, +) => Promise<{ cancelled: boolean }>; + +export type SwitchSessionHandler = (sessionPath: string) => Promise<{ cancelled: boolean }>; + +export type ReloadHandler = () => Promise; + +export type ShutdownHandler = () => void; + +/** + * Helper function to emit session_shutdown event to extensions. + * Returns true if the event was emitted, false if there were no handlers. + */ +export async function emitSessionShutdownEvent(extensionRunner: ExtensionRunner | undefined): Promise { + if (extensionRunner?.hasHandlers("session_shutdown")) { + await extensionRunner.emit({ + type: "session_shutdown", + }); + return true; + } + return false; +} + +const noOpUIContext: ExtensionUIContext = { + select: async () => undefined, + confirm: async () => false, + input: async () => undefined, + notify: () => {}, + onTerminalInput: () => () => {}, + setStatus: () => {}, + setWorkingMessage: () => {}, + setWidget: () => {}, + setFooter: () => {}, + setHeader: () => {}, + setTitle: () => {}, + custom: async () => undefined as never, + pasteToEditor: () => {}, + setEditorText: () => {}, + getEditorText: () => "", + editor: async () => undefined, + setEditorComponent: () => {}, + get theme() { + return theme; + }, + getAllThemes: () => [], + getTheme: () => undefined, + setTheme: (_theme: string | Theme) => ({ success: false, error: "UI not available" }), + getToolsExpanded: () => false, + setToolsExpanded: () => {}, +}; + +export class 
ExtensionRunner { + private extensions: Extension[]; + private runtime: ExtensionRuntime; + private uiContext: ExtensionUIContext; + private cwd: string; + private sessionManager: SessionManager; + private modelRegistry: ModelRegistry; + private errorListeners: Set = new Set(); + private getModel: () => Model | undefined = () => undefined; + private isIdleFn: () => boolean = () => true; + private waitForIdleFn: () => Promise = async () => {}; + private abortFn: () => void = () => {}; + private hasPendingMessagesFn: () => boolean = () => false; + private getContextUsageFn: () => ContextUsage | undefined = () => undefined; + private compactFn: (options?: CompactOptions) => void = () => {}; + private getSystemPromptFn: () => string = () => ""; + private newSessionHandler: NewSessionHandler = async () => ({ cancelled: false }); + private forkHandler: ForkHandler = async () => ({ cancelled: false }); + private navigateTreeHandler: NavigateTreeHandler = async () => ({ cancelled: false }); + private switchSessionHandler: SwitchSessionHandler = async () => ({ cancelled: false }); + private reloadHandler: ReloadHandler = async () => {}; + private shutdownHandler: ShutdownHandler = () => {}; + private shortcutDiagnostics: ResourceDiagnostic[] = []; + private commandDiagnostics: ResourceDiagnostic[] = []; + + constructor( + extensions: Extension[], + runtime: ExtensionRuntime, + cwd: string, + sessionManager: SessionManager, + modelRegistry: ModelRegistry, + ) { + this.extensions = extensions; + this.runtime = runtime; + this.uiContext = noOpUIContext; + this.cwd = cwd; + this.sessionManager = sessionManager; + this.modelRegistry = modelRegistry; + } + + bindCore(actions: ExtensionActions, contextActions: ExtensionContextActions): void { + // Copy actions into the shared runtime (all extension APIs reference this) + this.runtime.sendMessage = actions.sendMessage; + this.runtime.sendUserMessage = actions.sendUserMessage; + this.runtime.appendEntry = actions.appendEntry; + 
this.runtime.setSessionName = actions.setSessionName; + this.runtime.getSessionName = actions.getSessionName; + this.runtime.setLabel = actions.setLabel; + this.runtime.getActiveTools = actions.getActiveTools; + this.runtime.getAllTools = actions.getAllTools; + this.runtime.setActiveTools = actions.setActiveTools; + this.runtime.refreshTools = actions.refreshTools; + this.runtime.getCommands = actions.getCommands; + this.runtime.setModel = actions.setModel; + this.runtime.getThinkingLevel = actions.getThinkingLevel; + this.runtime.setThinkingLevel = actions.setThinkingLevel; + + // Context actions (required) + this.getModel = contextActions.getModel; + this.isIdleFn = contextActions.isIdle; + this.abortFn = contextActions.abort; + this.hasPendingMessagesFn = contextActions.hasPendingMessages; + this.shutdownHandler = contextActions.shutdown; + this.getContextUsageFn = contextActions.getContextUsage; + this.compactFn = contextActions.compact; + this.getSystemPromptFn = contextActions.getSystemPrompt; + + // Flush provider registrations queued during extension loading + for (const { name, config } of this.runtime.pendingProviderRegistrations) { + this.modelRegistry.registerProvider(name, config); + } + this.runtime.pendingProviderRegistrations = []; + + // From this point on, provider registration/unregistration takes effect immediately + // without requiring a /reload. 
+ this.runtime.registerProvider = (name, config) => this.modelRegistry.registerProvider(name, config); + this.runtime.unregisterProvider = (name) => this.modelRegistry.unregisterProvider(name); + } + + bindCommandContext(actions?: ExtensionCommandContextActions): void { + if (actions) { + this.waitForIdleFn = actions.waitForIdle; + this.newSessionHandler = actions.newSession; + this.forkHandler = actions.fork; + this.navigateTreeHandler = actions.navigateTree; + this.switchSessionHandler = actions.switchSession; + this.reloadHandler = actions.reload; + return; + } + + this.waitForIdleFn = async () => {}; + this.newSessionHandler = async () => ({ cancelled: false }); + this.forkHandler = async () => ({ cancelled: false }); + this.navigateTreeHandler = async () => ({ cancelled: false }); + this.switchSessionHandler = async () => ({ cancelled: false }); + this.reloadHandler = async () => {}; + } + + setUIContext(uiContext?: ExtensionUIContext): void { + this.uiContext = uiContext ?? noOpUIContext; + } + + getUIContext(): ExtensionUIContext { + return this.uiContext; + } + + hasUI(): boolean { + return this.uiContext !== noOpUIContext; + } + + getExtensionPaths(): string[] { + return this.extensions.map((e) => e.path); + } + + /** Get all registered tools from all extensions (first registration per name wins). */ + getAllRegisteredTools(): RegisteredTool[] { + const toolsByName = new Map(); + for (const ext of this.extensions) { + for (const tool of ext.tools.values()) { + if (!toolsByName.has(tool.definition.name)) { + toolsByName.set(tool.definition.name, tool); + } + } + } + return Array.from(toolsByName.values()); + } + + /** Get a tool definition by name. Returns undefined if not found. 
*/ + getToolDefinition(toolName: string): RegisteredTool["definition"] | undefined { + for (const ext of this.extensions) { + const tool = ext.tools.get(toolName); + if (tool) { + return tool.definition; + } + } + return undefined; + } + + getFlags(): Map { + const allFlags = new Map(); + for (const ext of this.extensions) { + for (const [name, flag] of ext.flags) { + if (!allFlags.has(name)) { + allFlags.set(name, flag); + } + } + } + return allFlags; + } + + setFlagValue(name: string, value: boolean | string): void { + this.runtime.flagValues.set(name, value); + } + + getFlagValues(): Map { + return new Map(this.runtime.flagValues); + } + + getShortcuts(effectiveKeybindings: Required): Map { + this.shortcutDiagnostics = []; + const builtinKeybindings = buildBuiltinKeybindings(effectiveKeybindings); + const extensionShortcuts = new Map(); + + const addDiagnostic = (message: string, extensionPath: string) => { + this.shortcutDiagnostics.push({ type: "warning", message, path: extensionPath }); + if (!this.hasUI()) { + console.warn(message); + } + }; + + for (const ext of this.extensions) { + for (const [key, shortcut] of ext.shortcuts) { + const normalizedKey = key.toLowerCase() as KeyId; + + const builtInKeybinding = builtinKeybindings[normalizedKey]; + if (builtInKeybinding?.restrictOverride === true) { + addDiagnostic( + `Extension shortcut '${key}' from ${shortcut.extensionPath} conflicts with built-in shortcut. Skipping.`, + shortcut.extensionPath, + ); + continue; + } + + if (builtInKeybinding?.restrictOverride === false) { + addDiagnostic( + `Extension shortcut conflict: '${key}' is built-in shortcut for ${builtInKeybinding.action} and ${shortcut.extensionPath}. 
Using ${shortcut.extensionPath}.`, + shortcut.extensionPath, + ); + } + + const existingExtensionShortcut = extensionShortcuts.get(normalizedKey); + if (existingExtensionShortcut) { + addDiagnostic( + `Extension shortcut conflict: '${key}' registered by both ${existingExtensionShortcut.extensionPath} and ${shortcut.extensionPath}. Using ${shortcut.extensionPath}.`, + shortcut.extensionPath, + ); + } + extensionShortcuts.set(normalizedKey, shortcut); + } + } + return extensionShortcuts; + } + + getShortcutDiagnostics(): ResourceDiagnostic[] { + return this.shortcutDiagnostics; + } + + onError(listener: ExtensionErrorListener): () => void { + this.errorListeners.add(listener); + return () => this.errorListeners.delete(listener); + } + + emitError(error: ExtensionError): void { + for (const listener of this.errorListeners) { + listener(error); + } + } + + hasHandlers(eventType: string): boolean { + for (const ext of this.extensions) { + const handlers = ext.handlers.get(eventType); + if (handlers && handlers.length > 0) { + return true; + } + } + return false; + } + + getMessageRenderer(customType: string): MessageRenderer | undefined { + for (const ext of this.extensions) { + const renderer = ext.messageRenderers.get(customType); + if (renderer) { + return renderer; + } + } + return undefined; + } + + getRegisteredCommands(reserved?: Set): RegisteredCommand[] { + this.commandDiagnostics = []; + + const commands: RegisteredCommand[] = []; + const commandOwners = new Map(); + for (const ext of this.extensions) { + for (const command of ext.commands.values()) { + if (reserved?.has(command.name)) { + const message = `Extension command '${command.name}' from ${ext.path} conflicts with built-in commands. 
Skipping.`; + this.commandDiagnostics.push({ type: "warning", message, path: ext.path }); + if (!this.hasUI()) { + console.warn(message); + } + continue; + } + + const existingOwner = commandOwners.get(command.name); + if (existingOwner) { + const message = `Extension command '${command.name}' from ${ext.path} conflicts with ${existingOwner}. Skipping.`; + this.commandDiagnostics.push({ type: "warning", message, path: ext.path }); + if (!this.hasUI()) { + console.warn(message); + } + continue; + } + + commandOwners.set(command.name, ext.path); + commands.push(command); + } + } + return commands; + } + + getCommandDiagnostics(): ResourceDiagnostic[] { + return this.commandDiagnostics; + } + + getRegisteredCommandsWithPaths(): Array<{ command: RegisteredCommand; extensionPath: string }> { + const result: Array<{ command: RegisteredCommand; extensionPath: string }> = []; + for (const ext of this.extensions) { + for (const command of ext.commands.values()) { + result.push({ command, extensionPath: ext.path }); + } + } + return result; + } + + getCommand(name: string): RegisteredCommand | undefined { + for (const ext of this.extensions) { + const command = ext.commands.get(name); + if (command) { + return command; + } + } + return undefined; + } + + /** + * Request a graceful shutdown. Called by extension tools and event handlers. + * The actual shutdown behavior is provided by the mode via bindExtensions(). + */ + shutdown(): void { + this.shutdownHandler(); + } + + /** + * Create an ExtensionContext for use in event handlers and tool execution. + * Context values are resolved at call time, so changes via bindCore/bindUI are reflected. 
+ */ + createContext(): ExtensionContext { + const getModel = this.getModel; + return { + ui: this.uiContext, + hasUI: this.hasUI(), + cwd: this.cwd, + sessionManager: this.sessionManager, + modelRegistry: this.modelRegistry, + get model() { + return getModel(); + }, + isIdle: () => this.isIdleFn(), + abort: () => this.abortFn(), + hasPendingMessages: () => this.hasPendingMessagesFn(), + shutdown: () => this.shutdownHandler(), + getContextUsage: () => this.getContextUsageFn(), + compact: (options) => this.compactFn(options), + getSystemPrompt: () => this.getSystemPromptFn(), + }; + } + + createCommandContext(): ExtensionCommandContext { + return { + ...this.createContext(), + waitForIdle: () => this.waitForIdleFn(), + newSession: (options) => this.newSessionHandler(options), + fork: (entryId) => this.forkHandler(entryId), + navigateTree: (targetId, options) => this.navigateTreeHandler(targetId, options), + switchSession: (sessionPath) => this.switchSessionHandler(sessionPath), + reload: () => this.reloadHandler(), + }; + } + + private isSessionBeforeEvent(event: RunnerEmitEvent): event is SessionBeforeEvent { + return ( + event.type === "session_before_switch" || + event.type === "session_before_fork" || + event.type === "session_before_compact" || + event.type === "session_before_tree" + ); + } + + async emit(event: TEvent): Promise> { + const ctx = this.createContext(); + let result: SessionBeforeEventResult | undefined; + + for (const ext of this.extensions) { + const handlers = ext.handlers.get(event.type); + if (!handlers || handlers.length === 0) continue; + + for (const handler of handlers) { + try { + const handlerResult = await handler(event, ctx); + + if (this.isSessionBeforeEvent(event) && handlerResult) { + result = handlerResult as SessionBeforeEventResult; + if (result.cancel) { + return result as RunnerEmitResult; + } + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + const stack = err instanceof Error ? 
err.stack : undefined; + this.emitError({ + extensionPath: ext.path, + event: event.type, + error: message, + stack, + }); + } + } + } + + return result as RunnerEmitResult; + } + + async emitToolResult(event: ToolResultEvent): Promise { + const ctx = this.createContext(); + const currentEvent: ToolResultEvent = { ...event }; + let modified = false; + + for (const ext of this.extensions) { + const handlers = ext.handlers.get("tool_result"); + if (!handlers || handlers.length === 0) continue; + + for (const handler of handlers) { + try { + const handlerResult = (await handler(currentEvent, ctx)) as ToolResultEventResult | undefined; + if (!handlerResult) continue; + + if (handlerResult.content !== undefined) { + currentEvent.content = handlerResult.content; + modified = true; + } + if (handlerResult.details !== undefined) { + currentEvent.details = handlerResult.details; + modified = true; + } + if (handlerResult.isError !== undefined) { + currentEvent.isError = handlerResult.isError; + modified = true; + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + const stack = err instanceof Error ? 
err.stack : undefined; + this.emitError({ + extensionPath: ext.path, + event: "tool_result", + error: message, + stack, + }); + } + } + } + + if (!modified) { + return undefined; + } + + return { + content: currentEvent.content, + details: currentEvent.details, + isError: currentEvent.isError, + }; + } + + async emitToolCall(event: ToolCallEvent): Promise { + const ctx = this.createContext(); + let result: ToolCallEventResult | undefined; + + for (const ext of this.extensions) { + const handlers = ext.handlers.get("tool_call"); + if (!handlers || handlers.length === 0) continue; + + for (const handler of handlers) { + const handlerResult = await handler(event, ctx); + + if (handlerResult) { + result = handlerResult as ToolCallEventResult; + if (result.block) { + return result; + } + } + } + } + + return result; + } + + async emitUserBash(event: UserBashEvent): Promise { + const ctx = this.createContext(); + + for (const ext of this.extensions) { + const handlers = ext.handlers.get("user_bash"); + if (!handlers || handlers.length === 0) continue; + + for (const handler of handlers) { + try { + const handlerResult = await handler(event, ctx); + if (handlerResult) { + return handlerResult as UserBashEventResult; + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + const stack = err instanceof Error ? 
err.stack : undefined; + this.emitError({ + extensionPath: ext.path, + event: "user_bash", + error: message, + stack, + }); + } + } + } + + return undefined; + } + + async emitContext(messages: AgentMessage[]): Promise { + const ctx = this.createContext(); + let currentMessages = structuredClone(messages); + + for (const ext of this.extensions) { + const handlers = ext.handlers.get("context"); + if (!handlers || handlers.length === 0) continue; + + for (const handler of handlers) { + try { + const event: ContextEvent = { type: "context", messages: currentMessages }; + const handlerResult = await handler(event, ctx); + + if (handlerResult && (handlerResult as ContextEventResult).messages) { + currentMessages = (handlerResult as ContextEventResult).messages!; + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + const stack = err instanceof Error ? err.stack : undefined; + this.emitError({ + extensionPath: ext.path, + event: "context", + error: message, + stack, + }); + } + } + } + + return currentMessages; + } + + async emitBeforeProviderRequest(payload: unknown): Promise { + const ctx = this.createContext(); + let currentPayload = payload; + + for (const ext of this.extensions) { + const handlers = ext.handlers.get("before_provider_request"); + if (!handlers || handlers.length === 0) continue; + + for (const handler of handlers) { + try { + const event: BeforeProviderRequestEvent = { + type: "before_provider_request", + payload: currentPayload, + }; + const handlerResult = await handler(event, ctx); + if (handlerResult !== undefined) { + currentPayload = handlerResult; + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + const stack = err instanceof Error ? 
err.stack : undefined; + this.emitError({ + extensionPath: ext.path, + event: "before_provider_request", + error: message, + stack, + }); + } + } + } + + return currentPayload; + } + + async emitBeforeAgentStart( + prompt: string, + images: ImageContent[] | undefined, + systemPrompt: string, + ): Promise { + const ctx = this.createContext(); + const messages: NonNullable[] = []; + let currentSystemPrompt = systemPrompt; + let systemPromptModified = false; + + for (const ext of this.extensions) { + const handlers = ext.handlers.get("before_agent_start"); + if (!handlers || handlers.length === 0) continue; + + for (const handler of handlers) { + try { + const event: BeforeAgentStartEvent = { + type: "before_agent_start", + prompt, + images, + systemPrompt: currentSystemPrompt, + }; + const handlerResult = await handler(event, ctx); + + if (handlerResult) { + const result = handlerResult as BeforeAgentStartEventResult; + if (result.message) { + messages.push(result.message); + } + if (result.systemPrompt !== undefined) { + currentSystemPrompt = result.systemPrompt; + systemPromptModified = true; + } + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + const stack = err instanceof Error ? err.stack : undefined; + this.emitError({ + extensionPath: ext.path, + event: "before_agent_start", + error: message, + stack, + }); + } + } + } + + if (messages.length > 0 || systemPromptModified) { + return { + messages: messages.length > 0 ? messages : undefined, + systemPrompt: systemPromptModified ? 
currentSystemPrompt : undefined, + }; + } + + return undefined; + } + + async emitResourcesDiscover( + cwd: string, + reason: ResourcesDiscoverEvent["reason"], + ): Promise<{ + skillPaths: Array<{ path: string; extensionPath: string }>; + promptPaths: Array<{ path: string; extensionPath: string }>; + themePaths: Array<{ path: string; extensionPath: string }>; + }> { + const ctx = this.createContext(); + const skillPaths: Array<{ path: string; extensionPath: string }> = []; + const promptPaths: Array<{ path: string; extensionPath: string }> = []; + const themePaths: Array<{ path: string; extensionPath: string }> = []; + + for (const ext of this.extensions) { + const handlers = ext.handlers.get("resources_discover"); + if (!handlers || handlers.length === 0) continue; + + for (const handler of handlers) { + try { + const event: ResourcesDiscoverEvent = { type: "resources_discover", cwd, reason }; + const handlerResult = await handler(event, ctx); + const result = handlerResult as ResourcesDiscoverResult | undefined; + + if (result?.skillPaths?.length) { + skillPaths.push(...result.skillPaths.map((path) => ({ path, extensionPath: ext.path }))); + } + if (result?.promptPaths?.length) { + promptPaths.push(...result.promptPaths.map((path) => ({ path, extensionPath: ext.path }))); + } + if (result?.themePaths?.length) { + themePaths.push(...result.themePaths.map((path) => ({ path, extensionPath: ext.path }))); + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + const stack = err instanceof Error ? err.stack : undefined; + this.emitError({ + extensionPath: ext.path, + event: "resources_discover", + error: message, + stack, + }); + } + } + } + + return { skillPaths, promptPaths, themePaths }; + } + + /** Emit input event. Transforms chain, "handled" short-circuits. 
*/ + async emitInput(text: string, images: ImageContent[] | undefined, source: InputSource): Promise { + const ctx = this.createContext(); + let currentText = text; + let currentImages = images; + + for (const ext of this.extensions) { + for (const handler of ext.handlers.get("input") ?? []) { + try { + const event: InputEvent = { type: "input", text: currentText, images: currentImages, source }; + const result = (await handler(event, ctx)) as InputEventResult | undefined; + if (result?.action === "handled") return result; + if (result?.action === "transform") { + currentText = result.text; + currentImages = result.images ?? currentImages; + } + } catch (err) { + this.emitError({ + extensionPath: ext.path, + event: "input", + error: err instanceof Error ? err.message : String(err), + stack: err instanceof Error ? err.stack : undefined, + }); + } + } + } + return currentText !== text || currentImages !== images + ? { action: "transform", text: currentText, images: currentImages } + : { action: "continue" }; + } +} diff --git a/packages/pi-coding-agent/src/core/extensions/types.ts b/packages/pi-coding-agent/src/core/extensions/types.ts new file mode 100644 index 000000000..11bd86361 --- /dev/null +++ b/packages/pi-coding-agent/src/core/extensions/types.ts @@ -0,0 +1,1411 @@ +/** + * Extension system types. 
+ * + * Extensions are TypeScript modules that can: + * - Subscribe to agent lifecycle events + * - Register LLM-callable tools + * - Register commands, keyboard shortcuts, and CLI flags + * - Interact with the user via UI primitives + */ + +import type { + AgentMessage, + AgentToolResult, + AgentToolUpdateCallback, + ThinkingLevel, +} from "@gsd/pi-agent-core"; +import type { + Api, + AssistantMessageEvent, + AssistantMessageEventStream, + Context, + ImageContent, + Model, + OAuthCredentials, + OAuthLoginCallbacks, + SimpleStreamOptions, + TextContent, + ToolResultMessage, +} from "@gsd/pi-ai"; +import type { + AutocompleteItem, + Component, + EditorComponent, + EditorTheme, + KeyId, + OverlayHandle, + OverlayOptions, + TUI, +} from "@gsd/pi-tui"; +import type { Static, TSchema } from "@sinclair/typebox"; +import type { Theme } from "../../modes/interactive/theme/theme.js"; +import type { BashResult } from "../bash-executor.js"; +import type { CompactionPreparation, CompactionResult } from "../compaction/index.js"; +import type { EventBus } from "../event-bus.js"; +import type { ExecOptions, ExecResult } from "../exec.js"; +import type { ReadonlyFooterDataProvider } from "../footer-data-provider.js"; +import type { KeybindingsManager } from "../keybindings.js"; +import type { CustomMessage } from "../messages.js"; +import type { ModelRegistry } from "../model-registry.js"; +import type { + BranchSummaryEntry, + CompactionEntry, + ReadonlySessionManager, + SessionEntry, + SessionManager, +} from "../session-manager.js"; +import type { SlashCommandInfo } from "../slash-commands.js"; +import type { BashOperations } from "../tools/bash.js"; +import type { EditToolDetails } from "../tools/edit.js"; +import type { + BashToolDetails, + BashToolInput, + EditToolInput, + FindToolDetails, + FindToolInput, + GrepToolDetails, + GrepToolInput, + LsToolDetails, + LsToolInput, + ReadToolDetails, + ReadToolInput, + WriteToolInput, +} from "../tools/index.js"; + +export type { 
ExecOptions, ExecResult } from "../exec.js"; +export type { AgentToolResult, AgentToolUpdateCallback }; +export type { AppAction, KeybindingsManager } from "../keybindings.js"; + +// ============================================================================ +// UI Context +// ============================================================================ + +/** Options for extension UI dialogs. */ +export interface ExtensionUIDialogOptions { + /** AbortSignal to programmatically dismiss the dialog. */ + signal?: AbortSignal; + /** Timeout in milliseconds. Dialog auto-dismisses with live countdown display. */ + timeout?: number; +} + +/** Placement for extension widgets. */ +export type WidgetPlacement = "aboveEditor" | "belowEditor"; + +/** Options for extension widgets. */ +export interface ExtensionWidgetOptions { + /** Where the widget is rendered. Defaults to "aboveEditor". */ + placement?: WidgetPlacement; +} + +/** Raw terminal input listener for extensions. */ +export type TerminalInputHandler = (data: string) => { consume?: boolean; data?: string } | undefined; + +/** + * UI context for extensions to request interactive UI. + * Each mode (interactive, RPC, print) provides its own implementation. + */ +export interface ExtensionUIContext { + /** Show a selector and return the user's choice. */ + select(title: string, options: string[], opts?: ExtensionUIDialogOptions): Promise; + + /** Show a confirmation dialog. */ + confirm(title: string, message: string, opts?: ExtensionUIDialogOptions): Promise; + + /** Show a text input dialog. */ + input(title: string, placeholder?: string, opts?: ExtensionUIDialogOptions): Promise; + + /** Show a notification to the user. */ + notify(message: string, type?: "info" | "warning" | "error"): void; + + /** Listen to raw terminal input (interactive mode only). Returns an unsubscribe function. */ + onTerminalInput(handler: TerminalInputHandler): () => void; + + /** Set status text in the footer/status bar. 
Pass undefined to clear. */ + setStatus(key: string, text: string | undefined): void; + + /** Set the working/loading message shown during streaming. Call with no argument to restore default. */ + setWorkingMessage(message?: string): void; + + /** Set a widget to display above or below the editor. Accepts string array or component factory. */ + setWidget(key: string, content: string[] | undefined, options?: ExtensionWidgetOptions): void; + setWidget( + key: string, + content: ((tui: TUI, theme: Theme) => Component & { dispose?(): void }) | undefined, + options?: ExtensionWidgetOptions, + ): void; + + /** Set a custom footer component, or undefined to restore the built-in footer. + * + * The factory receives a FooterDataProvider for data not otherwise accessible: + * git branch and extension statuses from setStatus(). Token stats, model info, + * etc. are available via ctx.sessionManager and ctx.model. + */ + setFooter( + factory: + | ((tui: TUI, theme: Theme, footerData: ReadonlyFooterDataProvider) => Component & { dispose?(): void }) + | undefined, + ): void; + + /** Set a custom header component (shown at startup, above chat), or undefined to restore the built-in header. */ + setHeader(factory: ((tui: TUI, theme: Theme) => Component & { dispose?(): void }) | undefined): void; + + /** Set the terminal window/tab title. */ + setTitle(title: string): void; + + /** Show a custom component with keyboard focus. */ + custom( + factory: ( + tui: TUI, + theme: Theme, + keybindings: KeybindingsManager, + done: (result: T) => void, + ) => (Component & { dispose?(): void }) | Promise, + options?: { + overlay?: boolean; + /** Overlay positioning/sizing options. Can be static or a function for dynamic updates. */ + overlayOptions?: OverlayOptions | (() => OverlayOptions); + /** Called with the overlay handle after the overlay is shown. Use to control visibility. 
*/ + onHandle?: (handle: OverlayHandle) => void; + }, + ): Promise; + + /** Paste text into the editor, triggering paste handling (collapse for large content). */ + pasteToEditor(text: string): void; + + /** Set the text in the core input editor. */ + setEditorText(text: string): void; + + /** Get the current text from the core input editor. */ + getEditorText(): string; + + /** Show a multi-line editor for text editing. */ + editor(title: string, prefill?: string): Promise; + + /** + * Set a custom editor component via factory function. + * Pass undefined to restore the default editor. + * + * The factory receives: + * - `theme`: EditorTheme for styling borders and autocomplete + * - `keybindings`: KeybindingsManager for app-level keybindings + * + * For full app keybinding support (escape, ctrl+d, model switching, etc.), + * extend `CustomEditor` from `@gsd/pi-coding-agent` and call + * `super.handleInput(data)` for keys you don't handle. + * + * @example + * ```ts + * import { CustomEditor } from "@gsd/pi-coding-agent"; + * + * class VimEditor extends CustomEditor { + * private mode: "normal" | "insert" = "insert"; + * + * handleInput(data: string): void { + * if (this.mode === "normal") { + * // Handle vim normal mode keys... + * if (data === "i") { this.mode = "insert"; return; } + * } + * super.handleInput(data); // App keybindings + text editing + * } + * } + * + * ctx.ui.setEditorComponent((tui, theme, keybindings) => + * new VimEditor(tui, theme, keybindings) + * ); + * ``` + */ + setEditorComponent( + factory: ((tui: TUI, theme: EditorTheme, keybindings: KeybindingsManager) => EditorComponent) | undefined, + ): void; + + /** Get the current theme for styling. */ + readonly theme: Theme; + + /** Get all available themes with their names and file paths. */ + getAllThemes(): { name: string; path: string | undefined }[]; + + /** Load a theme by name without switching to it. Returns undefined if not found. 
*/ + getTheme(name: string): Theme | undefined; + + /** Set the current theme by name or Theme object. */ + setTheme(theme: string | Theme): { success: boolean; error?: string }; + + /** Get current tool output expansion state. */ + getToolsExpanded(): boolean; + + /** Set tool output expansion state. */ + setToolsExpanded(expanded: boolean): void; +} + +// ============================================================================ +// Extension Context +// ============================================================================ + +export interface ContextUsage { + /** Estimated context tokens, or null if unknown (e.g. right after compaction, before next LLM response). */ + tokens: number | null; + contextWindow: number; + /** Context usage as percentage of context window, or null if tokens is unknown. */ + percent: number | null; +} + +export interface CompactOptions { + customInstructions?: string; + onComplete?: (result: CompactionResult) => void; + onError?: (error: Error) => void; +} + +/** + * Context passed to extension event handlers. + */ +export interface ExtensionContext { + /** UI methods for user interaction */ + ui: ExtensionUIContext; + /** Whether UI is available (false in print/RPC mode) */ + hasUI: boolean; + /** Current working directory */ + cwd: string; + /** Session manager (read-only) */ + sessionManager: ReadonlySessionManager; + /** Model registry for API key resolution */ + modelRegistry: ModelRegistry; + /** Current model (may be undefined) */ + model: Model | undefined; + /** Whether the agent is idle (not streaming) */ + isIdle(): boolean; + /** Abort the current agent operation */ + abort(): void; + /** Whether there are queued messages waiting */ + hasPendingMessages(): boolean; + /** Gracefully shutdown pi and exit. Available in all contexts. */ + shutdown(): void; + /** Get current context usage for the active model. */ + getContextUsage(): ContextUsage | undefined; + /** Trigger compaction without awaiting completion. 
*/ + compact(options?: CompactOptions): void; + /** Get the current effective system prompt. */ + getSystemPrompt(): string; +} + +/** + * Extended context for command handlers. + * Includes session control methods only safe in user-initiated commands. + */ +export interface ExtensionCommandContext extends ExtensionContext { + /** Wait for the agent to finish streaming */ + waitForIdle(): Promise; + + /** Start a new session, optionally with initialization. */ + newSession(options?: { + parentSession?: string; + setup?: (sessionManager: SessionManager) => Promise; + }): Promise<{ cancelled: boolean }>; + + /** Fork from a specific entry, creating a new session file. */ + fork(entryId: string): Promise<{ cancelled: boolean }>; + + /** Navigate to a different point in the session tree. */ + navigateTree( + targetId: string, + options?: { summarize?: boolean; customInstructions?: string; replaceInstructions?: boolean; label?: string }, + ): Promise<{ cancelled: boolean }>; + + /** Switch to a different session file. */ + switchSession(sessionPath: string): Promise<{ cancelled: boolean }>; + + /** Reload extensions, skills, prompts, and themes. */ + reload(): Promise; +} + +// ============================================================================ +// Tool Types +// ============================================================================ + +/** Rendering options for tool results */ +export interface ToolRenderResultOptions { + /** Whether the result view is expanded */ + expanded: boolean; + /** Whether this is a partial/streaming result */ + isPartial: boolean; +} + +/** + * Tool definition for registerTool(). + */ +export interface ToolDefinition { + /** Tool name (used in LLM tool calls) */ + name: string; + /** Human-readable label for UI */ + label: string; + /** Description for LLM */ + description: string; + /** Optional one-line snippet for the Available tools section in the default system prompt. Falls back to description when omitted. 
*/ + promptSnippet?: string; + /** Optional guideline bullets appended to the default system prompt Guidelines section when this tool is active. */ + promptGuidelines?: string[]; + /** Parameter schema (TypeBox) */ + parameters: TParams; + + /** Execute the tool. */ + execute( + toolCallId: string, + params: Static, + signal: AbortSignal | undefined, + onUpdate: AgentToolUpdateCallback | undefined, + ctx: ExtensionContext, + ): Promise>; + + /** Custom rendering for tool call display */ + renderCall?: (args: Static, theme: Theme) => Component | undefined; + + /** Custom rendering for tool result display */ + renderResult?: ( + result: AgentToolResult, + options: ToolRenderResultOptions, + theme: Theme, + ) => Component | undefined; +} + +// ============================================================================ +// Resource Events +// ============================================================================ + +/** Fired after session_start to allow extensions to provide additional resource paths. 
*/ +export interface ResourcesDiscoverEvent { + type: "resources_discover"; + cwd: string; + reason: "startup" | "reload"; +} + +/** Result from resources_discover event handler */ +export interface ResourcesDiscoverResult { + skillPaths?: string[]; + promptPaths?: string[]; + themePaths?: string[]; +} + +// ============================================================================ +// Session Events +// ============================================================================ + +/** Fired before session manager creation to allow custom session directory resolution */ +export interface SessionDirectoryEvent { + type: "session_directory"; + cwd: string; +} + +/** Fired on initial session load */ +export interface SessionStartEvent { + type: "session_start"; +} + +/** Fired before switching to another session (can be cancelled) */ +export interface SessionBeforeSwitchEvent { + type: "session_before_switch"; + reason: "new" | "resume"; + targetSessionFile?: string; +} + +/** Fired after switching to another session */ +export interface SessionSwitchEvent { + type: "session_switch"; + reason: "new" | "resume"; + previousSessionFile: string | undefined; +} + +/** Fired before forking a session (can be cancelled) */ +export interface SessionBeforeForkEvent { + type: "session_before_fork"; + entryId: string; +} + +/** Fired after forking a session */ +export interface SessionForkEvent { + type: "session_fork"; + previousSessionFile: string | undefined; +} + +/** Fired before context compaction (can be cancelled or customized) */ +export interface SessionBeforeCompactEvent { + type: "session_before_compact"; + preparation: CompactionPreparation; + branchEntries: SessionEntry[]; + customInstructions?: string; + signal: AbortSignal; +} + +/** Fired after context compaction */ +export interface SessionCompactEvent { + type: "session_compact"; + compactionEntry: CompactionEntry; + fromExtension: boolean; +} + +/** Fired on process exit */ +export interface 
SessionShutdownEvent { + type: "session_shutdown"; +} + +/** Preparation data for tree navigation */ +export interface TreePreparation { + targetId: string; + oldLeafId: string | null; + commonAncestorId: string | null; + entriesToSummarize: SessionEntry[]; + userWantsSummary: boolean; + /** Custom instructions for summarization */ + customInstructions?: string; + /** If true, customInstructions replaces the default prompt instead of being appended */ + replaceInstructions?: boolean; + /** Label to attach to the branch summary entry */ + label?: string; +} + +/** Fired before navigating in the session tree (can be cancelled) */ +export interface SessionBeforeTreeEvent { + type: "session_before_tree"; + preparation: TreePreparation; + signal: AbortSignal; +} + +/** Fired after navigating in the session tree */ +export interface SessionTreeEvent { + type: "session_tree"; + newLeafId: string | null; + oldLeafId: string | null; + summaryEntry?: BranchSummaryEntry; + fromExtension?: boolean; +} + +export type SessionEvent = + | SessionDirectoryEvent + | SessionStartEvent + | SessionBeforeSwitchEvent + | SessionSwitchEvent + | SessionBeforeForkEvent + | SessionForkEvent + | SessionBeforeCompactEvent + | SessionCompactEvent + | SessionShutdownEvent + | SessionBeforeTreeEvent + | SessionTreeEvent; + +// ============================================================================ +// Agent Events +// ============================================================================ + +/** Fired before each LLM call. Can modify messages. */ +export interface ContextEvent { + type: "context"; + messages: AgentMessage[]; +} + +/** Fired before a provider request is sent. Can replace the payload. */ +export interface BeforeProviderRequestEvent { + type: "before_provider_request"; + payload: unknown; +} + +/** Fired after user submits prompt but before agent loop. 
*/ +export interface BeforeAgentStartEvent { + type: "before_agent_start"; + prompt: string; + images?: ImageContent[]; + systemPrompt: string; +} + +/** Fired when an agent loop starts */ +export interface AgentStartEvent { + type: "agent_start"; +} + +/** Fired when an agent loop ends */ +export interface AgentEndEvent { + type: "agent_end"; + messages: AgentMessage[]; +} + +/** Fired at the start of each turn */ +export interface TurnStartEvent { + type: "turn_start"; + turnIndex: number; + timestamp: number; +} + +/** Fired at the end of each turn */ +export interface TurnEndEvent { + type: "turn_end"; + turnIndex: number; + message: AgentMessage; + toolResults: ToolResultMessage[]; +} + +/** Fired when a message starts (user, assistant, or toolResult) */ +export interface MessageStartEvent { + type: "message_start"; + message: AgentMessage; +} + +/** Fired during assistant message streaming with token-by-token updates */ +export interface MessageUpdateEvent { + type: "message_update"; + message: AgentMessage; + assistantMessageEvent: AssistantMessageEvent; +} + +/** Fired when a message ends */ +export interface MessageEndEvent { + type: "message_end"; + message: AgentMessage; +} + +/** Fired when a tool starts executing */ +export interface ToolExecutionStartEvent { + type: "tool_execution_start"; + toolCallId: string; + toolName: string; + args: any; +} + +/** Fired during tool execution with partial/streaming output */ +export interface ToolExecutionUpdateEvent { + type: "tool_execution_update"; + toolCallId: string; + toolName: string; + args: any; + partialResult: any; +} + +/** Fired when a tool finishes executing */ +export interface ToolExecutionEndEvent { + type: "tool_execution_end"; + toolCallId: string; + toolName: string; + result: any; + isError: boolean; +} + +// ============================================================================ +// Model Events +// ============================================================================ + +export 
type ModelSelectSource = "set" | "cycle" | "restore"; + +/** Fired when a new model is selected */ +export interface ModelSelectEvent { + type: "model_select"; + model: Model; + previousModel: Model | undefined; + source: ModelSelectSource; +} + +// ============================================================================ +// User Bash Events +// ============================================================================ + +/** Fired when user executes a bash command via ! or !! prefix */ +export interface UserBashEvent { + type: "user_bash"; + /** The command to execute */ + command: string; + /** True if !! prefix was used (excluded from LLM context) */ + excludeFromContext: boolean; + /** Current working directory */ + cwd: string; +} + +// ============================================================================ +// Input Events +// ============================================================================ + +/** Source of user input */ +export type InputSource = "interactive" | "rpc" | "extension"; + +/** Fired when user input is received, before agent processing */ +export interface InputEvent { + type: "input"; + /** The input text */ + text: string; + /** Attached images, if any */ + images?: ImageContent[]; + /** Where the input came from */ + source: InputSource; +} + +/** Result from input event handler */ +export type InputEventResult = + | { action: "continue" } + | { action: "transform"; text: string; images?: ImageContent[] } + | { action: "handled" }; + +// ============================================================================ +// Tool Events +// ============================================================================ + +interface ToolCallEventBase { + type: "tool_call"; + toolCallId: string; +} + +export interface BashToolCallEvent extends ToolCallEventBase { + toolName: "bash"; + input: BashToolInput; +} + +export interface ReadToolCallEvent extends ToolCallEventBase { + toolName: "read"; + input: ReadToolInput; +} + +export 
interface EditToolCallEvent extends ToolCallEventBase { + toolName: "edit"; + input: EditToolInput; +} + +export interface WriteToolCallEvent extends ToolCallEventBase { + toolName: "write"; + input: WriteToolInput; +} + +export interface GrepToolCallEvent extends ToolCallEventBase { + toolName: "grep"; + input: GrepToolInput; +} + +export interface FindToolCallEvent extends ToolCallEventBase { + toolName: "find"; + input: FindToolInput; +} + +export interface LsToolCallEvent extends ToolCallEventBase { + toolName: "ls"; + input: LsToolInput; +} + +export interface CustomToolCallEvent extends ToolCallEventBase { + toolName: string; + input: Record; +} + +/** Fired before a tool executes. Can block. */ +export type ToolCallEvent = + | BashToolCallEvent + | ReadToolCallEvent + | EditToolCallEvent + | WriteToolCallEvent + | GrepToolCallEvent + | FindToolCallEvent + | LsToolCallEvent + | CustomToolCallEvent; + +interface ToolResultEventBase { + type: "tool_result"; + toolCallId: string; + input: Record; + content: (TextContent | ImageContent)[]; + isError: boolean; +} + +export interface BashToolResultEvent extends ToolResultEventBase { + toolName: "bash"; + details: BashToolDetails | undefined; +} + +export interface ReadToolResultEvent extends ToolResultEventBase { + toolName: "read"; + details: ReadToolDetails | undefined; +} + +export interface EditToolResultEvent extends ToolResultEventBase { + toolName: "edit"; + details: EditToolDetails | undefined; +} + +export interface WriteToolResultEvent extends ToolResultEventBase { + toolName: "write"; + details: undefined; +} + +export interface GrepToolResultEvent extends ToolResultEventBase { + toolName: "grep"; + details: GrepToolDetails | undefined; +} + +export interface FindToolResultEvent extends ToolResultEventBase { + toolName: "find"; + details: FindToolDetails | undefined; +} + +export interface LsToolResultEvent extends ToolResultEventBase { + toolName: "ls"; + details: LsToolDetails | undefined; +} + +export 
interface CustomToolResultEvent extends ToolResultEventBase { + toolName: string; + details: unknown; +} + +/** Fired after a tool executes. Can modify result. */ +export type ToolResultEvent = + | BashToolResultEvent + | ReadToolResultEvent + | EditToolResultEvent + | WriteToolResultEvent + | GrepToolResultEvent + | FindToolResultEvent + | LsToolResultEvent + | CustomToolResultEvent; + +// Type guards for ToolResultEvent +export function isBashToolResult(e: ToolResultEvent): e is BashToolResultEvent { + return e.toolName === "bash"; +} +export function isReadToolResult(e: ToolResultEvent): e is ReadToolResultEvent { + return e.toolName === "read"; +} +export function isEditToolResult(e: ToolResultEvent): e is EditToolResultEvent { + return e.toolName === "edit"; +} +export function isWriteToolResult(e: ToolResultEvent): e is WriteToolResultEvent { + return e.toolName === "write"; +} +export function isGrepToolResult(e: ToolResultEvent): e is GrepToolResultEvent { + return e.toolName === "grep"; +} +export function isFindToolResult(e: ToolResultEvent): e is FindToolResultEvent { + return e.toolName === "find"; +} +export function isLsToolResult(e: ToolResultEvent): e is LsToolResultEvent { + return e.toolName === "ls"; +} + +/** + * Type guard for narrowing ToolCallEvent by tool name. + * + * Built-in tools narrow automatically (no type params needed): + * ```ts + * if (isToolCallEventType("bash", event)) { + * event.input.command; // string + * } + * ``` + * + * Custom tools require explicit type parameters: + * ```ts + * if (isToolCallEventType<"my_tool", MyToolInput>("my_tool", event)) { + * event.input.action; // typed + * } + * ``` + * + * Note: Direct narrowing via `event.toolName === "bash"` doesn't work because + * CustomToolCallEvent.toolName is `string` which overlaps with all literals. 
+ */ +export function isToolCallEventType(toolName: "bash", event: ToolCallEvent): event is BashToolCallEvent; +export function isToolCallEventType(toolName: "read", event: ToolCallEvent): event is ReadToolCallEvent; +export function isToolCallEventType(toolName: "edit", event: ToolCallEvent): event is EditToolCallEvent; +export function isToolCallEventType(toolName: "write", event: ToolCallEvent): event is WriteToolCallEvent; +export function isToolCallEventType(toolName: "grep", event: ToolCallEvent): event is GrepToolCallEvent; +export function isToolCallEventType(toolName: "find", event: ToolCallEvent): event is FindToolCallEvent; +export function isToolCallEventType(toolName: "ls", event: ToolCallEvent): event is LsToolCallEvent; +export function isToolCallEventType>( + toolName: TName, + event: ToolCallEvent, +): event is ToolCallEvent & { toolName: TName; input: TInput }; +export function isToolCallEventType(toolName: string, event: ToolCallEvent): boolean { + return event.toolName === toolName; +} + +/** Union of all event types */ +export type ExtensionEvent = + | ResourcesDiscoverEvent + | SessionEvent + | ContextEvent + | BeforeProviderRequestEvent + | BeforeAgentStartEvent + | AgentStartEvent + | AgentEndEvent + | TurnStartEvent + | TurnEndEvent + | MessageStartEvent + | MessageUpdateEvent + | MessageEndEvent + | ToolExecutionStartEvent + | ToolExecutionUpdateEvent + | ToolExecutionEndEvent + | ModelSelectEvent + | UserBashEvent + | InputEvent + | ToolCallEvent + | ToolResultEvent; + +// ============================================================================ +// Event Results +// ============================================================================ + +export interface ContextEventResult { + messages?: AgentMessage[]; +} + +export type BeforeProviderRequestEventResult = unknown; + +export interface ToolCallEventResult { + block?: boolean; + reason?: string; +} + +/** Result from user_bash event handler */ +export interface UserBashEventResult 
{ + /** Custom operations to use for execution */ + operations?: BashOperations; + /** Full replacement: extension handled execution, use this result */ + result?: BashResult; +} + +export interface ToolResultEventResult { + content?: (TextContent | ImageContent)[]; + details?: unknown; + isError?: boolean; +} + +export interface BeforeAgentStartEventResult { + message?: Pick; + /** Replace the system prompt for this turn. If multiple extensions return this, they are chained. */ + systemPrompt?: string; +} + +export interface SessionDirectoryResult { + /** Custom session directory path. If multiple extensions return this, the last one wins. */ + sessionDir?: string; +} + +/** Special startup-only handler. Unlike other events, this receives no ExtensionContext. */ +export type SessionDirectoryHandler = ( + event: SessionDirectoryEvent, +) => Promise | SessionDirectoryResult | undefined; + +export interface SessionBeforeSwitchResult { + cancel?: boolean; +} + +export interface SessionBeforeForkResult { + cancel?: boolean; + skipConversationRestore?: boolean; +} + +export interface SessionBeforeCompactResult { + cancel?: boolean; + compaction?: CompactionResult; +} + +export interface SessionBeforeTreeResult { + cancel?: boolean; + summary?: { + summary: string; + details?: unknown; + }; + /** Override custom instructions for summarization */ + customInstructions?: string; + /** Override whether customInstructions replaces the default prompt */ + replaceInstructions?: boolean; + /** Override label to attach to the branch summary entry */ + label?: string; +} + +// ============================================================================ +// Message Rendering +// ============================================================================ + +export interface MessageRenderOptions { + expanded: boolean; +} + +export type MessageRenderer = ( + message: CustomMessage, + options: MessageRenderOptions, + theme: Theme, +) => Component | undefined; + +// 
============================================================================ +// Command Registration +// ============================================================================ + +export interface RegisteredCommand { + name: string; + description?: string; + getArgumentCompletions?: (argumentPrefix: string) => AutocompleteItem[] | null; + handler: (args: string, ctx: ExtensionCommandContext) => Promise; +} + +// ============================================================================ +// Extension API +// ============================================================================ + +/** Handler function type for events */ +// biome-ignore lint/suspicious/noConfusingVoidType: void allows bare return statements +export type ExtensionHandler = (event: E, ctx: ExtensionContext) => Promise | R | void; + +/** + * ExtensionAPI passed to extension factory functions. + */ +export interface ExtensionAPI { + // ========================================================================= + // Event Subscription + // ========================================================================= + + on(event: "resources_discover", handler: ExtensionHandler): void; + on(event: "session_directory", handler: SessionDirectoryHandler): void; + on(event: "session_start", handler: ExtensionHandler): void; + on( + event: "session_before_switch", + handler: ExtensionHandler, + ): void; + on(event: "session_switch", handler: ExtensionHandler): void; + on(event: "session_before_fork", handler: ExtensionHandler): void; + on(event: "session_fork", handler: ExtensionHandler): void; + on( + event: "session_before_compact", + handler: ExtensionHandler, + ): void; + on(event: "session_compact", handler: ExtensionHandler): void; + on(event: "session_shutdown", handler: ExtensionHandler): void; + on(event: "session_before_tree", handler: ExtensionHandler): void; + on(event: "session_tree", handler: ExtensionHandler): void; + on(event: "context", handler: ExtensionHandler): void; + on( + event: 
"before_provider_request", + handler: ExtensionHandler, + ): void; + on(event: "before_agent_start", handler: ExtensionHandler): void; + on(event: "agent_start", handler: ExtensionHandler): void; + on(event: "agent_end", handler: ExtensionHandler): void; + on(event: "turn_start", handler: ExtensionHandler): void; + on(event: "turn_end", handler: ExtensionHandler): void; + on(event: "message_start", handler: ExtensionHandler): void; + on(event: "message_update", handler: ExtensionHandler): void; + on(event: "message_end", handler: ExtensionHandler): void; + on(event: "tool_execution_start", handler: ExtensionHandler): void; + on(event: "tool_execution_update", handler: ExtensionHandler): void; + on(event: "tool_execution_end", handler: ExtensionHandler): void; + on(event: "model_select", handler: ExtensionHandler): void; + on(event: "tool_call", handler: ExtensionHandler): void; + on(event: "tool_result", handler: ExtensionHandler): void; + on(event: "user_bash", handler: ExtensionHandler): void; + on(event: "input", handler: ExtensionHandler): void; + + // ========================================================================= + // Tool Registration + // ========================================================================= + + /** Register a tool that the LLM can call. */ + registerTool(tool: ToolDefinition): void; + + // ========================================================================= + // Command, Shortcut, Flag Registration + // ========================================================================= + + /** Register a custom command. */ + registerCommand(name: string, options: Omit): void; + + /** Register a keyboard shortcut. */ + registerShortcut( + shortcut: KeyId, + options: { + description?: string; + handler: (ctx: ExtensionContext) => Promise | void; + }, + ): void; + + /** Register a CLI flag. 
*/ + registerFlag( + name: string, + options: { + description?: string; + type: "boolean" | "string"; + default?: boolean | string; + }, + ): void; + + /** Get the value of a registered CLI flag. */ + getFlag(name: string): boolean | string | undefined; + + // ========================================================================= + // Message Rendering + // ========================================================================= + + /** Register a custom renderer for CustomMessageEntry. */ + registerMessageRenderer(customType: string, renderer: MessageRenderer): void; + + // ========================================================================= + // Actions + // ========================================================================= + + /** Send a custom message to the session. */ + sendMessage( + message: Pick, "customType" | "content" | "display" | "details">, + options?: { triggerTurn?: boolean; deliverAs?: "steer" | "followUp" | "nextTurn" }, + ): void; + + /** + * Send a user message to the agent. Always triggers a turn. + * When the agent is streaming, use deliverAs to specify how to queue the message. + */ + sendUserMessage( + content: string | (TextContent | ImageContent)[], + options?: { deliverAs?: "steer" | "followUp" }, + ): void; + + /** Append a custom entry to the session for state persistence (not sent to LLM). */ + appendEntry(customType: string, data?: T): void; + + // ========================================================================= + // Session Metadata + // ========================================================================= + + /** Set the session display name (shown in session selector). */ + setSessionName(name: string): void; + + /** Get the current session name, if set. */ + getSessionName(): string | undefined; + + /** Set or clear a label on an entry. Labels are user-defined markers for bookmarking/navigation. */ + setLabel(entryId: string, label: string | undefined): void; + + /** Execute a shell command. 
*/ + exec(command: string, args: string[], options?: ExecOptions): Promise; + + /** Get the list of currently active tool names. */ + getActiveTools(): string[]; + + /** Get all configured tools with name and description. */ + getAllTools(): ToolInfo[]; + + /** Set the active tools by name. */ + setActiveTools(toolNames: string[]): void; + + /** Get available slash commands in the current session. */ + getCommands(): SlashCommandInfo[]; + + // ========================================================================= + // Model and Thinking Level + // ========================================================================= + + /** Set the current model. Returns false if no API key available. */ + setModel(model: Model, options?: { persist?: boolean }): Promise; + + /** Get current thinking level. */ + getThinkingLevel(): ThinkingLevel; + + /** Set thinking level (clamped to model capabilities). */ + setThinkingLevel(level: ThinkingLevel): void; + + // ========================================================================= + // Provider Registration + // ========================================================================= + + /** + * Register or override a model provider. + * + * If `models` is provided: replaces all existing models for this provider. + * If only `baseUrl` is provided: overrides the URL for existing models. + * If `oauth` is provided: registers OAuth provider for /login support. + * If `streamSimple` is provided: registers a custom API stream handler. + * + * During initial extension load this call is queued and applied once the + * runner has bound its context. After that it takes effect immediately, so + * it is safe to call from command handlers or event callbacks without + * requiring a `/reload`. 
+ * + * @example + * // Register a new provider with custom models + * pi.registerProvider("my-proxy", { + * baseUrl: "https://proxy.example.com", + * apiKey: "PROXY_API_KEY", + * api: "anthropic-messages", + * models: [ + * { + * id: "claude-sonnet-4-20250514", + * name: "Claude 4 Sonnet (proxy)", + * reasoning: false, + * input: ["text", "image"], + * cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }, + * contextWindow: 200000, + * maxTokens: 16384 + * } + * ] + * }); + * + * @example + * // Override baseUrl for an existing provider + * pi.registerProvider("anthropic", { + * baseUrl: "https://proxy.example.com" + * }); + * + * @example + * // Register provider with OAuth support + * pi.registerProvider("corporate-ai", { + * baseUrl: "https://ai.corp.com", + * api: "openai-responses", + * models: [...], + * oauth: { + * name: "Corporate AI (SSO)", + * async login(callbacks) { ... }, + * async refreshToken(credentials) { ... }, + * getApiKey(credentials) { return credentials.access; } + * } + * }); + */ + registerProvider(name: string, config: ProviderConfig): void; + + /** + * Unregister a previously registered provider. + * + * Removes all models belonging to the named provider and restores any + * built-in models that were overridden by it. Has no effect if the provider + * is not currently registered. + * + * Like `registerProvider`, this takes effect immediately when called after + * the initial load phase. + * + * @example + * pi.unregisterProvider("my-proxy"); + */ + unregisterProvider(name: string): void; + + /** Shared event bus for extension communication. */ + events: EventBus; +} + +// ============================================================================ +// Provider Registration Types +// ============================================================================ + +/** Configuration for registering a provider via pi.registerProvider(). */ +export interface ProviderConfig { + /** Base URL for the API endpoint. 
Required when defining models. */ + baseUrl?: string; + /** API key or environment variable name. Required when defining models (unless oauth provided). */ + apiKey?: string; + /** API type. Required at provider or model level when defining models. */ + api?: Api; + /** Optional streamSimple handler for custom APIs. */ + streamSimple?: (model: Model, context: Context, options?: SimpleStreamOptions) => AssistantMessageEventStream; + /** Custom headers to include in requests. */ + headers?: Record; + /** If true, adds Authorization: Bearer header with the resolved API key. */ + authHeader?: boolean; + /** Models to register. If provided, replaces all existing models for this provider. */ + models?: ProviderModelConfig[]; + /** OAuth provider for /login support. The `id` is set automatically from the provider name. */ + oauth?: { + /** Display name for the provider in login UI. */ + name: string; + /** Run the login flow, return credentials to persist. */ + login(callbacks: OAuthLoginCallbacks): Promise; + /** Refresh expired credentials, return updated credentials to persist. */ + refreshToken(credentials: OAuthCredentials): Promise; + /** Convert credentials to API key string for the provider. */ + getApiKey(credentials: OAuthCredentials): string; + /** Optional: modify models for this provider (e.g., update baseUrl based on credentials). */ + modifyModels?(models: Model[], credentials: OAuthCredentials): Model[]; + }; +} + +/** Configuration for a model within a provider. */ +export interface ProviderModelConfig { + /** Model ID (e.g., "claude-sonnet-4-20250514"). */ + id: string; + /** Display name (e.g., "Claude 4 Sonnet"). */ + name: string; + /** API type override for this model. */ + api?: Api; + /** Whether the model supports extended thinking. */ + reasoning: boolean; + /** Supported input types. */ + input: ("text" | "image")[]; + /** Cost per token (for tracking, can be 0). 
*/ + cost: { input: number; output: number; cacheRead: number; cacheWrite: number }; + /** Maximum context window size in tokens. */ + contextWindow: number; + /** Maximum output tokens. */ + maxTokens: number; + /** Custom headers for this model. */ + headers?: Record; + /** OpenAI compatibility settings. */ + compat?: Model["compat"]; +} + +/** Extension factory function type. Supports both sync and async initialization. */ +export type ExtensionFactory = (pi: ExtensionAPI) => void | Promise; + +// ============================================================================ +// Loaded Extension Types +// ============================================================================ + +export interface RegisteredTool { + definition: ToolDefinition; + extensionPath: string; +} + +export interface ExtensionFlag { + name: string; + description?: string; + type: "boolean" | "string"; + default?: boolean | string; + extensionPath: string; +} + +export interface ExtensionShortcut { + shortcut: KeyId; + description?: string; + handler: (ctx: ExtensionContext) => Promise | void; + extensionPath: string; +} + +type HandlerFn = (...args: unknown[]) => Promise; + +export type SendMessageHandler = ( + message: Pick, "customType" | "content" | "display" | "details">, + options?: { triggerTurn?: boolean; deliverAs?: "steer" | "followUp" | "nextTurn" }, +) => void; + +export type SendUserMessageHandler = ( + content: string | (TextContent | ImageContent)[], + options?: { deliverAs?: "steer" | "followUp" }, +) => void; + +export type AppendEntryHandler = (customType: string, data?: T) => void; + +export type SetSessionNameHandler = (name: string) => void; + +export type GetSessionNameHandler = () => string | undefined; + +export type GetActiveToolsHandler = () => string[]; + +/** Tool info with name, description, and parameter schema */ +export type ToolInfo = Pick; + +export type GetAllToolsHandler = () => ToolInfo[]; + +export type GetCommandsHandler = () => SlashCommandInfo[]; + 
+export type SetActiveToolsHandler = (toolNames: string[]) => void; + +export type RefreshToolsHandler = () => void; + +export type SetModelHandler = (model: Model, options?: { persist?: boolean }) => Promise; + +export type GetThinkingLevelHandler = () => ThinkingLevel; + +export type SetThinkingLevelHandler = (level: ThinkingLevel) => void; + +export type SetLabelHandler = (entryId: string, label: string | undefined) => void; + +/** + * Shared state created by loader, used during registration and runtime. + * Contains flag values (defaults set during registration, CLI values set after). + */ +export interface ExtensionRuntimeState { + flagValues: Map; + /** Provider registrations queued during extension loading, processed when runner binds */ + pendingProviderRegistrations: Array<{ name: string; config: ProviderConfig }>; + /** + * Register or unregister a provider. + * + * Before bindCore(): queues registrations / removes from queue. + * After bindCore(): calls ModelRegistry directly for immediate effect. + */ + registerProvider: (name: string, config: ProviderConfig) => void; + unregisterProvider: (name: string) => void; +} + +/** + * Action implementations for pi.* API methods. + * Provided to runner.initialize(), copied into the shared runtime. + */ +export interface ExtensionActions { + sendMessage: SendMessageHandler; + sendUserMessage: SendUserMessageHandler; + appendEntry: AppendEntryHandler; + setSessionName: SetSessionNameHandler; + getSessionName: GetSessionNameHandler; + setLabel: SetLabelHandler; + getActiveTools: GetActiveToolsHandler; + getAllTools: GetAllToolsHandler; + setActiveTools: SetActiveToolsHandler; + refreshTools: RefreshToolsHandler; + getCommands: GetCommandsHandler; + setModel: SetModelHandler; + getThinkingLevel: GetThinkingLevelHandler; + setThinkingLevel: SetThinkingLevelHandler; +} + +/** + * Actions for ExtensionContext (ctx.* in event handlers). + * Required by all modes. 
+ */ +export interface ExtensionContextActions { + getModel: () => Model | undefined; + isIdle: () => boolean; + abort: () => void; + hasPendingMessages: () => boolean; + shutdown: () => void; + getContextUsage: () => ContextUsage | undefined; + compact: (options?: CompactOptions) => void; + getSystemPrompt: () => string; +} + +/** + * Actions for ExtensionCommandContext (ctx.* in command handlers). + * Only needed for interactive mode where extension commands are invokable. + */ +export interface ExtensionCommandContextActions { + waitForIdle: () => Promise; + newSession: (options?: { + parentSession?: string; + setup?: (sessionManager: SessionManager) => Promise; + }) => Promise<{ cancelled: boolean }>; + fork: (entryId: string) => Promise<{ cancelled: boolean }>; + navigateTree: ( + targetId: string, + options?: { summarize?: boolean; customInstructions?: string; replaceInstructions?: boolean; label?: string }, + ) => Promise<{ cancelled: boolean }>; + switchSession: (sessionPath: string) => Promise<{ cancelled: boolean }>; + reload: () => Promise; +} + +/** + * Full runtime = state + actions. + * Created by loader with throwing action stubs, completed by runner.initialize(). + */ +export interface ExtensionRuntime extends ExtensionRuntimeState, ExtensionActions {} + +/** Loaded extension with all registered items. */ +export interface Extension { + path: string; + resolvedPath: string; + handlers: Map; + tools: Map; + messageRenderers: Map; + commands: Map; + flags: Map; + shortcuts: Map; +} + +/** Result of loading extensions. 
*/ +export interface LoadExtensionsResult { + extensions: Extension[]; + errors: Array<{ path: string; error: string }>; + /** Shared runtime - actions are throwing stubs until runner.initialize() */ + runtime: ExtensionRuntime; +} + +// ============================================================================ +// Extension Error +// ============================================================================ + +export interface ExtensionError { + extensionPath: string; + event: string; + error: string; + stack?: string; +} diff --git a/packages/pi-coding-agent/src/core/extensions/wrapper.ts b/packages/pi-coding-agent/src/core/extensions/wrapper.ts new file mode 100644 index 000000000..b8d050dfc --- /dev/null +++ b/packages/pi-coding-agent/src/core/extensions/wrapper.ts @@ -0,0 +1,118 @@ +/** + * Tool wrappers for extensions. + */ + +import type { AgentTool, AgentToolUpdateCallback } from "@gsd/pi-agent-core"; +import type { ExtensionRunner } from "./runner.js"; +import type { RegisteredTool, ToolCallEventResult } from "./types.js"; + +/** + * Wrap a RegisteredTool into an AgentTool. + * Uses the runner's createContext() for consistent context across tools and event handlers. + */ +export function wrapRegisteredTool(registeredTool: RegisteredTool, runner: ExtensionRunner): AgentTool { + const { definition } = registeredTool; + return { + name: definition.name, + label: definition.label, + description: definition.description, + parameters: definition.parameters, + execute: (toolCallId, params, signal, onUpdate) => + definition.execute(toolCallId, params, signal, onUpdate, runner.createContext()), + }; +} + +/** + * Wrap all registered tools into AgentTools. + * Uses the runner's createContext() for consistent context across tools and event handlers. 
+ */ +export function wrapRegisteredTools(registeredTools: RegisteredTool[], runner: ExtensionRunner): AgentTool[] { + return registeredTools.map((rt) => wrapRegisteredTool(rt, runner)); +} + +/** + * Wrap a tool with extension callbacks for interception. + * - Emits tool_call event before execution (can block) + * - Emits tool_result event after execution (can modify result) + */ +export function wrapToolWithExtensions(tool: AgentTool, runner: ExtensionRunner): AgentTool { + return { + ...tool, + execute: async ( + toolCallId: string, + params: Record, + signal?: AbortSignal, + onUpdate?: AgentToolUpdateCallback, + ) => { + // Emit tool_call event - extensions can block execution + if (runner.hasHandlers("tool_call")) { + try { + const callResult = (await runner.emitToolCall({ + type: "tool_call", + toolName: tool.name, + toolCallId, + input: params, + })) as ToolCallEventResult | undefined; + + if (callResult?.block) { + const reason = callResult.reason || "Tool execution was blocked by an extension"; + throw new Error(reason); + } + } catch (err) { + if (err instanceof Error) { + throw err; + } + throw new Error(`Extension failed, blocking execution: ${String(err)}`); + } + } + + // Execute the actual tool + try { + const result = await tool.execute(toolCallId, params, signal, onUpdate); + + // Emit tool_result event - extensions can modify the result + if (runner.hasHandlers("tool_result")) { + const resultResult = await runner.emitToolResult({ + type: "tool_result", + toolName: tool.name, + toolCallId, + input: params, + content: result.content, + details: result.details, + isError: false, + }); + + if (resultResult) { + return { + content: resultResult.content ?? result.content, + details: (resultResult.details ?? 
result.details) as T, + }; + } + } + + return result; + } catch (err) { + // Emit tool_result event for errors + if (runner.hasHandlers("tool_result")) { + await runner.emitToolResult({ + type: "tool_result", + toolName: tool.name, + toolCallId, + input: params, + content: [{ type: "text", text: err instanceof Error ? err.message : String(err) }], + details: undefined, + isError: true, + }); + } + throw err; + } + }, + }; +} + +/** + * Wrap all tools with extension callbacks. + */ +export function wrapToolsWithExtensions(tools: AgentTool[], runner: ExtensionRunner): AgentTool[] { + return tools.map((tool) => wrapToolWithExtensions(tool, runner)); +} diff --git a/packages/pi-coding-agent/src/core/footer-data-provider.ts b/packages/pi-coding-agent/src/core/footer-data-provider.ts new file mode 100644 index 000000000..f1b4889ad --- /dev/null +++ b/packages/pi-coding-agent/src/core/footer-data-provider.ts @@ -0,0 +1,144 @@ +import { existsSync, type FSWatcher, readFileSync, statSync, watch } from "fs"; +import { dirname, join, resolve } from "path"; + +/** + * Find the git HEAD path by walking up from cwd. + * Handles both regular git repos (.git is a directory) and worktrees (.git is a file). 
+ */ +function findGitHeadPath(): string | null { + let dir = process.cwd(); + while (true) { + const gitPath = join(dir, ".git"); + if (existsSync(gitPath)) { + try { + const stat = statSync(gitPath); + if (stat.isFile()) { + const content = readFileSync(gitPath, "utf8").trim(); + if (content.startsWith("gitdir: ")) { + const gitDir = content.slice(8); + const headPath = resolve(dir, gitDir, "HEAD"); + if (existsSync(headPath)) return headPath; + } + } else if (stat.isDirectory()) { + const headPath = join(gitPath, "HEAD"); + if (existsSync(headPath)) return headPath; + } + } catch { + return null; + } + } + const parent = dirname(dir); + if (parent === dir) return null; + dir = parent; + } +} + +/** + * Provides git branch and extension statuses - data not otherwise accessible to extensions. + * Token stats, model info available via ctx.sessionManager and ctx.model. + */ +export class FooterDataProvider { + private extensionStatuses = new Map(); + private cachedBranch: string | null | undefined = undefined; + private gitWatcher: FSWatcher | null = null; + private branchChangeCallbacks = new Set<() => void>(); + private availableProviderCount = 0; + + constructor() { + this.setupGitWatcher(); + } + + /** Current git branch, null if not in repo, "detached" if detached HEAD */ + getGitBranch(): string | null { + if (this.cachedBranch !== undefined) return this.cachedBranch; + + try { + const gitHeadPath = findGitHeadPath(); + if (!gitHeadPath) { + this.cachedBranch = null; + return null; + } + const content = readFileSync(gitHeadPath, "utf8").trim(); + this.cachedBranch = content.startsWith("ref: refs/heads/") ? content.slice(16) : "detached"; + } catch { + this.cachedBranch = null; + } + return this.cachedBranch; + } + + /** Extension status texts set via ctx.ui.setStatus() */ + getExtensionStatuses(): ReadonlyMap { + return this.extensionStatuses; + } + + /** Subscribe to git branch changes. Returns unsubscribe function. 
*/ + onBranchChange(callback: () => void): () => void { + this.branchChangeCallbacks.add(callback); + return () => this.branchChangeCallbacks.delete(callback); + } + + /** Internal: set extension status */ + setExtensionStatus(key: string, text: string | undefined): void { + if (text === undefined) { + this.extensionStatuses.delete(key); + } else { + this.extensionStatuses.set(key, text); + } + } + + /** Internal: clear extension statuses */ + clearExtensionStatuses(): void { + this.extensionStatuses.clear(); + } + + /** Number of unique providers with available models (for footer display) */ + getAvailableProviderCount(): number { + return this.availableProviderCount; + } + + /** Internal: update available provider count */ + setAvailableProviderCount(count: number): void { + this.availableProviderCount = count; + } + + /** Internal: cleanup */ + dispose(): void { + if (this.gitWatcher) { + this.gitWatcher.close(); + this.gitWatcher = null; + } + this.branchChangeCallbacks.clear(); + } + + private setupGitWatcher(): void { + if (this.gitWatcher) { + this.gitWatcher.close(); + this.gitWatcher = null; + } + + const gitHeadPath = findGitHeadPath(); + if (!gitHeadPath) return; + + // Watch the directory containing HEAD, not HEAD itself. + // Git uses atomic writes (write temp, rename over HEAD), which changes the inode. + // fs.watch on a file stops working after the inode changes. 
+ const gitDir = dirname(gitHeadPath); + + try { + this.gitWatcher = watch(gitDir, (_eventType, filename) => { + if (filename === "HEAD") { + this.cachedBranch = undefined; + for (const cb of this.branchChangeCallbacks) cb(); + } + }); + } catch { + // Silently fail if we can't watch + } + } +} + +/** Read-only view for extensions - excludes setExtensionStatus, setAvailableProviderCount and dispose */ +export type ReadonlyFooterDataProvider = Pick< + FooterDataProvider, + "getGitBranch" | "getExtensionStatuses" | "getAvailableProviderCount" | "onBranchChange" +>; diff --git a/packages/pi-coding-agent/src/core/index.ts b/packages/pi-coding-agent/src/core/index.ts new file mode 100644 index 000000000..e84191d79 --- /dev/null +++ b/packages/pi-coding-agent/src/core/index.ts @@ -0,0 +1,61 @@ +/** + * Core modules shared between all run modes. + */ + +export { + AgentSession, + type AgentSessionConfig, + type AgentSessionEvent, + type AgentSessionEventListener, + type ModelCycleResult, + type PromptOptions, + type SessionStats, +} from "./agent-session.js"; +export { type BashExecutorOptions, type BashResult, executeBash, executeBashWithOperations } from "./bash-executor.js"; +export type { CompactionResult } from "./compaction/index.js"; +export { createEventBus, type EventBus, type EventBusController } from "./event-bus.js"; + +// Extensions system +export { + type AgentEndEvent, + type AgentStartEvent, + type AgentToolResult, + type AgentToolUpdateCallback, + type BeforeAgentStartEvent, + type ContextEvent, + discoverAndLoadExtensions, + type ExecOptions, + type ExecResult, + type Extension, + type ExtensionAPI, + type ExtensionCommandContext, + type ExtensionContext, + type ExtensionError, + type ExtensionEvent, + type ExtensionFactory, + type ExtensionFlag, + type ExtensionHandler, + ExtensionRunner, + type ExtensionShortcut, + type ExtensionUIContext, + type LoadExtensionsResult, + type MessageRenderer, + type RegisteredCommand, + type SessionBeforeCompactEvent, + 
type SessionBeforeForkEvent, + type SessionBeforeSwitchEvent, + type SessionBeforeTreeEvent, + type SessionCompactEvent, + type SessionForkEvent, + type SessionShutdownEvent, + type SessionStartEvent, + type SessionSwitchEvent, + type SessionTreeEvent, + type ToolCallEvent, + type ToolDefinition, + type ToolRenderResultOptions, + type ToolResultEvent, + type TurnEndEvent, + type TurnStartEvent, + wrapToolsWithExtensions, +} from "./extensions/index.js"; diff --git a/packages/pi-coding-agent/src/core/keybindings.ts b/packages/pi-coding-agent/src/core/keybindings.ts new file mode 100644 index 000000000..958560121 --- /dev/null +++ b/packages/pi-coding-agent/src/core/keybindings.ts @@ -0,0 +1,211 @@ +import { + DEFAULT_EDITOR_KEYBINDINGS, + type EditorAction, + type EditorKeybindingsConfig, + EditorKeybindingsManager, + type KeyId, + matchesKey, + setEditorKeybindings, +} from "@gsd/pi-tui"; +import { existsSync, readFileSync } from "fs"; +import { join } from "path"; +import { getAgentDir } from "../config.js"; + +/** + * Application-level actions (coding agent specific). + */ +export type AppAction = + | "interrupt" + | "clear" + | "exit" + | "suspend" + | "cycleThinkingLevel" + | "cycleModelForward" + | "cycleModelBackward" + | "selectModel" + | "expandTools" + | "toggleThinking" + | "toggleSessionNamedFilter" + | "externalEditor" + | "followUp" + | "dequeue" + | "pasteImage" + | "newSession" + | "tree" + | "fork" + | "resume"; + +/** + * All configurable actions. + */ +export type KeyAction = AppAction | EditorAction; + +/** + * Full keybindings configuration (app + editor actions). + */ +export type KeybindingsConfig = { + [K in KeyAction]?: KeyId | KeyId[]; +}; + +/** + * Default application keybindings. 
+ */ +export const DEFAULT_APP_KEYBINDINGS: Record = { + interrupt: "escape", + clear: "ctrl+c", + exit: "ctrl+d", + suspend: "ctrl+z", + cycleThinkingLevel: "shift+tab", + cycleModelForward: "ctrl+p", + cycleModelBackward: "shift+ctrl+p", + selectModel: "ctrl+l", + expandTools: "ctrl+o", + toggleThinking: "ctrl+t", + toggleSessionNamedFilter: "ctrl+n", + externalEditor: "ctrl+g", + followUp: "alt+enter", + dequeue: "alt+up", + pasteImage: process.platform === "win32" ? "alt+v" : "ctrl+v", + newSession: [], + tree: [], + fork: [], + resume: [], +}; + +/** + * All default keybindings (app + editor). + */ +export const DEFAULT_KEYBINDINGS: Required = { + ...DEFAULT_EDITOR_KEYBINDINGS, + ...DEFAULT_APP_KEYBINDINGS, +}; + +// App actions list for type checking +const APP_ACTIONS: AppAction[] = [ + "interrupt", + "clear", + "exit", + "suspend", + "cycleThinkingLevel", + "cycleModelForward", + "cycleModelBackward", + "selectModel", + "expandTools", + "toggleThinking", + "toggleSessionNamedFilter", + "externalEditor", + "followUp", + "dequeue", + "pasteImage", + "newSession", + "tree", + "fork", + "resume", +]; + +function isAppAction(action: string): action is AppAction { + return APP_ACTIONS.includes(action as AppAction); +} + +/** + * Manages all keybindings (app + editor). + */ +export class KeybindingsManager { + private config: KeybindingsConfig; + private appActionToKeys: Map; + + private constructor(config: KeybindingsConfig) { + this.config = config; + this.appActionToKeys = new Map(); + this.buildMaps(); + } + + /** + * Create from config file and set up editor keybindings. 
+ */ + static create(agentDir: string = getAgentDir()): KeybindingsManager { + const configPath = join(agentDir, "keybindings.json"); + const config = KeybindingsManager.loadFromFile(configPath); + const manager = new KeybindingsManager(config); + + // Set up editor keybindings globally + // Include both editor actions and expandTools (shared between app and editor) + const editorConfig: EditorKeybindingsConfig = {}; + for (const [action, keys] of Object.entries(config)) { + if (!isAppAction(action) || action === "expandTools") { + editorConfig[action as EditorAction] = keys; + } + } + setEditorKeybindings(new EditorKeybindingsManager(editorConfig)); + + return manager; + } + + /** + * Create in-memory. + */ + static inMemory(config: KeybindingsConfig = {}): KeybindingsManager { + return new KeybindingsManager(config); + } + + private static loadFromFile(path: string): KeybindingsConfig { + if (!existsSync(path)) return {}; + try { + return JSON.parse(readFileSync(path, "utf-8")); + } catch { + return {}; + } + } + + private buildMaps(): void { + this.appActionToKeys.clear(); + + // Set defaults for app actions + for (const [action, keys] of Object.entries(DEFAULT_APP_KEYBINDINGS)) { + const keyArray = Array.isArray(keys) ? keys : [keys]; + this.appActionToKeys.set(action as AppAction, [...keyArray]); + } + + // Override with user config (app actions only) + for (const [action, keys] of Object.entries(this.config)) { + if (keys === undefined || !isAppAction(action)) continue; + const keyArray = Array.isArray(keys) ? keys : [keys]; + this.appActionToKeys.set(action, keyArray); + } + } + + /** + * Check if input matches an app action. + */ + matches(data: string, action: AppAction): boolean { + const keys = this.appActionToKeys.get(action); + if (!keys) return false; + for (const key of keys) { + if (matchesKey(data, key)) return true; + } + return false; + } + + /** + * Get keys bound to an app action. 
+ */ + getKeys(action: AppAction): KeyId[] { + return this.appActionToKeys.get(action) ?? []; + } + + /** + * Get the full effective config. + */ + getEffectiveConfig(): Required { + const result = { ...DEFAULT_KEYBINDINGS }; + for (const [action, keys] of Object.entries(this.config)) { + if (keys !== undefined) { + (result as KeybindingsConfig)[action as KeyAction] = keys; + } + } + return result; + } +} + +// Re-export for convenience +export type { EditorAction, KeyId }; diff --git a/packages/pi-coding-agent/src/core/messages.ts b/packages/pi-coding-agent/src/core/messages.ts new file mode 100644 index 000000000..762aa5b85 --- /dev/null +++ b/packages/pi-coding-agent/src/core/messages.ts @@ -0,0 +1,195 @@ +/** + * Custom message types and transformers for the coding agent. + * + * Extends the base AgentMessage type with coding-agent specific message types, + * and provides a transformer to convert them to LLM-compatible messages. + */ + +import type { AgentMessage } from "@gsd/pi-agent-core"; +import type { ImageContent, Message, TextContent } from "@gsd/pi-ai"; + +export const COMPACTION_SUMMARY_PREFIX = `The conversation history before this point was compacted into the following summary: + + +`; + +export const COMPACTION_SUMMARY_SUFFIX = ` +`; + +export const BRANCH_SUMMARY_PREFIX = `The following is a summary of a branch that this conversation came back from: + + +`; + +export const BRANCH_SUMMARY_SUFFIX = ``; + +/** + * Message type for bash executions via the ! command. + */ +export interface BashExecutionMessage { + role: "bashExecution"; + command: string; + output: string; + exitCode: number | undefined; + cancelled: boolean; + truncated: boolean; + fullOutputPath?: string; + timestamp: number; + /** If true, this message is excluded from LLM context (!! prefix) */ + excludeFromContext?: boolean; +} + +/** + * Message type for extension-injected messages via sendMessage(). + * These are custom messages that extensions can inject into the conversation. 
+ */ +export interface CustomMessage { + role: "custom"; + customType: string; + content: string | (TextContent | ImageContent)[]; + display: boolean; + details?: T; + timestamp: number; +} + +export interface BranchSummaryMessage { + role: "branchSummary"; + summary: string; + fromId: string; + timestamp: number; +} + +export interface CompactionSummaryMessage { + role: "compactionSummary"; + summary: string; + tokensBefore: number; + timestamp: number; +} + +// Extend CustomAgentMessages via declaration merging +declare module "@gsd/pi-agent-core" { + interface CustomAgentMessages { + bashExecution: BashExecutionMessage; + custom: CustomMessage; + branchSummary: BranchSummaryMessage; + compactionSummary: CompactionSummaryMessage; + } +} + +/** + * Convert a BashExecutionMessage to user message text for LLM context. + */ +export function bashExecutionToText(msg: BashExecutionMessage): string { + let text = `Ran \`${msg.command}\`\n`; + if (msg.output) { + text += `\`\`\`\n${msg.output}\n\`\`\``; + } else { + text += "(no output)"; + } + if (msg.cancelled) { + text += "\n\n(command cancelled)"; + } else if (msg.exitCode !== null && msg.exitCode !== undefined && msg.exitCode !== 0) { + text += `\n\nCommand exited with code ${msg.exitCode}`; + } + if (msg.truncated && msg.fullOutputPath) { + text += `\n\n[Output truncated. 
Full output: ${msg.fullOutputPath}]`; + } + return text; +} + +export function createBranchSummaryMessage(summary: string, fromId: string, timestamp: string): BranchSummaryMessage { + return { + role: "branchSummary", + summary, + fromId, + timestamp: new Date(timestamp).getTime(), + }; +} + +export function createCompactionSummaryMessage( + summary: string, + tokensBefore: number, + timestamp: string, +): CompactionSummaryMessage { + return { + role: "compactionSummary", + summary: summary, + tokensBefore, + timestamp: new Date(timestamp).getTime(), + }; +} + +/** Convert CustomMessageEntry to AgentMessage format */ +export function createCustomMessage( + customType: string, + content: string | (TextContent | ImageContent)[], + display: boolean, + details: unknown | undefined, + timestamp: string, +): CustomMessage { + return { + role: "custom", + customType, + content, + display, + details, + timestamp: new Date(timestamp).getTime(), + }; +} + +/** + * Transform AgentMessages (including custom types) to LLM-compatible Messages. + * + * This is used by: + * - Agent's transormToLlm option (for prompt calls and queued messages) + * - Compaction's generateSummary (for summarization) + * - Custom extensions and tools + */ +export function convertToLlm(messages: AgentMessage[]): Message[] { + return messages + .map((m): Message | undefined => { + switch (m.role) { + case "bashExecution": + // Skip messages excluded from context (!! prefix) + if (m.excludeFromContext) { + return undefined; + } + return { + role: "user", + content: [{ type: "text", text: bashExecutionToText(m) }], + timestamp: m.timestamp, + }; + case "custom": { + const content = typeof m.content === "string" ? 
[{ type: "text" as const, text: m.content }] : m.content; + return { + role: "user", + content, + timestamp: m.timestamp, + }; + } + case "branchSummary": + return { + role: "user", + content: [{ type: "text" as const, text: BRANCH_SUMMARY_PREFIX + m.summary + BRANCH_SUMMARY_SUFFIX }], + timestamp: m.timestamp, + }; + case "compactionSummary": + return { + role: "user", + content: [ + { type: "text" as const, text: COMPACTION_SUMMARY_PREFIX + m.summary + COMPACTION_SUMMARY_SUFFIX }, + ], + timestamp: m.timestamp, + }; + case "user": + case "assistant": + case "toolResult": + return m; + default: + // biome-ignore lint/correctness/noSwitchDeclarations: fine + const _exhaustiveCheck: never = m; + return undefined; + } + }) + .filter((m) => m !== undefined); +} diff --git a/packages/pi-coding-agent/src/core/model-registry.ts b/packages/pi-coding-agent/src/core/model-registry.ts new file mode 100644 index 000000000..6cfdc3c4f --- /dev/null +++ b/packages/pi-coding-agent/src/core/model-registry.ts @@ -0,0 +1,694 @@ +/** + * Model registry - manages built-in and custom models, provides API key resolution. 
+ */ + +import { + type Api, + type AssistantMessageEventStream, + type Context, + getModels, + getProviders, + type KnownProvider, + type Model, + type OAuthProviderInterface, + type OpenAICompletionsCompat, + type OpenAIResponsesCompat, + registerApiProvider, + resetApiProviders, + type SimpleStreamOptions, +} from "@gsd/pi-ai"; +import { registerOAuthProvider, resetOAuthProviders } from "@gsd/pi-ai/oauth"; +import { type Static, Type } from "@sinclair/typebox"; +import AjvModule from "ajv"; +import { existsSync, readFileSync } from "fs"; +import { join } from "path"; +import { getAgentDir } from "../config.js"; +import type { AuthStorage } from "./auth-storage.js"; +import { clearConfigValueCache, resolveConfigValue, resolveHeaders } from "./resolve-config-value.js"; + +const Ajv = (AjvModule as any).default || AjvModule; +const ajv = new Ajv(); + +// Schema for OpenRouter routing preferences +const OpenRouterRoutingSchema = Type.Object({ + only: Type.Optional(Type.Array(Type.String())), + order: Type.Optional(Type.Array(Type.String())), +}); + +// Schema for Vercel AI Gateway routing preferences +const VercelGatewayRoutingSchema = Type.Object({ + only: Type.Optional(Type.Array(Type.String())), + order: Type.Optional(Type.Array(Type.String())), +}); + +// Schema for OpenAI compatibility settings +const OpenAICompletionsCompatSchema = Type.Object({ + supportsStore: Type.Optional(Type.Boolean()), + supportsDeveloperRole: Type.Optional(Type.Boolean()), + supportsReasoningEffort: Type.Optional(Type.Boolean()), + supportsUsageInStreaming: Type.Optional(Type.Boolean()), + maxTokensField: Type.Optional(Type.Union([Type.Literal("max_completion_tokens"), Type.Literal("max_tokens")])), + requiresToolResultName: Type.Optional(Type.Boolean()), + requiresAssistantAfterToolResult: Type.Optional(Type.Boolean()), + requiresThinkingAsText: Type.Optional(Type.Boolean()), + requiresMistralToolIds: Type.Optional(Type.Boolean()), + thinkingFormat: 
Type.Optional(Type.Union([Type.Literal("openai"), Type.Literal("zai"), Type.Literal("qwen")])), + openRouterRouting: Type.Optional(OpenRouterRoutingSchema), + vercelGatewayRouting: Type.Optional(VercelGatewayRoutingSchema), +}); + +const OpenAIResponsesCompatSchema = Type.Object({ + // Reserved for future use +}); + +const OpenAICompatSchema = Type.Union([OpenAICompletionsCompatSchema, OpenAIResponsesCompatSchema]); + +// Schema for custom model definition +// Most fields are optional with sensible defaults for local models (Ollama, LM Studio, etc.) +const ModelDefinitionSchema = Type.Object({ + id: Type.String({ minLength: 1 }), + name: Type.Optional(Type.String({ minLength: 1 })), + api: Type.Optional(Type.String({ minLength: 1 })), + baseUrl: Type.Optional(Type.String({ minLength: 1 })), + reasoning: Type.Optional(Type.Boolean()), + input: Type.Optional(Type.Array(Type.Union([Type.Literal("text"), Type.Literal("image")]))), + cost: Type.Optional( + Type.Object({ + input: Type.Number(), + output: Type.Number(), + cacheRead: Type.Number(), + cacheWrite: Type.Number(), + }), + ), + contextWindow: Type.Optional(Type.Number()), + maxTokens: Type.Optional(Type.Number()), + headers: Type.Optional(Type.Record(Type.String(), Type.String())), + compat: Type.Optional(OpenAICompatSchema), +}); + +// Schema for per-model overrides (all fields optional, merged with built-in model) +const ModelOverrideSchema = Type.Object({ + name: Type.Optional(Type.String({ minLength: 1 })), + reasoning: Type.Optional(Type.Boolean()), + input: Type.Optional(Type.Array(Type.Union([Type.Literal("text"), Type.Literal("image")]))), + cost: Type.Optional( + Type.Object({ + input: Type.Optional(Type.Number()), + output: Type.Optional(Type.Number()), + cacheRead: Type.Optional(Type.Number()), + cacheWrite: Type.Optional(Type.Number()), + }), + ), + contextWindow: Type.Optional(Type.Number()), + maxTokens: Type.Optional(Type.Number()), + headers: Type.Optional(Type.Record(Type.String(), 
Type.String())), + compat: Type.Optional(OpenAICompatSchema), +}); + +type ModelOverride = Static; + +const ProviderConfigSchema = Type.Object({ + baseUrl: Type.Optional(Type.String({ minLength: 1 })), + apiKey: Type.Optional(Type.String({ minLength: 1 })), + api: Type.Optional(Type.String({ minLength: 1 })), + headers: Type.Optional(Type.Record(Type.String(), Type.String())), + authHeader: Type.Optional(Type.Boolean()), + models: Type.Optional(Type.Array(ModelDefinitionSchema)), + modelOverrides: Type.Optional(Type.Record(Type.String(), ModelOverrideSchema)), +}); + +const ModelsConfigSchema = Type.Object({ + providers: Type.Record(Type.String(), ProviderConfigSchema), +}); + +ajv.addSchema(ModelsConfigSchema, "ModelsConfig"); + +type ModelsConfig = Static; + +/** Provider override config (baseUrl, headers, apiKey) without custom models */ +interface ProviderOverride { + baseUrl?: string; + headers?: Record; + apiKey?: string; +} + +/** Result of loading custom models from models.json */ +interface CustomModelsResult { + models: Model[]; + /** Providers with baseUrl/headers/apiKey overrides for built-in models */ + overrides: Map; + /** Per-model overrides: provider -> modelId -> override */ + modelOverrides: Map>; + error: string | undefined; +} + +function emptyCustomModelsResult(error?: string): CustomModelsResult { + return { models: [], overrides: new Map(), modelOverrides: new Map(), error }; +} + +function mergeCompat( + baseCompat: Model["compat"], + overrideCompat: ModelOverride["compat"], +): Model["compat"] | undefined { + if (!overrideCompat) return baseCompat; + + const base = baseCompat as OpenAICompletionsCompat | OpenAIResponsesCompat | undefined; + const override = overrideCompat as OpenAICompletionsCompat | OpenAIResponsesCompat; + const merged = { ...base, ...override } as OpenAICompletionsCompat | OpenAIResponsesCompat; + + const baseCompletions = base as OpenAICompletionsCompat | undefined; + const overrideCompletions = override as 
OpenAICompletionsCompat; + const mergedCompletions = merged as OpenAICompletionsCompat; + + if (baseCompletions?.openRouterRouting || overrideCompletions.openRouterRouting) { + mergedCompletions.openRouterRouting = { + ...baseCompletions?.openRouterRouting, + ...overrideCompletions.openRouterRouting, + }; + } + + if (baseCompletions?.vercelGatewayRouting || overrideCompletions.vercelGatewayRouting) { + mergedCompletions.vercelGatewayRouting = { + ...baseCompletions?.vercelGatewayRouting, + ...overrideCompletions.vercelGatewayRouting, + }; + } + + return merged as Model["compat"]; +} + +/** + * Deep merge a model override into a model. + * Handles nested objects (cost, compat) by merging rather than replacing. + */ +function applyModelOverride(model: Model, override: ModelOverride): Model { + const result = { ...model }; + + // Simple field overrides + if (override.name !== undefined) result.name = override.name; + if (override.reasoning !== undefined) result.reasoning = override.reasoning; + if (override.input !== undefined) result.input = override.input as ("text" | "image")[]; + if (override.contextWindow !== undefined) result.contextWindow = override.contextWindow; + if (override.maxTokens !== undefined) result.maxTokens = override.maxTokens; + + // Merge cost (partial override) + if (override.cost) { + result.cost = { + input: override.cost.input ?? model.cost.input, + output: override.cost.output ?? model.cost.output, + cacheRead: override.cost.cacheRead ?? model.cost.cacheRead, + cacheWrite: override.cost.cacheWrite ?? model.cost.cacheWrite, + }; + } + + // Merge headers + if (override.headers) { + const resolvedHeaders = resolveHeaders(override.headers); + result.headers = resolvedHeaders ? { ...model.headers, ...resolvedHeaders } : model.headers; + } + + // Deep merge compat + result.compat = mergeCompat(model.compat, override.compat); + + return result; +} + +/** Clear the config value command cache. Exported for testing. 
*/ +export const clearApiKeyCache = clearConfigValueCache; + +/** + * Model registry - loads and manages models, resolves API keys via AuthStorage. + */ +export class ModelRegistry { + private models: Model[] = []; + private customProviderApiKeys: Map = new Map(); + private registeredProviders: Map = new Map(); + private loadError: string | undefined = undefined; + + constructor( + readonly authStorage: AuthStorage, + private modelsJsonPath: string | undefined = join(getAgentDir(), "models.json"), + ) { + // Set up fallback resolver for custom provider API keys + this.authStorage.setFallbackResolver((provider) => { + const keyConfig = this.customProviderApiKeys.get(provider); + if (keyConfig) { + return resolveConfigValue(keyConfig); + } + return undefined; + }); + + // Load models + this.loadModels(); + } + + /** + * Reload models from disk (built-in + custom from models.json). + */ + refresh(): void { + this.customProviderApiKeys.clear(); + this.loadError = undefined; + + // Ensure dynamic API/OAuth registrations are rebuilt from current provider state. + resetApiProviders(); + resetOAuthProviders(); + + this.loadModels(); + + for (const [providerName, config] of this.registeredProviders.entries()) { + this.applyProviderConfig(providerName, config); + } + } + + /** + * Get any error from loading models.json (undefined if no error). + */ + getError(): string | undefined { + return this.loadError; + } + + private loadModels(): void { + // Load custom models and overrides from models.json + const { + models: customModels, + overrides, + modelOverrides, + error, + } = this.modelsJsonPath ? 
this.loadCustomModels(this.modelsJsonPath) : emptyCustomModelsResult(); + + if (error) { + this.loadError = error; + // Keep built-in models even if custom models failed to load + } + + const builtInModels = this.loadBuiltInModels(overrides, modelOverrides); + let combined = this.mergeCustomModels(builtInModels, customModels); + + // Let OAuth providers modify their models (e.g., update baseUrl) + for (const oauthProvider of this.authStorage.getOAuthProviders()) { + const cred = this.authStorage.get(oauthProvider.id); + if (cred?.type === "oauth" && oauthProvider.modifyModels) { + combined = oauthProvider.modifyModels(combined, cred); + } + } + + this.models = combined; + } + + /** Load built-in models and apply provider/model overrides */ + private loadBuiltInModels( + overrides: Map, + modelOverrides: Map>, + ): Model[] { + return getProviders().flatMap((provider) => { + const models = getModels(provider as KnownProvider) as Model[]; + const providerOverride = overrides.get(provider); + const perModelOverrides = modelOverrides.get(provider); + + return models.map((m) => { + let model = m; + + // Apply provider-level baseUrl/headers override + if (providerOverride) { + const resolvedHeaders = resolveHeaders(providerOverride.headers); + model = { + ...model, + baseUrl: providerOverride.baseUrl ?? model.baseUrl, + headers: resolvedHeaders ? { ...model.headers, ...resolvedHeaders } : model.headers, + }; + } + + // Apply per-model override + const modelOverride = perModelOverrides?.get(m.id); + if (modelOverride) { + model = applyModelOverride(model, modelOverride); + } + + return model; + }); + }); + } + + /** Merge custom models into built-in list by provider+id (custom wins on conflicts). 
*/ + private mergeCustomModels(builtInModels: Model[], customModels: Model[]): Model[] { + const merged = [...builtInModels]; + for (const customModel of customModels) { + const existingIndex = merged.findIndex((m) => m.provider === customModel.provider && m.id === customModel.id); + if (existingIndex >= 0) { + merged[existingIndex] = customModel; + } else { + merged.push(customModel); + } + } + return merged; + } + + private loadCustomModels(modelsJsonPath: string): CustomModelsResult { + if (!existsSync(modelsJsonPath)) { + return emptyCustomModelsResult(); + } + + try { + const content = readFileSync(modelsJsonPath, "utf-8"); + const config: ModelsConfig = JSON.parse(content); + + // Validate schema + const validate = ajv.getSchema("ModelsConfig")!; + if (!validate(config)) { + const errors = + validate.errors?.map((e: any) => ` - ${e.instancePath || "root"}: ${e.message}`).join("\n") || + "Unknown schema error"; + return emptyCustomModelsResult(`Invalid models.json schema:\n${errors}\n\nFile: ${modelsJsonPath}`); + } + + // Additional validation + this.validateConfig(config); + + const overrides = new Map(); + const modelOverrides = new Map>(); + + for (const [providerName, providerConfig] of Object.entries(config.providers)) { + // Apply provider-level baseUrl/headers/apiKey override to built-in models when configured. + if (providerConfig.baseUrl || providerConfig.headers || providerConfig.apiKey) { + overrides.set(providerName, { + baseUrl: providerConfig.baseUrl, + headers: providerConfig.headers, + apiKey: providerConfig.apiKey, + }); + } + + // Store API key for fallback resolver. 
+ if (providerConfig.apiKey) { + this.customProviderApiKeys.set(providerName, providerConfig.apiKey); + } + + if (providerConfig.modelOverrides) { + modelOverrides.set(providerName, new Map(Object.entries(providerConfig.modelOverrides))); + } + } + + return { models: this.parseModels(config), overrides, modelOverrides, error: undefined }; + } catch (error) { + if (error instanceof SyntaxError) { + return emptyCustomModelsResult(`Failed to parse models.json: ${error.message}\n\nFile: ${modelsJsonPath}`); + } + return emptyCustomModelsResult( + `Failed to load models.json: ${error instanceof Error ? error.message : error}\n\nFile: ${modelsJsonPath}`, + ); + } + } + + private validateConfig(config: ModelsConfig): void { + for (const [providerName, providerConfig] of Object.entries(config.providers)) { + const hasProviderApi = !!providerConfig.api; + const models = providerConfig.models ?? []; + const hasModelOverrides = + providerConfig.modelOverrides && Object.keys(providerConfig.modelOverrides).length > 0; + + if (models.length === 0) { + // Override-only config: needs baseUrl OR modelOverrides (or both) + if (!providerConfig.baseUrl && !hasModelOverrides) { + throw new Error(`Provider ${providerName}: must specify "baseUrl", "modelOverrides", or "models".`); + } + } else { + // Custom models are merged into provider models and require endpoint + auth. + if (!providerConfig.baseUrl) { + throw new Error(`Provider ${providerName}: "baseUrl" is required when defining custom models.`); + } + if (!providerConfig.apiKey) { + throw new Error(`Provider ${providerName}: "apiKey" is required when defining custom models.`); + } + } + + for (const modelDef of models) { + const hasModelApi = !!modelDef.api; + + if (!hasProviderApi && !hasModelApi) { + throw new Error( + `Provider ${providerName}, model ${modelDef.id}: no "api" specified. 
Set at provider or model level.`, + ); + } + + if (!modelDef.id) throw new Error(`Provider ${providerName}: model missing "id"`); + // Validate contextWindow/maxTokens only if provided (they have defaults) + if (modelDef.contextWindow !== undefined && modelDef.contextWindow <= 0) + throw new Error(`Provider ${providerName}, model ${modelDef.id}: invalid contextWindow`); + if (modelDef.maxTokens !== undefined && modelDef.maxTokens <= 0) + throw new Error(`Provider ${providerName}, model ${modelDef.id}: invalid maxTokens`); + } + } + } + + private parseModels(config: ModelsConfig): Model[] { + const models: Model[] = []; + + for (const [providerName, providerConfig] of Object.entries(config.providers)) { + const modelDefs = providerConfig.models ?? []; + if (modelDefs.length === 0) continue; // Override-only, no custom models + + // Store API key config for fallback resolver + if (providerConfig.apiKey) { + this.customProviderApiKeys.set(providerName, providerConfig.apiKey); + } + + for (const modelDef of modelDefs) { + const api = modelDef.api || providerConfig.api; + if (!api) continue; + + // Merge headers: provider headers are base, model headers override + // Resolve env vars and shell commands in header values + const providerHeaders = resolveHeaders(providerConfig.headers); + const modelHeaders = resolveHeaders(modelDef.headers); + let headers = providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined; + + // If authHeader is true, add Authorization header with resolved API key + if (providerConfig.authHeader && providerConfig.apiKey) { + const resolvedKey = resolveConfigValue(providerConfig.apiKey); + if (resolvedKey) { + headers = { ...headers, Authorization: `Bearer ${resolvedKey}` }; + } + } + + // Provider baseUrl is required when custom models are defined. + // Individual models can override it with modelDef.baseUrl. 
+ const defaultCost = { input: 0, output: 0, cacheRead: 0, cacheWrite: 0 }; + models.push({ + id: modelDef.id, + name: modelDef.name ?? modelDef.id, + api: api as Api, + provider: providerName, + baseUrl: modelDef.baseUrl ?? providerConfig.baseUrl!, + reasoning: modelDef.reasoning ?? false, + input: (modelDef.input ?? ["text"]) as ("text" | "image")[], + cost: modelDef.cost ?? defaultCost, + contextWindow: modelDef.contextWindow ?? 128000, + maxTokens: modelDef.maxTokens ?? 16384, + headers, + compat: modelDef.compat, + } as Model); + } + } + + return models; + } + + /** + * Get all models (built-in + custom). + * If models.json had errors, returns only built-in models. + */ + getAll(): Model[] { + return this.models; + } + + /** + * Get only models that have auth configured. + * This is a fast check that doesn't refresh OAuth tokens. + */ + getAvailable(): Model[] { + return this.models.filter((m) => this.authStorage.hasAuth(m.provider)); + } + + /** + * Find a model by provider and ID. + */ + find(provider: string, modelId: string): Model | undefined { + return this.models.find((m) => m.provider === provider && m.id === modelId); + } + + /** + * Get API key for a model. + */ + async getApiKey(model: Model): Promise { + return this.authStorage.getApiKey(model.provider); + } + + /** + * Get API key for a provider. + */ + async getApiKeyForProvider(provider: string): Promise { + return this.authStorage.getApiKey(provider); + } + + /** + * Check if a model is using OAuth credentials (subscription). + */ + isUsingOAuth(model: Model): boolean { + const cred = this.authStorage.get(model.provider); + return cred?.type === "oauth"; + } + + /** + * Register a provider dynamically (from extensions). + * + * If provider has models: replaces all existing models for this provider. + * If provider has only baseUrl/headers: overrides existing models' URLs. + * If provider has oauth: registers OAuth provider for /login support. 
+ */ + registerProvider(providerName: string, config: ProviderConfigInput): void { + this.registeredProviders.set(providerName, config); + this.applyProviderConfig(providerName, config); + } + + /** + * Unregister a previously registered provider. + * + * Removes the provider from the registry and reloads models from disk so that + * built-in models overridden by this provider are restored to their original state. + * Also resets dynamic OAuth and API stream registrations before reapplying + * remaining dynamic providers. + * Has no effect if the provider was never registered. + */ + unregisterProvider(providerName: string): void { + if (!this.registeredProviders.has(providerName)) return; + this.registeredProviders.delete(providerName); + this.customProviderApiKeys.delete(providerName); + this.refresh(); + } + + private applyProviderConfig(providerName: string, config: ProviderConfigInput): void { + // Register OAuth provider if provided + if (config.oauth) { + // Ensure the OAuth provider ID matches the provider name + const oauthProvider: OAuthProviderInterface = { + ...config.oauth, + id: providerName, + }; + registerOAuthProvider(oauthProvider); + } + + if (config.streamSimple) { + if (!config.api) { + throw new Error(`Provider ${providerName}: "api" is required when registering streamSimple.`); + } + const streamSimple = config.streamSimple; + registerApiProvider( + { + api: config.api, + stream: (model, context, options) => streamSimple(model, context, options as SimpleStreamOptions), + streamSimple, + }, + `provider:${providerName}`, + ); + } + + // Store API key for auth resolution + if (config.apiKey) { + this.customProviderApiKeys.set(providerName, config.apiKey); + } + + if (config.models && config.models.length > 0) { + // Full replacement: remove existing models for this provider + this.models = this.models.filter((m) => m.provider !== providerName); + + // Validate required fields + if (!config.baseUrl) { + throw new Error(`Provider ${providerName}: 
"baseUrl" is required when defining models.`); + } + if (!config.apiKey && !config.oauth) { + throw new Error(`Provider ${providerName}: "apiKey" or "oauth" is required when defining models.`); + } + + // Parse and add new models + for (const modelDef of config.models) { + const api = modelDef.api || config.api; + if (!api) { + throw new Error(`Provider ${providerName}, model ${modelDef.id}: no "api" specified.`); + } + + // Merge headers + const providerHeaders = resolveHeaders(config.headers); + const modelHeaders = resolveHeaders(modelDef.headers); + let headers = providerHeaders || modelHeaders ? { ...providerHeaders, ...modelHeaders } : undefined; + + // If authHeader is true, add Authorization header + if (config.authHeader && config.apiKey) { + const resolvedKey = resolveConfigValue(config.apiKey); + if (resolvedKey) { + headers = { ...headers, Authorization: `Bearer ${resolvedKey}` }; + } + } + + this.models.push({ + id: modelDef.id, + name: modelDef.name, + api: api as Api, + provider: providerName, + baseUrl: config.baseUrl, + reasoning: modelDef.reasoning, + input: modelDef.input as ("text" | "image")[], + cost: modelDef.cost, + contextWindow: modelDef.contextWindow, + maxTokens: modelDef.maxTokens, + headers, + compat: modelDef.compat, + } as Model); + } + + // Apply OAuth modifyModels if credentials exist (e.g., to update baseUrl) + if (config.oauth?.modifyModels) { + const cred = this.authStorage.get(providerName); + if (cred?.type === "oauth") { + this.models = config.oauth.modifyModels(this.models, cred); + } + } + } else if (config.baseUrl) { + // Override-only: update baseUrl/headers for existing models + const resolvedHeaders = resolveHeaders(config.headers); + this.models = this.models.map((m) => { + if (m.provider !== providerName) return m; + return { + ...m, + baseUrl: config.baseUrl ?? m.baseUrl, + headers: resolvedHeaders ? 
{ ...m.headers, ...resolvedHeaders } : m.headers, + }; + }); + } + } +} + +/** + * Input type for registerProvider API. + */ +export interface ProviderConfigInput { + baseUrl?: string; + apiKey?: string; + api?: Api; + streamSimple?: (model: Model, context: Context, options?: SimpleStreamOptions) => AssistantMessageEventStream; + headers?: Record; + authHeader?: boolean; + /** OAuth provider for /login support */ + oauth?: Omit; + models?: Array<{ + id: string; + name: string; + api?: Api; + baseUrl?: string; + reasoning: boolean; + input: ("text" | "image")[]; + cost: { input: number; output: number; cacheRead: number; cacheWrite: number }; + contextWindow: number; + maxTokens: number; + headers?: Record; + compat?: Model["compat"]; + }>; +} diff --git a/packages/pi-coding-agent/src/core/model-resolver.ts b/packages/pi-coding-agent/src/core/model-resolver.ts new file mode 100644 index 000000000..e05cb9429 --- /dev/null +++ b/packages/pi-coding-agent/src/core/model-resolver.ts @@ -0,0 +1,594 @@ +/** + * Model resolution, scoping, and initial selection + */ + +import type { ThinkingLevel } from "@gsd/pi-agent-core"; +import { type Api, type KnownProvider, type Model, modelsAreEqual } from "@gsd/pi-ai"; +import chalk from "chalk"; +import { minimatch } from "minimatch"; +import { isValidThinkingLevel } from "../cli/args.js"; +import { DEFAULT_THINKING_LEVEL } from "./defaults.js"; +import type { ModelRegistry } from "./model-registry.js"; + +/** Default model IDs for each known provider */ +export const defaultModelPerProvider: Record = { + "amazon-bedrock": "us.anthropic.claude-opus-4-6-v1", + anthropic: "claude-opus-4-6", + openai: "gpt-5.4", + "azure-openai-responses": "gpt-5.2", + "openai-codex": "gpt-5.4", + google: "gemini-2.5-pro", + "google-gemini-cli": "gemini-2.5-pro", + "google-antigravity": "gemini-3.1-pro-high", + "google-vertex": "gemini-3-pro-preview", + "github-copilot": "gpt-4o", + openrouter: "openai/gpt-5.1-codex", + "vercel-ai-gateway": 
"anthropic/claude-opus-4-6", + xai: "grok-4-fast-non-reasoning", + groq: "openai/gpt-oss-120b", + cerebras: "zai-glm-4.6", + zai: "glm-4.6", + mistral: "devstral-medium-latest", + minimax: "MiniMax-M2.1", + "minimax-cn": "MiniMax-M2.1", + huggingface: "moonshotai/Kimi-K2.5", + opencode: "claude-opus-4-6", + "opencode-go": "kimi-k2.5", + "kimi-coding": "kimi-k2-thinking", +}; + +export interface ScopedModel { + model: Model; + /** Thinking level if explicitly specified in pattern (e.g., "model:high"), undefined otherwise */ + thinkingLevel?: ThinkingLevel; +} + +/** + * Helper to check if a model ID looks like an alias (no date suffix) + * Dates are typically in format: -20241022 or -20250929 + */ +function isAlias(id: string): boolean { + // Check if ID ends with -latest + if (id.endsWith("-latest")) return true; + + // Check if ID ends with a date pattern (-YYYYMMDD) + const datePattern = /-\d{8}$/; + return !datePattern.test(id); +} + +/** + * Try to match a pattern to a model from the available models list. + * Returns the matched model or undefined if no match found. 
+ */ +function tryMatchModel(modelPattern: string, availableModels: Model[]): Model | undefined { + // Check for provider/modelId format (provider is everything before the first /) + const slashIndex = modelPattern.indexOf("/"); + if (slashIndex !== -1) { + const provider = modelPattern.substring(0, slashIndex); + const modelId = modelPattern.substring(slashIndex + 1); + const providerMatch = availableModels.find( + (m) => m.provider.toLowerCase() === provider.toLowerCase() && m.id.toLowerCase() === modelId.toLowerCase(), + ); + if (providerMatch) { + return providerMatch; + } + // No exact provider/model match - fall through to other matching + } + + // Check for exact ID match (case-insensitive) + const exactMatch = availableModels.find((m) => m.id.toLowerCase() === modelPattern.toLowerCase()); + if (exactMatch) { + return exactMatch; + } + + // No exact match - fall back to partial matching + const matches = availableModels.filter( + (m) => + m.id.toLowerCase().includes(modelPattern.toLowerCase()) || + m.name?.toLowerCase().includes(modelPattern.toLowerCase()), + ); + + if (matches.length === 0) { + return undefined; + } + + // Separate into aliases and dated versions + const aliases = matches.filter((m) => isAlias(m.id)); + const datedVersions = matches.filter((m) => !isAlias(m.id)); + + if (aliases.length > 0) { + // Prefer alias - if multiple aliases, pick the one that sorts highest + aliases.sort((a, b) => b.id.localeCompare(a.id)); + return aliases[0]; + } else { + // No alias found, pick latest dated version + datedVersions.sort((a, b) => b.id.localeCompare(a.id)); + return datedVersions[0]; + } +} + +export interface ParsedModelResult { + model: Model | undefined; + /** Thinking level if explicitly specified in pattern, undefined otherwise */ + thinkingLevel?: ThinkingLevel; + warning: string | undefined; +} + +function buildFallbackModel(provider: string, modelId: string, availableModels: Model[]): Model | undefined { + const providerModels = 
availableModels.filter((m) => m.provider === provider); + if (providerModels.length === 0) return undefined; + + const defaultId = defaultModelPerProvider[provider as KnownProvider]; + const baseModel = defaultId + ? (providerModels.find((m) => m.id === defaultId) ?? providerModels[0]) + : providerModels[0]; + + return { + ...baseModel, + id: modelId, + name: modelId, + }; +} + +/** + * Parse a pattern to extract model and thinking level. + * Handles models with colons in their IDs (e.g., OpenRouter's :exacto suffix). + * + * Algorithm: + * 1. Try to match full pattern as a model + * 2. If found, return it with "off" thinking level + * 3. If not found and has colons, split on last colon: + * - If suffix is valid thinking level, use it and recurse on prefix + * - If suffix is invalid, warn and recurse on prefix with "off" + * + * @internal Exported for testing + */ +export function parseModelPattern( + pattern: string, + availableModels: Model[], + options?: { allowInvalidThinkingLevelFallback?: boolean }, +): ParsedModelResult { + // Try exact match first + const exactMatch = tryMatchModel(pattern, availableModels); + if (exactMatch) { + return { model: exactMatch, thinkingLevel: undefined, warning: undefined }; + } + + // No match - try splitting on last colon if present + const lastColonIndex = pattern.lastIndexOf(":"); + if (lastColonIndex === -1) { + // No colons, pattern simply doesn't match any model + return { model: undefined, thinkingLevel: undefined, warning: undefined }; + } + + const prefix = pattern.substring(0, lastColonIndex); + const suffix = pattern.substring(lastColonIndex + 1); + + if (isValidThinkingLevel(suffix)) { + // Valid thinking level - recurse on prefix and use this level + const result = parseModelPattern(prefix, availableModels, options); + if (result.model) { + // Only use this thinking level if no warning from inner recursion + return { + model: result.model, + thinkingLevel: result.warning ? 
undefined : suffix, + warning: result.warning, + }; + } + return result; + } else { + // Invalid suffix + const allowFallback = options?.allowInvalidThinkingLevelFallback ?? true; + if (!allowFallback) { + // In strict mode (CLI --model parsing), treat it as part of the model id and fail. + // This avoids accidentally resolving to a different model. + return { model: undefined, thinkingLevel: undefined, warning: undefined }; + } + + // Scope mode: recurse on prefix and warn + const result = parseModelPattern(prefix, availableModels, options); + if (result.model) { + return { + model: result.model, + thinkingLevel: undefined, + warning: `Invalid thinking level "${suffix}" in pattern "${pattern}". Using default instead.`, + }; + } + return result; + } +} + +/** + * Resolve model patterns to actual Model objects with optional thinking levels + * Format: "pattern:level" where :level is optional + * For each pattern, finds all matching models and picks the best version: + * 1. Prefer alias (e.g., claude-sonnet-4-5) over dated versions (claude-sonnet-4-5-20250929) + * 2. If no alias, pick the latest dated version + * + * Supports models with colons in their IDs (e.g., OpenRouter's model:exacto). + * The algorithm tries to match the full pattern first, then progressively + * strips colon-suffixes to find a match. 
+ */ +export async function resolveModelScope(patterns: string[], modelRegistry: ModelRegistry): Promise { + const availableModels = await modelRegistry.getAvailable(); + const scopedModels: ScopedModel[] = []; + + for (const pattern of patterns) { + // Check if pattern contains glob characters + if (pattern.includes("*") || pattern.includes("?") || pattern.includes("[")) { + // Extract optional thinking level suffix (e.g., "provider/*:high") + const colonIdx = pattern.lastIndexOf(":"); + let globPattern = pattern; + let thinkingLevel: ThinkingLevel | undefined; + + if (colonIdx !== -1) { + const suffix = pattern.substring(colonIdx + 1); + if (isValidThinkingLevel(suffix)) { + thinkingLevel = suffix; + globPattern = pattern.substring(0, colonIdx); + } + } + + // Match against "provider/modelId" format OR just model ID + // This allows "*sonnet*" to match without requiring "anthropic/*sonnet*" + const matchingModels = availableModels.filter((m) => { + const fullId = `${m.provider}/${m.id}`; + return minimatch(fullId, globPattern, { nocase: true }) || minimatch(m.id, globPattern, { nocase: true }); + }); + + if (matchingModels.length === 0) { + console.warn(chalk.yellow(`Warning: No models match pattern "${pattern}"`)); + continue; + } + + for (const model of matchingModels) { + if (!scopedModels.find((sm) => modelsAreEqual(sm.model, model))) { + scopedModels.push({ model, thinkingLevel }); + } + } + continue; + } + + const { model, thinkingLevel, warning } = parseModelPattern(pattern, availableModels); + + if (warning) { + console.warn(chalk.yellow(`Warning: ${warning}`)); + } + + if (!model) { + console.warn(chalk.yellow(`Warning: No models match pattern "${pattern}"`)); + continue; + } + + // Avoid duplicates + if (!scopedModels.find((sm) => modelsAreEqual(sm.model, model))) { + scopedModels.push({ model, thinkingLevel }); + } + } + + return scopedModels; +} + +export interface ResolveCliModelResult { + model: Model | undefined; + thinkingLevel?: ThinkingLevel; + 
	warning: string | undefined;
	/**
	 * Error message suitable for CLI display.
	 * When set, model will be undefined.
	 */
	error: string | undefined;
}

/**
 * Resolve a single model from CLI flags.
 *
 * Supports:
 * - --provider <provider> --model <model>
 * - --model <provider>/<model>
 * - Fuzzy matching (same rules as model scoping: exact id, then partial id/name)
 *
 * Note: This does not apply the thinking level by itself, but it may *parse* and
 * return a thinking level from "<model>:<level>" so the caller can apply it.
 */
export function resolveCliModel(options: {
	cliProvider?: string;
	cliModel?: string;
	modelRegistry: ModelRegistry;
}): ResolveCliModelResult {
	const { cliProvider, cliModel, modelRegistry } = options;

	// No --model flag at all: nothing to resolve, not an error.
	if (!cliModel) {
		return { model: undefined, warning: undefined, error: undefined };
	}

	// Important: use *all* models here, not just models with pre-configured auth.
	// This allows "--api-key" to be used for first-time setup.
	const availableModels = modelRegistry.getAll();
	if (availableModels.length === 0) {
		return {
			model: undefined,
			warning: undefined,
			error: "No models available. Check your installation or add models to models.json.",
		};
	}

	// Build canonical provider lookup (case-insensitive)
	const providerMap = new Map();
	for (const m of availableModels) {
		providerMap.set(m.provider.toLowerCase(), m.provider);
	}

	let provider = cliProvider ? providerMap.get(cliProvider.toLowerCase()) : undefined;
	if (cliProvider && !provider) {
		return {
			model: undefined,
			warning: undefined,
			error: `Unknown provider "${cliProvider}". Use --list-models to see available providers/models.`,
		};
	}

	// If no explicit --provider, try to interpret "provider/model" format first.
	// When the prefix before the first slash matches a known provider, prefer that
	// interpretation over matching models whose IDs literally contain slashes
	// (e.g. "zai/glm-5" should resolve to provider=zai, model=glm-5, not to a
	// vercel-ai-gateway model with id "zai/glm-5").
	let pattern = cliModel;
	let inferredProvider = false;

	if (!provider) {
		const slashIndex = cliModel.indexOf("/");
		if (slashIndex !== -1) {
			const maybeProvider = cliModel.substring(0, slashIndex);
			const canonical = providerMap.get(maybeProvider.toLowerCase());
			if (canonical) {
				provider = canonical;
				pattern = cliModel.substring(slashIndex + 1);
				inferredProvider = true;
			}
		}
	}

	// If no provider was inferred from the slash, try exact matches without provider inference.
	// This handles models whose IDs naturally contain slashes (e.g. OpenRouter-style IDs).
	if (!provider) {
		const lower = cliModel.toLowerCase();
		const exact = availableModels.find(
			(m) => m.id.toLowerCase() === lower || `${m.provider}/${m.id}`.toLowerCase() === lower,
		);
		if (exact) {
			return { model: exact, warning: undefined, thinkingLevel: undefined, error: undefined };
		}
	}

	if (cliProvider && provider) {
		// If both were provided, tolerate --model <provider>/<model> by stripping the provider prefix
		const prefix = `${provider}/`;
		if (cliModel.toLowerCase().startsWith(prefix.toLowerCase())) {
			pattern = cliModel.substring(prefix.length);
		}
	}

	// Narrow candidates to the resolved provider when one is known.
	const candidates = provider ? availableModels.filter((m) => m.provider === provider) : availableModels;
	const { model, thinkingLevel, warning } = parseModelPattern(pattern, candidates, {
		allowInvalidThinkingLevelFallback: false,
	});

	if (model) {
		return { model, thinkingLevel, warning, error: undefined };
	}

	// If we inferred a provider from the slash but found no match within that provider,
	// fall back to matching the full input as a raw model id across all models.
	// This handles OpenRouter-style IDs like "openai/gpt-4o:extended" where "openai"
	// looks like a provider but the full string is actually a model id on openrouter.
	if (inferredProvider) {
		const lower = cliModel.toLowerCase();
		const exact = availableModels.find(
			(m) => m.id.toLowerCase() === lower || `${m.provider}/${m.id}`.toLowerCase() === lower,
		);
		if (exact) {
			return { model: exact, warning: undefined, thinkingLevel: undefined, error: undefined };
		}
		// Also try parseModelPattern on the full input against all models
		const fallback = parseModelPattern(cliModel, availableModels, {
			allowInvalidThinkingLevelFallback: false,
		});
		if (fallback.model) {
			return {
				model: fallback.model,
				thinkingLevel: fallback.thinkingLevel,
				warning: fallback.warning,
				error: undefined,
			};
		}
	}

	// Last resort with a known provider: synthesize a custom model entry so the
	// user can address a model id the registry does not know about yet.
	if (provider) {
		const fallbackModel = buildFallbackModel(provider, pattern, availableModels);
		if (fallbackModel) {
			const fallbackWarning = warning
				? `${warning} Model "${pattern}" not found for provider "${provider}". Using custom model id.`
				: `Model "${pattern}" not found for provider "${provider}". Using custom model id.`;
			return { model: fallbackModel, thinkingLevel: undefined, warning: fallbackWarning, error: undefined };
		}
	}

	const display = provider ? `${provider}/${pattern}` : cliModel;
	return {
		model: undefined,
		thinkingLevel: undefined,
		warning,
		error: `Model "${display}" not found. Use --list-models to see available models.`,
	};
}

export interface InitialModelResult {
	model: Model | undefined;
	thinkingLevel: ThinkingLevel;
	fallbackMessage: string | undefined;
}

/**
 * Find the initial model to use based on priority:
 * 1. CLI args (provider + model)
 * 2. First model from scoped models (if not continuing/resuming)
 * 3. Restored from session (if continuing/resuming)
 * 4. Saved default from settings
 * 5. 
First available model with valid API key + */ +export async function findInitialModel(options: { + cliProvider?: string; + cliModel?: string; + scopedModels: ScopedModel[]; + isContinuing: boolean; + defaultProvider?: string; + defaultModelId?: string; + defaultThinkingLevel?: ThinkingLevel; + modelRegistry: ModelRegistry; +}): Promise { + const { + cliProvider, + cliModel, + scopedModels, + isContinuing, + defaultProvider, + defaultModelId, + defaultThinkingLevel, + modelRegistry, + } = options; + + let model: Model | undefined; + let thinkingLevel: ThinkingLevel = DEFAULT_THINKING_LEVEL; + + // 1. CLI args take priority + if (cliProvider && cliModel) { + const resolved = resolveCliModel({ + cliProvider, + cliModel, + modelRegistry, + }); + if (resolved.error) { + console.error(chalk.red(resolved.error)); + process.exit(1); + } + if (resolved.model) { + return { model: resolved.model, thinkingLevel: DEFAULT_THINKING_LEVEL, fallbackMessage: undefined }; + } + } + + // 2. Use first model from scoped models (skip if continuing/resuming) + if (scopedModels.length > 0 && !isContinuing) { + return { + model: scopedModels[0].model, + thinkingLevel: scopedModels[0].thinkingLevel ?? defaultThinkingLevel ?? DEFAULT_THINKING_LEVEL, + fallbackMessage: undefined, + }; + } + + // 3. Try saved default from settings + if (defaultProvider && defaultModelId) { + const found = modelRegistry.find(defaultProvider, defaultModelId); + if (found) { + model = found; + if (defaultThinkingLevel) { + thinkingLevel = defaultThinkingLevel; + } + return { model, thinkingLevel, fallbackMessage: undefined }; + } + } + + // 4. 
Try first available model with valid API key + const availableModels = await modelRegistry.getAvailable(); + + if (availableModels.length > 0) { + // Try to find a default model from known providers + for (const provider of Object.keys(defaultModelPerProvider) as KnownProvider[]) { + const defaultId = defaultModelPerProvider[provider]; + const match = availableModels.find((m) => m.provider === provider && m.id === defaultId); + if (match) { + return { model: match, thinkingLevel: DEFAULT_THINKING_LEVEL, fallbackMessage: undefined }; + } + } + + // If no default found, use first available + return { model: availableModels[0], thinkingLevel: DEFAULT_THINKING_LEVEL, fallbackMessage: undefined }; + } + + // 5. No model found + return { model: undefined, thinkingLevel: DEFAULT_THINKING_LEVEL, fallbackMessage: undefined }; +} + +/** + * Restore model from session, with fallback to available models + */ +export async function restoreModelFromSession( + savedProvider: string, + savedModelId: string, + currentModel: Model | undefined, + shouldPrintMessages: boolean, + modelRegistry: ModelRegistry, +): Promise<{ model: Model | undefined; fallbackMessage: string | undefined }> { + const restoredModel = modelRegistry.find(savedProvider, savedModelId); + + // Check if restored model exists and has a valid API key + const hasApiKey = restoredModel ? !!(await modelRegistry.getApiKey(restoredModel)) : false; + + if (restoredModel && hasApiKey) { + if (shouldPrintMessages) { + console.log(chalk.dim(`Restored model: ${savedProvider}/${savedModelId}`)); + } + return { model: restoredModel, fallbackMessage: undefined }; + } + + // Model not found or no API key - fall back + const reason = !restoredModel ? 
"model no longer exists" : "no API key available"; + + if (shouldPrintMessages) { + console.error(chalk.yellow(`Warning: Could not restore model ${savedProvider}/${savedModelId} (${reason}).`)); + } + + // If we already have a model, use it as fallback + if (currentModel) { + if (shouldPrintMessages) { + console.log(chalk.dim(`Falling back to: ${currentModel.provider}/${currentModel.id}`)); + } + return { + model: currentModel, + fallbackMessage: `Could not restore model ${savedProvider}/${savedModelId} (${reason}). Using ${currentModel.provider}/${currentModel.id}.`, + }; + } + + // Try to find any available model + const availableModels = await modelRegistry.getAvailable(); + + if (availableModels.length > 0) { + // Try to find a default model from known providers + let fallbackModel: Model | undefined; + for (const provider of Object.keys(defaultModelPerProvider) as KnownProvider[]) { + const defaultId = defaultModelPerProvider[provider]; + const match = availableModels.find((m) => m.provider === provider && m.id === defaultId); + if (match) { + fallbackModel = match; + break; + } + } + + // If no default found, use first available + if (!fallbackModel) { + fallbackModel = availableModels[0]; + } + + if (shouldPrintMessages) { + console.log(chalk.dim(`Falling back to: ${fallbackModel.provider}/${fallbackModel.id}`)); + } + + return { + model: fallbackModel, + fallbackMessage: `Could not restore model ${savedProvider}/${savedModelId} (${reason}). 
Using ${fallbackModel.provider}/${fallbackModel.id}.`, + }; + } + + // No models available + return { model: undefined, fallbackMessage: undefined }; +} diff --git a/packages/pi-coding-agent/src/core/package-manager.ts b/packages/pi-coding-agent/src/core/package-manager.ts new file mode 100644 index 000000000..483758203 --- /dev/null +++ b/packages/pi-coding-agent/src/core/package-manager.ts @@ -0,0 +1,1794 @@ +import { spawn, spawnSync } from "node:child_process"; +import { createHash } from "node:crypto"; +import { existsSync, mkdirSync, readdirSync, readFileSync, rmSync, statSync, writeFileSync } from "node:fs"; +import { homedir, tmpdir } from "node:os"; +import { basename, dirname, join, relative, resolve, sep } from "node:path"; +import ignore from "ignore"; +import { minimatch } from "minimatch"; +import { CONFIG_DIR_NAME } from "../config.js"; +import { type GitSource, parseGitUrl } from "../utils/git.js"; +import type { PackageSource, SettingsManager } from "./settings-manager.js"; + +const NETWORK_TIMEOUT_MS = 10000; + +function isOfflineModeEnabled(): boolean { + const value = process.env.PI_OFFLINE; + if (!value) return false; + return value === "1" || value.toLowerCase() === "true" || value.toLowerCase() === "yes"; +} + +export interface PathMetadata { + source: string; + scope: SourceScope; + origin: "package" | "top-level"; + baseDir?: string; +} + +export interface ResolvedResource { + path: string; + enabled: boolean; + metadata: PathMetadata; +} + +export interface ResolvedPaths { + extensions: ResolvedResource[]; + skills: ResolvedResource[]; + prompts: ResolvedResource[]; + themes: ResolvedResource[]; +} + +export type MissingSourceAction = "install" | "skip" | "error"; + +export interface ProgressEvent { + type: "start" | "progress" | "complete" | "error"; + action: "install" | "remove" | "update" | "clone" | "pull"; + source: string; + message?: string; +} + +export type ProgressCallback = (event: ProgressEvent) => void; + +export interface 
PackageManager { + resolve(onMissing?: (source: string) => Promise): Promise; + install(source: string, options?: { local?: boolean }): Promise; + remove(source: string, options?: { local?: boolean }): Promise; + update(source?: string): Promise; + resolveExtensionSources( + sources: string[], + options?: { local?: boolean; temporary?: boolean }, + ): Promise; + addSourceToSettings(source: string, options?: { local?: boolean }): boolean; + removeSourceFromSettings(source: string, options?: { local?: boolean }): boolean; + setProgressCallback(callback: ProgressCallback | undefined): void; + getInstalledPath(source: string, scope: "user" | "project"): string | undefined; +} + +interface PackageManagerOptions { + cwd: string; + agentDir: string; + settingsManager: SettingsManager; +} + +type SourceScope = "user" | "project" | "temporary"; + +type NpmSource = { + type: "npm"; + spec: string; + name: string; + pinned: boolean; +}; + +type LocalSource = { + type: "local"; + path: string; +}; + +type ParsedSource = NpmSource | GitSource | LocalSource; + +interface PiManifest { + extensions?: string[]; + skills?: string[]; + prompts?: string[]; + themes?: string[]; +} + +interface ResourceAccumulator { + extensions: Map; + skills: Map; + prompts: Map; + themes: Map; +} + +interface PackageFilter { + extensions?: string[]; + skills?: string[]; + prompts?: string[]; + themes?: string[]; +} + +type ResourceType = "extensions" | "skills" | "prompts" | "themes"; + +const RESOURCE_TYPES: ResourceType[] = ["extensions", "skills", "prompts", "themes"]; + +const FILE_PATTERNS: Record = { + extensions: /\.(ts|js)$/, + skills: /\.md$/, + prompts: /\.md$/, + themes: /\.json$/, +}; + +const IGNORE_FILE_NAMES = [".gitignore", ".ignore", ".fdignore"]; + +type IgnoreMatcher = ReturnType; + +function toPosixPath(p: string): string { + return p.split(sep).join("/"); +} + +function prefixIgnorePattern(line: string, prefix: string): string | null { + const trimmed = line.trim(); + if 
(!trimmed) return null;
	if (trimmed.startsWith("#") && !trimmed.startsWith("\\#")) return null;

	let pattern = line;
	let negated = false;

	if (pattern.startsWith("!")) {
		negated = true;
		pattern = pattern.slice(1);
	} else if (pattern.startsWith("\\!")) {
		// "\!" escapes a literal leading "!" — drop the backslash only.
		pattern = pattern.slice(1);
	}

	if (pattern.startsWith("/")) {
		pattern = pattern.slice(1);
	}

	const prefixed = prefix ? `${prefix}${pattern}` : pattern;
	return negated ? `!${prefixed}` : prefixed;
}

// Load .gitignore/.ignore/.fdignore rules found in `dir` into the matcher,
// re-anchored relative to `rootDir`. Unreadable files are silently skipped.
function addIgnoreRules(ig: IgnoreMatcher, dir: string, rootDir: string): void {
	const relativeDir = relative(rootDir, dir);
	const prefix = relativeDir ? `${toPosixPath(relativeDir)}/` : "";

	for (const filename of IGNORE_FILE_NAMES) {
		const ignorePath = join(dir, filename);
		if (!existsSync(ignorePath)) continue;
		try {
			const content = readFileSync(ignorePath, "utf-8");
			const patterns = content
				.split(/\r?\n/)
				.map((line) => prefixIgnorePattern(line, prefix))
				.filter((line): line is string => Boolean(line));
			if (patterns.length > 0) {
				ig.add(patterns);
			}
		} catch {}
	}
}

// True when the entry is an override/glob pattern rather than a plain path.
function isPattern(s: string): boolean {
	return s.startsWith("!") || s.startsWith("+") || s.startsWith("-") || s.includes("*") || s.includes("?");
}

// Partition settings entries into plain paths and pattern entries.
function splitPatterns(entries: string[]): { plain: string[]; patterns: string[] } {
	const plain: string[] = [];
	const patterns: string[] = [];
	for (const entry of entries) {
		if (isPattern(entry)) {
			patterns.push(entry);
		} else {
			plain.push(entry);
		}
	}
	return { plain, patterns };
}

// Recursively collect files matching `filePattern`, honoring nested ignore
// files (matcher and root are threaded through the recursion). Symlinks are
// resolved via statSync; broken links are skipped.
function collectFiles(
	dir: string,
	filePattern: RegExp,
	skipNodeModules = true,
	ignoreMatcher?: IgnoreMatcher,
	rootDir?: string,
): string[] {
	const files: string[] = [];
	if (!existsSync(dir)) return files;

	const root = rootDir ?? dir;
	const ig = ignoreMatcher ?? ignore();
	addIgnoreRules(ig, dir, root);

	try {
		const entries = readdirSync(dir, { withFileTypes: true });
		for (const entry of entries) {
			if (entry.name.startsWith(".")) continue;
			if (skipNodeModules && entry.name === "node_modules") continue;

			const fullPath = join(dir, entry.name);
			let isDir = entry.isDirectory();
			let isFile = entry.isFile();

			if (entry.isSymbolicLink()) {
				try {
					const stats = statSync(fullPath);
					isDir = stats.isDirectory();
					isFile = stats.isFile();
				} catch {
					continue;
				}
			}

			const relPath = toPosixPath(relative(root, fullPath));
			// Directories need a trailing slash for dir-only ignore rules.
			const ignorePath = isDir ? `${relPath}/` : relPath;
			if (ig.ignores(ignorePath)) continue;

			if (isDir) {
				files.push(...collectFiles(fullPath, filePattern, skipNodeModules, ig, root));
			} else if (isFile && filePattern.test(entry.name)) {
				files.push(fullPath);
			}
		}
	} catch {
		// Ignore errors
	}

	return files;
}

// Collect skill entry points: loose .md files at the top level (when
// includeRootFiles), and SKILL.md files in subdirectories.
function collectSkillEntries(
	dir: string,
	includeRootFiles = true,
	ignoreMatcher?: IgnoreMatcher,
	rootDir?: string,
): string[] {
	const entries: string[] = [];
	if (!existsSync(dir)) return entries;

	const root = rootDir ?? dir;
	const ig = ignoreMatcher ?? ignore();
	addIgnoreRules(ig, dir, root);

	try {
		const dirEntries = readdirSync(dir, { withFileTypes: true });
		for (const entry of dirEntries) {
			if (entry.name.startsWith(".")) continue;
			if (entry.name === "node_modules") continue;

			const fullPath = join(dir, entry.name);
			let isDir = entry.isDirectory();
			let isFile = entry.isFile();

			if (entry.isSymbolicLink()) {
				try {
					const stats = statSync(fullPath);
					isDir = stats.isDirectory();
					isFile = stats.isFile();
				} catch {
					continue;
				}
			}

			const relPath = toPosixPath(relative(root, fullPath));
			const ignorePath = isDir ? `${relPath}/` : relPath;
			if (ig.ignores(ignorePath)) continue;

			if (isDir) {
				entries.push(...collectSkillEntries(fullPath, false, ig, root));
			} else if (isFile) {
				const isRootMd = includeRootFiles && entry.name.endsWith(".md");
				const isSkillMd = !includeRootFiles && entry.name === "SKILL.md";
				if (isRootMd || isSkillMd) {
					entries.push(fullPath);
				}
			}
		}
	} catch {
		// Ignore errors
	}

	return entries;
}

function collectAutoSkillEntries(dir: string, includeRootFiles = true): string[] {
	return collectSkillEntries(dir, includeRootFiles);
}

// Walk up from startDir to the nearest directory containing ".git".
function findGitRepoRoot(startDir: string): string | null {
	let dir = resolve(startDir);
	while (true) {
		if (existsSync(join(dir, ".git"))) {
			return dir;
		}
		const parent = dirname(dir);
		if (parent === dir) {
			return null;
		}
		dir = parent;
	}
}

// Candidate ".agents/skills" dirs from cwd up to the git repo root (or the
// filesystem root when not inside a repo). Paths may not exist on disk.
function collectAncestorAgentsSkillDirs(startDir: string): string[] {
	const skillDirs: string[] = [];
	const resolvedStartDir = resolve(startDir);
	const gitRepoRoot = findGitRepoRoot(resolvedStartDir);

	let dir = resolvedStartDir;
	while (true) {
		skillDirs.push(join(dir, ".agents", "skills"));
		if (gitRepoRoot && dir === gitRepoRoot) {
			break;
		}
		const parent = dirname(dir);
		if (parent === dir) {
			break;
		}
		dir = parent;
	}

	return skillDirs;
}

// Non-recursive: top-level .md files in `dir`, honoring ignore rules.
function collectAutoPromptEntries(dir: string): string[] {
	const entries: string[] = [];
	if (!existsSync(dir)) return entries;

	const ig = ignore();
	addIgnoreRules(ig, dir, dir);

	try {
		const dirEntries = readdirSync(dir, { withFileTypes: true });
		for (const entry of dirEntries) {
			if (entry.name.startsWith(".")) continue;
			if (entry.name === "node_modules") continue;

			const fullPath = join(dir, entry.name);
			let isFile = entry.isFile();
			if (entry.isSymbolicLink()) {
				try {
					isFile = statSync(fullPath).isFile();
				} catch {
					continue;
				}
			}

			const relPath = toPosixPath(relative(dir, fullPath));
			if (ig.ignores(relPath)) continue;

			if (isFile && entry.name.endsWith(".md")) {
				entries.push(fullPath);
			}
		}
	} catch {
		// Ignore errors
	}

	return entries;
}

// Non-recursive: top-level .json files in `dir`, honoring ignore rules.
function collectAutoThemeEntries(dir: string): string[] {
	const entries: string[] = [];
	if (!existsSync(dir)) return entries;

	const ig = ignore();
	addIgnoreRules(ig, dir, dir);

	try {
		const dirEntries = readdirSync(dir, { withFileTypes: true });
		for (const entry of dirEntries) {
			if (entry.name.startsWith(".")) continue;
			if (entry.name === "node_modules") continue;

			const fullPath = join(dir, entry.name);
			let isFile = entry.isFile();
			if (entry.isSymbolicLink()) {
				try {
					isFile = statSync(fullPath).isFile();
				} catch {
					continue;
				}
			}

			const relPath = toPosixPath(relative(dir, fullPath));
			if (ig.ignores(relPath)) continue;

			if (isFile && entry.name.endsWith(".json")) {
				entries.push(fullPath);
			}
		}
	} catch {
		// Ignore errors
	}

	return entries;
}

// Read the "pi" manifest section from a package.json; null on any failure.
function readPiManifestFile(packageJsonPath: string): PiManifest | null {
	try {
		const content = readFileSync(packageJsonPath, "utf-8");
		const pkg = JSON.parse(content) as { pi?: PiManifest };
		return pkg.pi ?? 
null; + } catch { + return null; + } +} + +function resolveExtensionEntries(dir: string): string[] | null { + const packageJsonPath = join(dir, "package.json"); + if (existsSync(packageJsonPath)) { + const manifest = readPiManifestFile(packageJsonPath); + if (manifest?.extensions?.length) { + const entries: string[] = []; + for (const extPath of manifest.extensions) { + const resolvedExtPath = resolve(dir, extPath); + if (existsSync(resolvedExtPath)) { + entries.push(resolvedExtPath); + } + } + if (entries.length > 0) { + return entries; + } + } + } + + const indexTs = join(dir, "index.ts"); + const indexJs = join(dir, "index.js"); + if (existsSync(indexTs)) { + return [indexTs]; + } + if (existsSync(indexJs)) { + return [indexJs]; + } + + return null; +} + +function collectAutoExtensionEntries(dir: string): string[] { + const entries: string[] = []; + if (!existsSync(dir)) return entries; + + // First check if this directory itself has explicit extension entries (package.json or index) + const rootEntries = resolveExtensionEntries(dir); + if (rootEntries) { + return rootEntries; + } + + // Otherwise, discover extensions from directory contents + const ig = ignore(); + addIgnoreRules(ig, dir, dir); + + try { + const dirEntries = readdirSync(dir, { withFileTypes: true }); + for (const entry of dirEntries) { + if (entry.name.startsWith(".")) continue; + if (entry.name === "node_modules") continue; + + const fullPath = join(dir, entry.name); + let isDir = entry.isDirectory(); + let isFile = entry.isFile(); + + if (entry.isSymbolicLink()) { + try { + const stats = statSync(fullPath); + isDir = stats.isDirectory(); + isFile = stats.isFile(); + } catch { + continue; + } + } + + const relPath = toPosixPath(relative(dir, fullPath)); + const ignorePath = isDir ? 
`${relPath}/` : relPath; + if (ig.ignores(ignorePath)) continue; + + if (isFile && (entry.name.endsWith(".ts") || entry.name.endsWith(".js"))) { + entries.push(fullPath); + } else if (isDir) { + const resolvedEntries = resolveExtensionEntries(fullPath); + if (resolvedEntries) { + entries.push(...resolvedEntries); + } + } + } + } catch { + // Ignore errors + } + + return entries; +} + +/** + * Collect resource files from a directory based on resource type. + * Extensions use smart discovery (index.ts in subdirs), others use recursive collection. + */ +function collectResourceFiles(dir: string, resourceType: ResourceType): string[] { + if (resourceType === "skills") { + return collectSkillEntries(dir); + } + if (resourceType === "extensions") { + return collectAutoExtensionEntries(dir); + } + return collectFiles(dir, FILE_PATTERNS[resourceType]); +} + +function matchesAnyPattern(filePath: string, patterns: string[], baseDir: string): boolean { + const rel = relative(baseDir, filePath); + const name = basename(filePath); + const isSkillFile = name === "SKILL.md"; + const parentDir = isSkillFile ? dirname(filePath) : undefined; + const parentRel = isSkillFile ? relative(baseDir, parentDir!) : undefined; + const parentName = isSkillFile ? basename(parentDir!) 
: undefined; + + return patterns.some((pattern) => { + if (minimatch(rel, pattern) || minimatch(name, pattern) || minimatch(filePath, pattern)) { + return true; + } + if (!isSkillFile) return false; + return minimatch(parentRel!, pattern) || minimatch(parentName!, pattern) || minimatch(parentDir!, pattern); + }); +} + +function normalizeExactPattern(pattern: string): string { + if (pattern.startsWith("./") || pattern.startsWith(".\\")) { + return pattern.slice(2); + } + return pattern; +} + +function matchesAnyExactPattern(filePath: string, patterns: string[], baseDir: string): boolean { + if (patterns.length === 0) return false; + const rel = relative(baseDir, filePath); + const name = basename(filePath); + const isSkillFile = name === "SKILL.md"; + const parentDir = isSkillFile ? dirname(filePath) : undefined; + const parentRel = isSkillFile ? relative(baseDir, parentDir!) : undefined; + + return patterns.some((pattern) => { + const normalized = normalizeExactPattern(pattern); + if (normalized === rel || normalized === filePath) { + return true; + } + if (!isSkillFile) return false; + return normalized === parentRel || normalized === parentDir; + }); +} + +function getOverridePatterns(entries: string[]): string[] { + return entries.filter((pattern) => pattern.startsWith("!") || pattern.startsWith("+") || pattern.startsWith("-")); +} + +function isEnabledByOverrides(filePath: string, patterns: string[], baseDir: string): boolean { + const overrides = getOverridePatterns(patterns); + const excludes = overrides.filter((pattern) => pattern.startsWith("!")).map((pattern) => pattern.slice(1)); + const forceIncludes = overrides.filter((pattern) => pattern.startsWith("+")).map((pattern) => pattern.slice(1)); + const forceExcludes = overrides.filter((pattern) => pattern.startsWith("-")).map((pattern) => pattern.slice(1)); + + let enabled = true; + if (excludes.length > 0 && matchesAnyPattern(filePath, excludes, baseDir)) { + enabled = false; + } + if (forceIncludes.length 
> 0 && matchesAnyExactPattern(filePath, forceIncludes, baseDir)) { + enabled = true; + } + if (forceExcludes.length > 0 && matchesAnyExactPattern(filePath, forceExcludes, baseDir)) { + enabled = false; + } + return enabled; +} + +/** + * Apply patterns to paths and return a Set of enabled paths. + * Pattern types: + * - Plain patterns: include matching paths + * - `!pattern`: exclude matching paths + * - `+path`: force-include exact path (overrides exclusions) + * - `-path`: force-exclude exact path (overrides force-includes) + */ +function applyPatterns(allPaths: string[], patterns: string[], baseDir: string): Set { + const includes: string[] = []; + const excludes: string[] = []; + const forceIncludes: string[] = []; + const forceExcludes: string[] = []; + + for (const p of patterns) { + if (p.startsWith("+")) { + forceIncludes.push(p.slice(1)); + } else if (p.startsWith("-")) { + forceExcludes.push(p.slice(1)); + } else if (p.startsWith("!")) { + excludes.push(p.slice(1)); + } else { + includes.push(p); + } + } + + // Step 1: Apply includes (or all if no includes) + let result: string[]; + if (includes.length === 0) { + result = [...allPaths]; + } else { + result = allPaths.filter((filePath) => matchesAnyPattern(filePath, includes, baseDir)); + } + + // Step 2: Apply excludes + if (excludes.length > 0) { + result = result.filter((filePath) => !matchesAnyPattern(filePath, excludes, baseDir)); + } + + // Step 3: Force-include (add back from allPaths, overriding exclusions) + if (forceIncludes.length > 0) { + for (const filePath of allPaths) { + if (!result.includes(filePath) && matchesAnyExactPattern(filePath, forceIncludes, baseDir)) { + result.push(filePath); + } + } + } + + // Step 4: Force-exclude (remove even if included or force-included) + if (forceExcludes.length > 0) { + result = result.filter((filePath) => !matchesAnyExactPattern(filePath, forceExcludes, baseDir)); + } + + return new Set(result); +} + +export class DefaultPackageManager implements 
PackageManager {
	private cwd: string;
	private agentDir: string;
	private settingsManager: SettingsManager;
	private globalNpmRoot: string | undefined;
	private progressCallback: ProgressCallback | undefined;

	constructor(options: PackageManagerOptions) {
		this.cwd = options.cwd;
		this.agentDir = options.agentDir;
		this.settingsManager = options.settingsManager;
	}

	setProgressCallback(callback: ProgressCallback | undefined): void {
		this.progressCallback = callback;
	}

	// Record a package source in user (global) or project settings.
	// Returns false when an equivalent source is already present.
	addSourceToSettings(source: string, options?: { local?: boolean }): boolean {
		const scope: SourceScope = options?.local ? "project" : "user";
		const currentSettings =
			scope === "project" ? this.settingsManager.getProjectSettings() : this.settingsManager.getGlobalSettings();
		const currentPackages = currentSettings.packages ?? [];
		const normalizedSource = this.normalizePackageSourceForSettings(source, scope);
		const exists = currentPackages.some((existing) => this.packageSourcesMatch(existing, source, scope));
		if (exists) {
			return false;
		}
		const nextPackages = [...currentPackages, normalizedSource];
		if (scope === "project") {
			this.settingsManager.setProjectPackages(nextPackages);
		} else {
			this.settingsManager.setPackages(nextPackages);
		}
		return true;
	}

	// Remove a package source from settings; returns false when nothing matched.
	removeSourceFromSettings(source: string, options?: { local?: boolean }): boolean {
		const scope: SourceScope = options?.local ? "project" : "user";
		const currentSettings =
			scope === "project" ? this.settingsManager.getProjectSettings() : this.settingsManager.getGlobalSettings();
		const currentPackages = currentSettings.packages ?? [];
		const nextPackages = currentPackages.filter((existing) => !this.packageSourcesMatch(existing, source, scope));
		const changed = nextPackages.length !== currentPackages.length;
		if (!changed) {
			return false;
		}
		if (scope === "project") {
			this.settingsManager.setProjectPackages(nextPackages);
		} else {
			this.settingsManager.setPackages(nextPackages);
		}
		return true;
	}

	// On-disk location of an installed source, or undefined when not installed.
	getInstalledPath(source: string, scope: "user" | "project"): string | undefined {
		const parsed = this.parseSource(source);
		if (parsed.type === "npm") {
			const path = this.getNpmInstallPath(parsed, scope);
			return existsSync(path) ? path : undefined;
		}
		if (parsed.type === "git") {
			const path = this.getGitInstallPath(parsed, scope);
			return existsSync(path) ? path : undefined;
		}
		if (parsed.type === "local") {
			const baseDir = this.getBaseDirForScope(scope);
			const path = this.resolvePathFromBase(parsed.path, baseDir);
			return existsSync(path) ? path : undefined;
		}
		return undefined;
	}

	private emitProgress(event: ProgressEvent): void {
		this.progressCallback?.(event);
	}

	// Wrap an operation with start/complete/error progress events; rethrows.
	private async withProgress(
		action: ProgressEvent["action"],
		source: string,
		message: string,
		operation: () => Promise,
	): Promise {
		this.emitProgress({ type: "start", action, source, message });
		try {
			await operation();
			this.emitProgress({ type: "complete", action, source });
		} catch (error) {
			const errorMessage = error instanceof Error ? error.message : String(error);
			this.emitProgress({ type: "error", action, source, message: errorMessage });
			throw error;
		}
	}

	// Resolve all configured packages and top-level entries into concrete
	// resource paths. Project-scope entries are processed first so they win
	// collisions with user-scope entries.
	async resolve(onMissing?: (source: string) => Promise): Promise {
		const accumulator = this.createAccumulator();
		const globalSettings = this.settingsManager.getGlobalSettings();
		const projectSettings = this.settingsManager.getProjectSettings();

		// Collect all packages with scope (project first so cwd resources win collisions)
		const allPackages: Array<{ pkg: PackageSource; scope: SourceScope }> = [];
		for (const pkg of projectSettings.packages ?? []) {
			allPackages.push({ pkg, scope: "project" });
		}
		for (const pkg of globalSettings.packages ?? []) {
			allPackages.push({ pkg, scope: "user" });
		}

		// Dedupe: project scope wins over global for same package identity
		const packageSources = this.dedupePackages(allPackages);
		await this.resolvePackageSources(packageSources, accumulator, onMissing);

		const globalBaseDir = this.agentDir;
		const projectBaseDir = join(this.cwd, CONFIG_DIR_NAME);

		for (const resourceType of RESOURCE_TYPES) {
			const target = this.getTargetMap(accumulator, resourceType);
			const globalEntries = (globalSettings[resourceType] ?? []) as string[];
			const projectEntries = (projectSettings[resourceType] ?? []) as string[];
			this.resolveLocalEntries(
				projectEntries,
				resourceType,
				target,
				{
					source: "local",
					scope: "project",
					origin: "top-level",
				},
				projectBaseDir,
			);
			this.resolveLocalEntries(
				globalEntries,
				resourceType,
				target,
				{
					source: "local",
					scope: "user",
					origin: "top-level",
				},
				globalBaseDir,
			);
		}

		this.addAutoDiscoveredResources(accumulator, globalSettings, projectSettings, globalBaseDir, projectBaseDir);

		return this.toResolvedPaths(accumulator);
	}

	// Resolve ad-hoc extension sources (e.g. CLI flags) without consulting
	// the persisted package lists.
	async resolveExtensionSources(
		sources: string[],
		options?: { local?: boolean; temporary?: boolean },
	): Promise {
		const accumulator = this.createAccumulator();
		const scope: SourceScope = options?.temporary ? "temporary" : options?.local ? "project" : "user";
		const packageSources = sources.map((source) => ({ pkg: source as PackageSource, scope }));
		await this.resolvePackageSources(packageSources, accumulator);
		return this.toResolvedPaths(accumulator);
	}

	// Install one source (npm, git, or local path) with progress events.
	async install(source: string, options?: { local?: boolean }): Promise {
		const parsed = this.parseSource(source);
		const scope: SourceScope = options?.local ? "project" : "user";
		await this.withProgress("install", source, `Installing ${source}...`, async () => {
			if (parsed.type === "npm") {
				await this.installNpm(parsed, scope, false);
				return;
			}
			if (parsed.type === "git") {
				await this.installGit(parsed, scope);
				return;
			}
			if (parsed.type === "local") {
				// Local sources are not copied anywhere; just validate the path.
				const resolved = this.resolvePath(parsed.path);
				if (!existsSync(resolved)) {
					throw new Error(`Path does not exist: ${resolved}`);
				}
				return;
			}
			throw new Error(`Unsupported install source: ${source}`);
		});
	}

	// Remove one installed source; local sources are a no-op.
	async remove(source: string, options?: { local?: boolean }): Promise {
		const parsed = this.parseSource(source);
		const scope: SourceScope = options?.local ? "project" : "user";
		await this.withProgress("remove", source, `Removing ${source}...`, async () => {
			if (parsed.type === "npm") {
				await this.uninstallNpm(parsed, scope);
				return;
			}
			if (parsed.type === "git") {
				await this.removeGit(parsed, scope);
				return;
			}
			if (parsed.type === "local") {
				return;
			}
			throw new Error(`Unsupported remove source: ${source}`);
		});
	}

	// Update all configured packages, or only the one matching `source`.
	async update(source?: string): Promise {
		const globalSettings = this.settingsManager.getGlobalSettings();
		const projectSettings = this.settingsManager.getProjectSettings();
		const identity = source ? this.getPackageIdentity(source) : undefined;

		for (const pkg of globalSettings.packages ?? []) {
			const sourceStr = typeof pkg === "string" ? pkg : pkg.source;
			if (identity && this.getPackageIdentity(sourceStr, "user") !== identity) continue;
			await this.updateSourceForScope(sourceStr, "user");
		}
		for (const pkg of projectSettings.packages ?? []) {
			const sourceStr = typeof pkg === "string" ? 
pkg : pkg.source; + if (identity && this.getPackageIdentity(sourceStr, "project") !== identity) continue; + await this.updateSourceForScope(sourceStr, "project"); + } + } + + private async updateSourceForScope(source: string, scope: SourceScope): Promise { + if (isOfflineModeEnabled()) { + return; + } + const parsed = this.parseSource(source); + if (parsed.type === "npm") { + if (parsed.pinned) return; + await this.withProgress("update", source, `Updating ${source}...`, async () => { + await this.installNpm(parsed, scope, false); + }); + return; + } + if (parsed.type === "git") { + if (parsed.pinned) return; + await this.withProgress("update", source, `Updating ${source}...`, async () => { + await this.updateGit(parsed, scope); + }); + return; + } + } + + private async resolvePackageSources( + sources: Array<{ pkg: PackageSource; scope: SourceScope }>, + accumulator: ResourceAccumulator, + onMissing?: (source: string) => Promise, + ): Promise { + for (const { pkg, scope } of sources) { + const sourceStr = typeof pkg === "string" ? pkg : pkg.source; + const filter = typeof pkg === "object" ? 
pkg : undefined; + const parsed = this.parseSource(sourceStr); + const metadata: PathMetadata = { source: sourceStr, scope, origin: "package" }; + + if (parsed.type === "local") { + const baseDir = this.getBaseDirForScope(scope); + this.resolveLocalExtensionSource(parsed, accumulator, filter, metadata, baseDir); + continue; + } + + const installMissing = async (): Promise => { + if (isOfflineModeEnabled()) { + return false; + } + if (!onMissing) { + await this.installParsedSource(parsed, scope); + return true; + } + const action = await onMissing(sourceStr); + if (action === "skip") return false; + if (action === "error") throw new Error(`Missing source: ${sourceStr}`); + await this.installParsedSource(parsed, scope); + return true; + }; + + if (parsed.type === "npm") { + const installedPath = this.getNpmInstallPath(parsed, scope); + const needsInstall = !existsSync(installedPath) || (await this.npmNeedsUpdate(parsed, installedPath)); + if (needsInstall) { + const installed = await installMissing(); + if (!installed) continue; + } + metadata.baseDir = installedPath; + this.collectPackageResources(installedPath, accumulator, filter, metadata); + continue; + } + + if (parsed.type === "git") { + const installedPath = this.getGitInstallPath(parsed, scope); + if (!existsSync(installedPath)) { + const installed = await installMissing(); + if (!installed) continue; + } else if (scope === "temporary" && !parsed.pinned && !isOfflineModeEnabled()) { + await this.refreshTemporaryGitSource(parsed, sourceStr); + } + metadata.baseDir = installedPath; + this.collectPackageResources(installedPath, accumulator, filter, metadata); + } + } + } + + private resolveLocalExtensionSource( + source: LocalSource, + accumulator: ResourceAccumulator, + filter: PackageFilter | undefined, + metadata: PathMetadata, + baseDir: string, + ): void { + const resolved = this.resolvePathFromBase(source.path, baseDir); + if (!existsSync(resolved)) { + return; + } + + try { + const stats = 
statSync(resolved); + if (stats.isFile()) { + metadata.baseDir = dirname(resolved); + this.addResource(accumulator.extensions, resolved, metadata, true); + return; + } + if (stats.isDirectory()) { + metadata.baseDir = resolved; + const resources = this.collectPackageResources(resolved, accumulator, filter, metadata); + if (!resources) { + this.addResource(accumulator.extensions, resolved, metadata, true); + } + } + } catch { + return; + } + } + + private async installParsedSource(parsed: ParsedSource, scope: SourceScope): Promise { + if (parsed.type === "npm") { + await this.installNpm(parsed, scope, scope === "temporary"); + return; + } + if (parsed.type === "git") { + await this.installGit(parsed, scope); + return; + } + } + + private getPackageSourceString(pkg: PackageSource): string { + return typeof pkg === "string" ? pkg : pkg.source; + } + + private getSourceMatchKeyForInput(source: string): string { + const parsed = this.parseSource(source); + if (parsed.type === "npm") { + return `npm:${parsed.name}`; + } + if (parsed.type === "git") { + return `git:${parsed.host}/${parsed.path}`; + } + return `local:${this.resolvePath(parsed.path)}`; + } + + private getSourceMatchKeyForSettings(source: string, scope: SourceScope): string { + const parsed = this.parseSource(source); + if (parsed.type === "npm") { + return `npm:${parsed.name}`; + } + if (parsed.type === "git") { + return `git:${parsed.host}/${parsed.path}`; + } + const baseDir = this.getBaseDirForScope(scope); + return `local:${this.resolvePathFromBase(parsed.path, baseDir)}`; + } + + private packageSourcesMatch(existing: PackageSource, inputSource: string, scope: SourceScope): boolean { + const left = this.getSourceMatchKeyForSettings(this.getPackageSourceString(existing), scope); + const right = this.getSourceMatchKeyForInput(inputSource); + return left === right; + } + + private normalizePackageSourceForSettings(source: string, scope: SourceScope): string { + const parsed = this.parseSource(source); + if 
(parsed.type !== "local") { + return source; + } + const baseDir = this.getBaseDirForScope(scope); + const resolved = this.resolvePath(parsed.path); + const rel = relative(baseDir, resolved); + return rel || "."; + } + + private parseSource(source: string): ParsedSource { + if (source.startsWith("npm:")) { + const spec = source.slice("npm:".length).trim(); + const { name, version } = this.parseNpmSpec(spec); + return { + type: "npm", + spec, + name, + pinned: Boolean(version), + }; + } + + const trimmed = source.trim(); + const isWindowsAbsolutePath = /^[A-Za-z]:[\\/]|^\\\\/.test(trimmed); + const isLocalPathLike = + trimmed.startsWith(".") || + trimmed.startsWith("/") || + trimmed === "~" || + trimmed.startsWith("~/") || + isWindowsAbsolutePath; + if (isLocalPathLike) { + return { type: "local", path: source }; + } + + // Try parsing as git URL + const gitParsed = parseGitUrl(source); + if (gitParsed) { + return gitParsed; + } + + return { type: "local", path: source }; + } + + /** + * Check if an npm package needs to be updated. 
+ * - For unpinned packages: check if registry has a newer version + * - For pinned packages: check if installed version matches the pinned version + */ + private async npmNeedsUpdate(source: NpmSource, installedPath: string): Promise { + if (isOfflineModeEnabled()) { + return false; + } + + const installedVersion = this.getInstalledNpmVersion(installedPath); + if (!installedVersion) return true; + + const { version: pinnedVersion } = this.parseNpmSpec(source.spec); + if (pinnedVersion) { + // Pinned: check if installed matches pinned (exact match for now) + return installedVersion !== pinnedVersion; + } + + // Unpinned: check registry for latest version + try { + const latestVersion = await this.getLatestNpmVersion(source.name); + return latestVersion !== installedVersion; + } catch { + // If we can't check registry, assume it's fine + return false; + } + } + + private getInstalledNpmVersion(installedPath: string): string | undefined { + const packageJsonPath = join(installedPath, "package.json"); + if (!existsSync(packageJsonPath)) return undefined; + try { + const content = readFileSync(packageJsonPath, "utf-8"); + const pkg = JSON.parse(content) as { version?: string }; + return pkg.version; + } catch { + return undefined; + } + } + + private async getLatestNpmVersion(packageName: string): Promise { + const response = await fetch(`https://registry.npmjs.org/${packageName}/latest`, { + signal: AbortSignal.timeout(NETWORK_TIMEOUT_MS), + }); + if (!response.ok) throw new Error(`Failed to fetch npm registry: ${response.status}`); + const data = (await response.json()) as { version: string }; + return data.version; + } + + /** + * Get a unique identity for a package, ignoring version/ref. + * Used to detect when the same package is in both global and project settings. + * For git packages, uses normalized host/path to ensure SSH and HTTPS URLs + * for the same repository are treated as identical. 
+ */ + private getPackageIdentity(source: string, scope?: SourceScope): string { + const parsed = this.parseSource(source); + if (parsed.type === "npm") { + return `npm:${parsed.name}`; + } + if (parsed.type === "git") { + // Use host/path for identity to normalize SSH and HTTPS + return `git:${parsed.host}/${parsed.path}`; + } + if (scope) { + const baseDir = this.getBaseDirForScope(scope); + return `local:${this.resolvePathFromBase(parsed.path, baseDir)}`; + } + return `local:${this.resolvePath(parsed.path)}`; + } + + /** + * Dedupe packages: if same package identity appears in both global and project, + * keep only the project one (project wins). + */ + private dedupePackages( + packages: Array<{ pkg: PackageSource; scope: SourceScope }>, + ): Array<{ pkg: PackageSource; scope: SourceScope }> { + const seen = new Map(); + + for (const entry of packages) { + const sourceStr = typeof entry.pkg === "string" ? entry.pkg : entry.pkg.source; + const identity = this.getPackageIdentity(sourceStr, entry.scope); + + const existing = seen.get(identity); + if (!existing) { + seen.set(identity, entry); + } else if (entry.scope === "project" && existing.scope === "user") { + // Project wins over user + seen.set(identity, entry); + } + // If existing is project and new is global, keep existing (project) + // If both are same scope, keep first one + } + + return Array.from(seen.values()); + } + + private parseNpmSpec(spec: string): { name: string; version?: string } { + const match = spec.match(/^(@?[^@]+(?:\/[^@]+)?)(?:@(.+))?$/); + if (!match) { + return { name: spec }; + } + const name = match[1] ?? 
spec; + const version = match[2]; + return { name, version }; + } + + private async installNpm(source: NpmSource, scope: SourceScope, temporary: boolean): Promise { + if (scope === "user" && !temporary) { + await this.runCommand("npm", ["install", "-g", source.spec]); + return; + } + const installRoot = this.getNpmInstallRoot(scope, temporary); + this.ensureNpmProject(installRoot); + await this.runCommand("npm", ["install", source.spec, "--prefix", installRoot]); + } + + private async uninstallNpm(source: NpmSource, scope: SourceScope): Promise { + if (scope === "user") { + await this.runCommand("npm", ["uninstall", "-g", source.name]); + return; + } + const installRoot = this.getNpmInstallRoot(scope, false); + if (!existsSync(installRoot)) { + return; + } + await this.runCommand("npm", ["uninstall", source.name, "--prefix", installRoot]); + } + + private async installGit(source: GitSource, scope: SourceScope): Promise { + const targetDir = this.getGitInstallPath(source, scope); + if (existsSync(targetDir)) { + return; + } + const gitRoot = this.getGitInstallRoot(scope); + if (gitRoot) { + this.ensureGitIgnore(gitRoot); + } + mkdirSync(dirname(targetDir), { recursive: true }); + + await this.runCommand("git", ["clone", source.repo, targetDir]); + if (source.ref) { + await this.runCommand("git", ["checkout", source.ref], { cwd: targetDir }); + } + const packageJsonPath = join(targetDir, "package.json"); + if (existsSync(packageJsonPath)) { + await this.runCommand("npm", ["install"], { cwd: targetDir }); + } + } + + private async updateGit(source: GitSource, scope: SourceScope): Promise { + const targetDir = this.getGitInstallPath(source, scope); + if (!existsSync(targetDir)) { + await this.installGit(source, scope); + return; + } + + // Fetch latest from remote (handles force-push by getting new history) + await this.runCommand("git", ["fetch", "--prune", "origin"], { cwd: targetDir }); + + // Reset to tracking branch. 
Fall back to origin/HEAD when no upstream is configured. + try { + await this.runCommand("git", ["reset", "--hard", "@{upstream}"], { cwd: targetDir }); + } catch { + await this.runCommand("git", ["remote", "set-head", "origin", "-a"], { cwd: targetDir }).catch(() => {}); + await this.runCommand("git", ["reset", "--hard", "origin/HEAD"], { cwd: targetDir }); + } + + // Clean untracked files (extensions should be pristine) + await this.runCommand("git", ["clean", "-fdx"], { cwd: targetDir }); + + const packageJsonPath = join(targetDir, "package.json"); + if (existsSync(packageJsonPath)) { + await this.runCommand("npm", ["install"], { cwd: targetDir }); + } + } + + private async refreshTemporaryGitSource(source: GitSource, sourceStr: string): Promise { + if (isOfflineModeEnabled()) { + return; + } + try { + await this.withProgress("pull", sourceStr, `Refreshing ${sourceStr}...`, async () => { + await this.updateGit(source, "temporary"); + }); + } catch { + // Keep cached temporary checkout if refresh fails. 
+ } + } + + private async removeGit(source: GitSource, scope: SourceScope): Promise { + const targetDir = this.getGitInstallPath(source, scope); + if (!existsSync(targetDir)) return; + rmSync(targetDir, { recursive: true, force: true }); + this.pruneEmptyGitParents(targetDir, this.getGitInstallRoot(scope)); + } + + private pruneEmptyGitParents(targetDir: string, installRoot: string | undefined): void { + if (!installRoot) return; + const resolvedRoot = resolve(installRoot); + let current = dirname(targetDir); + while (current.startsWith(resolvedRoot) && current !== resolvedRoot) { + if (!existsSync(current)) { + current = dirname(current); + continue; + } + const entries = readdirSync(current); + if (entries.length > 0) { + break; + } + try { + rmSync(current, { recursive: true, force: true }); + } catch { + break; + } + current = dirname(current); + } + } + + private ensureNpmProject(installRoot: string): void { + if (!existsSync(installRoot)) { + mkdirSync(installRoot, { recursive: true }); + } + this.ensureGitIgnore(installRoot); + const packageJsonPath = join(installRoot, "package.json"); + if (!existsSync(packageJsonPath)) { + const pkgJson = { name: "pi-extensions", private: true }; + writeFileSync(packageJsonPath, JSON.stringify(pkgJson, null, 2), "utf-8"); + } + } + + private ensureGitIgnore(dir: string): void { + if (!existsSync(dir)) { + mkdirSync(dir, { recursive: true }); + } + const ignorePath = join(dir, ".gitignore"); + if (!existsSync(ignorePath)) { + writeFileSync(ignorePath, "*\n!.gitignore\n", "utf-8"); + } + } + + private getNpmInstallRoot(scope: SourceScope, temporary: boolean): string { + if (temporary) { + return this.getTemporaryDir("npm"); + } + if (scope === "project") { + return join(this.cwd, CONFIG_DIR_NAME, "npm"); + } + return join(this.getGlobalNpmRoot(), ".."); + } + + private getGlobalNpmRoot(): string { + if (this.globalNpmRoot) { + return this.globalNpmRoot; + } + const result = this.runCommandSync("npm", ["root", "-g"]); + 
this.globalNpmRoot = result.trim(); + return this.globalNpmRoot; + } + + private getNpmInstallPath(source: NpmSource, scope: SourceScope): string { + if (scope === "temporary") { + return join(this.getTemporaryDir("npm"), "node_modules", source.name); + } + if (scope === "project") { + return join(this.cwd, CONFIG_DIR_NAME, "npm", "node_modules", source.name); + } + return join(this.getGlobalNpmRoot(), source.name); + } + + private getGitInstallPath(source: GitSource, scope: SourceScope): string { + if (scope === "temporary") { + return this.getTemporaryDir(`git-${source.host}`, source.path); + } + if (scope === "project") { + return join(this.cwd, CONFIG_DIR_NAME, "git", source.host, source.path); + } + return join(this.agentDir, "git", source.host, source.path); + } + + private getGitInstallRoot(scope: SourceScope): string | undefined { + if (scope === "temporary") { + return undefined; + } + if (scope === "project") { + return join(this.cwd, CONFIG_DIR_NAME, "git"); + } + return join(this.agentDir, "git"); + } + + private getTemporaryDir(prefix: string, suffix?: string): string { + const hash = createHash("sha256") + .update(`${prefix}-${suffix ?? ""}`) + .digest("hex") + .slice(0, 8); + return join(tmpdir(), "pi-extensions", prefix, hash, suffix ?? 
""); + } + + private getBaseDirForScope(scope: SourceScope): string { + if (scope === "project") { + return join(this.cwd, CONFIG_DIR_NAME); + } + if (scope === "user") { + return this.agentDir; + } + return this.cwd; + } + + private resolvePath(input: string): string { + const trimmed = input.trim(); + if (trimmed === "~") return homedir(); + if (trimmed.startsWith("~/")) return join(homedir(), trimmed.slice(2)); + if (trimmed.startsWith("~")) return join(homedir(), trimmed.slice(1)); + return resolve(this.cwd, trimmed); + } + + private resolvePathFromBase(input: string, baseDir: string): string { + const trimmed = input.trim(); + if (trimmed === "~") return homedir(); + if (trimmed.startsWith("~/")) return join(homedir(), trimmed.slice(2)); + if (trimmed.startsWith("~")) return join(homedir(), trimmed.slice(1)); + return resolve(baseDir, trimmed); + } + + private collectPackageResources( + packageRoot: string, + accumulator: ResourceAccumulator, + filter: PackageFilter | undefined, + metadata: PathMetadata, + ): boolean { + if (filter) { + for (const resourceType of RESOURCE_TYPES) { + const patterns = filter[resourceType as keyof PackageFilter]; + const target = this.getTargetMap(accumulator, resourceType); + if (patterns !== undefined) { + this.applyPackageFilter(packageRoot, patterns, resourceType, target, metadata); + } else { + this.collectDefaultResources(packageRoot, resourceType, target, metadata); + } + } + return true; + } + + const manifest = this.readPiManifest(packageRoot); + if (manifest) { + for (const resourceType of RESOURCE_TYPES) { + const entries = manifest[resourceType as keyof PiManifest]; + this.addManifestEntries( + entries, + packageRoot, + resourceType, + this.getTargetMap(accumulator, resourceType), + metadata, + ); + } + return true; + } + + let hasAnyDir = false; + for (const resourceType of RESOURCE_TYPES) { + const dir = join(packageRoot, resourceType); + if (existsSync(dir)) { + // Collect all files from the directory (all enabled 
by default) + const files = collectResourceFiles(dir, resourceType); + for (const f of files) { + this.addResource(this.getTargetMap(accumulator, resourceType), f, metadata, true); + } + hasAnyDir = true; + } + } + return hasAnyDir; + } + + private collectDefaultResources( + packageRoot: string, + resourceType: ResourceType, + target: Map, + metadata: PathMetadata, + ): void { + const manifest = this.readPiManifest(packageRoot); + const entries = manifest?.[resourceType as keyof PiManifest]; + if (entries) { + this.addManifestEntries(entries, packageRoot, resourceType, target, metadata); + return; + } + const dir = join(packageRoot, resourceType); + if (existsSync(dir)) { + // Collect all files from the directory (all enabled by default) + const files = collectResourceFiles(dir, resourceType); + for (const f of files) { + this.addResource(target, f, metadata, true); + } + } + } + + private applyPackageFilter( + packageRoot: string, + userPatterns: string[], + resourceType: ResourceType, + target: Map, + metadata: PathMetadata, + ): void { + const { allFiles } = this.collectManifestFiles(packageRoot, resourceType); + + if (userPatterns.length === 0) { + // Empty array explicitly disables all resources of this type + for (const f of allFiles) { + this.addResource(target, f, metadata, false); + } + return; + } + + // Apply user patterns + const enabledByUser = applyPatterns(allFiles, userPatterns, packageRoot); + + for (const f of allFiles) { + const enabled = enabledByUser.has(f); + this.addResource(target, f, metadata, enabled); + } + } + + /** + * Collect all files from a package for a resource type, applying manifest patterns. + * Returns { allFiles, enabledByManifest } where enabledByManifest is the set of files + * that pass the manifest's own patterns. 
+ */ + private collectManifestFiles( + packageRoot: string, + resourceType: ResourceType, + ): { allFiles: string[]; enabledByManifest: Set } { + const manifest = this.readPiManifest(packageRoot); + const entries = manifest?.[resourceType as keyof PiManifest]; + if (entries && entries.length > 0) { + const allFiles = this.collectFilesFromManifestEntries(entries, packageRoot, resourceType); + const manifestPatterns = entries.filter(isPattern); + const enabledByManifest = + manifestPatterns.length > 0 ? applyPatterns(allFiles, manifestPatterns, packageRoot) : new Set(allFiles); + return { allFiles: Array.from(enabledByManifest), enabledByManifest }; + } + + const conventionDir = join(packageRoot, resourceType); + if (!existsSync(conventionDir)) { + return { allFiles: [], enabledByManifest: new Set() }; + } + const allFiles = collectResourceFiles(conventionDir, resourceType); + return { allFiles, enabledByManifest: new Set(allFiles) }; + } + + private readPiManifest(packageRoot: string): PiManifest | null { + const packageJsonPath = join(packageRoot, "package.json"); + if (!existsSync(packageJsonPath)) { + return null; + } + + try { + const content = readFileSync(packageJsonPath, "utf-8"); + const pkg = JSON.parse(content) as { pi?: PiManifest }; + return pkg.pi ?? 
null; + } catch { + return null; + } + } + + private addManifestEntries( + entries: string[] | undefined, + root: string, + resourceType: ResourceType, + target: Map, + metadata: PathMetadata, + ): void { + if (!entries) return; + + const allFiles = this.collectFilesFromManifestEntries(entries, root, resourceType); + const patterns = entries.filter(isPattern); + const enabledPaths = applyPatterns(allFiles, patterns, root); + + for (const f of allFiles) { + if (enabledPaths.has(f)) { + this.addResource(target, f, metadata, true); + } + } + } + + private collectFilesFromManifestEntries(entries: string[], root: string, resourceType: ResourceType): string[] { + const plain = entries.filter((entry) => !isPattern(entry)); + const resolved = plain.map((entry) => resolve(root, entry)); + return this.collectFilesFromPaths(resolved, resourceType); + } + + private resolveLocalEntries( + entries: string[], + resourceType: ResourceType, + target: Map, + metadata: PathMetadata, + baseDir: string, + ): void { + if (entries.length === 0) return; + + // Collect all files from plain entries (non-pattern entries) + const { plain, patterns } = splitPatterns(entries); + const resolvedPlain = plain.map((p) => this.resolvePathFromBase(p, baseDir)); + const allFiles = this.collectFilesFromPaths(resolvedPlain, resourceType); + + // Determine which files are enabled based on patterns + const enabledPaths = applyPatterns(allFiles, patterns, baseDir); + + // Add all files with their enabled state + for (const f of allFiles) { + this.addResource(target, f, metadata, enabledPaths.has(f)); + } + } + + private addAutoDiscoveredResources( + accumulator: ResourceAccumulator, + globalSettings: ReturnType, + projectSettings: ReturnType, + globalBaseDir: string, + projectBaseDir: string, + ): void { + const userMetadata: PathMetadata = { + source: "auto", + scope: "user", + origin: "top-level", + baseDir: globalBaseDir, + }; + const projectMetadata: PathMetadata = { + source: "auto", + scope: 
"project", + origin: "top-level", + baseDir: projectBaseDir, + }; + + const userOverrides = { + extensions: (globalSettings.extensions ?? []) as string[], + skills: (globalSettings.skills ?? []) as string[], + prompts: (globalSettings.prompts ?? []) as string[], + themes: (globalSettings.themes ?? []) as string[], + }; + const projectOverrides = { + extensions: (projectSettings.extensions ?? []) as string[], + skills: (projectSettings.skills ?? []) as string[], + prompts: (projectSettings.prompts ?? []) as string[], + themes: (projectSettings.themes ?? []) as string[], + }; + + const userDirs = { + extensions: join(globalBaseDir, "extensions"), + skills: join(globalBaseDir, "skills"), + prompts: join(globalBaseDir, "prompts"), + themes: join(globalBaseDir, "themes"), + }; + const projectDirs = { + extensions: join(projectBaseDir, "extensions"), + skills: join(projectBaseDir, "skills"), + prompts: join(projectBaseDir, "prompts"), + themes: join(projectBaseDir, "themes"), + }; + const userAgentsSkillsDir = join(homedir(), ".agents", "skills"); + const projectAgentsSkillDirs = collectAncestorAgentsSkillDirs(this.cwd).filter( + (dir) => resolve(dir) !== resolve(userAgentsSkillsDir), + ); + + const addResources = ( + resourceType: ResourceType, + paths: string[], + metadata: PathMetadata, + overrides: string[], + baseDir: string, + ) => { + const target = this.getTargetMap(accumulator, resourceType); + for (const path of paths) { + const enabled = isEnabledByOverrides(path, overrides, baseDir); + this.addResource(target, path, metadata, enabled); + } + }; + + addResources( + "extensions", + collectAutoExtensionEntries(projectDirs.extensions), + projectMetadata, + projectOverrides.extensions, + projectBaseDir, + ); + addResources( + "skills", + [ + ...collectAutoSkillEntries(projectDirs.skills), + ...projectAgentsSkillDirs.flatMap((dir) => collectAutoSkillEntries(dir)), + ], + projectMetadata, + projectOverrides.skills, + projectBaseDir, + ); + addResources( + "prompts", 
+ collectAutoPromptEntries(projectDirs.prompts), + projectMetadata, + projectOverrides.prompts, + projectBaseDir, + ); + addResources( + "themes", + collectAutoThemeEntries(projectDirs.themes), + projectMetadata, + projectOverrides.themes, + projectBaseDir, + ); + + addResources( + "extensions", + collectAutoExtensionEntries(userDirs.extensions), + userMetadata, + userOverrides.extensions, + globalBaseDir, + ); + addResources( + "skills", + [...collectAutoSkillEntries(userDirs.skills), ...collectAutoSkillEntries(userAgentsSkillsDir)], + userMetadata, + userOverrides.skills, + globalBaseDir, + ); + addResources( + "prompts", + collectAutoPromptEntries(userDirs.prompts), + userMetadata, + userOverrides.prompts, + globalBaseDir, + ); + addResources( + "themes", + collectAutoThemeEntries(userDirs.themes), + userMetadata, + userOverrides.themes, + globalBaseDir, + ); + } + + private collectFilesFromPaths(paths: string[], resourceType: ResourceType): string[] { + const files: string[] = []; + for (const p of paths) { + if (!existsSync(p)) continue; + + try { + const stats = statSync(p); + if (stats.isFile()) { + files.push(p); + } else if (stats.isDirectory()) { + files.push(...collectResourceFiles(p, resourceType)); + } + } catch { + // Ignore errors + } + } + return files; + } + + private getTargetMap( + accumulator: ResourceAccumulator, + resourceType: ResourceType, + ): Map { + switch (resourceType) { + case "extensions": + return accumulator.extensions; + case "skills": + return accumulator.skills; + case "prompts": + return accumulator.prompts; + case "themes": + return accumulator.themes; + default: + throw new Error(`Unknown resource type: ${resourceType}`); + } + } + + private addResource( + map: Map, + path: string, + metadata: PathMetadata, + enabled: boolean, + ): void { + if (!path) return; + if (!map.has(path)) { + map.set(path, { metadata, enabled }); + } + } + + private createAccumulator(): ResourceAccumulator { + return { + extensions: new Map(), + 
skills: new Map(), + prompts: new Map(), + themes: new Map(), + }; + } + + private toResolvedPaths(accumulator: ResourceAccumulator): ResolvedPaths { + const toResolved = (entries: Map): ResolvedResource[] => { + return Array.from(entries.entries()).map(([path, { metadata, enabled }]) => ({ + path, + enabled, + metadata, + })); + }; + + return { + extensions: toResolved(accumulator.extensions), + skills: toResolved(accumulator.skills), + prompts: toResolved(accumulator.prompts), + themes: toResolved(accumulator.themes), + }; + } + + private runCommand(command: string, args: string[], options?: { cwd?: string }): Promise { + return new Promise((resolvePromise, reject) => { + const child = spawn(command, args, { + cwd: options?.cwd, + stdio: "inherit", + shell: process.platform === "win32", + }); + child.on("error", reject); + child.on("exit", (code) => { + if (code === 0) { + resolvePromise(); + } else { + reject(new Error(`${command} ${args.join(" ")} failed with code ${code}`)); + } + }); + }); + } + + private runCommandSync(command: string, args: string[]): string { + const result = spawnSync(command, args, { + stdio: ["ignore", "pipe", "pipe"], + encoding: "utf-8", + shell: process.platform === "win32", + }); + if (result.status !== 0) { + throw new Error(`Failed to run ${command} ${args.join(" ")}: ${result.stderr || result.stdout}`); + } + return (result.stdout || result.stderr || "").trim(); + } +} diff --git a/packages/pi-coding-agent/src/core/prompt-templates.ts b/packages/pi-coding-agent/src/core/prompt-templates.ts new file mode 100644 index 000000000..d7ed0953c --- /dev/null +++ b/packages/pi-coding-agent/src/core/prompt-templates.ts @@ -0,0 +1,299 @@ +import { existsSync, readdirSync, readFileSync, statSync } from "fs"; +import { homedir } from "os"; +import { basename, isAbsolute, join, resolve, sep } from "path"; +import { CONFIG_DIR_NAME, getPromptsDir } from "../config.js"; +import { parseFrontmatter } from "../utils/frontmatter.js"; + +/** + * 
Represents a prompt template loaded from a markdown file + */ +export interface PromptTemplate { + name: string; + description: string; + content: string; + source: string; // "user", "project", or "path" + filePath: string; // Absolute path to the template file +} + +/** + * Parse command arguments respecting quoted strings (bash-style) + * Returns array of arguments + */ +export function parseCommandArgs(argsString: string): string[] { + const args: string[] = []; + let current = ""; + let inQuote: string | null = null; + + for (let i = 0; i < argsString.length; i++) { + const char = argsString[i]; + + if (inQuote) { + if (char === inQuote) { + inQuote = null; + } else { + current += char; + } + } else if (char === '"' || char === "'") { + inQuote = char; + } else if (char === " " || char === "\t") { + if (current) { + args.push(current); + current = ""; + } + } else { + current += char; + } + } + + if (current) { + args.push(current); + } + + return args; +} + +/** + * Substitute argument placeholders in template content + * Supports: + * - $1, $2, ... for positional args + * - $@ and $ARGUMENTS for all args + * - ${@:N} for args from Nth onwards (bash-style slicing) + * - ${@:N:L} for L args starting from Nth + * + * Note: Replacement happens on the template string only. Argument values + * containing patterns like $1, $@, or $ARGUMENTS are NOT recursively substituted. + */ +export function substituteArgs(content: string, args: string[]): string { + let result = content; + + // Replace $1, $2, etc. with positional args FIRST (before wildcards) + // This prevents wildcard replacement values containing $ patterns from being re-substituted + result = result.replace(/\$(\d+)/g, (_, num) => { + const index = parseInt(num, 10) - 1; + return args[index] ?? 
""; + }); + + // Replace ${@:start} or ${@:start:length} with sliced args (bash-style) + // Process BEFORE simple $@ to avoid conflicts + result = result.replace(/\$\{@:(\d+)(?::(\d+))?\}/g, (_, startStr, lengthStr) => { + let start = parseInt(startStr, 10) - 1; // Convert to 0-indexed (user provides 1-indexed) + // Treat 0 as 1 (bash convention: args start at 1) + if (start < 0) start = 0; + + if (lengthStr) { + const length = parseInt(lengthStr, 10); + return args.slice(start, start + length).join(" "); + } + return args.slice(start).join(" "); + }); + + // Pre-compute all args joined (optimization) + const allArgs = args.join(" "); + + // Replace $ARGUMENTS with all args joined (new syntax, aligns with Claude, Codex, OpenCode) + result = result.replace(/\$ARGUMENTS/g, allArgs); + + // Replace $@ with all args joined (existing syntax) + result = result.replace(/\$@/g, allArgs); + + return result; +} + +function loadTemplateFromFile(filePath: string, source: string, sourceLabel: string): PromptTemplate | null { + try { + const rawContent = readFileSync(filePath, "utf-8"); + const { frontmatter, body } = parseFrontmatter>(rawContent); + + const name = basename(filePath).replace(/\.md$/, ""); + + // Get description from frontmatter or first non-empty line + let description = frontmatter.description || ""; + if (!description) { + const firstLine = body.split("\n").find((line) => line.trim()); + if (firstLine) { + // Truncate if too long + description = firstLine.slice(0, 60); + if (firstLine.length > 60) description += "..."; + } + } + + // Append source to description + description = description ? `${description} ${sourceLabel}` : sourceLabel; + + return { + name, + description, + content: body, + source, + filePath, + }; + } catch { + return null; + } +} + +/** + * Scan a directory for .md files (non-recursive) and load them as prompt templates. 
+ */
+function loadTemplatesFromDir(dir: string, source: string, sourceLabel: string): PromptTemplate[] {
+	// Returned even on errors; a mid-scan failure yields whatever loaded so far.
+	const templates: PromptTemplate[] = [];
+
+	if (!existsSync(dir)) {
+		return templates;
+	}
+
+	try {
+		const entries = readdirSync(dir, { withFileTypes: true });
+
+		for (const entry of entries) {
+			const fullPath = join(dir, entry.name);
+
+			// For symlinks, check if they point to a file
+			let isFile = entry.isFile();
+			if (entry.isSymbolicLink()) {
+				try {
+					const stats = statSync(fullPath);
+					isFile = stats.isFile();
+				} catch {
+					// Broken symlink, skip it
+					continue;
+				}
+			}
+
+			if (isFile && entry.name.endsWith(".md")) {
+				const template = loadTemplateFromFile(fullPath, source, sourceLabel);
+				if (template) {
+					templates.push(template);
+				}
+			}
+		}
+	} catch {
+		// Directory unreadable (e.g. permissions) — swallow and return partial.
+		return templates;
+	}
+
+	return templates;
+}
+
+export interface LoadPromptTemplatesOptions {
+	/** Working directory for project-local templates. Default: process.cwd() */
+	cwd?: string;
+	/** Agent config directory for global templates. Default: from getPromptsDir() */
+	agentDir?: string;
+	/** Explicit prompt template paths (files or directories) */
+	promptPaths?: string[];
+	/** Include default prompt directories. Default: true */
+	includeDefaults?: boolean;
+}
+
+// Expand a leading "~" to the user's home directory.
+// NOTE(review): "~name" maps to "<home>/name", not to user name's home
+// directory as a shell would — confirm this is the intended convention
+// (resource-loader.ts uses the same rule).
+function normalizePath(input: string): string {
+	const trimmed = input.trim();
+	if (trimmed === "~") return homedir();
+	if (trimmed.startsWith("~/")) return join(homedir(), trimmed.slice(2));
+	if (trimmed.startsWith("~")) return join(homedir(), trimmed.slice(1));
+	return trimmed;
+}
+
+// Resolve a user-supplied prompt path: tilde-expand, then absolutize vs cwd.
+function resolvePromptPath(p: string, cwd: string): string {
+	const normalized = normalizePath(p);
+	return isAbsolute(normalized) ? normalized : resolve(cwd, normalized);
+}
+
+// Build the "(path:<basename>)" label shown for explicitly-passed templates.
+function buildPathSourceLabel(p: string): string {
+	const base = basename(p).replace(/\.md$/, "") || "path";
+	return `(path:${base})`;
+}
+
+/**
+ * Load all prompt templates from:
+ * 1. Global: agentDir/prompts/
+ * 2.
Project: cwd/{CONFIG_DIR_NAME}/prompts/ + * 3. Explicit prompt paths + */ +export function loadPromptTemplates(options: LoadPromptTemplatesOptions = {}): PromptTemplate[] { + const resolvedCwd = options.cwd ?? process.cwd(); + const resolvedAgentDir = options.agentDir ?? getPromptsDir(); + const promptPaths = options.promptPaths ?? []; + const includeDefaults = options.includeDefaults ?? true; + + const templates: PromptTemplate[] = []; + + if (includeDefaults) { + // 1. Load global templates from agentDir/prompts/ + // Note: if agentDir is provided, it should be the agent dir, not the prompts dir + const globalPromptsDir = options.agentDir ? join(options.agentDir, "prompts") : resolvedAgentDir; + templates.push(...loadTemplatesFromDir(globalPromptsDir, "user", "(user)")); + + // 2. Load project templates from cwd/{CONFIG_DIR_NAME}/prompts/ + const projectPromptsDir = resolve(resolvedCwd, CONFIG_DIR_NAME, "prompts"); + templates.push(...loadTemplatesFromDir(projectPromptsDir, "project", "(project)")); + } + + const userPromptsDir = options.agentDir ? join(options.agentDir, "prompts") : resolvedAgentDir; + const projectPromptsDir = resolve(resolvedCwd, CONFIG_DIR_NAME, "prompts"); + + const isUnderPath = (target: string, root: string): boolean => { + const normalizedRoot = resolve(root); + if (target === normalizedRoot) { + return true; + } + const prefix = normalizedRoot.endsWith(sep) ? normalizedRoot : `${normalizedRoot}${sep}`; + return target.startsWith(prefix); + }; + + const getSourceInfo = (resolvedPath: string): { source: string; label: string } => { + if (!includeDefaults) { + if (isUnderPath(resolvedPath, userPromptsDir)) { + return { source: "user", label: "(user)" }; + } + if (isUnderPath(resolvedPath, projectPromptsDir)) { + return { source: "project", label: "(project)" }; + } + } + return { source: "path", label: buildPathSourceLabel(resolvedPath) }; + }; + + // 3. 
Load explicit prompt paths + for (const rawPath of promptPaths) { + const resolvedPath = resolvePromptPath(rawPath, resolvedCwd); + if (!existsSync(resolvedPath)) { + continue; + } + + try { + const stats = statSync(resolvedPath); + const { source, label } = getSourceInfo(resolvedPath); + if (stats.isDirectory()) { + templates.push(...loadTemplatesFromDir(resolvedPath, source, label)); + } else if (stats.isFile() && resolvedPath.endsWith(".md")) { + const template = loadTemplateFromFile(resolvedPath, source, label); + if (template) { + templates.push(template); + } + } + } catch { + // Ignore read failures + } + } + + return templates; +} + +/** + * Expand a prompt template if it matches a template name. + * Returns the expanded content or the original text if not a template. + */ +export function expandPromptTemplate(text: string, templates: PromptTemplate[]): string { + if (!text.startsWith("/")) return text; + + const spaceIndex = text.indexOf(" "); + const templateName = spaceIndex === -1 ? text.slice(1) : text.slice(1, spaceIndex); + const argsString = spaceIndex === -1 ? "" : text.slice(spaceIndex + 1); + + const template = templates.find((t) => t.name === templateName); + if (template) { + const args = parseCommandArgs(argsString); + return substituteArgs(template.content, args); + } + + return text; +} diff --git a/packages/pi-coding-agent/src/core/resolve-config-value.ts b/packages/pi-coding-agent/src/core/resolve-config-value.ts new file mode 100644 index 000000000..da127869b --- /dev/null +++ b/packages/pi-coding-agent/src/core/resolve-config-value.ts @@ -0,0 +1,64 @@ +/** + * Resolve configuration values that may be shell commands, environment variables, or literals. + * Used by auth-storage.ts and model-registry.ts. + */ + +import { execSync } from "child_process"; + +// Cache for shell command results (persists for process lifetime) +const commandResultCache = new Map(); + +/** + * Resolve a config value (API key, header value, etc.) 
to an actual value.
+ * - If starts with "!", executes the rest as a shell command and uses stdout (cached)
+ * - Otherwise checks environment variable first, then treats as literal (not cached)
+ */
+export function resolveConfigValue(config: string): string | undefined {
+	if (config.startsWith("!")) {
+		return executeCommand(config);
+	}
+	// Env-var lookup wins; an unset (or empty-string) env var falls back to
+	// treating the config string itself as the literal value.
+	const envValue = process.env[config];
+	return envValue || config;
+}
+
+// Run the "!command" form via the shell and cache the trimmed stdout.
+// SECURITY: executes whatever the config string specifies; callers must only
+// pass values from trusted, user-owned configuration.
+function executeCommand(commandConfig: string): string | undefined {
+	if (commandResultCache.has(commandConfig)) {
+		return commandResultCache.get(commandConfig);
+	}
+
+	const command = commandConfig.slice(1);
+	let result: string | undefined;
+	try {
+		const output = execSync(command, {
+			encoding: "utf-8",
+			timeout: 10000, // 10s guard against hung credential helpers
+			stdio: ["ignore", "pipe", "ignore"],
+		});
+		// Empty stdout is treated as "no value".
+		result = output.trim() || undefined;
+	} catch {
+		// NOTE(review): failures are cached as undefined for the process
+		// lifetime (see .set below), so a transiently-broken credential
+		// command never retries — confirm this is intentional.
+		result = undefined;
+	}
+
+	commandResultCache.set(commandConfig, result);
+	return result;
+}
+
+/**
+ * Resolve all header values using the same resolution logic as API keys.
+ * Headers whose value resolves to nothing are dropped; returns undefined
+ * when no headers remain after resolution.
+ */
+export function resolveHeaders(headers: Record | undefined): Record | undefined {
+	if (!headers) return undefined;
+	const resolved: Record = {};
+	for (const [key, value] of Object.entries(headers)) {
+		const resolvedValue = resolveConfigValue(value);
+		if (resolvedValue) {
+			resolved[key] = resolvedValue;
+		}
+	}
+	return Object.keys(resolved).length > 0 ? resolved : undefined;
+}
+
+/** Clear the config value command cache. Exported for testing.
*/ +export function clearConfigValueCache(): void { + commandResultCache.clear(); +} diff --git a/packages/pi-coding-agent/src/core/resource-loader.ts b/packages/pi-coding-agent/src/core/resource-loader.ts new file mode 100644 index 000000000..1c650f797 --- /dev/null +++ b/packages/pi-coding-agent/src/core/resource-loader.ts @@ -0,0 +1,868 @@ +import { existsSync, readdirSync, readFileSync, statSync } from "node:fs"; +import { homedir } from "node:os"; +import { join, resolve, sep } from "node:path"; +import chalk from "chalk"; +import { CONFIG_DIR_NAME, getAgentDir } from "../config.js"; +import { loadThemeFromPath, type Theme } from "../modes/interactive/theme/theme.js"; +import type { ResourceDiagnostic } from "./diagnostics.js"; + +export type { ResourceCollision, ResourceDiagnostic } from "./diagnostics.js"; + +import { createEventBus, type EventBus } from "./event-bus.js"; +import { createExtensionRuntime, loadExtensionFromFactory, loadExtensions } from "./extensions/loader.js"; +import type { Extension, ExtensionFactory, ExtensionRuntime, LoadExtensionsResult } from "./extensions/types.js"; +import { DefaultPackageManager, type PathMetadata } from "./package-manager.js"; +import type { PromptTemplate } from "./prompt-templates.js"; +import { loadPromptTemplates } from "./prompt-templates.js"; +import { SettingsManager } from "./settings-manager.js"; +import type { Skill } from "./skills.js"; +import { loadSkills } from "./skills.js"; + +export interface ResourceExtensionPaths { + skillPaths?: Array<{ path: string; metadata: PathMetadata }>; + promptPaths?: Array<{ path: string; metadata: PathMetadata }>; + themePaths?: Array<{ path: string; metadata: PathMetadata }>; +} + +export interface ResourceLoader { + getExtensions(): LoadExtensionsResult; + getSkills(): { skills: Skill[]; diagnostics: ResourceDiagnostic[] }; + getPrompts(): { prompts: PromptTemplate[]; diagnostics: ResourceDiagnostic[] }; + getThemes(): { themes: Theme[]; diagnostics: 
ResourceDiagnostic[] };
+	getAgentsFiles(): { agentsFiles: Array<{ path: string; content: string }> };
+	getSystemPrompt(): string | undefined;
+	getAppendSystemPrompt(): string[];
+	getPathMetadata(): Map;
+	extendResources(paths: ResourceExtensionPaths): void;
+	reload(): Promise;
+}
+
+// Resolve a system-prompt input that may be either a file path or literal
+// text. If the string names an existing file its contents are returned; an
+// unreadable file logs a warning and falls back to the raw input string.
+function resolvePromptInput(input: string | undefined, description: string): string | undefined {
+	if (!input) {
+		return undefined;
+	}
+
+	if (existsSync(input)) {
+		try {
+			return readFileSync(input, "utf-8");
+		} catch (error) {
+			console.error(chalk.yellow(`Warning: Could not read ${description} file ${input}: ${error}`));
+			return input;
+		}
+	}
+
+	return input;
+}
+
+// Load the first context file found in dir, preferring AGENTS.md over
+// CLAUDE.md. Returns null when neither exists or the file cannot be read.
+function loadContextFileFromDir(dir: string): { path: string; content: string } | null {
+	const candidates = ["AGENTS.md", "CLAUDE.md"];
+	for (const filename of candidates) {
+		const filePath = join(dir, filename);
+		if (existsSync(filePath)) {
+			try {
+				return {
+					path: filePath,
+					content: readFileSync(filePath, "utf-8"),
+				};
+			} catch (error) {
+				console.error(chalk.yellow(`Warning: Could not read ${filePath}: ${error}`));
+			}
+		}
+	}
+	return null;
+}
+
+// Collect AGENTS.md/CLAUDE.md context files: the global one from agentDir
+// first, then one per ancestor directory of cwd ordered root -> cwd (the
+// unshift in the loop below produces that order), deduplicated by path.
+function loadProjectContextFiles(
+	options: { cwd?: string; agentDir?: string } = {},
+): Array<{ path: string; content: string }> {
+	const resolvedCwd = options.cwd ?? process.cwd();
+	const resolvedAgentDir = options.agentDir ?? getAgentDir();
+
+	const contextFiles: Array<{ path: string; content: string }> = [];
+	const seenPaths = new Set();
+
+	const globalContext = loadContextFileFromDir(resolvedAgentDir);
+	if (globalContext) {
+		contextFiles.push(globalContext);
+		seenPaths.add(globalContext.path);
+	}
+
+	const ancestorContextFiles: Array<{ path: string; content: string }> = [];
+
+	let currentDir = resolvedCwd;
+	const root = resolve("/");
+
+	// Walk from cwd up to the filesystem root, guarding against a parent
+	// lookup that stops making progress.
+	while (true) {
+		const contextFile = loadContextFileFromDir(currentDir);
+		if (contextFile && !seenPaths.has(contextFile.path)) {
+			ancestorContextFiles.unshift(contextFile);
+			seenPaths.add(contextFile.path);
+		}
+
+		if (currentDir === root) break;
+
+		const parentDir = resolve(currentDir, "..");
+		if (parentDir === currentDir) break;
+		currentDir = parentDir;
+	}
+
+	contextFiles.push(...ancestorContextFiles);
+
+	return contextFiles;
+}
+
+export interface DefaultResourceLoaderOptions {
+	cwd?: string;
+	agentDir?: string;
+	settingsManager?: SettingsManager;
+	eventBus?: EventBus;
+	additionalExtensionPaths?: string[];
+	additionalSkillPaths?: string[];
+	additionalPromptTemplatePaths?: string[];
+	additionalThemePaths?: string[];
+	extensionFactories?: ExtensionFactory[];
+	noExtensions?: boolean;
+	noSkills?: boolean;
+	noPromptTemplates?: boolean;
+	noThemes?: boolean;
+	systemPrompt?: string;
+	appendSystemPrompt?: string;
+	extensionsOverride?: (base: LoadExtensionsResult) => LoadExtensionsResult;
+	skillsOverride?: (base: { skills: Skill[]; diagnostics: ResourceDiagnostic[] }) => {
+		skills: Skill[];
+		diagnostics: ResourceDiagnostic[];
+	};
+	promptsOverride?: (base: { prompts: PromptTemplate[]; diagnostics: ResourceDiagnostic[] }) => {
+		prompts: PromptTemplate[];
+		diagnostics: ResourceDiagnostic[];
+	};
+	themesOverride?: (base: { themes: Theme[]; diagnostics: ResourceDiagnostic[] }) => {
+		themes: Theme[];
+		diagnostics: ResourceDiagnostic[];
+	};
+	agentsFilesOverride?: (base: { agentsFiles: Array<{ path: string; content:
string }> }) => { + agentsFiles: Array<{ path: string; content: string }>; + }; + systemPromptOverride?: (base: string | undefined) => string | undefined; + appendSystemPromptOverride?: (base: string[]) => string[]; +} + +export class DefaultResourceLoader implements ResourceLoader { + private cwd: string; + private agentDir: string; + private settingsManager: SettingsManager; + private eventBus: EventBus; + private packageManager: DefaultPackageManager; + private additionalExtensionPaths: string[]; + private additionalSkillPaths: string[]; + private additionalPromptTemplatePaths: string[]; + private additionalThemePaths: string[]; + private extensionFactories: ExtensionFactory[]; + private noExtensions: boolean; + private noSkills: boolean; + private noPromptTemplates: boolean; + private noThemes: boolean; + private systemPromptSource?: string; + private appendSystemPromptSource?: string; + private extensionsOverride?: (base: LoadExtensionsResult) => LoadExtensionsResult; + private skillsOverride?: (base: { skills: Skill[]; diagnostics: ResourceDiagnostic[] }) => { + skills: Skill[]; + diagnostics: ResourceDiagnostic[]; + }; + private promptsOverride?: (base: { prompts: PromptTemplate[]; diagnostics: ResourceDiagnostic[] }) => { + prompts: PromptTemplate[]; + diagnostics: ResourceDiagnostic[]; + }; + private themesOverride?: (base: { themes: Theme[]; diagnostics: ResourceDiagnostic[] }) => { + themes: Theme[]; + diagnostics: ResourceDiagnostic[]; + }; + private agentsFilesOverride?: (base: { agentsFiles: Array<{ path: string; content: string }> }) => { + agentsFiles: Array<{ path: string; content: string }>; + }; + private systemPromptOverride?: (base: string | undefined) => string | undefined; + private appendSystemPromptOverride?: (base: string[]) => string[]; + + private extensionsResult: LoadExtensionsResult; + private skills: Skill[]; + private skillDiagnostics: ResourceDiagnostic[]; + private prompts: PromptTemplate[]; + private promptDiagnostics: 
ResourceDiagnostic[]; + private themes: Theme[]; + private themeDiagnostics: ResourceDiagnostic[]; + private agentsFiles: Array<{ path: string; content: string }>; + private systemPrompt?: string; + private appendSystemPrompt: string[]; + private pathMetadata: Map; + private lastSkillPaths: string[]; + private lastPromptPaths: string[]; + private lastThemePaths: string[]; + + constructor(options: DefaultResourceLoaderOptions) { + this.cwd = options.cwd ?? process.cwd(); + this.agentDir = options.agentDir ?? getAgentDir(); + this.settingsManager = options.settingsManager ?? SettingsManager.create(this.cwd, this.agentDir); + this.eventBus = options.eventBus ?? createEventBus(); + this.packageManager = new DefaultPackageManager({ + cwd: this.cwd, + agentDir: this.agentDir, + settingsManager: this.settingsManager, + }); + this.additionalExtensionPaths = options.additionalExtensionPaths ?? []; + this.additionalSkillPaths = options.additionalSkillPaths ?? []; + this.additionalPromptTemplatePaths = options.additionalPromptTemplatePaths ?? []; + this.additionalThemePaths = options.additionalThemePaths ?? []; + this.extensionFactories = options.extensionFactories ?? []; + this.noExtensions = options.noExtensions ?? false; + this.noSkills = options.noSkills ?? false; + this.noPromptTemplates = options.noPromptTemplates ?? false; + this.noThemes = options.noThemes ?? 
false; + this.systemPromptSource = options.systemPrompt; + this.appendSystemPromptSource = options.appendSystemPrompt; + this.extensionsOverride = options.extensionsOverride; + this.skillsOverride = options.skillsOverride; + this.promptsOverride = options.promptsOverride; + this.themesOverride = options.themesOverride; + this.agentsFilesOverride = options.agentsFilesOverride; + this.systemPromptOverride = options.systemPromptOverride; + this.appendSystemPromptOverride = options.appendSystemPromptOverride; + + this.extensionsResult = { extensions: [], errors: [], runtime: createExtensionRuntime() }; + this.skills = []; + this.skillDiagnostics = []; + this.prompts = []; + this.promptDiagnostics = []; + this.themes = []; + this.themeDiagnostics = []; + this.agentsFiles = []; + this.appendSystemPrompt = []; + this.pathMetadata = new Map(); + this.lastSkillPaths = []; + this.lastPromptPaths = []; + this.lastThemePaths = []; + } + + getExtensions(): LoadExtensionsResult { + return this.extensionsResult; + } + + getSkills(): { skills: Skill[]; diagnostics: ResourceDiagnostic[] } { + return { skills: this.skills, diagnostics: this.skillDiagnostics }; + } + + getPrompts(): { prompts: PromptTemplate[]; diagnostics: ResourceDiagnostic[] } { + return { prompts: this.prompts, diagnostics: this.promptDiagnostics }; + } + + getThemes(): { themes: Theme[]; diagnostics: ResourceDiagnostic[] } { + return { themes: this.themes, diagnostics: this.themeDiagnostics }; + } + + getAgentsFiles(): { agentsFiles: Array<{ path: string; content: string }> } { + return { agentsFiles: this.agentsFiles }; + } + + getSystemPrompt(): string | undefined { + return this.systemPrompt; + } + + getAppendSystemPrompt(): string[] { + return this.appendSystemPrompt; + } + + getPathMetadata(): Map { + return this.pathMetadata; + } + + extendResources(paths: ResourceExtensionPaths): void { + const skillPaths = this.normalizeExtensionPaths(paths.skillPaths ?? 
[]); + const promptPaths = this.normalizeExtensionPaths(paths.promptPaths ?? []); + const themePaths = this.normalizeExtensionPaths(paths.themePaths ?? []); + + if (skillPaths.length > 0) { + this.lastSkillPaths = this.mergePaths( + this.lastSkillPaths, + skillPaths.map((entry) => entry.path), + ); + this.updateSkillsFromPaths(this.lastSkillPaths, skillPaths); + } + + if (promptPaths.length > 0) { + this.lastPromptPaths = this.mergePaths( + this.lastPromptPaths, + promptPaths.map((entry) => entry.path), + ); + this.updatePromptsFromPaths(this.lastPromptPaths, promptPaths); + } + + if (themePaths.length > 0) { + this.lastThemePaths = this.mergePaths( + this.lastThemePaths, + themePaths.map((entry) => entry.path), + ); + this.updateThemesFromPaths(this.lastThemePaths, themePaths); + } + } + + async reload(): Promise { + const resolvedPaths = await this.packageManager.resolve(); + const cliExtensionPaths = await this.packageManager.resolveExtensionSources(this.additionalExtensionPaths, { + temporary: true, + }); + + // Helper to extract enabled paths and store metadata + const getEnabledResources = ( + resources: Array<{ path: string; enabled: boolean; metadata: PathMetadata }>, + ): Array<{ path: string; enabled: boolean; metadata: PathMetadata }> => { + for (const r of resources) { + if (!this.pathMetadata.has(r.path)) { + this.pathMetadata.set(r.path, r.metadata); + } + } + return resources.filter((r) => r.enabled); + }; + + const getEnabledPaths = ( + resources: Array<{ path: string; enabled: boolean; metadata: PathMetadata }>, + ): string[] => getEnabledResources(resources).map((r) => r.path); + + // Store metadata and get enabled paths + this.pathMetadata = new Map(); + const enabledExtensions = getEnabledPaths(resolvedPaths.extensions); + const enabledSkillResources = getEnabledResources(resolvedPaths.skills); + const enabledPrompts = getEnabledPaths(resolvedPaths.prompts); + const enabledThemes = getEnabledPaths(resolvedPaths.themes); + + const mapSkillPath = 
(resource: { path: string; metadata: PathMetadata }): string => { + if (resource.metadata.source !== "auto" && resource.metadata.origin !== "package") { + return resource.path; + } + try { + const stats = statSync(resource.path); + if (!stats.isDirectory()) { + return resource.path; + } + } catch { + return resource.path; + } + const skillFile = join(resource.path, "SKILL.md"); + if (existsSync(skillFile)) { + if (!this.pathMetadata.has(skillFile)) { + this.pathMetadata.set(skillFile, resource.metadata); + } + return skillFile; + } + return resource.path; + }; + + const enabledSkills = enabledSkillResources.map(mapSkillPath); + + // Add CLI paths metadata + for (const r of cliExtensionPaths.extensions) { + if (!this.pathMetadata.has(r.path)) { + this.pathMetadata.set(r.path, { source: "cli", scope: "temporary", origin: "top-level" }); + } + } + for (const r of cliExtensionPaths.skills) { + if (!this.pathMetadata.has(r.path)) { + this.pathMetadata.set(r.path, { source: "cli", scope: "temporary", origin: "top-level" }); + } + } + + const cliEnabledExtensions = getEnabledPaths(cliExtensionPaths.extensions); + const cliEnabledSkills = getEnabledPaths(cliExtensionPaths.skills); + const cliEnabledPrompts = getEnabledPaths(cliExtensionPaths.prompts); + const cliEnabledThemes = getEnabledPaths(cliExtensionPaths.themes); + + const extensionPaths = this.noExtensions + ? cliEnabledExtensions + : this.mergePaths(cliEnabledExtensions, enabledExtensions); + + const extensionsResult = await loadExtensions(extensionPaths, this.cwd, this.eventBus); + const inlineExtensions = await this.loadExtensionFactories(extensionsResult.runtime); + extensionsResult.extensions.push(...inlineExtensions.extensions); + extensionsResult.errors.push(...inlineExtensions.errors); + + // Detect extension conflicts (tools, commands, flags with same names from different extensions) + // Keep all extensions loaded. Conflicts are reported as diagnostics, and precedence is handled by load order. 
+ const conflicts = this.detectExtensionConflicts(extensionsResult.extensions); + for (const conflict of conflicts) { + extensionsResult.errors.push({ path: conflict.path, error: conflict.message }); + } + + this.extensionsResult = this.extensionsOverride ? this.extensionsOverride(extensionsResult) : extensionsResult; + + const skillPaths = this.noSkills + ? this.mergePaths(cliEnabledSkills, this.additionalSkillPaths) + : this.mergePaths([...enabledSkills, ...cliEnabledSkills], this.additionalSkillPaths); + + this.lastSkillPaths = skillPaths; + this.updateSkillsFromPaths(skillPaths); + + const promptPaths = this.noPromptTemplates + ? this.mergePaths(cliEnabledPrompts, this.additionalPromptTemplatePaths) + : this.mergePaths([...enabledPrompts, ...cliEnabledPrompts], this.additionalPromptTemplatePaths); + + this.lastPromptPaths = promptPaths; + this.updatePromptsFromPaths(promptPaths); + + const themePaths = this.noThemes + ? this.mergePaths(cliEnabledThemes, this.additionalThemePaths) + : this.mergePaths([...enabledThemes, ...cliEnabledThemes], this.additionalThemePaths); + + this.lastThemePaths = themePaths; + this.updateThemesFromPaths(themePaths); + + for (const extension of this.extensionsResult.extensions) { + this.addDefaultMetadataForPath(extension.path); + } + + const agentsFiles = { agentsFiles: loadProjectContextFiles({ cwd: this.cwd, agentDir: this.agentDir }) }; + const resolvedAgentsFiles = this.agentsFilesOverride ? this.agentsFilesOverride(agentsFiles) : agentsFiles; + this.agentsFiles = resolvedAgentsFiles.agentsFiles; + + const baseSystemPrompt = resolvePromptInput( + this.systemPromptSource ?? this.discoverSystemPromptFile(), + "system prompt", + ); + this.systemPrompt = this.systemPromptOverride ? this.systemPromptOverride(baseSystemPrompt) : baseSystemPrompt; + + const appendSource = this.appendSystemPromptSource ?? 
this.discoverAppendSystemPromptFile(); + const resolvedAppend = resolvePromptInput(appendSource, "append system prompt"); + const baseAppend = resolvedAppend ? [resolvedAppend] : []; + this.appendSystemPrompt = this.appendSystemPromptOverride + ? this.appendSystemPromptOverride(baseAppend) + : baseAppend; + } + + private normalizeExtensionPaths( + entries: Array<{ path: string; metadata: PathMetadata }>, + ): Array<{ path: string; metadata: PathMetadata }> { + return entries.map((entry) => ({ + path: this.resolveResourcePath(entry.path), + metadata: entry.metadata, + })); + } + + private updateSkillsFromPaths( + skillPaths: string[], + extensionPaths: Array<{ path: string; metadata: PathMetadata }> = [], + ): void { + let skillsResult: { skills: Skill[]; diagnostics: ResourceDiagnostic[] }; + if (this.noSkills && skillPaths.length === 0) { + skillsResult = { skills: [], diagnostics: [] }; + } else { + skillsResult = loadSkills({ + cwd: this.cwd, + agentDir: this.agentDir, + skillPaths, + includeDefaults: false, + }); + } + const resolvedSkills = this.skillsOverride ? 
this.skillsOverride(skillsResult) : skillsResult; + this.skills = resolvedSkills.skills; + this.skillDiagnostics = resolvedSkills.diagnostics; + this.applyExtensionMetadata( + extensionPaths, + this.skills.map((skill) => skill.filePath), + ); + for (const skill of this.skills) { + this.addDefaultMetadataForPath(skill.filePath); + } + } + + private updatePromptsFromPaths( + promptPaths: string[], + extensionPaths: Array<{ path: string; metadata: PathMetadata }> = [], + ): void { + let promptsResult: { prompts: PromptTemplate[]; diagnostics: ResourceDiagnostic[] }; + if (this.noPromptTemplates && promptPaths.length === 0) { + promptsResult = { prompts: [], diagnostics: [] }; + } else { + const allPrompts = loadPromptTemplates({ + cwd: this.cwd, + agentDir: this.agentDir, + promptPaths, + includeDefaults: false, + }); + promptsResult = this.dedupePrompts(allPrompts); + } + const resolvedPrompts = this.promptsOverride ? this.promptsOverride(promptsResult) : promptsResult; + this.prompts = resolvedPrompts.prompts; + this.promptDiagnostics = resolvedPrompts.diagnostics; + this.applyExtensionMetadata( + extensionPaths, + this.prompts.map((prompt) => prompt.filePath), + ); + for (const prompt of this.prompts) { + this.addDefaultMetadataForPath(prompt.filePath); + } + } + + private updateThemesFromPaths( + themePaths: string[], + extensionPaths: Array<{ path: string; metadata: PathMetadata }> = [], + ): void { + let themesResult: { themes: Theme[]; diagnostics: ResourceDiagnostic[] }; + if (this.noThemes && themePaths.length === 0) { + themesResult = { themes: [], diagnostics: [] }; + } else { + const loaded = this.loadThemes(themePaths, false); + const deduped = this.dedupeThemes(loaded.themes); + themesResult = { themes: deduped.themes, diagnostics: [...loaded.diagnostics, ...deduped.diagnostics] }; + } + const resolvedThemes = this.themesOverride ? 
this.themesOverride(themesResult) : themesResult; + this.themes = resolvedThemes.themes; + this.themeDiagnostics = resolvedThemes.diagnostics; + const themePathsWithSource = this.themes.flatMap((theme) => (theme.sourcePath ? [theme.sourcePath] : [])); + this.applyExtensionMetadata(extensionPaths, themePathsWithSource); + for (const theme of this.themes) { + if (theme.sourcePath) { + this.addDefaultMetadataForPath(theme.sourcePath); + } + } + } + + private applyExtensionMetadata( + extensionPaths: Array<{ path: string; metadata: PathMetadata }>, + resourcePaths: string[], + ): void { + if (extensionPaths.length === 0) { + return; + } + + const normalized = extensionPaths.map((entry) => ({ + path: resolve(entry.path), + metadata: entry.metadata, + })); + + for (const entry of normalized) { + if (!this.pathMetadata.has(entry.path)) { + this.pathMetadata.set(entry.path, entry.metadata); + } + } + + for (const resourcePath of resourcePaths) { + const normalizedResourcePath = resolve(resourcePath); + if (this.pathMetadata.has(normalizedResourcePath) || this.pathMetadata.has(resourcePath)) { + continue; + } + const match = normalized.find( + (entry) => + normalizedResourcePath === entry.path || normalizedResourcePath.startsWith(`${entry.path}${sep}`), + ); + if (match) { + this.pathMetadata.set(normalizedResourcePath, match.metadata); + } + } + } + + private mergePaths(primary: string[], additional: string[]): string[] { + const merged: string[] = []; + const seen = new Set(); + + for (const p of [...primary, ...additional]) { + const resolved = this.resolveResourcePath(p); + if (seen.has(resolved)) continue; + seen.add(resolved); + merged.push(resolved); + } + + return merged; + } + + private resolveResourcePath(p: string): string { + const trimmed = p.trim(); + let expanded = trimmed; + if (trimmed === "~") { + expanded = homedir(); + } else if (trimmed.startsWith("~/")) { + expanded = join(homedir(), trimmed.slice(2)); + } else if (trimmed.startsWith("~")) { + expanded 
= join(homedir(), trimmed.slice(1)); + } + return resolve(this.cwd, expanded); + } + + private loadThemes( + paths: string[], + includeDefaults: boolean = true, + ): { + themes: Theme[]; + diagnostics: ResourceDiagnostic[]; + } { + const themes: Theme[] = []; + const diagnostics: ResourceDiagnostic[] = []; + if (includeDefaults) { + const defaultDirs = [join(this.agentDir, "themes"), join(this.cwd, CONFIG_DIR_NAME, "themes")]; + + for (const dir of defaultDirs) { + this.loadThemesFromDir(dir, themes, diagnostics); + } + } + + for (const p of paths) { + const resolved = resolve(this.cwd, p); + if (!existsSync(resolved)) { + diagnostics.push({ type: "warning", message: "theme path does not exist", path: resolved }); + continue; + } + + try { + const stats = statSync(resolved); + if (stats.isDirectory()) { + this.loadThemesFromDir(resolved, themes, diagnostics); + } else if (stats.isFile() && resolved.endsWith(".json")) { + this.loadThemeFromFile(resolved, themes, diagnostics); + } else { + diagnostics.push({ type: "warning", message: "theme path is not a json file", path: resolved }); + } + } catch (error) { + const message = error instanceof Error ? error.message : "failed to read theme path"; + diagnostics.push({ type: "warning", message, path: resolved }); + } + } + + return { themes, diagnostics }; + } + + private loadThemesFromDir(dir: string, themes: Theme[], diagnostics: ResourceDiagnostic[]): void { + if (!existsSync(dir)) { + return; + } + + try { + const entries = readdirSync(dir, { withFileTypes: true }); + for (const entry of entries) { + let isFile = entry.isFile(); + if (entry.isSymbolicLink()) { + try { + isFile = statSync(join(dir, entry.name)).isFile(); + } catch { + continue; + } + } + if (!isFile) { + continue; + } + if (!entry.name.endsWith(".json")) { + continue; + } + this.loadThemeFromFile(join(dir, entry.name), themes, diagnostics); + } + } catch (error) { + const message = error instanceof Error ? 
error.message : "failed to read theme directory"; + diagnostics.push({ type: "warning", message, path: dir }); + } + } + + private loadThemeFromFile(filePath: string, themes: Theme[], diagnostics: ResourceDiagnostic[]): void { + try { + themes.push(loadThemeFromPath(filePath)); + } catch (error) { + const message = error instanceof Error ? error.message : "failed to load theme"; + diagnostics.push({ type: "warning", message, path: filePath }); + } + } + + private async loadExtensionFactories(runtime: ExtensionRuntime): Promise<{ + extensions: Extension[]; + errors: Array<{ path: string; error: string }>; + }> { + const extensions: Extension[] = []; + const errors: Array<{ path: string; error: string }> = []; + + for (const [index, factory] of this.extensionFactories.entries()) { + const extensionPath = ``; + try { + const extension = await loadExtensionFromFactory(factory, this.cwd, this.eventBus, runtime, extensionPath); + extensions.push(extension); + } catch (error) { + const message = error instanceof Error ? 
error.message : "failed to load extension"; + errors.push({ path: extensionPath, error: message }); + } + } + + return { extensions, errors }; + } + + private dedupePrompts(prompts: PromptTemplate[]): { prompts: PromptTemplate[]; diagnostics: ResourceDiagnostic[] } { + const seen = new Map(); + const diagnostics: ResourceDiagnostic[] = []; + + for (const prompt of prompts) { + const existing = seen.get(prompt.name); + if (existing) { + diagnostics.push({ + type: "collision", + message: `name "/${prompt.name}" collision`, + path: prompt.filePath, + collision: { + resourceType: "prompt", + name: prompt.name, + winnerPath: existing.filePath, + loserPath: prompt.filePath, + }, + }); + } else { + seen.set(prompt.name, prompt); + } + } + + return { prompts: Array.from(seen.values()), diagnostics }; + } + + private dedupeThemes(themes: Theme[]): { themes: Theme[]; diagnostics: ResourceDiagnostic[] } { + const seen = new Map(); + const diagnostics: ResourceDiagnostic[] = []; + + for (const t of themes) { + const name = t.name ?? "unnamed"; + const existing = seen.get(name); + if (existing) { + diagnostics.push({ + type: "collision", + message: `name "${name}" collision`, + path: t.sourcePath, + collision: { + resourceType: "theme", + name, + winnerPath: existing.sourcePath ?? "", + loserPath: t.sourcePath ?? 
"", + }, + }); + } else { + seen.set(name, t); + } + } + + return { themes: Array.from(seen.values()), diagnostics }; + } + + private discoverSystemPromptFile(): string | undefined { + const projectPath = join(this.cwd, CONFIG_DIR_NAME, "SYSTEM.md"); + if (existsSync(projectPath)) { + return projectPath; + } + + const globalPath = join(this.agentDir, "SYSTEM.md"); + if (existsSync(globalPath)) { + return globalPath; + } + + return undefined; + } + + private discoverAppendSystemPromptFile(): string | undefined { + const projectPath = join(this.cwd, CONFIG_DIR_NAME, "APPEND_SYSTEM.md"); + if (existsSync(projectPath)) { + return projectPath; + } + + const globalPath = join(this.agentDir, "APPEND_SYSTEM.md"); + if (existsSync(globalPath)) { + return globalPath; + } + + return undefined; + } + + private addDefaultMetadataForPath(filePath: string): void { + if (!filePath || filePath.startsWith("<")) { + return; + } + + const normalizedPath = resolve(filePath); + if (this.pathMetadata.has(normalizedPath) || this.pathMetadata.has(filePath)) { + return; + } + + const agentRoots = [ + join(this.agentDir, "skills"), + join(this.agentDir, "prompts"), + join(this.agentDir, "themes"), + join(this.agentDir, "extensions"), + ]; + const projectRoots = [ + join(this.cwd, CONFIG_DIR_NAME, "skills"), + join(this.cwd, CONFIG_DIR_NAME, "prompts"), + join(this.cwd, CONFIG_DIR_NAME, "themes"), + join(this.cwd, CONFIG_DIR_NAME, "extensions"), + ]; + + for (const root of agentRoots) { + if (this.isUnderPath(normalizedPath, root)) { + this.pathMetadata.set(normalizedPath, { source: "local", scope: "user", origin: "top-level" }); + return; + } + } + + for (const root of projectRoots) { + if (this.isUnderPath(normalizedPath, root)) { + this.pathMetadata.set(normalizedPath, { source: "local", scope: "project", origin: "top-level" }); + return; + } + } + } + + private isUnderPath(target: string, root: string): boolean { + const normalizedRoot = resolve(root); + if (target === normalizedRoot) { + 
return true; + } + const prefix = normalizedRoot.endsWith(sep) ? normalizedRoot : `${normalizedRoot}${sep}`; + return target.startsWith(prefix); + } + + private detectExtensionConflicts(extensions: Extension[]): Array<{ path: string; message: string }> { + const conflicts: Array<{ path: string; message: string }> = []; + + // Track which extension registered each tool, command, and flag + const toolOwners = new Map(); + const commandOwners = new Map(); + const flagOwners = new Map(); + + for (const ext of extensions) { + // Check tools + for (const toolName of ext.tools.keys()) { + const existingOwner = toolOwners.get(toolName); + if (existingOwner && existingOwner !== ext.path) { + conflicts.push({ + path: ext.path, + message: `Tool "${toolName}" conflicts with ${existingOwner}`, + }); + } else { + toolOwners.set(toolName, ext.path); + } + } + + // Check commands + for (const commandName of ext.commands.keys()) { + const existingOwner = commandOwners.get(commandName); + if (existingOwner && existingOwner !== ext.path) { + conflicts.push({ + path: ext.path, + message: `Command "/${commandName}" conflicts with ${existingOwner}`, + }); + } else { + commandOwners.set(commandName, ext.path); + } + } + + // Check flags + for (const flagName of ext.flags.keys()) { + const existingOwner = flagOwners.get(flagName); + if (existingOwner && existingOwner !== ext.path) { + conflicts.push({ + path: ext.path, + message: `Flag "--${flagName}" conflicts with ${existingOwner}`, + }); + } else { + flagOwners.set(flagName, ext.path); + } + } + } + + return conflicts; + } +} diff --git a/packages/pi-coding-agent/src/core/sdk.ts b/packages/pi-coding-agent/src/core/sdk.ts new file mode 100644 index 000000000..5832c1c71 --- /dev/null +++ b/packages/pi-coding-agent/src/core/sdk.ts @@ -0,0 +1,373 @@ +import { join } from "node:path"; +import { Agent, type AgentMessage, type ThinkingLevel } from "@gsd/pi-agent-core"; +import type { Message, Model } from "@gsd/pi-ai"; +import { getAgentDir, 
getDocsPath } from "../config.js"; +import { AgentSession } from "./agent-session.js"; +import { AuthStorage } from "./auth-storage.js"; +import { DEFAULT_THINKING_LEVEL } from "./defaults.js"; +import type { ExtensionRunner, LoadExtensionsResult, ToolDefinition } from "./extensions/index.js"; +import { convertToLlm } from "./messages.js"; +import { ModelRegistry } from "./model-registry.js"; +import { findInitialModel } from "./model-resolver.js"; +import type { ResourceLoader } from "./resource-loader.js"; +import { DefaultResourceLoader } from "./resource-loader.js"; +import { SessionManager } from "./session-manager.js"; +import { SettingsManager } from "./settings-manager.js"; +import { time } from "./timings.js"; +import { + allTools, + bashTool, + codingTools, + createBashTool, + createCodingTools, + createEditTool, + createFindTool, + createGrepTool, + createLsTool, + createReadOnlyTools, + createReadTool, + createWriteTool, + editTool, + findTool, + grepTool, + lsTool, + readOnlyTools, + readTool, + type Tool, + type ToolName, + writeTool, +} from "./tools/index.js"; + +export interface CreateAgentSessionOptions { + /** Working directory for project-local discovery. Default: process.cwd() */ + cwd?: string; + /** Global config directory. Default: ~/.pi/agent */ + agentDir?: string; + + /** Auth storage for credentials. Default: AuthStorage.create(agentDir/auth.json) */ + authStorage?: AuthStorage; + /** Model registry. Default: new ModelRegistry(authStorage, agentDir/models.json) */ + modelRegistry?: ModelRegistry; + + /** Model to use. Default: from settings, else first available */ + model?: Model; + /** Thinking level. Default: from settings, else 'medium' (clamped to model capabilities) */ + thinkingLevel?: ThinkingLevel; + /** Models available for cycling (Ctrl+P in interactive mode) */ + scopedModels?: Array<{ model: Model; thinkingLevel?: ThinkingLevel }>; + + /** Built-in tools to use. 
Default: codingTools [read, bash, edit, write] */ + tools?: Tool[]; + /** Custom tools to register (in addition to built-in tools). */ + customTools?: ToolDefinition[]; + + /** Resource loader. When omitted, DefaultResourceLoader is used. */ + resourceLoader?: ResourceLoader; + + /** Session manager. Default: SessionManager.create(cwd) */ + sessionManager?: SessionManager; + + /** Settings manager. Default: SettingsManager.create(cwd, agentDir) */ + settingsManager?: SettingsManager; +} + +/** Result from createAgentSession */ +export interface CreateAgentSessionResult { + /** The created session */ + session: AgentSession; + /** Extensions result (for UI context setup in interactive mode) */ + extensionsResult: LoadExtensionsResult; + /** Warning if session was restored with a different model than saved */ + modelFallbackMessage?: string; +} + +// Re-exports + +export type { + ExtensionAPI, + ExtensionCommandContext, + ExtensionContext, + ExtensionFactory, + SlashCommandInfo, + SlashCommandLocation, + SlashCommandSource, + ToolDefinition, +} from "./extensions/index.js"; +export type { PromptTemplate } from "./prompt-templates.js"; +export type { Skill } from "./skills.js"; +export type { Tool } from "./tools/index.js"; + +export { + // Pre-built tools (use process.cwd()) + readTool, + bashTool, + editTool, + writeTool, + grepTool, + findTool, + lsTool, + codingTools, + readOnlyTools, + allTools as allBuiltInTools, + // Tool factories (for custom cwd) + createCodingTools, + createReadOnlyTools, + createReadTool, + createBashTool, + createEditTool, + createWriteTool, + createGrepTool, + createFindTool, + createLsTool, +}; + +// Helper Functions + +function getDefaultAgentDir(): string { + return getAgentDir(); +} + +/** + * Create an AgentSession with the specified options. 
+ * + * @example + * ```typescript + * // Minimal - uses defaults + * const { session } = await createAgentSession(); + * + * // With explicit model + * import { getModel } from '@gsd/pi-ai'; + * const { session } = await createAgentSession({ + * model: getModel('anthropic', 'claude-opus-4-5'), + * thinkingLevel: 'high', + * }); + * + * // Continue previous session + * const { session, modelFallbackMessage } = await createAgentSession({ + * continueSession: true, + * }); + * + * // Full control + * const loader = new DefaultResourceLoader({ + * cwd: process.cwd(), + * agentDir: getAgentDir(), + * settingsManager: SettingsManager.create(), + * }); + * await loader.reload(); + * const { session } = await createAgentSession({ + * model: myModel, + * tools: [readTool, bashTool], + * resourceLoader: loader, + * sessionManager: SessionManager.inMemory(), + * }); + * ``` + */ +export async function createAgentSession(options: CreateAgentSessionOptions = {}): Promise { + const cwd = options.cwd ?? process.cwd(); + const agentDir = options.agentDir ?? getDefaultAgentDir(); + let resourceLoader = options.resourceLoader; + + // Use provided or create AuthStorage and ModelRegistry + const authPath = options.agentDir ? join(agentDir, "auth.json") : undefined; + const modelsPath = options.agentDir ? join(agentDir, "models.json") : undefined; + const authStorage = options.authStorage ?? AuthStorage.create(authPath); + const modelRegistry = options.modelRegistry ?? new ModelRegistry(authStorage, modelsPath); + + const settingsManager = options.settingsManager ?? SettingsManager.create(cwd, agentDir); + const sessionManager = options.sessionManager ?? 
SessionManager.create(cwd); + + if (!resourceLoader) { + resourceLoader = new DefaultResourceLoader({ cwd, agentDir, settingsManager }); + await resourceLoader.reload(); + time("resourceLoader.reload"); + } + + // Check if session has existing data to restore + const existingSession = sessionManager.buildSessionContext(); + const hasExistingSession = existingSession.messages.length > 0; + const hasThinkingEntry = sessionManager.getBranch().some((entry) => entry.type === "thinking_level_change"); + + let model = options.model; + let modelFallbackMessage: string | undefined; + + // If session has data, try to restore model from it + if (!model && hasExistingSession && existingSession.model) { + const restoredModel = modelRegistry.find(existingSession.model.provider, existingSession.model.modelId); + if (restoredModel && (await modelRegistry.getApiKey(restoredModel))) { + model = restoredModel; + } + if (!model) { + modelFallbackMessage = `Could not restore model ${existingSession.model.provider}/${existingSession.model.modelId}`; + } + } + + // If still no model, use findInitialModel (checks settings default, then provider defaults) + if (!model) { + const result = await findInitialModel({ + scopedModels: [], + isContinuing: hasExistingSession, + defaultProvider: settingsManager.getDefaultProvider(), + defaultModelId: settingsManager.getDefaultModel(), + defaultThinkingLevel: settingsManager.getDefaultThinkingLevel(), + modelRegistry, + }); + model = result.model; + if (!model) { + modelFallbackMessage = `No models available. Use /login or set an API key environment variable. See ${join(getDocsPath(), "providers.md")}. Then use /model to select a model.`; + } else if (modelFallbackMessage) { + modelFallbackMessage += `. Using ${model.provider}/${model.id}`; + } + } + + let thinkingLevel = options.thinkingLevel; + + // If session has data, restore thinking level from it + if (thinkingLevel === undefined && hasExistingSession) { + thinkingLevel = hasThinkingEntry + ? 
(existingSession.thinkingLevel as ThinkingLevel) + : (settingsManager.getDefaultThinkingLevel() ?? DEFAULT_THINKING_LEVEL); + } + + // Fall back to settings default + if (thinkingLevel === undefined) { + thinkingLevel = settingsManager.getDefaultThinkingLevel() ?? DEFAULT_THINKING_LEVEL; + } + + // Clamp to model capabilities + if (!model || !model.reasoning) { + thinkingLevel = "off"; + } + + const defaultActiveToolNames: ToolName[] = ["read", "bash", "edit", "write"]; + const initialActiveToolNames: ToolName[] = options.tools + ? options.tools.map((t) => t.name).filter((n): n is ToolName => n in allTools) + : defaultActiveToolNames; + + let agent: Agent; + + // Create convertToLlm wrapper that filters images if blockImages is enabled (defense-in-depth) + const convertToLlmWithBlockImages = (messages: AgentMessage[]): Message[] => { + const converted = convertToLlm(messages); + // Check setting dynamically so mid-session changes take effect + if (!settingsManager.getBlockImages()) { + return converted; + } + // Filter out ImageContent from all messages, replacing with text placeholder + return converted.map((msg) => { + if (msg.role === "user" || msg.role === "toolResult") { + const content = msg.content; + if (Array.isArray(content)) { + const hasImages = content.some((c) => c.type === "image"); + if (hasImages) { + const filteredContent = content + .map((c) => + c.type === "image" ? { type: "text" as const, text: "Image reading is disabled." } : c, + ) + .filter( + (c, i, arr) => + // Dedupe consecutive "Image reading is disabled." texts + !( + c.type === "text" && + c.text === "Image reading is disabled." && + i > 0 && + arr[i - 1].type === "text" && + (arr[i - 1] as { type: "text"; text: string }).text === "Image reading is disabled." 
+ ), + ); + return { ...msg, content: filteredContent }; + } + } + } + return msg; + }); + }; + + const extensionRunnerRef: { current?: ExtensionRunner } = {}; + + agent = new Agent({ + initialState: { + systemPrompt: "", + model, + thinkingLevel, + tools: [], + }, + convertToLlm: convertToLlmWithBlockImages, + onPayload: async (payload, _model) => { + const runner = extensionRunnerRef.current; + if (!runner?.hasHandlers("before_provider_request")) { + return payload; + } + return runner.emitBeforeProviderRequest(payload); + }, + sessionId: sessionManager.getSessionId(), + transformContext: async (messages) => { + const runner = extensionRunnerRef.current; + if (!runner) return messages; + return runner.emitContext(messages); + }, + steeringMode: settingsManager.getSteeringMode(), + followUpMode: settingsManager.getFollowUpMode(), + transport: settingsManager.getTransport(), + thinkingBudgets: settingsManager.getThinkingBudgets(), + maxRetryDelayMs: settingsManager.getRetrySettings().maxDelayMs, + getApiKey: async (provider) => { + // Use the provider argument from the in-flight request; + // agent.state.model may already be switched mid-turn. + const resolvedProvider = provider || agent.state.model?.provider; + if (!resolvedProvider) { + throw new Error("No model selected"); + } + const key = await modelRegistry.getApiKeyForProvider(resolvedProvider); + if (!key) { + const model = agent.state.model; + const isOAuth = model && modelRegistry.isUsingOAuth(model); + if (isOAuth) { + throw new Error( + `Authentication failed for "${resolvedProvider}". ` + + `Credentials may have expired or network is unavailable. ` + + `Run '/login ${resolvedProvider}' to re-authenticate.`, + ); + } + throw new Error( + `No API key found for "${resolvedProvider}". 
` + + `Set an API key environment variable or run '/login ${resolvedProvider}'.`, + ); + } + return key; + }, + }); + + // Restore messages if session has existing data + if (hasExistingSession) { + agent.replaceMessages(existingSession.messages); + if (!hasThinkingEntry) { + sessionManager.appendThinkingLevelChange(thinkingLevel); + } + } else { + // Save initial model and thinking level for new sessions so they can be restored on resume + if (model) { + sessionManager.appendModelChange(model.provider, model.id); + } + sessionManager.appendThinkingLevelChange(thinkingLevel); + } + + const session = new AgentSession({ + agent, + sessionManager, + settingsManager, + cwd, + scopedModels: options.scopedModels, + resourceLoader, + customTools: options.customTools, + modelRegistry, + initialActiveToolNames, + extensionRunnerRef, + }); + const extensionsResult = resourceLoader.getExtensions(); + + return { + session, + extensionsResult, + modelFallbackMessage, + }; +} diff --git a/packages/pi-coding-agent/src/core/session-manager.ts b/packages/pi-coding-agent/src/core/session-manager.ts new file mode 100644 index 000000000..19db70185 --- /dev/null +++ b/packages/pi-coding-agent/src/core/session-manager.ts @@ -0,0 +1,1410 @@ +import type { AgentMessage } from "@gsd/pi-agent-core"; +import type { ImageContent, Message, TextContent } from "@gsd/pi-ai"; +import { randomUUID } from "crypto"; +import { + appendFileSync, + closeSync, + existsSync, + mkdirSync, + openSync, + readdirSync, + readFileSync, + readSync, + statSync, + writeFileSync, +} from "fs"; +import { readdir, readFile, stat } from "fs/promises"; +import { join, resolve } from "path"; +import { getAgentDir as getDefaultAgentDir, getSessionsDir } from "../config.js"; +import { + type BashExecutionMessage, + type CustomMessage, + createBranchSummaryMessage, + createCompactionSummaryMessage, + createCustomMessage, +} from "./messages.js"; + +export const CURRENT_SESSION_VERSION = 3; + +export interface SessionHeader 
{ + type: "session"; + version?: number; // v1 sessions don't have this + id: string; + timestamp: string; + cwd: string; + parentSession?: string; +} + +export interface NewSessionOptions { + parentSession?: string; +} + +export interface SessionEntryBase { + type: string; + id: string; + parentId: string | null; + timestamp: string; +} + +export interface SessionMessageEntry extends SessionEntryBase { + type: "message"; + message: AgentMessage; +} + +export interface ThinkingLevelChangeEntry extends SessionEntryBase { + type: "thinking_level_change"; + thinkingLevel: string; +} + +export interface ModelChangeEntry extends SessionEntryBase { + type: "model_change"; + provider: string; + modelId: string; +} + +export interface CompactionEntry extends SessionEntryBase { + type: "compaction"; + summary: string; + firstKeptEntryId: string; + tokensBefore: number; + /** Extension-specific data (e.g., ArtifactIndex, version markers for structured compaction) */ + details?: T; + /** True if generated by an extension, undefined/false if pi-generated (backward compatible) */ + fromHook?: boolean; +} + +export interface BranchSummaryEntry extends SessionEntryBase { + type: "branch_summary"; + fromId: string; + summary: string; + /** Extension-specific data (not sent to LLM) */ + details?: T; + /** True if generated by an extension, false if pi-generated */ + fromHook?: boolean; +} + +/** + * Custom entry for extensions to store extension-specific data in the session. + * Use customType to identify your extension's entries. + * + * Purpose: Persist extension state across session reloads. On reload, extensions can + * scan entries for their customType and reconstruct internal state. + * + * Does NOT participate in LLM context (ignored by buildSessionContext). + * For injecting content into context, see CustomMessageEntry. 
+ */ +export interface CustomEntry extends SessionEntryBase { + type: "custom"; + customType: string; + data?: T; +} + +/** Label entry for user-defined bookmarks/markers on entries. */ +export interface LabelEntry extends SessionEntryBase { + type: "label"; + targetId: string; + label: string | undefined; +} + +/** Session metadata entry (e.g., user-defined display name). */ +export interface SessionInfoEntry extends SessionEntryBase { + type: "session_info"; + name?: string; +} + +/** + * Custom message entry for extensions to inject messages into LLM context. + * Use customType to identify your extension's entries. + * + * Unlike CustomEntry, this DOES participate in LLM context. + * The content is converted to a user message in buildSessionContext(). + * Use details for extension-specific metadata (not sent to LLM). + * + * display controls TUI rendering: + * - false: hidden entirely + * - true: rendered with distinct styling (different from user messages) + */ +export interface CustomMessageEntry extends SessionEntryBase { + type: "custom_message"; + customType: string; + content: string | (TextContent | ImageContent)[]; + details?: T; + display: boolean; +} + +/** Session entry - has id/parentId for tree structure (returned by "read" methods in SessionManager) */ +export type SessionEntry = + | SessionMessageEntry + | ThinkingLevelChangeEntry + | ModelChangeEntry + | CompactionEntry + | BranchSummaryEntry + | CustomEntry + | CustomMessageEntry + | LabelEntry + | SessionInfoEntry; + +/** Raw file entry (includes header) */ +export type FileEntry = SessionHeader | SessionEntry; + +/** Tree node for getTree() - defensive copy of session structure */ +export interface SessionTreeNode { + entry: SessionEntry; + children: SessionTreeNode[]; + /** Resolved label for this entry, if any */ + label?: string; +} + +export interface SessionContext { + messages: AgentMessage[]; + thinkingLevel: string; + model: { provider: string; modelId: string } | null; +} + +export 
interface SessionInfo { + path: string; + id: string; + /** Working directory where the session was started. Empty string for old sessions. */ + cwd: string; + /** User-defined display name from session_info entries. */ + name?: string; + /** Path to the parent session (if this session was forked). */ + parentSessionPath?: string; + created: Date; + modified: Date; + messageCount: number; + firstMessage: string; + allMessagesText: string; +} + +export type ReadonlySessionManager = Pick< + SessionManager, + | "getCwd" + | "getSessionDir" + | "getSessionId" + | "getSessionFile" + | "getLeafId" + | "getLeafEntry" + | "getEntry" + | "getLabel" + | "getBranch" + | "getHeader" + | "getEntries" + | "getTree" + | "getSessionName" +>; + +/** Generate a unique short ID (8 hex chars, collision-checked) */ +function generateId(byId: { has(id: string): boolean }): string { + for (let i = 0; i < 100; i++) { + const id = randomUUID().slice(0, 8); + if (!byId.has(id)) return id; + } + // Fallback to full UUID if somehow we have collisions + return randomUUID(); +} + +/** Migrate v1 → v2: add id/parentId tree structure. Mutates in place. */ +function migrateV1ToV2(entries: FileEntry[]): void { + const ids = new Set(); + let prevId: string | null = null; + + for (const entry of entries) { + if (entry.type === "session") { + entry.version = 2; + continue; + } + + entry.id = generateId(ids); + entry.parentId = prevId; + prevId = entry.id; + + // Convert firstKeptEntryIndex to firstKeptEntryId for compaction + if (entry.type === "compaction") { + const comp = entry as CompactionEntry & { firstKeptEntryIndex?: number }; + if (typeof comp.firstKeptEntryIndex === "number") { + const targetEntry = entries[comp.firstKeptEntryIndex]; + if (targetEntry && targetEntry.type !== "session") { + comp.firstKeptEntryId = targetEntry.id; + } + delete comp.firstKeptEntryIndex; + } + } + } +} + +/** Migrate v2 → v3: rename hookMessage role to custom. Mutates in place. 
*/ +function migrateV2ToV3(entries: FileEntry[]): void { + for (const entry of entries) { + if (entry.type === "session") { + entry.version = 3; + continue; + } + + // Update message entries with hookMessage role + if (entry.type === "message") { + const msgEntry = entry as SessionMessageEntry; + if (msgEntry.message && (msgEntry.message as { role: string }).role === "hookMessage") { + (msgEntry.message as { role: string }).role = "custom"; + } + } + } +} + +/** + * Run all necessary migrations to bring entries to current version. + * Mutates entries in place. Returns true if any migration was applied. + */ +function migrateToCurrentVersion(entries: FileEntry[]): boolean { + const header = entries.find((e) => e.type === "session") as SessionHeader | undefined; + const version = header?.version ?? 1; + + if (version >= CURRENT_SESSION_VERSION) return false; + + if (version < 2) migrateV1ToV2(entries); + if (version < 3) migrateV2ToV3(entries); + + return true; +} + +/** Exported for testing */ +export function migrateSessionEntries(entries: FileEntry[]): void { + migrateToCurrentVersion(entries); +} + +/** Exported for compaction.test.ts */ +export function parseSessionEntries(content: string): FileEntry[] { + const entries: FileEntry[] = []; + const lines = content.trim().split("\n"); + + for (const line of lines) { + if (!line.trim()) continue; + try { + const entry = JSON.parse(line) as FileEntry; + entries.push(entry); + } catch { + // Skip malformed lines + } + } + + return entries; +} + +export function getLatestCompactionEntry(entries: SessionEntry[]): CompactionEntry | null { + for (let i = entries.length - 1; i >= 0; i--) { + if (entries[i].type === "compaction") { + return entries[i] as CompactionEntry; + } + } + return null; +} + +/** + * Build the session context from entries using tree traversal. + * If leafId is provided, walks from that entry to root. + * Handles compaction and branch summaries along the path. 
+ */ +export function buildSessionContext( + entries: SessionEntry[], + leafId?: string | null, + byId?: Map, +): SessionContext { + // Build uuid index if not available + if (!byId) { + byId = new Map(); + for (const entry of entries) { + byId.set(entry.id, entry); + } + } + + // Find leaf + let leaf: SessionEntry | undefined; + if (leafId === null) { + // Explicitly null - return no messages (navigated to before first entry) + return { messages: [], thinkingLevel: "off", model: null }; + } + if (leafId) { + leaf = byId.get(leafId); + } + if (!leaf) { + // Fallback to last entry (when leafId is undefined) + leaf = entries[entries.length - 1]; + } + + if (!leaf) { + return { messages: [], thinkingLevel: "off", model: null }; + } + + // Walk from leaf to root, collecting path + const path: SessionEntry[] = []; + let current: SessionEntry | undefined = leaf; + while (current) { + path.unshift(current); + current = current.parentId ? byId.get(current.parentId) : undefined; + } + + // Extract settings and find compaction + let thinkingLevel = "off"; + let model: { provider: string; modelId: string } | null = null; + let compaction: CompactionEntry | null = null; + + for (const entry of path) { + if (entry.type === "thinking_level_change") { + thinkingLevel = entry.thinkingLevel; + } else if (entry.type === "model_change") { + model = { provider: entry.provider, modelId: entry.modelId }; + } else if (entry.type === "message" && entry.message.role === "assistant") { + model = { provider: entry.message.provider, modelId: entry.message.model }; + } else if (entry.type === "compaction") { + compaction = entry; + } + } + + // Build messages and collect corresponding entries + // When there's a compaction, we need to: + // 1. Emit summary first (entry = compaction) + // 2. Emit kept messages (from firstKeptEntryId up to compaction) + // 3. 
Emit messages after compaction + const messages: AgentMessage[] = []; + + const appendMessage = (entry: SessionEntry) => { + if (entry.type === "message") { + messages.push(entry.message); + } else if (entry.type === "custom_message") { + messages.push( + createCustomMessage(entry.customType, entry.content, entry.display, entry.details, entry.timestamp), + ); + } else if (entry.type === "branch_summary" && entry.summary) { + messages.push(createBranchSummaryMessage(entry.summary, entry.fromId, entry.timestamp)); + } + }; + + if (compaction) { + // Emit summary first + messages.push(createCompactionSummaryMessage(compaction.summary, compaction.tokensBefore, compaction.timestamp)); + + // Find compaction index in path + const compactionIdx = path.findIndex((e) => e.type === "compaction" && e.id === compaction.id); + + // Emit kept messages (before compaction, starting from firstKeptEntryId) + let foundFirstKept = false; + for (let i = 0; i < compactionIdx; i++) { + const entry = path[i]; + if (entry.id === compaction.firstKeptEntryId) { + foundFirstKept = true; + } + if (foundFirstKept) { + appendMessage(entry); + } + } + + // Emit messages after compaction + for (let i = compactionIdx + 1; i < path.length; i++) { + const entry = path[i]; + appendMessage(entry); + } + } else { + // No compaction - emit all messages, handle branch summaries and custom messages + for (const entry of path) { + appendMessage(entry); + } + } + + return { messages, thinkingLevel, model }; +} + +/** + * Compute the default session directory for a cwd. + * Encodes cwd into a safe directory name under ~/.pi/agent/sessions/. 
+ */ +function getDefaultSessionDir(cwd: string): string { + const safePath = `--${cwd.replace(/^[/\\]/, "").replace(/[/\\:]/g, "-")}--`; + const sessionDir = join(getDefaultAgentDir(), "sessions", safePath); + if (!existsSync(sessionDir)) { + mkdirSync(sessionDir, { recursive: true }); + } + return sessionDir; +} + +/** Exported for testing */ +export function loadEntriesFromFile(filePath: string): FileEntry[] { + if (!existsSync(filePath)) return []; + + const content = readFileSync(filePath, "utf8"); + const entries: FileEntry[] = []; + const lines = content.trim().split("\n"); + + for (const line of lines) { + if (!line.trim()) continue; + try { + const entry = JSON.parse(line) as FileEntry; + entries.push(entry); + } catch { + // Skip malformed lines + } + } + + // Validate session header + if (entries.length === 0) return entries; + const header = entries[0]; + if (header.type !== "session" || typeof (header as any).id !== "string") { + return []; + } + + return entries; +} + +function isValidSessionFile(filePath: string): boolean { + try { + const fd = openSync(filePath, "r"); + const buffer = Buffer.alloc(512); + const bytesRead = readSync(fd, buffer, 0, 512, 0); + closeSync(fd); + const firstLine = buffer.toString("utf8", 0, bytesRead).split("\n")[0]; + if (!firstLine) return false; + const header = JSON.parse(firstLine); + return header.type === "session" && typeof header.id === "string"; + } catch { + return false; + } +} + +/** Exported for testing */ +export function findMostRecentSession(sessionDir: string): string | null { + try { + const files = readdirSync(sessionDir) + .filter((f) => f.endsWith(".jsonl")) + .map((f) => join(sessionDir, f)) + .filter(isValidSessionFile) + .map((path) => ({ path, mtime: statSync(path).mtime })) + .sort((a, b) => b.mtime.getTime() - a.mtime.getTime()); + + return files[0]?.path || null; + } catch { + return null; + } +} + +function isMessageWithContent(message: AgentMessage): message is Message { + return typeof 
(message as Message).role === "string" && "content" in message; +} + +function extractTextContent(message: Message): string { + const content = message.content; + if (typeof content === "string") { + return content; + } + return content + .filter((block): block is TextContent => block.type === "text") + .map((block) => block.text) + .join(" "); +} + +function getLastActivityTime(entries: FileEntry[]): number | undefined { + let lastActivityTime: number | undefined; + + for (const entry of entries) { + if (entry.type !== "message") continue; + + const message = (entry as SessionMessageEntry).message; + if (!isMessageWithContent(message)) continue; + if (message.role !== "user" && message.role !== "assistant") continue; + + const msgTimestamp = (message as { timestamp?: number }).timestamp; + if (typeof msgTimestamp === "number") { + lastActivityTime = Math.max(lastActivityTime ?? 0, msgTimestamp); + continue; + } + + const entryTimestamp = (entry as SessionEntryBase).timestamp; + if (typeof entryTimestamp === "string") { + const t = new Date(entryTimestamp).getTime(); + if (!Number.isNaN(t)) { + lastActivityTime = Math.max(lastActivityTime ?? 0, t); + } + } + } + + return lastActivityTime; +} + +function getSessionModifiedDate(entries: FileEntry[], header: SessionHeader, statsMtime: Date): Date { + const lastActivityTime = getLastActivityTime(entries); + if (typeof lastActivityTime === "number" && lastActivityTime > 0) { + return new Date(lastActivityTime); + } + + const headerTime = typeof header.timestamp === "string" ? new Date(header.timestamp).getTime() : NaN; + return !Number.isNaN(headerTime) ? 
new Date(headerTime) : statsMtime; +} + +async function buildSessionInfo(filePath: string): Promise { + try { + const content = await readFile(filePath, "utf8"); + const entries: FileEntry[] = []; + const lines = content.trim().split("\n"); + + for (const line of lines) { + if (!line.trim()) continue; + try { + entries.push(JSON.parse(line) as FileEntry); + } catch { + // Skip malformed lines + } + } + + if (entries.length === 0) return null; + const header = entries[0]; + if (header.type !== "session") return null; + + const stats = await stat(filePath); + let messageCount = 0; + let firstMessage = ""; + const allMessages: string[] = []; + let name: string | undefined; + + for (const entry of entries) { + // Extract session name (use latest) + if (entry.type === "session_info") { + const infoEntry = entry as SessionInfoEntry; + if (infoEntry.name) { + name = infoEntry.name.trim(); + } + } + + if (entry.type !== "message") continue; + messageCount++; + + const message = (entry as SessionMessageEntry).message; + if (!isMessageWithContent(message)) continue; + if (message.role !== "user" && message.role !== "assistant") continue; + + const textContent = extractTextContent(message); + if (!textContent) continue; + + allMessages.push(textContent); + if (!firstMessage && message.role === "user") { + firstMessage = textContent; + } + } + + const cwd = typeof (header as SessionHeader).cwd === "string" ? 
(header as SessionHeader).cwd : ""; + const parentSessionPath = (header as SessionHeader).parentSession; + + const modified = getSessionModifiedDate(entries, header as SessionHeader, stats.mtime); + + return { + path: filePath, + id: (header as SessionHeader).id, + cwd, + name, + parentSessionPath, + created: new Date((header as SessionHeader).timestamp), + modified, + messageCount, + firstMessage: firstMessage || "(no messages)", + allMessagesText: allMessages.join(" "), + }; + } catch { + return null; + } +} + +export type SessionListProgress = (loaded: number, total: number) => void; + +async function listSessionsFromDir( + dir: string, + onProgress?: SessionListProgress, + progressOffset = 0, + progressTotal?: number, +): Promise { + const sessions: SessionInfo[] = []; + if (!existsSync(dir)) { + return sessions; + } + + try { + const dirEntries = await readdir(dir); + const files = dirEntries.filter((f) => f.endsWith(".jsonl")).map((f) => join(dir, f)); + const total = progressTotal ?? files.length; + + let loaded = 0; + const results = await Promise.all( + files.map(async (file) => { + const info = await buildSessionInfo(file); + loaded++; + onProgress?.(progressOffset + loaded, total); + return info; + }), + ); + for (const info of results) { + if (info) { + sessions.push(info); + } + } + } catch { + // Return empty list on error + } + + return sessions; +} + +/** + * Manages conversation sessions as append-only trees stored in JSONL files. + * + * Each session entry has an id and parentId forming a tree structure. The "leaf" + * pointer tracks the current position. Appending creates a child of the current leaf. + * Branching moves the leaf to an earlier entry, allowing new branches without + * modifying history. + * + * Use buildSessionContext() to get the resolved message list for the LLM, which + * handles compaction summaries and follows the path from root to current leaf. 
 */
export class SessionManager {
	// Session identity and storage location.
	private sessionId: string = "";
	private sessionFile: string | undefined;
	private sessionDir: string;
	private cwd: string;
	// When false, nothing is ever written to disk (in-memory sessions).
	private persist: boolean;
	// True once the in-memory entries have been written to sessionFile.
	// _persist() keeps this false until the first assistant message exists,
	// then writes the whole backlog at once (avoids empty session files).
	private flushed: boolean = false;
	// All entries including the session header, in append order.
	private fileEntries: FileEntry[] = [];
	// Index of non-header entries by id.
	private byId: Map<string, SessionEntry> = new Map();
	// Resolved targetId -> label map (the last label entry for a target wins).
	private labelsById: Map<string, string> = new Map();
	// Current position in the tree; new entries become children of this id.
	private leafId: string | null = null;

	private constructor(cwd: string, sessionDir: string, sessionFile: string | undefined, persist: boolean) {
		this.cwd = cwd;
		this.sessionDir = sessionDir;
		this.persist = persist;
		if (persist && sessionDir && !existsSync(sessionDir)) {
			mkdirSync(sessionDir, { recursive: true });
		}

		// Resume the given file if provided, otherwise start a brand-new session.
		if (sessionFile) {
			this.setSessionFile(sessionFile);
		} else {
			this.newSession();
		}
	}

	/** Switch to a different session file (used for resume and branching) */
	setSessionFile(sessionFile: string): void {
		this.sessionFile = resolve(sessionFile);
		if (existsSync(this.sessionFile)) {
			this.fileEntries = loadEntriesFromFile(this.sessionFile);

			// If file was empty or corrupted (no valid header), truncate and start fresh
			// to avoid appending messages without a session header (which breaks the session)
			if (this.fileEntries.length === 0) {
				const explicitPath = this.sessionFile;
				this.newSession();
				this.sessionFile = explicitPath;
				this._rewriteFile();
				this.flushed = true;
				return;
			}

			const header = this.fileEntries.find((e) => e.type === "session") as SessionHeader | undefined;
			this.sessionId = header?.id ?? randomUUID();

			// Upgrade on-disk format if needed; migration mutates fileEntries in place.
			if (migrateToCurrentVersion(this.fileEntries)) {
				this._rewriteFile();
			}

			this._buildIndex();
			this.flushed = true;
		} else {
			const explicitPath = this.sessionFile;
			this.newSession();
			this.sessionFile = explicitPath; // preserve explicit path from --session flag
		}
	}

	/**
	 * Start a fresh session: new id, header-only entry list, cleared indexes.
	 * When persisting, computes the new file path (the file itself is written
	 * lazily by _persist()). Returns the new session file path, if any.
	 */
	newSession(options?: NewSessionOptions): string | undefined {
		this.sessionId = randomUUID();
		const timestamp = new Date().toISOString();
		const header: SessionHeader = {
			type: "session",
			version: CURRENT_SESSION_VERSION,
			id: this.sessionId,
			timestamp,
			cwd: this.cwd,
			parentSession: options?.parentSession,
		};
		this.fileEntries = [header];
		this.byId.clear();
		this.labelsById.clear();
		this.leafId = null;
		this.flushed = false;

		if (this.persist) {
			// Filename: ISO timestamp (filesystem-safe) + session id.
			const fileTimestamp = timestamp.replace(/[:.]/g, "-");
			this.sessionFile = join(this.getSessionDir(), `${fileTimestamp}_${this.sessionId}.jsonl`);
		}
		return this.sessionFile;
	}

	/**
	 * Rebuild byId/labelsById/leafId from fileEntries.
	 * The last non-header entry in file order becomes the leaf; label entries
	 * are replayed so the latest label (or clear) per target wins.
	 */
	private _buildIndex(): void {
		this.byId.clear();
		this.labelsById.clear();
		this.leafId = null;
		for (const entry of this.fileEntries) {
			if (entry.type === "session") continue;
			this.byId.set(entry.id, entry);
			this.leafId = entry.id;
			if (entry.type === "label") {
				if (entry.label) {
					this.labelsById.set(entry.targetId, entry.label);
				} else {
					this.labelsById.delete(entry.targetId);
				}
			}
		}
	}

	/** Rewrite the entire session file from fileEntries (one JSON line per entry). */
	private _rewriteFile(): void {
		if (!this.persist || !this.sessionFile) return;
		const content = `${this.fileEntries.map((e) => JSON.stringify(e)).join("\n")}\n`;
		writeFileSync(this.sessionFile, content);
	}

	isPersisted(): boolean {
		return this.persist;
	}

	getCwd(): string {
		return this.cwd;
	}

	getSessionDir(): string {
		return this.sessionDir;
	}

	getSessionId(): string {
		return this.sessionId;
	}

	getSessionFile(): string | undefined {
		return this.sessionFile;
	}

	/**
	 * Write a newly appended entry to disk, deferring the very first write
	 * until the session contains at least one assistant message: before that,
	 * entries only accumulate in memory (flushed stays false); once an
	 * assistant message exists, the full backlog is written, after which
	 * entries append one line at a time.
	 */
	_persist(entry: SessionEntry): void {
		if (!this.persist || !this.sessionFile) return;

		const hasAssistant = this.fileEntries.some((e) => e.type === "message" && e.message.role === "assistant");
		if (!hasAssistant) {
			// Mark as not flushed so when assistant arrives, all entries get written
			this.flushed = false;
			return;
		}

		if (!this.flushed) {
			for (const e of this.fileEntries) {
				appendFileSync(this.sessionFile, `${JSON.stringify(e)}\n`);
			}
			this.flushed = true;
		} else {
			appendFileSync(this.sessionFile, `${JSON.stringify(entry)}\n`);
		}
	}

	/** Record an entry in memory (list + index), advance the leaf, then persist it. */
	private _appendEntry(entry: SessionEntry): void {
		this.fileEntries.push(entry);
		this.byId.set(entry.id, entry);
		this.leafId = entry.id;
		this._persist(entry);
	}

	/** Append a message as child of current leaf, then advance leaf. Returns entry id.
	 * Does not allow writing CompactionSummaryMessage and BranchSummaryMessage directly.
	 * Reason: we want these to be top-level entries in the session, not message session entries,
	 * so it is easier to find them.
	 * These need to be appended via appendCompaction() and appendBranchSummary() methods.
	 */
	appendMessage(message: Message | CustomMessage | BashExecutionMessage): string {
		const entry: SessionMessageEntry = {
			type: "message",
			id: generateId(this.byId),
			parentId: this.leafId,
			timestamp: new Date().toISOString(),
			message,
		};
		this._appendEntry(entry);
		return entry.id;
	}

	/** Append a thinking level change as child of current leaf, then advance leaf. Returns entry id. */
	appendThinkingLevelChange(thinkingLevel: string): string {
		const entry: ThinkingLevelChangeEntry = {
			type: "thinking_level_change",
			id: generateId(this.byId),
			parentId: this.leafId,
			timestamp: new Date().toISOString(),
			thinkingLevel,
		};
		this._appendEntry(entry);
		return entry.id;
	}

	/** Append a model change as child of current leaf, then advance leaf. Returns entry id.
*/ + appendModelChange(provider: string, modelId: string): string { + const entry: ModelChangeEntry = { + type: "model_change", + id: generateId(this.byId), + parentId: this.leafId, + timestamp: new Date().toISOString(), + provider, + modelId, + }; + this._appendEntry(entry); + return entry.id; + } + + /** Append a compaction summary as child of current leaf, then advance leaf. Returns entry id. */ + appendCompaction( + summary: string, + firstKeptEntryId: string, + tokensBefore: number, + details?: T, + fromHook?: boolean, + ): string { + const entry: CompactionEntry = { + type: "compaction", + id: generateId(this.byId), + parentId: this.leafId, + timestamp: new Date().toISOString(), + summary, + firstKeptEntryId, + tokensBefore, + details, + fromHook, + }; + this._appendEntry(entry); + return entry.id; + } + + /** Append a custom entry (for extensions) as child of current leaf, then advance leaf. Returns entry id. */ + appendCustomEntry(customType: string, data?: unknown): string { + const entry: CustomEntry = { + type: "custom", + customType, + data, + id: generateId(this.byId), + parentId: this.leafId, + timestamp: new Date().toISOString(), + }; + this._appendEntry(entry); + return entry.id; + } + + /** Append a session info entry (e.g., display name). Returns entry id. */ + appendSessionInfo(name: string): string { + const entry: SessionInfoEntry = { + type: "session_info", + id: generateId(this.byId), + parentId: this.leafId, + timestamp: new Date().toISOString(), + name: name.trim(), + }; + this._appendEntry(entry); + return entry.id; + } + + /** Get the current session name from the latest session_info entry, if any. 
*/ + getSessionName(): string | undefined { + // Walk entries in reverse to find the latest session_info with a name + const entries = this.getEntries(); + for (let i = entries.length - 1; i >= 0; i--) { + const entry = entries[i]; + if (entry.type === "session_info" && entry.name) { + return entry.name; + } + } + return undefined; + } + + /** + * Append a custom message entry (for extensions) that participates in LLM context. + * @param customType Extension identifier for filtering on reload + * @param content Message content (string or TextContent/ImageContent array) + * @param display Whether to show in TUI (true = styled display, false = hidden) + * @param details Optional extension-specific metadata (not sent to LLM) + * @returns Entry id + */ + appendCustomMessageEntry( + customType: string, + content: string | (TextContent | ImageContent)[], + display: boolean, + details?: T, + ): string { + const entry: CustomMessageEntry = { + type: "custom_message", + customType, + content, + display, + details, + id: generateId(this.byId), + parentId: this.leafId, + timestamp: new Date().toISOString(), + }; + this._appendEntry(entry); + return entry.id; + } + + // ========================================================================= + // Tree Traversal + // ========================================================================= + + getLeafId(): string | null { + return this.leafId; + } + + getLeafEntry(): SessionEntry | undefined { + return this.leafId ? this.byId.get(this.leafId) : undefined; + } + + getEntry(id: string): SessionEntry | undefined { + return this.byId.get(id); + } + + /** + * Get all direct children of an entry. + */ + getChildren(parentId: string): SessionEntry[] { + const children: SessionEntry[] = []; + for (const entry of this.byId.values()) { + if (entry.parentId === parentId) { + children.push(entry); + } + } + return children; + } + + /** + * Get the label for an entry, if any. 
+ */ + getLabel(id: string): string | undefined { + return this.labelsById.get(id); + } + + /** + * Set or clear a label on an entry. + * Labels are user-defined markers for bookmarking/navigation. + * Pass undefined or empty string to clear the label. + */ + appendLabelChange(targetId: string, label: string | undefined): string { + if (!this.byId.has(targetId)) { + throw new Error(`Entry ${targetId} not found`); + } + const entry: LabelEntry = { + type: "label", + id: generateId(this.byId), + parentId: this.leafId, + timestamp: new Date().toISOString(), + targetId, + label, + }; + this._appendEntry(entry); + if (label) { + this.labelsById.set(targetId, label); + } else { + this.labelsById.delete(targetId); + } + return entry.id; + } + + /** + * Walk from entry to root, returning all entries in path order. + * Includes all entry types (messages, compaction, model changes, etc.). + * Use buildSessionContext() to get the resolved messages for the LLM. + */ + getBranch(fromId?: string): SessionEntry[] { + const path: SessionEntry[] = []; + const startId = fromId ?? this.leafId; + let current = startId ? this.byId.get(startId) : undefined; + while (current) { + path.unshift(current); + current = current.parentId ? this.byId.get(current.parentId) : undefined; + } + return path; + } + + /** + * Build the session context (what gets sent to the LLM). + * Uses tree traversal from current leaf. + */ + buildSessionContext(): SessionContext { + return buildSessionContext(this.getEntries(), this.leafId, this.byId); + } + + /** + * Get session header. + */ + getHeader(): SessionHeader | null { + const h = this.fileEntries.find((e) => e.type === "session"); + return h ? (h as SessionHeader) : null; + } + + /** + * Get all session entries (excludes header). Returns a shallow copy. + * The session is append-only: use appendXXX() to add entries, branch() to + * change the leaf pointer. Entries cannot be modified or deleted. 
+ */ + getEntries(): SessionEntry[] { + return this.fileEntries.filter((e): e is SessionEntry => e.type !== "session"); + } + + /** + * Get the session as a tree structure. Returns a shallow defensive copy of all entries. + * A well-formed session has exactly one root (first entry with parentId === null). + * Orphaned entries (broken parent chain) are also returned as roots. + */ + getTree(): SessionTreeNode[] { + const entries = this.getEntries(); + const nodeMap = new Map(); + const roots: SessionTreeNode[] = []; + + // Create nodes with resolved labels + for (const entry of entries) { + const label = this.labelsById.get(entry.id); + nodeMap.set(entry.id, { entry, children: [], label }); + } + + // Build tree + for (const entry of entries) { + const node = nodeMap.get(entry.id)!; + if (entry.parentId === null || entry.parentId === entry.id) { + roots.push(node); + } else { + const parent = nodeMap.get(entry.parentId); + if (parent) { + parent.children.push(node); + } else { + // Orphan - treat as root + roots.push(node); + } + } + } + + // Sort children by timestamp (oldest first, newest at bottom) + // Use iterative approach to avoid stack overflow on deep trees + const stack: SessionTreeNode[] = [...roots]; + while (stack.length > 0) { + const node = stack.pop()!; + node.children.sort((a, b) => new Date(a.entry.timestamp).getTime() - new Date(b.entry.timestamp).getTime()); + stack.push(...node.children); + } + + return roots; + } + + // ========================================================================= + // Branching + // ========================================================================= + + /** + * Start a new branch from an earlier entry. + * Moves the leaf pointer to the specified entry. The next appendXXX() call + * will create a child of that entry, forming a new branch. Existing entries + * are not modified or deleted. 
+ */ + branch(branchFromId: string): void { + if (!this.byId.has(branchFromId)) { + throw new Error(`Entry ${branchFromId} not found`); + } + this.leafId = branchFromId; + } + + /** + * Reset the leaf pointer to null (before any entries). + * The next appendXXX() call will create a new root entry (parentId = null). + * Use this when navigating to re-edit the first user message. + */ + resetLeaf(): void { + this.leafId = null; + } + + /** + * Start a new branch with a summary of the abandoned path. + * Same as branch(), but also appends a branch_summary entry that captures + * context from the abandoned conversation path. + */ + branchWithSummary(branchFromId: string | null, summary: string, details?: unknown, fromHook?: boolean): string { + if (branchFromId !== null && !this.byId.has(branchFromId)) { + throw new Error(`Entry ${branchFromId} not found`); + } + this.leafId = branchFromId; + const entry: BranchSummaryEntry = { + type: "branch_summary", + id: generateId(this.byId), + parentId: branchFromId, + timestamp: new Date().toISOString(), + fromId: branchFromId ?? "root", + summary, + details, + fromHook, + }; + this._appendEntry(entry); + return entry.id; + } + + /** + * Create a new session file containing only the path from root to the specified leaf. + * Useful for extracting a single conversation path from a branched session. + * Returns the new session file path, or undefined if not persisting. 
	 */
	createBranchedSession(leafId: string): string | undefined {
		const previousSessionFile = this.sessionFile;
		const path = this.getBranch(leafId);
		if (path.length === 0) {
			throw new Error(`Entry ${leafId} not found`);
		}

		// Filter out LabelEntry from path - we'll recreate them from the resolved map
		const pathWithoutLabels = path.filter((e) => e.type !== "label");

		const newSessionId = randomUUID();
		const timestamp = new Date().toISOString();
		const fileTimestamp = timestamp.replace(/[:.]/g, "-");
		const newSessionFile = join(this.getSessionDir(), `${fileTimestamp}_${newSessionId}.jsonl`);

		// New header links back to the previous file (persisted mode only).
		const header: SessionHeader = {
			type: "session",
			version: CURRENT_SESSION_VERSION,
			id: newSessionId,
			timestamp,
			cwd: this.cwd,
			parentSession: this.persist ? previousSessionFile : undefined,
		};

		// Collect labels for entries in the path (only labels whose target
		// survives the extraction are carried over).
		const pathEntryIds = new Set(pathWithoutLabels.map((e) => e.id));
		const labelsToWrite: Array<{ targetId: string; label: string }> = [];
		for (const [targetId, label] of this.labelsById) {
			if (pathEntryIds.has(targetId)) {
				labelsToWrite.push({ targetId, label });
			}
		}

		if (this.persist) {
			// Build label entries, chained one after another off the last path
			// entry so the tree stays a single line.
			const lastEntryId = pathWithoutLabels[pathWithoutLabels.length - 1]?.id || null;
			let parentId = lastEntryId;
			const labelEntries: LabelEntry[] = [];
			for (const { targetId, label } of labelsToWrite) {
				const labelEntry: LabelEntry = {
					type: "label",
					// NOTE(review): a Set of taken ids is passed here where other
					// call sites pass the byId Map — presumably generateId only
					// needs .has(); confirm against generateId's signature.
					id: generateId(new Set(pathEntryIds)),
					parentId,
					timestamp: new Date().toISOString(),
					targetId,
					label,
				};
				pathEntryIds.add(labelEntry.id);
				labelEntries.push(labelEntry);
				parentId = labelEntry.id;
			}

			this.fileEntries = [header, ...pathWithoutLabels, ...labelEntries];
			this.sessionId = newSessionId;
			this.sessionFile = newSessionFile;
			this._buildIndex();

			// Only write the file now if it contains an assistant message.
			// Otherwise defer to _persist(), which creates the file on the
			// first assistant response, matching the newSession() contract
			// and avoiding the duplicate-header bug when _persist()'s
			// no-assistant guard later resets flushed to false.
			const hasAssistant = this.fileEntries.some((e) => e.type === "message" && e.message.role === "assistant");
			if (hasAssistant) {
				this._rewriteFile();
				this.flushed = true;
			} else {
				this.flushed = false;
			}

			return newSessionFile;
		}

		// In-memory mode: replace current session with the path + labels
		const labelEntries: LabelEntry[] = [];
		let parentId = pathWithoutLabels[pathWithoutLabels.length - 1]?.id || null;
		for (const { targetId, label } of labelsToWrite) {
			const labelEntry: LabelEntry = {
				type: "label",
				id: generateId(new Set([...pathEntryIds, ...labelEntries.map((e) => e.id)])),
				parentId,
				timestamp: new Date().toISOString(),
				targetId,
				label,
			};
			labelEntries.push(labelEntry);
			parentId = labelEntry.id;
		}
		this.fileEntries = [header, ...pathWithoutLabels, ...labelEntries];
		this.sessionId = newSessionId;
		this._buildIndex();
		return undefined;
	}

	/**
	 * Create a new session.
	 * @param cwd Working directory (stored in session header)
	 * @param sessionDir Optional session directory. If omitted, uses default (~/.pi/agent/sessions/<cwd>/).
	 */
	static create(cwd: string, sessionDir?: string): SessionManager {
		const dir = sessionDir ?? getDefaultSessionDir(cwd);
		return new SessionManager(cwd, dir, undefined, true);
	}

	/**
	 * Open a specific session file.
	 * @param path Path to session file
	 * @param sessionDir Optional session directory for /new or /branch. If omitted, derives from file's parent.
+ */ + static open(path: string, sessionDir?: string): SessionManager { + // Extract cwd from session header if possible, otherwise use process.cwd() + const entries = loadEntriesFromFile(path); + const header = entries.find((e) => e.type === "session") as SessionHeader | undefined; + const cwd = header?.cwd ?? process.cwd(); + // If no sessionDir provided, derive from file's parent directory + const dir = sessionDir ?? resolve(path, ".."); + return new SessionManager(cwd, dir, path, true); + } + + /** + * Continue the most recent session, or create new if none. + * @param cwd Working directory + * @param sessionDir Optional session directory. If omitted, uses default (~/.pi/agent/sessions//). + */ + static continueRecent(cwd: string, sessionDir?: string): SessionManager { + const dir = sessionDir ?? getDefaultSessionDir(cwd); + const mostRecent = findMostRecentSession(dir); + if (mostRecent) { + return new SessionManager(cwd, dir, mostRecent, true); + } + return new SessionManager(cwd, dir, undefined, true); + } + + /** Create an in-memory session (no file persistence) */ + static inMemory(cwd: string = process.cwd()): SessionManager { + return new SessionManager(cwd, "", undefined, false); + } + + /** + * Fork a session from another project directory into the current project. + * Creates a new session in the target cwd with the full history from the source session. + * @param sourcePath Path to the source session file + * @param targetCwd Target working directory (where the new session will be stored) + * @param sessionDir Optional session directory. If omitted, uses default for targetCwd. 
+ */ + static forkFrom(sourcePath: string, targetCwd: string, sessionDir?: string): SessionManager { + const sourceEntries = loadEntriesFromFile(sourcePath); + if (sourceEntries.length === 0) { + throw new Error(`Cannot fork: source session file is empty or invalid: ${sourcePath}`); + } + + const sourceHeader = sourceEntries.find((e) => e.type === "session") as SessionHeader | undefined; + if (!sourceHeader) { + throw new Error(`Cannot fork: source session has no header: ${sourcePath}`); + } + + const dir = sessionDir ?? getDefaultSessionDir(targetCwd); + if (!existsSync(dir)) { + mkdirSync(dir, { recursive: true }); + } + + // Create new session file with new ID but forked content + const newSessionId = randomUUID(); + const timestamp = new Date().toISOString(); + const fileTimestamp = timestamp.replace(/[:.]/g, "-"); + const newSessionFile = join(dir, `${fileTimestamp}_${newSessionId}.jsonl`); + + // Write new header pointing to source as parent, with updated cwd + const newHeader: SessionHeader = { + type: "session", + version: CURRENT_SESSION_VERSION, + id: newSessionId, + timestamp, + cwd: targetCwd, + parentSession: sourcePath, + }; + appendFileSync(newSessionFile, `${JSON.stringify(newHeader)}\n`); + + // Copy all non-header entries from source + for (const entry of sourceEntries) { + if (entry.type !== "session") { + appendFileSync(newSessionFile, `${JSON.stringify(entry)}\n`); + } + } + + return new SessionManager(targetCwd, dir, newSessionFile, true); + } + + /** + * List all sessions for a directory. + * @param cwd Working directory (used to compute default session directory) + * @param sessionDir Optional session directory. If omitted, uses default (~/.pi/agent/sessions//). + * @param onProgress Optional callback for progress updates (loaded, total) + */ + static async list(cwd: string, sessionDir?: string, onProgress?: SessionListProgress): Promise { + const dir = sessionDir ?? 
getDefaultSessionDir(cwd); + const sessions = await listSessionsFromDir(dir, onProgress); + sessions.sort((a, b) => b.modified.getTime() - a.modified.getTime()); + return sessions; + } + + /** + * List all sessions across all project directories. + * @param onProgress Optional callback for progress updates (loaded, total) + */ + static async listAll(onProgress?: SessionListProgress): Promise { + const sessionsDir = getSessionsDir(); + + try { + if (!existsSync(sessionsDir)) { + return []; + } + const entries = await readdir(sessionsDir, { withFileTypes: true }); + const dirs = entries.filter((e) => e.isDirectory()).map((e) => join(sessionsDir, e.name)); + + // Count total files first for accurate progress + let totalFiles = 0; + const dirFiles: string[][] = []; + for (const dir of dirs) { + try { + const files = (await readdir(dir)).filter((f) => f.endsWith(".jsonl")); + dirFiles.push(files.map((f) => join(dir, f))); + totalFiles += files.length; + } catch { + dirFiles.push([]); + } + } + + // Process all files with progress tracking + let loaded = 0; + const sessions: SessionInfo[] = []; + const allFiles = dirFiles.flat(); + + const results = await Promise.all( + allFiles.map(async (file) => { + const info = await buildSessionInfo(file); + loaded++; + onProgress?.(loaded, totalFiles); + return info; + }), + ); + + for (const info of results) { + if (info) { + sessions.push(info); + } + } + + sessions.sort((a, b) => b.modified.getTime() - a.modified.getTime()); + return sessions; + } catch { + return []; + } + } +} diff --git a/packages/pi-coding-agent/src/core/settings-manager.ts b/packages/pi-coding-agent/src/core/settings-manager.ts new file mode 100644 index 000000000..ef61acad8 --- /dev/null +++ b/packages/pi-coding-agent/src/core/settings-manager.ts @@ -0,0 +1,942 @@ +import type { Transport } from "@gsd/pi-ai"; +import { existsSync, mkdirSync, readFileSync, writeFileSync } from "fs"; +import { dirname, join } from "path"; +import lockfile from 
"proper-lockfile"; +import { CONFIG_DIR_NAME, getAgentDir } from "../config.js"; + +export interface CompactionSettings { + enabled?: boolean; // default: true + reserveTokens?: number; // default: 16384 + keepRecentTokens?: number; // default: 20000 +} + +export interface BranchSummarySettings { + reserveTokens?: number; // default: 16384 (tokens reserved for prompt + LLM response) + skipPrompt?: boolean; // default: false - when true, skips "Summarize branch?" prompt and defaults to no summary +} + +export interface RetrySettings { + enabled?: boolean; // default: true + maxRetries?: number; // default: 3 + baseDelayMs?: number; // default: 2000 (exponential backoff: 2s, 4s, 8s) + maxDelayMs?: number; // default: 60000 (max server-requested delay before failing) +} + +export interface TerminalSettings { + showImages?: boolean; // default: true (only relevant if terminal supports images) + clearOnShrink?: boolean; // default: false (clear empty rows when content shrinks) +} + +export interface ImageSettings { + autoResize?: boolean; // default: true (resize images to 2000x2000 max for better model compatibility) + blockImages?: boolean; // default: false - when true, prevents all images from being sent to LLM providers +} + +export interface ThinkingBudgetsSettings { + minimal?: number; + low?: number; + medium?: number; + high?: number; +} + +export interface MarkdownSettings { + codeBlockIndent?: string; // default: " " +} + +export type TransportSetting = Transport; + +/** + * Package source for npm/git packages. 
+ * - String form: load all resources from the package + * - Object form: filter which resources to load + */ +export type PackageSource = + | string + | { + source: string; + extensions?: string[]; + skills?: string[]; + prompts?: string[]; + themes?: string[]; + }; + +export interface Settings { + lastChangelogVersion?: string; + defaultProvider?: string; + defaultModel?: string; + defaultThinkingLevel?: "off" | "minimal" | "low" | "medium" | "high" | "xhigh"; + transport?: TransportSetting; // default: "sse" + steeringMode?: "all" | "one-at-a-time"; + followUpMode?: "all" | "one-at-a-time"; + theme?: string; + compaction?: CompactionSettings; + branchSummary?: BranchSummarySettings; + retry?: RetrySettings; + hideThinkingBlock?: boolean; + shellPath?: string; // Custom shell path (e.g., for Cygwin users on Windows) + quietStartup?: boolean; + shellCommandPrefix?: string; // Prefix prepended to every bash command (e.g., "shopt -s expand_aliases" for alias support) + collapseChangelog?: boolean; // Show condensed changelog after update (use /changelog for full) + packages?: PackageSource[]; // Array of npm/git package sources (string or object with filtering) + extensions?: string[]; // Array of local extension file paths or directories + skills?: string[]; // Array of local skill file paths or directories + prompts?: string[]; // Array of local prompt template paths or directories + themes?: string[]; // Array of local theme file paths or directories + enableSkillCommands?: boolean; // default: true - register skills as /skill:name commands + terminal?: TerminalSettings; + images?: ImageSettings; + enabledModels?: string[]; // Model patterns for cycling (same format as --models CLI flag) + doubleEscapeAction?: "fork" | "tree" | "none"; // Action for double-escape with empty editor (default: "tree") + treeFilterMode?: "default" | "no-tools" | "user-only" | "labeled-only" | "all"; // Default filter when opening /tree + thinkingBudgets?: ThinkingBudgetsSettings; // 
Custom token budgets for thinking levels + editorPaddingX?: number; // Horizontal padding for input editor (default: 0) + autocompleteMaxVisible?: number; // Max visible items in autocomplete dropdown (default: 5) + showHardwareCursor?: boolean; // Show terminal cursor while still positioning it for IME + markdown?: MarkdownSettings; +} + +/** Deep merge settings: project/overrides take precedence, nested objects merge recursively */ +function deepMergeSettings(base: Settings, overrides: Settings): Settings { + const result: Settings = { ...base }; + + for (const key of Object.keys(overrides) as (keyof Settings)[]) { + const overrideValue = overrides[key]; + const baseValue = base[key]; + + if (overrideValue === undefined) { + continue; + } + + // For nested objects, merge recursively + if ( + typeof overrideValue === "object" && + overrideValue !== null && + !Array.isArray(overrideValue) && + typeof baseValue === "object" && + baseValue !== null && + !Array.isArray(baseValue) + ) { + (result as Record)[key] = { ...baseValue, ...overrideValue }; + } else { + // For primitives and arrays, override value wins + (result as Record)[key] = overrideValue; + } + } + + return result; +} + +export type SettingsScope = "global" | "project"; + +export interface SettingsStorage { + withLock(scope: SettingsScope, fn: (current: string | undefined) => string | undefined): void; +} + +export interface SettingsError { + scope: SettingsScope; + error: Error; +} + +export class FileSettingsStorage implements SettingsStorage { + private globalSettingsPath: string; + private projectSettingsPath: string; + + constructor(cwd: string = process.cwd(), agentDir: string = getAgentDir()) { + this.globalSettingsPath = join(agentDir, "settings.json"); + this.projectSettingsPath = join(cwd, CONFIG_DIR_NAME, "settings.json"); + } + + private acquireLockSyncWithRetry(path: string): () => void { + const maxAttempts = 10; + const delayMs = 20; + let lastError: unknown; + + for (let attempt = 1; 
attempt <= maxAttempts; attempt++) { + try { + return lockfile.lockSync(path, { realpath: false }); + } catch (error) { + const code = + typeof error === "object" && error !== null && "code" in error + ? String((error as { code?: unknown }).code) + : undefined; + if (code !== "ELOCKED" || attempt === maxAttempts) { + throw error; + } + lastError = error; + const start = Date.now(); + while (Date.now() - start < delayMs) { + // Sleep synchronously to avoid changing callers to async. + } + } + } + + throw (lastError as Error) ?? new Error("Failed to acquire settings lock"); + } + + withLock(scope: SettingsScope, fn: (current: string | undefined) => string | undefined): void { + const path = scope === "global" ? this.globalSettingsPath : this.projectSettingsPath; + const dir = dirname(path); + + let release: (() => void) | undefined; + try { + // Only create directory and lock if file exists or we need to write + const fileExists = existsSync(path); + if (fileExists) { + release = this.acquireLockSyncWithRetry(path); + } + const current = fileExists ? readFileSync(path, "utf-8") : undefined; + const next = fn(current); + if (next !== undefined) { + // Only create directory when we actually need to write + if (!existsSync(dir)) { + mkdirSync(dir, { recursive: true }); + } + if (!release) { + release = this.acquireLockSyncWithRetry(path); + } + writeFileSync(path, next, "utf-8"); + } + } finally { + if (release) { + release(); + } + } + } +} + +export class InMemorySettingsStorage implements SettingsStorage { + private global: string | undefined; + private project: string | undefined; + + withLock(scope: SettingsScope, fn: (current: string | undefined) => string | undefined): void { + const current = scope === "global" ? 
this.global : this.project; + const next = fn(current); + if (next !== undefined) { + if (scope === "global") { + this.global = next; + } else { + this.project = next; + } + } + } +} + +export class SettingsManager { + private storage: SettingsStorage; + private globalSettings: Settings; + private projectSettings: Settings; + private settings: Settings; + private modifiedFields = new Set(); // Track global fields modified during session + private modifiedNestedFields = new Map>(); // Track global nested field modifications + private modifiedProjectFields = new Set(); // Track project fields modified during session + private modifiedProjectNestedFields = new Map>(); // Track project nested field modifications + private globalSettingsLoadError: Error | null = null; // Track if global settings file had parse errors + private projectSettingsLoadError: Error | null = null; // Track if project settings file had parse errors + private writeQueue: Promise = Promise.resolve(); + private errors: SettingsError[]; + + private constructor( + storage: SettingsStorage, + initialGlobal: Settings, + initialProject: Settings, + globalLoadError: Error | null = null, + projectLoadError: Error | null = null, + initialErrors: SettingsError[] = [], + ) { + this.storage = storage; + this.globalSettings = initialGlobal; + this.projectSettings = initialProject; + this.globalSettingsLoadError = globalLoadError; + this.projectSettingsLoadError = projectLoadError; + this.errors = [...initialErrors]; + this.settings = deepMergeSettings(this.globalSettings, this.projectSettings); + } + + /** Create a SettingsManager that loads from files */ + static create(cwd: string = process.cwd(), agentDir: string = getAgentDir()): SettingsManager { + const storage = new FileSettingsStorage(cwd, agentDir); + return SettingsManager.fromStorage(storage); + } + + /** Create a SettingsManager from an arbitrary storage backend */ + static fromStorage(storage: SettingsStorage): SettingsManager { + const globalLoad 
= SettingsManager.tryLoadFromStorage(storage, "global"); + const projectLoad = SettingsManager.tryLoadFromStorage(storage, "project"); + const initialErrors: SettingsError[] = []; + if (globalLoad.error) { + initialErrors.push({ scope: "global", error: globalLoad.error }); + } + if (projectLoad.error) { + initialErrors.push({ scope: "project", error: projectLoad.error }); + } + + return new SettingsManager( + storage, + globalLoad.settings, + projectLoad.settings, + globalLoad.error, + projectLoad.error, + initialErrors, + ); + } + + /** Create an in-memory SettingsManager (no file I/O) */ + static inMemory(settings: Partial = {}): SettingsManager { + const storage = new InMemorySettingsStorage(); + return new SettingsManager(storage, settings, {}); + } + + private static loadFromStorage(storage: SettingsStorage, scope: SettingsScope): Settings { + let content: string | undefined; + storage.withLock(scope, (current) => { + content = current; + return undefined; + }); + + if (!content) { + return {}; + } + const settings = JSON.parse(content); + return SettingsManager.migrateSettings(settings); + } + + private static tryLoadFromStorage( + storage: SettingsStorage, + scope: SettingsScope, + ): { settings: Settings; error: Error | null } { + try { + return { settings: SettingsManager.loadFromStorage(storage, scope), error: null }; + } catch (error) { + return { settings: {}, error: error as Error }; + } + } + + /** Migrate old settings format to new format */ + private static migrateSettings(settings: Record): Settings { + // Migrate queueMode -> steeringMode + if ("queueMode" in settings && !("steeringMode" in settings)) { + settings.steeringMode = settings.queueMode; + delete settings.queueMode; + } + + // Migrate legacy websockets boolean -> transport enum + if (!("transport" in settings) && typeof settings.websockets === "boolean") { + settings.transport = settings.websockets ? 
"websocket" : "sse"; + delete settings.websockets; + } + + // Migrate old skills object format to new array format + if ( + "skills" in settings && + typeof settings.skills === "object" && + settings.skills !== null && + !Array.isArray(settings.skills) + ) { + const skillsSettings = settings.skills as { + enableSkillCommands?: boolean; + customDirectories?: unknown; + }; + if (skillsSettings.enableSkillCommands !== undefined && settings.enableSkillCommands === undefined) { + settings.enableSkillCommands = skillsSettings.enableSkillCommands; + } + if (Array.isArray(skillsSettings.customDirectories) && skillsSettings.customDirectories.length > 0) { + settings.skills = skillsSettings.customDirectories; + } else { + delete settings.skills; + } + } + + return settings as Settings; + } + + getGlobalSettings(): Settings { + return structuredClone(this.globalSettings); + } + + getProjectSettings(): Settings { + return structuredClone(this.projectSettings); + } + + reload(): void { + const globalLoad = SettingsManager.tryLoadFromStorage(this.storage, "global"); + if (!globalLoad.error) { + this.globalSettings = globalLoad.settings; + this.globalSettingsLoadError = null; + } else { + this.globalSettingsLoadError = globalLoad.error; + this.recordError("global", globalLoad.error); + } + + this.modifiedFields.clear(); + this.modifiedNestedFields.clear(); + this.modifiedProjectFields.clear(); + this.modifiedProjectNestedFields.clear(); + + const projectLoad = SettingsManager.tryLoadFromStorage(this.storage, "project"); + if (!projectLoad.error) { + this.projectSettings = projectLoad.settings; + this.projectSettingsLoadError = null; + } else { + this.projectSettingsLoadError = projectLoad.error; + this.recordError("project", projectLoad.error); + } + + this.settings = deepMergeSettings(this.globalSettings, this.projectSettings); + } + + /** Apply additional overrides on top of current settings */ + applyOverrides(overrides: Partial): void { + this.settings = 
deepMergeSettings(this.settings, overrides); + } + + /** Mark a global field as modified during this session */ + private markModified(field: keyof Settings, nestedKey?: string): void { + this.modifiedFields.add(field); + if (nestedKey) { + if (!this.modifiedNestedFields.has(field)) { + this.modifiedNestedFields.set(field, new Set()); + } + this.modifiedNestedFields.get(field)!.add(nestedKey); + } + } + + /** Mark a project field as modified during this session */ + private markProjectModified(field: keyof Settings, nestedKey?: string): void { + this.modifiedProjectFields.add(field); + if (nestedKey) { + if (!this.modifiedProjectNestedFields.has(field)) { + this.modifiedProjectNestedFields.set(field, new Set()); + } + this.modifiedProjectNestedFields.get(field)!.add(nestedKey); + } + } + + private recordError(scope: SettingsScope, error: unknown): void { + const normalizedError = error instanceof Error ? error : new Error(String(error)); + this.errors.push({ scope, error: normalizedError }); + } + + private clearModifiedScope(scope: SettingsScope): void { + if (scope === "global") { + this.modifiedFields.clear(); + this.modifiedNestedFields.clear(); + return; + } + + this.modifiedProjectFields.clear(); + this.modifiedProjectNestedFields.clear(); + } + + private enqueueWrite(scope: SettingsScope, task: () => void): void { + this.writeQueue = this.writeQueue + .then(() => { + task(); + this.clearModifiedScope(scope); + }) + .catch((error) => { + this.recordError(scope, error); + }); + } + + private cloneModifiedNestedFields(source: Map>): Map> { + const snapshot = new Map>(); + for (const [key, value] of source.entries()) { + snapshot.set(key, new Set(value)); + } + return snapshot; + } + + private persistScopedSettings( + scope: SettingsScope, + snapshotSettings: Settings, + modifiedFields: Set, + modifiedNestedFields: Map>, + ): void { + this.storage.withLock(scope, (current) => { + const currentFileSettings = current + ? 
SettingsManager.migrateSettings(JSON.parse(current) as Record) + : {}; + const mergedSettings: Settings = { ...currentFileSettings }; + for (const field of modifiedFields) { + const value = snapshotSettings[field]; + if (modifiedNestedFields.has(field) && typeof value === "object" && value !== null) { + const nestedModified = modifiedNestedFields.get(field)!; + const baseNested = (currentFileSettings[field] as Record) ?? {}; + const inMemoryNested = value as Record; + const mergedNested = { ...baseNested }; + for (const nestedKey of nestedModified) { + mergedNested[nestedKey] = inMemoryNested[nestedKey]; + } + (mergedSettings as Record)[field] = mergedNested; + } else { + (mergedSettings as Record)[field] = value; + } + } + + return JSON.stringify(mergedSettings, null, 2); + }); + } + + private save(): void { + this.settings = deepMergeSettings(this.globalSettings, this.projectSettings); + + if (this.globalSettingsLoadError) { + return; + } + + const snapshotGlobalSettings = structuredClone(this.globalSettings); + const modifiedFields = new Set(this.modifiedFields); + const modifiedNestedFields = this.cloneModifiedNestedFields(this.modifiedNestedFields); + + this.enqueueWrite("global", () => { + this.persistScopedSettings("global", snapshotGlobalSettings, modifiedFields, modifiedNestedFields); + }); + } + + private saveProjectSettings(settings: Settings): void { + this.projectSettings = structuredClone(settings); + this.settings = deepMergeSettings(this.globalSettings, this.projectSettings); + + if (this.projectSettingsLoadError) { + return; + } + + const snapshotProjectSettings = structuredClone(this.projectSettings); + const modifiedFields = new Set(this.modifiedProjectFields); + const modifiedNestedFields = this.cloneModifiedNestedFields(this.modifiedProjectNestedFields); + this.enqueueWrite("project", () => { + this.persistScopedSettings("project", snapshotProjectSettings, modifiedFields, modifiedNestedFields); + }); + } + + async flush(): Promise { + await 
this.writeQueue; + } + + drainErrors(): SettingsError[] { + const drained = [...this.errors]; + this.errors = []; + return drained; + } + + getLastChangelogVersion(): string | undefined { + return this.settings.lastChangelogVersion; + } + + setLastChangelogVersion(version: string): void { + this.globalSettings.lastChangelogVersion = version; + this.markModified("lastChangelogVersion"); + this.save(); + } + + getDefaultProvider(): string | undefined { + return this.settings.defaultProvider; + } + + getDefaultModel(): string | undefined { + return this.settings.defaultModel; + } + + setDefaultProvider(provider: string): void { + this.globalSettings.defaultProvider = provider; + this.markModified("defaultProvider"); + this.save(); + } + + setDefaultModel(modelId: string): void { + this.globalSettings.defaultModel = modelId; + this.markModified("defaultModel"); + this.save(); + } + + setDefaultModelAndProvider(provider: string, modelId: string): void { + this.globalSettings.defaultProvider = provider; + this.globalSettings.defaultModel = modelId; + this.markModified("defaultProvider"); + this.markModified("defaultModel"); + this.save(); + } + + getSteeringMode(): "all" | "one-at-a-time" { + return this.settings.steeringMode || "one-at-a-time"; + } + + setSteeringMode(mode: "all" | "one-at-a-time"): void { + this.globalSettings.steeringMode = mode; + this.markModified("steeringMode"); + this.save(); + } + + getFollowUpMode(): "all" | "one-at-a-time" { + return this.settings.followUpMode || "one-at-a-time"; + } + + setFollowUpMode(mode: "all" | "one-at-a-time"): void { + this.globalSettings.followUpMode = mode; + this.markModified("followUpMode"); + this.save(); + } + + getTheme(): string | undefined { + return this.settings.theme; + } + + setTheme(theme: string): void { + this.globalSettings.theme = theme; + this.markModified("theme"); + this.save(); + } + + getDefaultThinkingLevel(): "off" | "minimal" | "low" | "medium" | "high" | "xhigh" | undefined { + return 
this.settings.defaultThinkingLevel; + } + + setDefaultThinkingLevel(level: "off" | "minimal" | "low" | "medium" | "high" | "xhigh"): void { + this.globalSettings.defaultThinkingLevel = level; + this.markModified("defaultThinkingLevel"); + this.save(); + } + + getTransport(): TransportSetting { + return this.settings.transport ?? "sse"; + } + + setTransport(transport: TransportSetting): void { + this.globalSettings.transport = transport; + this.markModified("transport"); + this.save(); + } + + getCompactionEnabled(): boolean { + return this.settings.compaction?.enabled ?? true; + } + + setCompactionEnabled(enabled: boolean): void { + if (!this.globalSettings.compaction) { + this.globalSettings.compaction = {}; + } + this.globalSettings.compaction.enabled = enabled; + this.markModified("compaction", "enabled"); + this.save(); + } + + getCompactionReserveTokens(): number { + return this.settings.compaction?.reserveTokens ?? 16384; + } + + getCompactionKeepRecentTokens(): number { + return this.settings.compaction?.keepRecentTokens ?? 20000; + } + + getCompactionSettings(): { enabled: boolean; reserveTokens: number; keepRecentTokens: number } { + return { + enabled: this.getCompactionEnabled(), + reserveTokens: this.getCompactionReserveTokens(), + keepRecentTokens: this.getCompactionKeepRecentTokens(), + }; + } + + getBranchSummarySettings(): { reserveTokens: number; skipPrompt: boolean } { + return { + reserveTokens: this.settings.branchSummary?.reserveTokens ?? 16384, + skipPrompt: this.settings.branchSummary?.skipPrompt ?? false, + }; + } + + getBranchSummarySkipPrompt(): boolean { + return this.settings.branchSummary?.skipPrompt ?? false; + } + + getRetryEnabled(): boolean { + return this.settings.retry?.enabled ?? 
true; + } + + setRetryEnabled(enabled: boolean): void { + if (!this.globalSettings.retry) { + this.globalSettings.retry = {}; + } + this.globalSettings.retry.enabled = enabled; + this.markModified("retry", "enabled"); + this.save(); + } + + getRetrySettings(): { enabled: boolean; maxRetries: number; baseDelayMs: number; maxDelayMs: number } { + return { + enabled: this.getRetryEnabled(), + maxRetries: this.settings.retry?.maxRetries ?? 3, + baseDelayMs: this.settings.retry?.baseDelayMs ?? 2000, + maxDelayMs: this.settings.retry?.maxDelayMs ?? 60000, + }; + } + + getHideThinkingBlock(): boolean { + return this.settings.hideThinkingBlock ?? false; + } + + setHideThinkingBlock(hide: boolean): void { + this.globalSettings.hideThinkingBlock = hide; + this.markModified("hideThinkingBlock"); + this.save(); + } + + getShellPath(): string | undefined { + return this.settings.shellPath; + } + + setShellPath(path: string | undefined): void { + this.globalSettings.shellPath = path; + this.markModified("shellPath"); + this.save(); + } + + getQuietStartup(): boolean { + return this.settings.quietStartup ?? false; + } + + setQuietStartup(quiet: boolean): void { + this.globalSettings.quietStartup = quiet; + this.markModified("quietStartup"); + this.save(); + } + + getShellCommandPrefix(): string | undefined { + return this.settings.shellCommandPrefix; + } + + setShellCommandPrefix(prefix: string | undefined): void { + this.globalSettings.shellCommandPrefix = prefix; + this.markModified("shellCommandPrefix"); + this.save(); + } + + getCollapseChangelog(): boolean { + return this.settings.collapseChangelog ?? false; + } + + setCollapseChangelog(collapse: boolean): void { + this.globalSettings.collapseChangelog = collapse; + this.markModified("collapseChangelog"); + this.save(); + } + + getPackages(): PackageSource[] { + return [...(this.settings.packages ?? 
[])]; + } + + setPackages(packages: PackageSource[]): void { + this.globalSettings.packages = packages; + this.markModified("packages"); + this.save(); + } + + setProjectPackages(packages: PackageSource[]): void { + const projectSettings = structuredClone(this.projectSettings); + projectSettings.packages = packages; + this.markProjectModified("packages"); + this.saveProjectSettings(projectSettings); + } + + getExtensionPaths(): string[] { + return [...(this.settings.extensions ?? [])]; + } + + setExtensionPaths(paths: string[]): void { + this.globalSettings.extensions = paths; + this.markModified("extensions"); + this.save(); + } + + setProjectExtensionPaths(paths: string[]): void { + const projectSettings = structuredClone(this.projectSettings); + projectSettings.extensions = paths; + this.markProjectModified("extensions"); + this.saveProjectSettings(projectSettings); + } + + getSkillPaths(): string[] { + return [...(this.settings.skills ?? [])]; + } + + setSkillPaths(paths: string[]): void { + this.globalSettings.skills = paths; + this.markModified("skills"); + this.save(); + } + + setProjectSkillPaths(paths: string[]): void { + const projectSettings = structuredClone(this.projectSettings); + projectSettings.skills = paths; + this.markProjectModified("skills"); + this.saveProjectSettings(projectSettings); + } + + getPromptTemplatePaths(): string[] { + return [...(this.settings.prompts ?? [])]; + } + + setPromptTemplatePaths(paths: string[]): void { + this.globalSettings.prompts = paths; + this.markModified("prompts"); + this.save(); + } + + setProjectPromptTemplatePaths(paths: string[]): void { + const projectSettings = structuredClone(this.projectSettings); + projectSettings.prompts = paths; + this.markProjectModified("prompts"); + this.saveProjectSettings(projectSettings); + } + + getThemePaths(): string[] { + return [...(this.settings.themes ?? 
[])]; + } + + setThemePaths(paths: string[]): void { + this.globalSettings.themes = paths; + this.markModified("themes"); + this.save(); + } + + setProjectThemePaths(paths: string[]): void { + const projectSettings = structuredClone(this.projectSettings); + projectSettings.themes = paths; + this.markProjectModified("themes"); + this.saveProjectSettings(projectSettings); + } + + getEnableSkillCommands(): boolean { + return this.settings.enableSkillCommands ?? true; + } + + setEnableSkillCommands(enabled: boolean): void { + this.globalSettings.enableSkillCommands = enabled; + this.markModified("enableSkillCommands"); + this.save(); + } + + getThinkingBudgets(): ThinkingBudgetsSettings | undefined { + return this.settings.thinkingBudgets; + } + + getShowImages(): boolean { + return this.settings.terminal?.showImages ?? true; + } + + setShowImages(show: boolean): void { + if (!this.globalSettings.terminal) { + this.globalSettings.terminal = {}; + } + this.globalSettings.terminal.showImages = show; + this.markModified("terminal", "showImages"); + this.save(); + } + + getClearOnShrink(): boolean { + // Settings takes precedence, then env var, then default false + if (this.settings.terminal?.clearOnShrink !== undefined) { + return this.settings.terminal.clearOnShrink; + } + return process.env.PI_CLEAR_ON_SHRINK === "1"; + } + + setClearOnShrink(enabled: boolean): void { + if (!this.globalSettings.terminal) { + this.globalSettings.terminal = {}; + } + this.globalSettings.terminal.clearOnShrink = enabled; + this.markModified("terminal", "clearOnShrink"); + this.save(); + } + + getImageAutoResize(): boolean { + return this.settings.images?.autoResize ?? true; + } + + setImageAutoResize(enabled: boolean): void { + if (!this.globalSettings.images) { + this.globalSettings.images = {}; + } + this.globalSettings.images.autoResize = enabled; + this.markModified("images", "autoResize"); + this.save(); + } + + getBlockImages(): boolean { + return this.settings.images?.blockImages ?? 
false; + } + + setBlockImages(blocked: boolean): void { + if (!this.globalSettings.images) { + this.globalSettings.images = {}; + } + this.globalSettings.images.blockImages = blocked; + this.markModified("images", "blockImages"); + this.save(); + } + + getEnabledModels(): string[] | undefined { + return this.settings.enabledModels; + } + + setEnabledModels(patterns: string[] | undefined): void { + this.globalSettings.enabledModels = patterns; + this.markModified("enabledModels"); + this.save(); + } + + getDoubleEscapeAction(): "fork" | "tree" | "none" { + return this.settings.doubleEscapeAction ?? "tree"; + } + + setDoubleEscapeAction(action: "fork" | "tree" | "none"): void { + this.globalSettings.doubleEscapeAction = action; + this.markModified("doubleEscapeAction"); + this.save(); + } + + getTreeFilterMode(): "default" | "no-tools" | "user-only" | "labeled-only" | "all" { + const mode = this.settings.treeFilterMode; + const valid = ["default", "no-tools", "user-only", "labeled-only", "all"]; + return mode && valid.includes(mode) ? mode : "default"; + } + + setTreeFilterMode(mode: "default" | "no-tools" | "user-only" | "labeled-only" | "all"): void { + this.globalSettings.treeFilterMode = mode; + this.markModified("treeFilterMode"); + this.save(); + } + + getShowHardwareCursor(): boolean { + return this.settings.showHardwareCursor ?? process.env.PI_HARDWARE_CURSOR === "1"; + } + + setShowHardwareCursor(enabled: boolean): void { + this.globalSettings.showHardwareCursor = enabled; + this.markModified("showHardwareCursor"); + this.save(); + } + + getEditorPaddingX(): number { + return this.settings.editorPaddingX ?? 0; + } + + setEditorPaddingX(padding: number): void { + this.globalSettings.editorPaddingX = Math.max(0, Math.min(3, Math.floor(padding))); + this.markModified("editorPaddingX"); + this.save(); + } + + getAutocompleteMaxVisible(): number { + return this.settings.autocompleteMaxVisible ?? 
5; + } + + setAutocompleteMaxVisible(maxVisible: number): void { + this.globalSettings.autocompleteMaxVisible = Math.max(3, Math.min(20, Math.floor(maxVisible))); + this.markModified("autocompleteMaxVisible"); + this.save(); + } + + getCodeBlockIndent(): string { + return this.settings.markdown?.codeBlockIndent ?? " "; + } +} diff --git a/packages/pi-coding-agent/src/core/skills.ts b/packages/pi-coding-agent/src/core/skills.ts new file mode 100644 index 000000000..70e1aa647 --- /dev/null +++ b/packages/pi-coding-agent/src/core/skills.ts @@ -0,0 +1,459 @@ +import { existsSync, readdirSync, readFileSync, realpathSync, statSync } from "fs"; +import ignore from "ignore"; +import { homedir } from "os"; +import { basename, dirname, isAbsolute, join, relative, resolve, sep } from "path"; +import { CONFIG_DIR_NAME, getAgentDir } from "../config.js"; +import { parseFrontmatter } from "../utils/frontmatter.js"; +import type { ResourceDiagnostic } from "./diagnostics.js"; + +/** Max name length per spec */ +const MAX_NAME_LENGTH = 64; + +/** Max description length per spec */ +const MAX_DESCRIPTION_LENGTH = 1024; + +const IGNORE_FILE_NAMES = [".gitignore", ".ignore", ".fdignore"]; + +type IgnoreMatcher = ReturnType; + +function toPosixPath(p: string): string { + return p.split(sep).join("/"); +} + +function prefixIgnorePattern(line: string, prefix: string): string | null { + const trimmed = line.trim(); + if (!trimmed) return null; + if (trimmed.startsWith("#") && !trimmed.startsWith("\\#")) return null; + + let pattern = line; + let negated = false; + + if (pattern.startsWith("!")) { + negated = true; + pattern = pattern.slice(1); + } else if (pattern.startsWith("\\!")) { + pattern = pattern.slice(1); + } + + if (pattern.startsWith("/")) { + pattern = pattern.slice(1); + } + + const prefixed = prefix ? `${prefix}${pattern}` : pattern; + return negated ? 
`!${prefixed}` : prefixed; +} + +function addIgnoreRules(ig: IgnoreMatcher, dir: string, rootDir: string): void { + const relativeDir = relative(rootDir, dir); + const prefix = relativeDir ? `${toPosixPath(relativeDir)}/` : ""; + + for (const filename of IGNORE_FILE_NAMES) { + const ignorePath = join(dir, filename); + if (!existsSync(ignorePath)) continue; + try { + const content = readFileSync(ignorePath, "utf-8"); + const patterns = content + .split(/\r?\n/) + .map((line) => prefixIgnorePattern(line, prefix)) + .filter((line): line is string => Boolean(line)); + if (patterns.length > 0) { + ig.add(patterns); + } + } catch {} + } +} + +export interface SkillFrontmatter { + name?: string; + description?: string; + "disable-model-invocation"?: boolean; + [key: string]: unknown; +} + +export interface Skill { + name: string; + description: string; + filePath: string; + baseDir: string; + source: string; + disableModelInvocation: boolean; +} + +export interface LoadSkillsResult { + skills: Skill[]; + diagnostics: ResourceDiagnostic[]; +} + +/** + * Validate skill name per Agent Skills spec. + * Returns array of validation error messages (empty if valid). + */ +function validateName(name: string, parentDirName: string): string[] { + const errors: string[] = []; + + if (name !== parentDirName) { + errors.push(`name "${name}" does not match parent directory "${parentDirName}"`); + } + + if (name.length > MAX_NAME_LENGTH) { + errors.push(`name exceeds ${MAX_NAME_LENGTH} characters (${name.length})`); + } + + if (!/^[a-z0-9-]+$/.test(name)) { + errors.push(`name contains invalid characters (must be lowercase a-z, 0-9, hyphens only)`); + } + + if (name.startsWith("-") || name.endsWith("-")) { + errors.push(`name must not start or end with a hyphen`); + } + + if (name.includes("--")) { + errors.push(`name must not contain consecutive hyphens`); + } + + return errors; +} + +/** + * Validate description per Agent Skills spec. 
+ */ +function validateDescription(description: string | undefined): string[] { + const errors: string[] = []; + + if (!description || description.trim() === "") { + errors.push("description is required"); + } else if (description.length > MAX_DESCRIPTION_LENGTH) { + errors.push(`description exceeds ${MAX_DESCRIPTION_LENGTH} characters (${description.length})`); + } + + return errors; +} + +export interface LoadSkillsFromDirOptions { + /** Directory to scan for skills */ + dir: string; + /** Source identifier for these skills */ + source: string; +} + +/** + * Load skills from a directory. + * + * Discovery rules: + * - direct .md children in the root + * - recursive SKILL.md under subdirectories + */ +export function loadSkillsFromDir(options: LoadSkillsFromDirOptions): LoadSkillsResult { + const { dir, source } = options; + return loadSkillsFromDirInternal(dir, source, true); +} + +function loadSkillsFromDirInternal( + dir: string, + source: string, + includeRootFiles: boolean, + ignoreMatcher?: IgnoreMatcher, + rootDir?: string, +): LoadSkillsResult { + const skills: Skill[] = []; + const diagnostics: ResourceDiagnostic[] = []; + + if (!existsSync(dir)) { + return { skills, diagnostics }; + } + + const root = rootDir ?? dir; + const ig = ignoreMatcher ?? 
ignore(); + addIgnoreRules(ig, dir, root); + + try { + const entries = readdirSync(dir, { withFileTypes: true }); + + for (const entry of entries) { + if (entry.name.startsWith(".")) { + continue; + } + + // Skip node_modules to avoid scanning dependencies + if (entry.name === "node_modules") { + continue; + } + + const fullPath = join(dir, entry.name); + + // For symlinks, check if they point to a directory and follow them + let isDirectory = entry.isDirectory(); + let isFile = entry.isFile(); + if (entry.isSymbolicLink()) { + try { + const stats = statSync(fullPath); + isDirectory = stats.isDirectory(); + isFile = stats.isFile(); + } catch { + // Broken symlink, skip it + continue; + } + } + + const relPath = toPosixPath(relative(root, fullPath)); + const ignorePath = isDirectory ? `${relPath}/` : relPath; + if (ig.ignores(ignorePath)) { + continue; + } + + if (isDirectory) { + const subResult = loadSkillsFromDirInternal(fullPath, source, false, ig, root); + skills.push(...subResult.skills); + diagnostics.push(...subResult.diagnostics); + continue; + } + + if (!isFile) { + continue; + } + + const isRootMd = includeRootFiles && entry.name.endsWith(".md"); + const isSkillMd = !includeRootFiles && entry.name === "SKILL.md"; + if (!isRootMd && !isSkillMd) { + continue; + } + + const result = loadSkillFromFile(fullPath, source); + if (result.skill) { + skills.push(result.skill); + } + diagnostics.push(...result.diagnostics); + } + } catch {} + + return { skills, diagnostics }; +} + +function loadSkillFromFile( + filePath: string, + source: string, +): { skill: Skill | null; diagnostics: ResourceDiagnostic[] } { + const diagnostics: ResourceDiagnostic[] = []; + + try { + const rawContent = readFileSync(filePath, "utf-8"); + const { frontmatter } = parseFrontmatter(rawContent); + const skillDir = dirname(filePath); + const parentDirName = basename(skillDir); + + // Validate description + const descErrors = validateDescription(frontmatter.description); + for (const error 
of descErrors) { + diagnostics.push({ type: "warning", message: error, path: filePath }); + } + + // Use name from frontmatter, or fall back to parent directory name + const name = frontmatter.name || parentDirName; + + // Validate name + const nameErrors = validateName(name, parentDirName); + for (const error of nameErrors) { + diagnostics.push({ type: "warning", message: error, path: filePath }); + } + + // Still load the skill even with warnings (unless description is completely missing) + if (!frontmatter.description || frontmatter.description.trim() === "") { + return { skill: null, diagnostics }; + } + + return { + skill: { + name, + description: frontmatter.description, + filePath, + baseDir: skillDir, + source, + disableModelInvocation: frontmatter["disable-model-invocation"] === true, + }, + diagnostics, + }; + } catch (error) { + const message = error instanceof Error ? error.message : "failed to parse skill file"; + diagnostics.push({ type: "warning", message, path: filePath }); + return { skill: null, diagnostics }; + } +} + +/** + * Format skills for inclusion in a system prompt. + * Uses XML format per Agent Skills standard. + * See: https://agentskills.io/integrate-skills + * + * Skills with disableModelInvocation=true are excluded from the prompt + * (they can only be invoked explicitly via /skill:name commands). 
+ */ +export function formatSkillsForPrompt(skills: Skill[]): string { + const visibleSkills = skills.filter((s) => !s.disableModelInvocation); + + if (visibleSkills.length === 0) { + return ""; + } + + const lines = [ + "\n\nThe following skills provide specialized instructions for specific tasks.", + "Use the read tool to load a skill's file when the task matches its description.", + "When a skill file references a relative path, resolve it against the skill directory (parent of SKILL.md / dirname of the path) and use that absolute path in tool commands.", + "", + "", + ]; + + for (const skill of visibleSkills) { + lines.push(" "); + lines.push(` ${escapeXml(skill.name)}`); + lines.push(` ${escapeXml(skill.description)}`); + lines.push(` ${escapeXml(skill.filePath)}`); + lines.push(" "); + } + + lines.push(""); + + return lines.join("\n"); +} + +function escapeXml(str: string): string { + return str + .replace(/&/g, "&") + .replace(//g, ">") + .replace(/"/g, """) + .replace(/'/g, "'"); +} + +export interface LoadSkillsOptions { + /** Working directory for project-local skills. Default: process.cwd() */ + cwd?: string; + /** Agent config directory for global skills. Default: ~/.pi/agent */ + agentDir?: string; + /** Explicit skill paths (files or directories) */ + skillPaths?: string[]; + /** Include default skills directories. Default: true */ + includeDefaults?: boolean; +} + +function normalizePath(input: string): string { + const trimmed = input.trim(); + if (trimmed === "~") return homedir(); + if (trimmed.startsWith("~/")) return join(homedir(), trimmed.slice(2)); + if (trimmed.startsWith("~")) return join(homedir(), trimmed.slice(1)); + return trimmed; +} + +function resolveSkillPath(p: string, cwd: string): string { + const normalized = normalizePath(p); + return isAbsolute(normalized) ? normalized : resolve(cwd, normalized); +} + +/** + * Load skills from all configured locations. + * Returns skills and any validation diagnostics. 
+ */
+export function loadSkills(options: LoadSkillsOptions = {}): LoadSkillsResult {
+	const { cwd = process.cwd(), agentDir, skillPaths = [], includeDefaults = true } = options;
+
+	// Resolve agentDir - if not provided, use default from config
+	const resolvedAgentDir = agentDir ?? getAgentDir();
+
+	const skillMap = new Map<string, Skill>();
+	const realPathSet = new Set<string>();
+	const allDiagnostics: ResourceDiagnostic[] = [];
+	const collisionDiagnostics: ResourceDiagnostic[] = [];
+
+	function addSkills(result: LoadSkillsResult) {
+		allDiagnostics.push(...result.diagnostics);
+		for (const skill of result.skills) {
+			// Resolve symlinks to detect duplicate files
+			let realPath: string;
+			try {
+				realPath = realpathSync(skill.filePath);
+			} catch {
+				realPath = skill.filePath;
+			}
+
+			// Skip silently if we've already loaded this exact file (via symlink)
+			if (realPathSet.has(realPath)) {
+				continue;
+			}
+
+			const existing = skillMap.get(skill.name);
+			if (existing) {
+				collisionDiagnostics.push({
+					type: "collision",
+					message: `name "${skill.name}" collision`,
+					path: skill.filePath,
+					collision: {
+						resourceType: "skill",
+						name: skill.name,
+						winnerPath: existing.filePath,
+						loserPath: skill.filePath,
+					},
+				});
+			} else {
+				skillMap.set(skill.name, skill);
+				realPathSet.add(realPath);
+			}
+		}
+	}
+
+	if (includeDefaults) {
+		addSkills(loadSkillsFromDirInternal(join(resolvedAgentDir, "skills"), "user", true));
+		addSkills(loadSkillsFromDirInternal(resolve(cwd, CONFIG_DIR_NAME, "skills"), "project", true));
+	}
+
+	const userSkillsDir = join(resolvedAgentDir, "skills");
+	const projectSkillsDir = resolve(cwd, CONFIG_DIR_NAME, "skills");
+
+	const isUnderPath = (target: string, root: string): boolean => {
+		const normalizedRoot = resolve(root);
+		if (target === normalizedRoot) {
+			return true;
+		}
+		const prefix = normalizedRoot.endsWith(sep) ?
normalizedRoot : `${normalizedRoot}${sep}`; + return target.startsWith(prefix); + }; + + const getSource = (resolvedPath: string): "user" | "project" | "path" => { + if (!includeDefaults) { + if (isUnderPath(resolvedPath, userSkillsDir)) return "user"; + if (isUnderPath(resolvedPath, projectSkillsDir)) return "project"; + } + return "path"; + }; + + for (const rawPath of skillPaths) { + const resolvedPath = resolveSkillPath(rawPath, cwd); + if (!existsSync(resolvedPath)) { + allDiagnostics.push({ type: "warning", message: "skill path does not exist", path: resolvedPath }); + continue; + } + + try { + const stats = statSync(resolvedPath); + const source = getSource(resolvedPath); + if (stats.isDirectory()) { + addSkills(loadSkillsFromDirInternal(resolvedPath, source, true)); + } else if (stats.isFile() && resolvedPath.endsWith(".md")) { + const result = loadSkillFromFile(resolvedPath, source); + if (result.skill) { + addSkills({ skills: [result.skill], diagnostics: result.diagnostics }); + } else { + allDiagnostics.push(...result.diagnostics); + } + } else { + allDiagnostics.push({ type: "warning", message: "skill path is not a markdown file", path: resolvedPath }); + } + } catch (error) { + const message = error instanceof Error ? 
error.message : "failed to read skill path";
+			allDiagnostics.push({ type: "warning", message, path: resolvedPath });
+		}
+	}
+
+	return {
+		skills: Array.from(skillMap.values()),
+		diagnostics: [...allDiagnostics, ...collisionDiagnostics],
+	};
+}
diff --git a/packages/pi-coding-agent/src/core/slash-commands.ts b/packages/pi-coding-agent/src/core/slash-commands.ts
new file mode 100644
index 000000000..0f814a6ba
--- /dev/null
+++ b/packages/pi-coding-agent/src/core/slash-commands.ts
@@ -0,0 +1,38 @@
+export type SlashCommandSource = "extension" | "prompt" | "skill";
+
+export type SlashCommandLocation = "user" | "project" | "path";
+
+export interface SlashCommandInfo {
+	name: string;
+	description?: string;
+	source: SlashCommandSource;
+	location?: SlashCommandLocation;
+	path?: string;
+}
+
+export interface BuiltinSlashCommand {
+	name: string;
+	description: string;
+}
+
+export const BUILTIN_SLASH_COMMANDS: ReadonlyArray<BuiltinSlashCommand> = [
+	{ name: "settings", description: "Open settings menu" },
+	{ name: "model", description: "Select model (opens selector UI)" },
+	{ name: "scoped-models", description: "Enable/disable models for Ctrl+P cycling" },
+	{ name: "export", description: "Export session to HTML file" },
+	{ name: "share", description: "Share session as a secret GitHub gist" },
+	{ name: "copy", description: "Copy last agent message to clipboard" },
+	{ name: "name", description: "Set session display name" },
+	{ name: "session", description: "Show session info and stats" },
+	{ name: "changelog", description: "Show changelog entries" },
+	{ name: "hotkeys", description: "Show all keyboard shortcuts" },
+	{ name: "fork", description: "Create a new fork from a previous message" },
+	{ name: "tree", description: "Navigate session tree (switch branches)" },
+	{ name: "login", description: "Login with OAuth provider" },
+	{ name: "logout", description: "Logout from OAuth provider" },
+	{ name: "new", description: "Start a new session" },
+	{ name: "compact",
description: "Manually compact the session context" },
+	{ name: "resume", description: "Resume a different session" },
+	{ name: "reload", description: "Reload extensions, skills, prompts, and themes" },
+	{ name: "quit", description: "Quit pi" },
+];
diff --git a/packages/pi-coding-agent/src/core/system-prompt.ts b/packages/pi-coding-agent/src/core/system-prompt.ts
new file mode 100644
index 000000000..949809765
--- /dev/null
+++ b/packages/pi-coding-agent/src/core/system-prompt.ts
@@ -0,0 +1,218 @@
+/**
+ * System prompt construction and project context loading
+ */
+
+import { getDocsPath, getExamplesPath, getReadmePath } from "../config.js";
+import { formatSkillsForPrompt, type Skill } from "./skills.js";
+
+/** Tool descriptions for system prompt */
+const toolDescriptions: Record<string, string> = {
+	read: "Read file contents",
+	bash: "Execute bash commands (ls, grep, find, etc.)",
+	edit: "Make surgical edits to files (find exact text and replace)",
+	write: "Create or overwrite files",
+	grep: "Search file contents for patterns (respects .gitignore)",
+	find: "Find files by glob pattern (respects .gitignore)",
+	ls: "List directory contents",
+};
+
+export interface BuildSystemPromptOptions {
+	/** Custom system prompt (replaces default). */
+	customPrompt?: string;
+	/** Tools to include in prompt. Default: [read, bash, edit, write] */
+	selectedTools?: string[];
+	/** Optional one-line tool snippets keyed by tool name. */
+	toolSnippets?: Record<string, string>;
+	/** Additional guideline bullets appended to the default system prompt guidelines. */
+	promptGuidelines?: string[];
+	/** Text to append to system prompt. */
+	appendSystemPrompt?: string;
+	/** Working directory. Default: process.cwd() */
+	cwd?: string;
+	/** Pre-loaded context files. */
+	contextFiles?: Array<{ path: string; content: string }>;
+	/** Pre-loaded skills.
*/ + skills?: Skill[]; +} + +/** Build the system prompt with tools, guidelines, and context */ +export function buildSystemPrompt(options: BuildSystemPromptOptions = {}): string { + const { + customPrompt, + selectedTools, + toolSnippets, + promptGuidelines, + appendSystemPrompt, + cwd, + contextFiles: providedContextFiles, + skills: providedSkills, + } = options; + const resolvedCwd = cwd ?? process.cwd(); + + const now = new Date(); + const dateTime = now.toLocaleString("en-US", { + weekday: "long", + year: "numeric", + month: "long", + day: "numeric", + hour: "2-digit", + minute: "2-digit", + second: "2-digit", + timeZoneName: "short", + }); + + const appendSection = appendSystemPrompt ? `\n\n${appendSystemPrompt}` : ""; + + const contextFiles = providedContextFiles ?? []; + const skills = providedSkills ?? []; + + if (customPrompt) { + let prompt = customPrompt; + + if (appendSection) { + prompt += appendSection; + } + + // Append project context files + if (contextFiles.length > 0) { + prompt += "\n\n# Project Context\n\n"; + prompt += "Project-specific instructions and guidelines:\n\n"; + for (const { path: filePath, content } of contextFiles) { + prompt += `## ${filePath}\n\n${content}\n\n`; + } + } + + // Append skills section (only if read tool is available) + const customPromptHasRead = !selectedTools || selectedTools.includes("read"); + if (customPromptHasRead && skills.length > 0) { + prompt += formatSkillsForPrompt(skills); + } + + // Add date/time and working directory last + prompt += `\nCurrent date and time: ${dateTime}`; + prompt += `\nCurrent working directory: ${resolvedCwd}`; + + return prompt; + } + + // Get absolute paths to documentation and examples + const readmePath = getReadmePath(); + const docsPath = getDocsPath(); + const examplesPath = getExamplesPath(); + + // Build tools list based on selected tools. + // Built-ins use toolDescriptions. Custom tools can provide one-line snippets. 
+ const tools = selectedTools || ["read", "bash", "edit", "write"]; + const toolsList = + tools.length > 0 + ? tools + .map((name) => { + const snippet = toolSnippets?.[name] ?? toolDescriptions[name] ?? name; + return `- ${name}: ${snippet}`; + }) + .join("\n") + : "(none)"; + + // Build guidelines based on which tools are actually available + const guidelinesList: string[] = []; + const guidelinesSet = new Set(); + const addGuideline = (guideline: string): void => { + if (guidelinesSet.has(guideline)) { + return; + } + guidelinesSet.add(guideline); + guidelinesList.push(guideline); + }; + + const hasBash = tools.includes("bash"); + const hasEdit = tools.includes("edit"); + const hasWrite = tools.includes("write"); + const hasGrep = tools.includes("grep"); + const hasFind = tools.includes("find"); + const hasLs = tools.includes("ls"); + const hasRead = tools.includes("read"); + + // File exploration guidelines + if (hasBash && !hasGrep && !hasFind && !hasLs) { + addGuideline("Use bash for file operations like ls, rg, find"); + } else if (hasBash && (hasGrep || hasFind || hasLs)) { + addGuideline("Prefer grep/find/ls tools over bash for file exploration (faster, respects .gitignore)"); + } + + // Read before edit guideline + if (hasRead && hasEdit) { + addGuideline("Use read to examine files before editing. You must use this tool instead of cat or sed."); + } + + // Edit guideline + if (hasEdit) { + addGuideline("Use edit for precise changes (old text must match exactly)"); + } + + // Write guideline + if (hasWrite) { + addGuideline("Use write only for new files or complete rewrites"); + } + + // Output guideline (only when actually writing or executing) + if (hasEdit || hasWrite) { + addGuideline( + "When summarizing your actions, output plain text directly - do NOT use cat or bash to display what you did", + ); + } + + for (const guideline of promptGuidelines ?? 
[]) { + const normalized = guideline.trim(); + if (normalized.length > 0) { + addGuideline(normalized); + } + } + + // Always include these + addGuideline("Be concise in your responses"); + addGuideline("Show file paths clearly when working with files"); + + const guidelines = guidelinesList.map((g) => `- ${g}`).join("\n"); + + let prompt = `You are an expert coding assistant operating inside pi, a coding agent harness. You help users by reading files, executing commands, editing code, and writing new files. + +Available tools: +${toolsList} + +In addition to the tools above, you may have access to other custom tools depending on the project. + +Guidelines: +${guidelines} + +Pi documentation (read only when the user asks about pi itself, its SDK, extensions, themes, skills, or TUI): +- Main documentation: ${readmePath} +- Additional docs: ${docsPath} +- Examples: ${examplesPath} (extensions, custom tools, SDK) +- When asked about: extensions (docs/extensions.md, examples/extensions/), themes (docs/themes.md), skills (docs/skills.md), prompt templates (docs/prompt-templates.md), TUI components (docs/tui.md), keybindings (docs/keybindings.md), SDK integrations (docs/sdk.md), custom providers (docs/custom-provider.md), adding models (docs/models.md), pi packages (docs/packages.md) +- When working on pi topics, read the docs and examples, and follow .md cross-references before implementing +- Always read pi .md files completely and follow links to related docs (e.g., tui.md for TUI API details)`; + + if (appendSection) { + prompt += appendSection; + } + + // Append project context files + if (contextFiles.length > 0) { + prompt += "\n\n# Project Context\n\n"; + prompt += "Project-specific instructions and guidelines:\n\n"; + for (const { path: filePath, content } of contextFiles) { + prompt += `## ${filePath}\n\n${content}\n\n`; + } + } + + // Append skills section (only if read tool is available) + if (hasRead && skills.length > 0) { + prompt += 
formatSkillsForPrompt(skills); + } + + // Add date/time and working directory last + prompt += `\nCurrent date and time: ${dateTime}`; + prompt += `\nCurrent working directory: ${resolvedCwd}`; + + return prompt; +} diff --git a/packages/pi-coding-agent/src/core/timings.ts b/packages/pi-coding-agent/src/core/timings.ts new file mode 100644 index 000000000..4ef5fc8cf --- /dev/null +++ b/packages/pi-coding-agent/src/core/timings.ts @@ -0,0 +1,25 @@ +/** + * Central timing instrumentation for startup profiling. + * Enable with PI_TIMING=1 environment variable. + */ + +const ENABLED = process.env.PI_TIMING === "1"; +const timings: Array<{ label: string; ms: number }> = []; +let lastTime = Date.now(); + +export function time(label: string): void { + if (!ENABLED) return; + const now = Date.now(); + timings.push({ label, ms: now - lastTime }); + lastTime = now; +} + +export function printTimings(): void { + if (!ENABLED || timings.length === 0) return; + console.error("\n--- Startup Timings ---"); + for (const t of timings) { + console.error(` ${t.label}: ${t.ms}ms`); + } + console.error(` TOTAL: ${timings.reduce((a, b) => a + b.ms, 0)}ms`); + console.error("------------------------\n"); +} diff --git a/packages/pi-coding-agent/src/core/tools/bash.ts b/packages/pi-coding-agent/src/core/tools/bash.ts new file mode 100644 index 000000000..e75aad44c --- /dev/null +++ b/packages/pi-coding-agent/src/core/tools/bash.ts @@ -0,0 +1,347 @@ +import { randomBytes } from "node:crypto"; +import { createWriteStream, existsSync } from "node:fs"; +import { createRequire } from "node:module"; +import { tmpdir } from "node:os"; +import { join } from "node:path"; +import type { AgentTool } from "@gsd/pi-agent-core"; +import { type Static, Type } from "@sinclair/typebox"; +import { spawn } from "child_process"; +import { getShellConfig, getShellEnv, killProcessTree } from "../../utils/shell.js"; +import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize, type TruncationResult, truncateTail 
} from "./truncate.js";
+
+// Cached Win32 FFI handles for restoring VT input after child processes
+let _vtHandles: { GetConsoleMode: any; SetConsoleMode: any; handle: any } | null = null;
+function restoreWindowsVTInput(): void {
+	if (process.platform !== "win32") return;
+	try {
+		if (!_vtHandles) {
+			const cjsRequire = createRequire(import.meta.url);
+			const koffi = cjsRequire("koffi");
+			const k32 = koffi.load("kernel32.dll");
+			const GetStdHandle = k32.func("void* __stdcall GetStdHandle(int)");
+			const GetConsoleMode = k32.func("bool __stdcall GetConsoleMode(void*, _Out_ uint32_t*)");
+			const SetConsoleMode = k32.func("bool __stdcall SetConsoleMode(void*, uint32_t)");
+			const handle = GetStdHandle(-10);
+			_vtHandles = { GetConsoleMode, SetConsoleMode, handle };
+		}
+		const ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200;
+		const mode = new Uint32Array(1);
+		_vtHandles.GetConsoleMode(_vtHandles.handle, mode);
+		if (!(mode[0]! & ENABLE_VIRTUAL_TERMINAL_INPUT)) {
+			_vtHandles.SetConsoleMode(_vtHandles.handle, mode[0]! | ENABLE_VIRTUAL_TERMINAL_INPUT);
+		}
+	} catch { /* koffi not available */ }
+}
+
+/**
+ * Generate a unique temp file path for bash output
+ */
+function getTempFilePath(): string {
+	const id = randomBytes(8).toString("hex");
+	return join(tmpdir(), `pi-bash-${id}.log`);
+}
+
+const bashSchema = Type.Object({
+	command: Type.String({ description: "Bash command to execute" }),
+	timeout: Type.Optional(Type.Number({ description: "Timeout in seconds (optional, no default timeout)" })),
+});
+
+export type BashToolInput = Static<typeof bashSchema>;
+
+export interface BashToolDetails {
+	truncation?: TruncationResult;
+	fullOutputPath?: string;
+}
+
+/**
+ * Pluggable operations for the bash tool.
+ * Override these to delegate command execution to remote systems (e.g., SSH).
+ */
+export interface BashOperations {
+	/**
+	 * Execute a command and stream output.
+ * @param command - The command to execute + * @param cwd - Working directory + * @param options - Execution options + * @returns Promise resolving to exit code (null if killed) + */ + exec: ( + command: string, + cwd: string, + options: { + onData: (data: Buffer) => void; + signal?: AbortSignal; + timeout?: number; + env?: NodeJS.ProcessEnv; + }, + ) => Promise<{ exitCode: number | null }>; +} + +/** + * Default bash operations using local shell + */ +const defaultBashOperations: BashOperations = { + exec: (command, cwd, { onData, signal, timeout, env }) => { + return new Promise((resolve, reject) => { + const { shell, args } = getShellConfig(); + + if (!existsSync(cwd)) { + reject(new Error(`Working directory does not exist: ${cwd}\nCannot execute bash commands.`)); + return; + } + + const child = spawn(shell, [...args, command], { + cwd, + detached: true, + env: env ?? getShellEnv(), + stdio: ["ignore", "pipe", "pipe"], + }); + + let timedOut = false; + + // Set timeout if provided + let timeoutHandle: NodeJS.Timeout | undefined; + if (timeout !== undefined && timeout > 0) { + timeoutHandle = setTimeout(() => { + timedOut = true; + if (child.pid) { + killProcessTree(child.pid); + } + }, timeout * 1000); + } + + // Stream stdout and stderr + if (child.stdout) { + child.stdout.on("data", onData); + } + if (child.stderr) { + child.stderr.on("data", onData); + } + + // Handle shell spawn errors + child.on("error", (err) => { + if (timeoutHandle) clearTimeout(timeoutHandle); + if (signal) signal.removeEventListener("abort", onAbort); + reject(err); + }); + + // Handle abort signal - kill entire process tree + const onAbort = () => { + if (child.pid) { + killProcessTree(child.pid); + } + }; + + if (signal) { + if (signal.aborted) { + onAbort(); + } else { + signal.addEventListener("abort", onAbort, { once: true }); + } + } + + // Handle process exit + child.on("close", (code) => { + restoreWindowsVTInput(); + if (timeoutHandle) clearTimeout(timeoutHandle); + if 
(signal) signal.removeEventListener("abort", onAbort); + + if (signal?.aborted) { + reject(new Error("aborted")); + return; + } + + if (timedOut) { + reject(new Error(`timeout:${timeout}`)); + return; + } + + resolve({ exitCode: code }); + }); + }); + }, +}; + +export interface BashSpawnContext { + command: string; + cwd: string; + env: NodeJS.ProcessEnv; +} + +export type BashSpawnHook = (context: BashSpawnContext) => BashSpawnContext; + +function resolveSpawnContext(command: string, cwd: string, spawnHook?: BashSpawnHook): BashSpawnContext { + const baseContext: BashSpawnContext = { + command, + cwd, + env: { ...getShellEnv() }, + }; + + return spawnHook ? spawnHook(baseContext) : baseContext; +} + +export interface BashToolOptions { + /** Custom operations for command execution. Default: local shell */ + operations?: BashOperations; + /** Command prefix prepended to every command (e.g., "shopt -s expand_aliases" for alias support) */ + commandPrefix?: string; + /** Hook to adjust command, cwd, or env before execution */ + spawnHook?: BashSpawnHook; +} + +export function createBashTool(cwd: string, options?: BashToolOptions): AgentTool { + const ops = options?.operations ?? defaultBashOperations; + const commandPrefix = options?.commandPrefix; + const spawnHook = options?.spawnHook; + + return { + name: "bash", + label: "bash", + description: `Execute a bash command in the current working directory. Returns stdout and stderr. Output is truncated to last ${DEFAULT_MAX_LINES} lines or ${DEFAULT_MAX_BYTES / 1024}KB (whichever is hit first). If truncated, full output is saved to a temp file. Optionally provide a timeout in seconds.`, + parameters: bashSchema, + execute: async ( + _toolCallId: string, + { command, timeout }: { command: string; timeout?: number }, + signal?: AbortSignal, + onUpdate?, + ) => { + // Apply command prefix if configured (e.g., "shopt -s expand_aliases" for alias support) + const resolvedCommand = commandPrefix ? 
`${commandPrefix}\n${command}` : command;
+			const spawnContext = resolveSpawnContext(resolvedCommand, cwd, spawnHook);
+
+			return new Promise((resolve, reject) => {
+				// We'll stream to a temp file if output gets large
+				let tempFilePath: string | undefined;
+				let tempFileStream: ReturnType<typeof createWriteStream> | undefined;
+				let totalBytes = 0;
+
+				// Keep a rolling buffer of the last chunk for tail truncation
+				const chunks: Buffer[] = [];
+				let chunksBytes = 0;
+				// Keep more than we need so we have enough for truncation
+				const maxChunksBytes = DEFAULT_MAX_BYTES * 2;
+
+				const handleData = (data: Buffer) => {
+					totalBytes += data.length;
+
+					// Start writing to temp file once we exceed the threshold
+					if (totalBytes > DEFAULT_MAX_BYTES && !tempFilePath) {
+						tempFilePath = getTempFilePath();
+						tempFileStream = createWriteStream(tempFilePath);
+						// Write all buffered chunks to the file
+						for (const chunk of chunks) {
+							tempFileStream.write(chunk);
+						}
+					}
+
+					// Write to temp file if we have one
+					if (tempFileStream) {
+						tempFileStream.write(data);
+					}
+
+					// Keep rolling buffer of recent data
+					chunks.push(data);
+					chunksBytes += data.length;
+
+					// Trim old chunks if buffer is too large
+					while (chunksBytes > maxChunksBytes && chunks.length > 1) {
+						const removed = chunks.shift()!;
+						chunksBytes -= removed.length;
+					}
+
+					// Stream partial output to callback (truncated rolling buffer)
+					if (onUpdate) {
+						const fullBuffer = Buffer.concat(chunks);
+						const fullText = fullBuffer.toString("utf-8");
+						const truncation = truncateTail(fullText);
+						onUpdate({
+							content: [{ type: "text", text: truncation.content || "" }],
+							details: {
+								truncation: truncation.truncated ?
truncation : undefined, + fullOutputPath: tempFilePath, + }, + }); + } + }; + + ops.exec(spawnContext.command, spawnContext.cwd, { + onData: handleData, + signal, + timeout, + env: spawnContext.env, + }) + .then(({ exitCode }) => { + // Close temp file stream + if (tempFileStream) { + tempFileStream.end(); + } + + // Combine all buffered chunks + const fullBuffer = Buffer.concat(chunks); + const fullOutput = fullBuffer.toString("utf-8"); + + // Apply tail truncation + const truncation = truncateTail(fullOutput); + let outputText = truncation.content || "(no output)"; + + // Build details with truncation info + let details: BashToolDetails | undefined; + + if (truncation.truncated) { + details = { + truncation, + fullOutputPath: tempFilePath, + }; + + // Build actionable notice + const startLine = truncation.totalLines - truncation.outputLines + 1; + const endLine = truncation.totalLines; + + if (truncation.lastLinePartial) { + // Edge case: last line alone > 30KB + const lastLineSize = formatSize(Buffer.byteLength(fullOutput.split("\n").pop() || "", "utf-8")); + outputText += `\n\n[Showing last ${formatSize(truncation.outputBytes)} of line ${endLine} (line is ${lastLineSize}). Full output: ${tempFilePath}]`; + } else if (truncation.truncatedBy === "lines") { + outputText += `\n\n[Showing lines ${startLine}-${endLine} of ${truncation.totalLines}. Full output: ${tempFilePath}]`; + } else { + outputText += `\n\n[Showing lines ${startLine}-${endLine} of ${truncation.totalLines} (${formatSize(DEFAULT_MAX_BYTES)} limit). 
Full output: ${tempFilePath}]`; + } + } + + if (exitCode !== 0 && exitCode !== null) { + outputText += `\n\nCommand exited with code ${exitCode}`; + reject(new Error(outputText)); + } else { + resolve({ content: [{ type: "text", text: outputText }], details }); + } + }) + .catch((err: Error) => { + // Close temp file stream + if (tempFileStream) { + tempFileStream.end(); + } + + // Combine all buffered chunks for error output + const fullBuffer = Buffer.concat(chunks); + let output = fullBuffer.toString("utf-8"); + + if (err.message === "aborted") { + if (output) output += "\n\n"; + output += "Command aborted"; + reject(new Error(output)); + } else if (err.message.startsWith("timeout:")) { + const timeoutSecs = err.message.split(":")[1]; + if (output) output += "\n\n"; + output += `Command timed out after ${timeoutSecs} seconds`; + reject(new Error(output)); + } else { + reject(err); + } + }); + }); + }, + }; +} + +/** Default bash tool using process.cwd() - for backwards compatibility */ +export const bashTool = createBashTool(process.cwd()); diff --git a/packages/pi-coding-agent/src/core/tools/edit-diff.ts b/packages/pi-coding-agent/src/core/tools/edit-diff.ts new file mode 100644 index 000000000..17f017bf9 --- /dev/null +++ b/packages/pi-coding-agent/src/core/tools/edit-diff.ts @@ -0,0 +1,308 @@ +/** + * Shared diff computation utilities for the edit tool. + * Used by both edit.ts (for execution) and tool-execution.ts (for preview rendering). + */ + +import * as Diff from "diff"; +import { constants } from "fs"; +import { access, readFile } from "fs/promises"; +import { resolveToCwd } from "./path-utils.js"; + +export function detectLineEnding(content: string): "\r\n" | "\n" { + const crlfIdx = content.indexOf("\r\n"); + const lfIdx = content.indexOf("\n"); + if (lfIdx === -1) return "\n"; + if (crlfIdx === -1) return "\n"; + return crlfIdx < lfIdx ? 
"\r\n" : "\n"; +} + +export function normalizeToLF(text: string): string { + return text.replace(/\r\n/g, "\n").replace(/\r/g, "\n"); +} + +export function restoreLineEndings(text: string, ending: "\r\n" | "\n"): string { + return ending === "\r\n" ? text.replace(/\n/g, "\r\n") : text; +} + +/** + * Normalize text for fuzzy matching. Applies progressive transformations: + * - Strip trailing whitespace from each line + * - Normalize smart quotes to ASCII equivalents + * - Normalize Unicode dashes/hyphens to ASCII hyphen + * - Normalize special Unicode spaces to regular space + */ +export function normalizeForFuzzyMatch(text: string): string { + return ( + text + // Strip trailing whitespace per line + .split("\n") + .map((line) => line.trimEnd()) + .join("\n") + // Smart single quotes → ' + .replace(/[\u2018\u2019\u201A\u201B]/g, "'") + // Smart double quotes → " + .replace(/[\u201C\u201D\u201E\u201F]/g, '"') + // Various dashes/hyphens → - + // U+2010 hyphen, U+2011 non-breaking hyphen, U+2012 figure dash, + // U+2013 en-dash, U+2014 em-dash, U+2015 horizontal bar, U+2212 minus + .replace(/[\u2010\u2011\u2012\u2013\u2014\u2015\u2212]/g, "-") + // Special spaces → regular space + // U+00A0 NBSP, U+2002-U+200A various spaces, U+202F narrow NBSP, + // U+205F medium math space, U+3000 ideographic space + .replace(/[\u00A0\u2002-\u200A\u202F\u205F\u3000]/g, " ") + ); +} + +export interface FuzzyMatchResult { + /** Whether a match was found */ + found: boolean; + /** The index where the match starts (in the content that should be used for replacement) */ + index: number; + /** Length of the matched text */ + matchLength: number; + /** Whether fuzzy matching was used (false = exact match) */ + usedFuzzyMatch: boolean; + /** + * The content to use for replacement operations. + * When exact match: original content. When fuzzy match: normalized content. + */ + contentForReplacement: string; +} + +/** + * Find oldText in content, trying exact match first, then fuzzy match. 
+ * When fuzzy matching is used, the returned contentForReplacement is the + * fuzzy-normalized version of the content (trailing whitespace stripped, + * Unicode quotes/dashes normalized to ASCII). + */ +export function fuzzyFindText(content: string, oldText: string): FuzzyMatchResult { + // Try exact match first + const exactIndex = content.indexOf(oldText); + if (exactIndex !== -1) { + return { + found: true, + index: exactIndex, + matchLength: oldText.length, + usedFuzzyMatch: false, + contentForReplacement: content, + }; + } + + // Try fuzzy match - work entirely in normalized space + const fuzzyContent = normalizeForFuzzyMatch(content); + const fuzzyOldText = normalizeForFuzzyMatch(oldText); + const fuzzyIndex = fuzzyContent.indexOf(fuzzyOldText); + + if (fuzzyIndex === -1) { + return { + found: false, + index: -1, + matchLength: 0, + usedFuzzyMatch: false, + contentForReplacement: content, + }; + } + + // When fuzzy matching, we work in the normalized space for replacement. + // This means the output will have normalized whitespace/quotes/dashes, + // which is acceptable since we're fixing minor formatting differences anyway. + return { + found: true, + index: fuzzyIndex, + matchLength: fuzzyOldText.length, + usedFuzzyMatch: true, + contentForReplacement: fuzzyContent, + }; +} + +/** Strip UTF-8 BOM if present, return both the BOM (if any) and the text without it */ +export function stripBom(content: string): { bom: string; text: string } { + return content.startsWith("\uFEFF") ? { bom: "\uFEFF", text: content.slice(1) } : { bom: "", text: content }; +} + +/** + * Generate a unified diff string with line numbers and context. + * Returns both the diff string and the first changed line number (in the new file). 
+ */ +export function generateDiffString( + oldContent: string, + newContent: string, + contextLines = 4, +): { diff: string; firstChangedLine: number | undefined } { + const parts = Diff.diffLines(oldContent, newContent); + const output: string[] = []; + + const oldLines = oldContent.split("\n"); + const newLines = newContent.split("\n"); + const maxLineNum = Math.max(oldLines.length, newLines.length); + const lineNumWidth = String(maxLineNum).length; + + let oldLineNum = 1; + let newLineNum = 1; + let lastWasChange = false; + let firstChangedLine: number | undefined; + + for (let i = 0; i < parts.length; i++) { + const part = parts[i]; + const raw = part.value.split("\n"); + if (raw[raw.length - 1] === "") { + raw.pop(); + } + + if (part.added || part.removed) { + // Capture the first changed line (in the new file) + if (firstChangedLine === undefined) { + firstChangedLine = newLineNum; + } + + // Show the change + for (const line of raw) { + if (part.added) { + const lineNum = String(newLineNum).padStart(lineNumWidth, " "); + output.push(`+${lineNum} ${line}`); + newLineNum++; + } else { + // removed + const lineNum = String(oldLineNum).padStart(lineNumWidth, " "); + output.push(`-${lineNum} ${line}`); + oldLineNum++; + } + } + lastWasChange = true; + } else { + // Context lines - only show a few before/after changes + const nextPartIsChange = i < parts.length - 1 && (parts[i + 1].added || parts[i + 1].removed); + + if (lastWasChange || nextPartIsChange) { + // Show context + let linesToShow = raw; + let skipStart = 0; + let skipEnd = 0; + + if (!lastWasChange) { + // Show only last N lines as leading context + skipStart = Math.max(0, raw.length - contextLines); + linesToShow = raw.slice(skipStart); + } + + if (!nextPartIsChange && linesToShow.length > contextLines) { + // Show only first N lines as trailing context + skipEnd = linesToShow.length - contextLines; + linesToShow = linesToShow.slice(0, contextLines); + } + + // Add ellipsis if we skipped lines at 
start + if (skipStart > 0) { + output.push(` ${"".padStart(lineNumWidth, " ")} ...`); + // Update line numbers for the skipped leading context + oldLineNum += skipStart; + newLineNum += skipStart; + } + + for (const line of linesToShow) { + const lineNum = String(oldLineNum).padStart(lineNumWidth, " "); + output.push(` ${lineNum} ${line}`); + oldLineNum++; + newLineNum++; + } + + // Add ellipsis if we skipped lines at end + if (skipEnd > 0) { + output.push(` ${"".padStart(lineNumWidth, " ")} ...`); + // Update line numbers for the skipped trailing context + oldLineNum += skipEnd; + newLineNum += skipEnd; + } + } else { + // Skip these context lines entirely + oldLineNum += raw.length; + newLineNum += raw.length; + } + + lastWasChange = false; + } + } + + return { diff: output.join("\n"), firstChangedLine }; +} + +export interface EditDiffResult { + diff: string; + firstChangedLine: number | undefined; +} + +export interface EditDiffError { + error: string; +} + +/** + * Compute the diff for an edit operation without applying it. + * Used for preview rendering in the TUI before the tool executes. 
+ */ +export async function computeEditDiff( + path: string, + oldText: string, + newText: string, + cwd: string, +): Promise { + const absolutePath = resolveToCwd(path, cwd); + + try { + // Check if file exists and is readable + try { + await access(absolutePath, constants.R_OK); + } catch { + return { error: `File not found: ${path}` }; + } + + // Read the file + const rawContent = await readFile(absolutePath, "utf-8"); + + // Strip BOM before matching (LLM won't include invisible BOM in oldText) + const { text: content } = stripBom(rawContent); + + const normalizedContent = normalizeToLF(content); + const normalizedOldText = normalizeToLF(oldText); + const normalizedNewText = normalizeToLF(newText); + + // Find the old text using fuzzy matching (tries exact match first, then fuzzy) + const matchResult = fuzzyFindText(normalizedContent, normalizedOldText); + + if (!matchResult.found) { + return { + error: `Could not find the exact text in ${path}. The old text must match exactly including all whitespace and newlines.`, + }; + } + + // Count occurrences using fuzzy-normalized content for consistency + const fuzzyContent = normalizeForFuzzyMatch(normalizedContent); + const fuzzyOldText = normalizeForFuzzyMatch(normalizedOldText); + const occurrences = fuzzyContent.split(fuzzyOldText).length - 1; + + if (occurrences > 1) { + return { + error: `Found ${occurrences} occurrences of the text in ${path}. The text must be unique. 
Please provide more context to make it unique.`, + }; + } + + // Compute the new content using the matched position + // When fuzzy matching was used, contentForReplacement is the normalized version + const baseContent = matchResult.contentForReplacement; + const newContent = + baseContent.substring(0, matchResult.index) + + normalizedNewText + + baseContent.substring(matchResult.index + matchResult.matchLength); + + // Check if it would actually change anything + if (baseContent === newContent) { + return { + error: `No changes would be made to ${path}. The replacement produces identical content.`, + }; + } + + // Generate the diff + return generateDiffString(baseContent, newContent); + } catch (err) { + return { error: err instanceof Error ? err.message : String(err) }; + } +} diff --git a/packages/pi-coding-agent/src/core/tools/edit.ts b/packages/pi-coding-agent/src/core/tools/edit.ts new file mode 100644 index 000000000..600f94bd0 --- /dev/null +++ b/packages/pi-coding-agent/src/core/tools/edit.ts @@ -0,0 +1,227 @@ +import type { AgentTool } from "@gsd/pi-agent-core"; +import { type Static, Type } from "@sinclair/typebox"; +import { constants } from "fs"; +import { access as fsAccess, readFile as fsReadFile, writeFile as fsWriteFile } from "fs/promises"; +import { + detectLineEnding, + fuzzyFindText, + generateDiffString, + normalizeForFuzzyMatch, + normalizeToLF, + restoreLineEndings, + stripBom, +} from "./edit-diff.js"; +import { resolveToCwd } from "./path-utils.js"; + +const editSchema = Type.Object({ + path: Type.String({ description: "Path to the file to edit (relative or absolute)" }), + oldText: Type.String({ description: "Exact text to find and replace (must match exactly)" }), + newText: Type.String({ description: "New text to replace the old text with" }), +}); + +export type EditToolInput = Static; + +export interface EditToolDetails { + /** Unified diff of the changes made */ + diff: string; + /** Line number of the first change in the new file 
(for editor navigation) */ + firstChangedLine?: number; +} + +/** + * Pluggable operations for the edit tool. + * Override these to delegate file editing to remote systems (e.g., SSH). + */ +export interface EditOperations { + /** Read file contents as a Buffer */ + readFile: (absolutePath: string) => Promise; + /** Write content to a file */ + writeFile: (absolutePath: string, content: string) => Promise; + /** Check if file is readable and writable (throw if not) */ + access: (absolutePath: string) => Promise; +} + +const defaultEditOperations: EditOperations = { + readFile: (path) => fsReadFile(path), + writeFile: (path, content) => fsWriteFile(path, content, "utf-8"), + access: (path) => fsAccess(path, constants.R_OK | constants.W_OK), +}; + +export interface EditToolOptions { + /** Custom operations for file editing. Default: local filesystem */ + operations?: EditOperations; +} + +export function createEditTool(cwd: string, options?: EditToolOptions): AgentTool { + const ops = options?.operations ?? defaultEditOperations; + + return { + name: "edit", + label: "edit", + description: + "Edit a file by replacing exact text. The oldText must match exactly (including whitespace). 
Use this for precise, surgical edits.", + parameters: editSchema, + execute: async ( + _toolCallId: string, + { path, oldText, newText }: { path: string; oldText: string; newText: string }, + signal?: AbortSignal, + ) => { + const absolutePath = resolveToCwd(path, cwd); + + return new Promise<{ + content: Array<{ type: "text"; text: string }>; + details: EditToolDetails | undefined; + }>((resolve, reject) => { + // Check if already aborted + if (signal?.aborted) { + reject(new Error("Operation aborted")); + return; + } + + let aborted = false; + + // Set up abort handler + const onAbort = () => { + aborted = true; + reject(new Error("Operation aborted")); + }; + + if (signal) { + signal.addEventListener("abort", onAbort, { once: true }); + } + + // Perform the edit operation + (async () => { + try { + // Check if file exists + try { + await ops.access(absolutePath); + } catch { + if (signal) { + signal.removeEventListener("abort", onAbort); + } + reject(new Error(`File not found: ${path}`)); + return; + } + + // Check if aborted before reading + if (aborted) { + return; + } + + // Read the file + const buffer = await ops.readFile(absolutePath); + const rawContent = buffer.toString("utf-8"); + + // Check if aborted after reading + if (aborted) { + return; + } + + // Strip BOM before matching (LLM won't include invisible BOM in oldText) + const { bom, text: content } = stripBom(rawContent); + + const originalEnding = detectLineEnding(content); + const normalizedContent = normalizeToLF(content); + const normalizedOldText = normalizeToLF(oldText); + const normalizedNewText = normalizeToLF(newText); + + // Find the old text using fuzzy matching (tries exact match first, then fuzzy) + const matchResult = fuzzyFindText(normalizedContent, normalizedOldText); + + if (!matchResult.found) { + if (signal) { + signal.removeEventListener("abort", onAbort); + } + reject( + new Error( + `Could not find the exact text in ${path}. 
The old text must match exactly including all whitespace and newlines.`, + ), + ); + return; + } + + // Count occurrences using fuzzy-normalized content for consistency + const fuzzyContent = normalizeForFuzzyMatch(normalizedContent); + const fuzzyOldText = normalizeForFuzzyMatch(normalizedOldText); + const occurrences = fuzzyContent.split(fuzzyOldText).length - 1; + + if (occurrences > 1) { + if (signal) { + signal.removeEventListener("abort", onAbort); + } + reject( + new Error( + `Found ${occurrences} occurrences of the text in ${path}. The text must be unique. Please provide more context to make it unique.`, + ), + ); + return; + } + + // Check if aborted before writing + if (aborted) { + return; + } + + // Perform replacement using the matched text position + // When fuzzy matching was used, contentForReplacement is the normalized version + const baseContent = matchResult.contentForReplacement; + const newContent = + baseContent.substring(0, matchResult.index) + + normalizedNewText + + baseContent.substring(matchResult.index + matchResult.matchLength); + + // Verify the replacement actually changed something + if (baseContent === newContent) { + if (signal) { + signal.removeEventListener("abort", onAbort); + } + reject( + new Error( + `No changes made to ${path}. The replacement produced identical content. 
This might indicate an issue with special characters or the text not existing as expected.`, + ), + ); + return; + } + + const finalContent = bom + restoreLineEndings(newContent, originalEnding); + await ops.writeFile(absolutePath, finalContent); + + // Check if aborted after writing + if (aborted) { + return; + } + + // Clean up abort handler + if (signal) { + signal.removeEventListener("abort", onAbort); + } + + const diffResult = generateDiffString(baseContent, newContent); + resolve({ + content: [ + { + type: "text", + text: `Successfully replaced text in ${path}.`, + }, + ], + details: { diff: diffResult.diff, firstChangedLine: diffResult.firstChangedLine }, + }); + } catch (error: any) { + // Clean up abort handler + if (signal) { + signal.removeEventListener("abort", onAbort); + } + + if (!aborted) { + reject(error); + } + } + })(); + }); + }, + }; +} + +/** Default edit tool using process.cwd() - for backwards compatibility */ +export const editTool = createEditTool(process.cwd()); diff --git a/packages/pi-coding-agent/src/core/tools/find.ts b/packages/pi-coding-agent/src/core/tools/find.ts new file mode 100644 index 000000000..84dfcc4ad --- /dev/null +++ b/packages/pi-coding-agent/src/core/tools/find.ts @@ -0,0 +1,273 @@ +import type { AgentTool } from "@gsd/pi-agent-core"; +import { type Static, Type } from "@sinclair/typebox"; +import { spawnSync } from "child_process"; +import { existsSync } from "fs"; +import { globSync } from "glob"; +import path from "path"; +import { ensureTool } from "../../utils/tools-manager.js"; +import { resolveToCwd } from "./path-utils.js"; +import { DEFAULT_MAX_BYTES, formatSize, type TruncationResult, truncateHead } from "./truncate.js"; + +const findSchema = Type.Object({ + pattern: Type.String({ + description: "Glob pattern to match files, e.g. 
'*.ts', '**/*.json', or 'src/**/*.spec.ts'", + }), + path: Type.Optional(Type.String({ description: "Directory to search in (default: current directory)" })), + limit: Type.Optional(Type.Number({ description: "Maximum number of results (default: 1000)" })), +}); + +export type FindToolInput = Static; + +const DEFAULT_LIMIT = 1000; + +export interface FindToolDetails { + truncation?: TruncationResult; + resultLimitReached?: number; +} + +/** + * Pluggable operations for the find tool. + * Override these to delegate file search to remote systems (e.g., SSH). + */ +export interface FindOperations { + /** Check if path exists */ + exists: (absolutePath: string) => Promise | boolean; + /** Find files matching glob pattern. Returns relative paths. */ + glob: (pattern: string, cwd: string, options: { ignore: string[]; limit: number }) => Promise | string[]; +} + +const defaultFindOperations: FindOperations = { + exists: existsSync, + glob: (_pattern, _searchCwd, _options) => { + // This is a placeholder - actual fd execution happens in execute + return []; + }, +}; + +export interface FindToolOptions { + /** Custom operations for find. Default: local filesystem + fd */ + operations?: FindOperations; +} + +export function createFindTool(cwd: string, options?: FindToolOptions): AgentTool { + const customOps = options?.operations; + + return { + name: "find", + label: "find", + description: `Search for files by glob pattern. Returns matching file paths relative to the search directory. Respects .gitignore. 
Output is truncated to ${DEFAULT_LIMIT} results or ${DEFAULT_MAX_BYTES / 1024}KB (whichever is hit first).`, + parameters: findSchema, + execute: async ( + _toolCallId: string, + { pattern, path: searchDir, limit }: { pattern: string; path?: string; limit?: number }, + signal?: AbortSignal, + ) => { + return new Promise((resolve, reject) => { + if (signal?.aborted) { + reject(new Error("Operation aborted")); + return; + } + + const onAbort = () => reject(new Error("Operation aborted")); + signal?.addEventListener("abort", onAbort, { once: true }); + + (async () => { + try { + const searchPath = resolveToCwd(searchDir || ".", cwd); + const effectiveLimit = limit ?? DEFAULT_LIMIT; + const ops = customOps ?? defaultFindOperations; + + // If custom operations provided with glob, use that + if (customOps?.glob) { + if (!(await ops.exists(searchPath))) { + reject(new Error(`Path not found: ${searchPath}`)); + return; + } + + const results = await ops.glob(pattern, searchPath, { + ignore: ["**/node_modules/**", "**/.git/**"], + limit: effectiveLimit, + }); + + signal?.removeEventListener("abort", onAbort); + + if (results.length === 0) { + resolve({ + content: [{ type: "text", text: "No files found matching pattern" }], + details: undefined, + }); + return; + } + + // Relativize paths + const relativized = results.map((p) => { + if (p.startsWith(searchPath)) { + return p.slice(searchPath.length + 1); + } + return path.relative(searchPath, p); + }); + + const resultLimitReached = relativized.length >= effectiveLimit; + const rawOutput = relativized.join("\n"); + const truncation = truncateHead(rawOutput, { maxLines: Number.MAX_SAFE_INTEGER }); + + let resultOutput = truncation.content; + const details: FindToolDetails = {}; + const notices: string[] = []; + + if (resultLimitReached) { + notices.push(`${effectiveLimit} results limit reached`); + details.resultLimitReached = effectiveLimit; + } + + if (truncation.truncated) { + notices.push(`${formatSize(DEFAULT_MAX_BYTES)} 
limit reached`); + details.truncation = truncation; + } + + if (notices.length > 0) { + resultOutput += `\n\n[${notices.join(". ")}]`; + } + + resolve({ + content: [{ type: "text", text: resultOutput }], + details: Object.keys(details).length > 0 ? details : undefined, + }); + return; + } + + // Default: use fd + const fdPath = await ensureTool("fd", true); + if (!fdPath) { + reject(new Error("fd is not available and could not be downloaded")); + return; + } + + // Build fd arguments + const args: string[] = [ + "--glob", + "--color=never", + "--hidden", + "--max-results", + String(effectiveLimit), + ]; + + // Include .gitignore files + const gitignoreFiles = new Set(); + const rootGitignore = path.join(searchPath, ".gitignore"); + if (existsSync(rootGitignore)) { + gitignoreFiles.add(rootGitignore); + } + + try { + const nestedGitignores = globSync("**/.gitignore", { + cwd: searchPath, + dot: true, + absolute: true, + ignore: ["**/node_modules/**", "**/.git/**"], + }); + for (const file of nestedGitignores) { + gitignoreFiles.add(file); + } + } catch { + // Ignore glob errors + } + + for (const gitignorePath of gitignoreFiles) { + args.push("--ignore-file", gitignorePath); + } + + args.push(pattern, searchPath); + + const result = spawnSync(fdPath, args, { + encoding: "utf-8", + maxBuffer: 10 * 1024 * 1024, + }); + + signal?.removeEventListener("abort", onAbort); + + if (result.error) { + reject(new Error(`Failed to run fd: ${result.error.message}`)); + return; + } + + const output = result.stdout?.trim() || ""; + + if (result.status !== 0) { + const errorMsg = result.stderr?.trim() || `fd exited with code ${result.status}`; + if (!output) { + reject(new Error(errorMsg)); + return; + } + } + + if (!output) { + resolve({ + content: [{ type: "text", text: "No files found matching pattern" }], + details: undefined, + }); + return; + } + + const lines = output.split("\n"); + const relativized: string[] = []; + + for (const rawLine of lines) { + const line = 
rawLine.replace(/\r$/, "").trim(); + if (!line) continue; + + const hadTrailingSlash = line.endsWith("/") || line.endsWith("\\"); + let relativePath = line; + if (line.startsWith(searchPath)) { + relativePath = line.slice(searchPath.length + 1); + } else { + relativePath = path.relative(searchPath, line); + } + + if (hadTrailingSlash && !relativePath.endsWith("/")) { + relativePath += "/"; + } + + relativized.push(relativePath); + } + + const resultLimitReached = relativized.length >= effectiveLimit; + const rawOutput = relativized.join("\n"); + const truncation = truncateHead(rawOutput, { maxLines: Number.MAX_SAFE_INTEGER }); + + let resultOutput = truncation.content; + const details: FindToolDetails = {}; + const notices: string[] = []; + + if (resultLimitReached) { + notices.push( + `${effectiveLimit} results limit reached. Use limit=${effectiveLimit * 2} for more, or refine pattern`, + ); + details.resultLimitReached = effectiveLimit; + } + + if (truncation.truncated) { + notices.push(`${formatSize(DEFAULT_MAX_BYTES)} limit reached`); + details.truncation = truncation; + } + + if (notices.length > 0) { + resultOutput += `\n\n[${notices.join(". ")}]`; + } + + resolve({ + content: [{ type: "text", text: resultOutput }], + details: Object.keys(details).length > 0 ? 
details : undefined, + }); + } catch (e: any) { + signal?.removeEventListener("abort", onAbort); + reject(e); + } + })(); + }); + }, + }; +} + +/** Default find tool using process.cwd() - for backwards compatibility */ +export const findTool = createFindTool(process.cwd()); diff --git a/packages/pi-coding-agent/src/core/tools/grep.ts b/packages/pi-coding-agent/src/core/tools/grep.ts new file mode 100644 index 000000000..6250a2601 --- /dev/null +++ b/packages/pi-coding-agent/src/core/tools/grep.ts @@ -0,0 +1,346 @@ +import { createInterface } from "node:readline"; +import type { AgentTool } from "@gsd/pi-agent-core"; +import { type Static, Type } from "@sinclair/typebox"; +import { spawn } from "child_process"; +import { readFileSync, statSync } from "fs"; +import path from "path"; +import { ensureTool } from "../../utils/tools-manager.js"; +import { resolveToCwd } from "./path-utils.js"; +import { + DEFAULT_MAX_BYTES, + formatSize, + GREP_MAX_LINE_LENGTH, + type TruncationResult, + truncateHead, + truncateLine, +} from "./truncate.js"; + +const grepSchema = Type.Object({ + pattern: Type.String({ description: "Search pattern (regex or literal string)" }), + path: Type.Optional(Type.String({ description: "Directory or file to search (default: current directory)" })), + glob: Type.Optional(Type.String({ description: "Filter files by glob pattern, e.g. 
'*.ts' or '**/*.spec.ts'" })), + ignoreCase: Type.Optional(Type.Boolean({ description: "Case-insensitive search (default: false)" })), + literal: Type.Optional( + Type.Boolean({ description: "Treat pattern as literal string instead of regex (default: false)" }), + ), + context: Type.Optional( + Type.Number({ description: "Number of lines to show before and after each match (default: 0)" }), + ), + limit: Type.Optional(Type.Number({ description: "Maximum number of matches to return (default: 100)" })), +}); + +export type GrepToolInput = Static; + +const DEFAULT_LIMIT = 100; + +export interface GrepToolDetails { + truncation?: TruncationResult; + matchLimitReached?: number; + linesTruncated?: boolean; +} + +/** + * Pluggable operations for the grep tool. + * Override these to delegate search to remote systems (e.g., SSH). + */ +export interface GrepOperations { + /** Check if path is a directory. Throws if path doesn't exist. */ + isDirectory: (absolutePath: string) => Promise | boolean; + /** Read file contents for context lines */ + readFile: (absolutePath: string) => Promise | string; +} + +const defaultGrepOperations: GrepOperations = { + isDirectory: (p) => statSync(p).isDirectory(), + readFile: (p) => readFileSync(p, "utf-8"), +}; + +export interface GrepToolOptions { + /** Custom operations for grep. Default: local filesystem + ripgrep */ + operations?: GrepOperations; +} + +export function createGrepTool(cwd: string, options?: GrepToolOptions): AgentTool { + const customOps = options?.operations; + + return { + name: "grep", + label: "grep", + description: `Search file contents for a pattern. Returns matching lines with file paths and line numbers. Respects .gitignore. Output is truncated to ${DEFAULT_LIMIT} matches or ${DEFAULT_MAX_BYTES / 1024}KB (whichever is hit first). 
Long lines are truncated to ${GREP_MAX_LINE_LENGTH} chars.`, + parameters: grepSchema, + execute: async ( + _toolCallId: string, + { + pattern, + path: searchDir, + glob, + ignoreCase, + literal, + context, + limit, + }: { + pattern: string; + path?: string; + glob?: string; + ignoreCase?: boolean; + literal?: boolean; + context?: number; + limit?: number; + }, + signal?: AbortSignal, + ) => { + return new Promise((resolve, reject) => { + if (signal?.aborted) { + reject(new Error("Operation aborted")); + return; + } + + let settled = false; + const settle = (fn: () => void) => { + if (!settled) { + settled = true; + fn(); + } + }; + + (async () => { + try { + const rgPath = await ensureTool("rg", true); + if (!rgPath) { + settle(() => reject(new Error("ripgrep (rg) is not available and could not be downloaded"))); + return; + } + + const searchPath = resolveToCwd(searchDir || ".", cwd); + const ops = customOps ?? defaultGrepOperations; + + let isDirectory: boolean; + try { + isDirectory = await ops.isDirectory(searchPath); + } catch (_err) { + settle(() => reject(new Error(`Path not found: ${searchPath}`))); + return; + } + const contextValue = context && context > 0 ? context : 0; + const effectiveLimit = Math.max(1, limit ?? 
DEFAULT_LIMIT); + + const formatPath = (filePath: string): string => { + if (isDirectory) { + const relative = path.relative(searchPath, filePath); + if (relative && !relative.startsWith("..")) { + return relative.replace(/\\/g, "/"); + } + } + return path.basename(filePath); + }; + + const fileCache = new Map(); + const getFileLines = async (filePath: string): Promise => { + let lines = fileCache.get(filePath); + if (!lines) { + try { + const content = await ops.readFile(filePath); + lines = content.replace(/\r\n/g, "\n").replace(/\r/g, "\n").split("\n"); + } catch { + lines = []; + } + fileCache.set(filePath, lines); + } + return lines; + }; + + const args: string[] = ["--json", "--line-number", "--color=never", "--hidden"]; + + if (ignoreCase) { + args.push("--ignore-case"); + } + + if (literal) { + args.push("--fixed-strings"); + } + + if (glob) { + args.push("--glob", glob); + } + + args.push(pattern, searchPath); + + const child = spawn(rgPath, args, { stdio: ["ignore", "pipe", "pipe"] }); + const rl = createInterface({ input: child.stdout }); + let stderr = ""; + let matchCount = 0; + let matchLimitReached = false; + let linesTruncated = false; + let aborted = false; + let killedDueToLimit = false; + const outputLines: string[] = []; + + const cleanup = () => { + rl.close(); + signal?.removeEventListener("abort", onAbort); + }; + + const stopChild = (dueToLimit: boolean = false) => { + if (!child.killed) { + killedDueToLimit = dueToLimit; + child.kill(); + } + }; + + const onAbort = () => { + aborted = true; + stopChild(); + }; + + signal?.addEventListener("abort", onAbort, { once: true }); + + child.stderr?.on("data", (chunk) => { + stderr += chunk.toString(); + }); + + const formatBlock = async (filePath: string, lineNumber: number): Promise => { + const relativePath = formatPath(filePath); + const lines = await getFileLines(filePath); + if (!lines.length) { + return [`${relativePath}:${lineNumber}: (unable to read file)`]; + } + + const block: string[] = 
[]; + const start = contextValue > 0 ? Math.max(1, lineNumber - contextValue) : lineNumber; + const end = contextValue > 0 ? Math.min(lines.length, lineNumber + contextValue) : lineNumber; + + for (let current = start; current <= end; current++) { + const lineText = lines[current - 1] ?? ""; + const sanitized = lineText.replace(/\r/g, ""); + const isMatchLine = current === lineNumber; + + // Truncate long lines + const { text: truncatedText, wasTruncated } = truncateLine(sanitized); + if (wasTruncated) { + linesTruncated = true; + } + + if (isMatchLine) { + block.push(`${relativePath}:${current}: ${truncatedText}`); + } else { + block.push(`${relativePath}-${current}- ${truncatedText}`); + } + } + + return block; + }; + + // Collect matches during streaming, format after + const matches: Array<{ filePath: string; lineNumber: number }> = []; + + rl.on("line", (line) => { + if (!line.trim() || matchCount >= effectiveLimit) { + return; + } + + let event: any; + try { + event = JSON.parse(line); + } catch { + return; + } + + if (event.type === "match") { + matchCount++; + const filePath = event.data?.path?.text; + const lineNumber = event.data?.line_number; + + if (filePath && typeof lineNumber === "number") { + matches.push({ filePath, lineNumber }); + } + + if (matchCount >= effectiveLimit) { + matchLimitReached = true; + stopChild(true); + } + } + }); + + child.on("error", (error) => { + cleanup(); + settle(() => reject(new Error(`Failed to run ripgrep: ${error.message}`))); + }); + + child.on("close", async (code) => { + cleanup(); + + if (aborted) { + settle(() => reject(new Error("Operation aborted"))); + return; + } + + if (!killedDueToLimit && code !== 0 && code !== 1) { + const errorMsg = stderr.trim() || `ripgrep exited with code ${code}`; + settle(() => reject(new Error(errorMsg))); + return; + } + + if (matchCount === 0) { + settle(() => + resolve({ content: [{ type: "text", text: "No matches found" }], details: undefined }), + ); + return; + } + + // 
Format matches (async to support remote file reading) + for (const match of matches) { + const block = await formatBlock(match.filePath, match.lineNumber); + outputLines.push(...block); + } + + // Apply byte truncation (no line limit since we already have match limit) + const rawOutput = outputLines.join("\n"); + const truncation = truncateHead(rawOutput, { maxLines: Number.MAX_SAFE_INTEGER }); + + let output = truncation.content; + const details: GrepToolDetails = {}; + + // Build notices + const notices: string[] = []; + + if (matchLimitReached) { + notices.push( + `${effectiveLimit} matches limit reached. Use limit=${effectiveLimit * 2} for more, or refine pattern`, + ); + details.matchLimitReached = effectiveLimit; + } + + if (truncation.truncated) { + notices.push(`${formatSize(DEFAULT_MAX_BYTES)} limit reached`); + details.truncation = truncation; + } + + if (linesTruncated) { + notices.push( + `Some lines truncated to ${GREP_MAX_LINE_LENGTH} chars. Use read tool to see full lines`, + ); + details.linesTruncated = true; + } + + if (notices.length > 0) { + output += `\n\n[${notices.join(". ")}]`; + } + + settle(() => + resolve({ + content: [{ type: "text", text: output }], + details: Object.keys(details).length > 0 ? 
details : undefined, + }), + ); + }); + } catch (err) { + settle(() => reject(err as Error)); + } + })(); + }); + }, + }; +} + +/** Default grep tool using process.cwd() - for backwards compatibility */ +export const grepTool = createGrepTool(process.cwd()); diff --git a/packages/pi-coding-agent/src/core/tools/index.ts b/packages/pi-coding-agent/src/core/tools/index.ts new file mode 100644 index 000000000..3768dbcf5 --- /dev/null +++ b/packages/pi-coding-agent/src/core/tools/index.ts @@ -0,0 +1,139 @@ +export { + type BashOperations, + type BashSpawnContext, + type BashSpawnHook, + type BashToolDetails, + type BashToolInput, + type BashToolOptions, + bashTool, + createBashTool, +} from "./bash.js"; +export { + createEditTool, + type EditOperations, + type EditToolDetails, + type EditToolInput, + type EditToolOptions, + editTool, +} from "./edit.js"; +export { + createFindTool, + type FindOperations, + type FindToolDetails, + type FindToolInput, + type FindToolOptions, + findTool, +} from "./find.js"; +export { + createGrepTool, + type GrepOperations, + type GrepToolDetails, + type GrepToolInput, + type GrepToolOptions, + grepTool, +} from "./grep.js"; +export { + createLsTool, + type LsOperations, + type LsToolDetails, + type LsToolInput, + type LsToolOptions, + lsTool, +} from "./ls.js"; +export { + createReadTool, + type ReadOperations, + type ReadToolDetails, + type ReadToolInput, + type ReadToolOptions, + readTool, +} from "./read.js"; +export { + DEFAULT_MAX_BYTES, + DEFAULT_MAX_LINES, + formatSize, + type TruncationOptions, + type TruncationResult, + truncateHead, + truncateLine, + truncateTail, +} from "./truncate.js"; +export { + createWriteTool, + type WriteOperations, + type WriteToolInput, + type WriteToolOptions, + writeTool, +} from "./write.js"; + +import type { AgentTool } from "@gsd/pi-agent-core"; +import { type BashToolOptions, bashTool, createBashTool } from "./bash.js"; +import { createEditTool, editTool } from "./edit.js"; +import { 
createFindTool, findTool } from "./find.js"; +import { createGrepTool, grepTool } from "./grep.js"; +import { createLsTool, lsTool } from "./ls.js"; +import { createReadTool, type ReadToolOptions, readTool } from "./read.js"; +import { createWriteTool, writeTool } from "./write.js"; + +/** Tool type (AgentTool from pi-ai) */ +export type Tool = AgentTool; + +// Default tools for full access mode (using process.cwd()) +export const codingTools: Tool[] = [readTool, bashTool, editTool, writeTool]; + +// Read-only tools for exploration without modification (using process.cwd()) +export const readOnlyTools: Tool[] = [readTool, grepTool, findTool, lsTool]; + +// All available tools (using process.cwd()) +export const allTools = { + read: readTool, + bash: bashTool, + edit: editTool, + write: writeTool, + grep: grepTool, + find: findTool, + ls: lsTool, +}; + +export type ToolName = keyof typeof allTools; + +export interface ToolsOptions { + /** Options for the read tool */ + read?: ReadToolOptions; + /** Options for the bash tool */ + bash?: BashToolOptions; +} + +/** + * Create coding tools configured for a specific working directory. + */ +export function createCodingTools(cwd: string, options?: ToolsOptions): Tool[] { + return [ + createReadTool(cwd, options?.read), + createBashTool(cwd, options?.bash), + createEditTool(cwd), + createWriteTool(cwd), + ]; +} + +/** + * Create read-only tools configured for a specific working directory. + */ +export function createReadOnlyTools(cwd: string, options?: ToolsOptions): Tool[] { + return [createReadTool(cwd, options?.read), createGrepTool(cwd), createFindTool(cwd), createLsTool(cwd)]; +} + +/** + * Create all tools configured for a specific working directory. 
+ */ +export function createAllTools(cwd: string, options?: ToolsOptions): Record { + return { + read: createReadTool(cwd, options?.read), + bash: createBashTool(cwd, options?.bash), + edit: createEditTool(cwd), + write: createWriteTool(cwd), + grep: createGrepTool(cwd), + find: createFindTool(cwd), + ls: createLsTool(cwd), + }; +} diff --git a/packages/pi-coding-agent/src/core/tools/ls.ts b/packages/pi-coding-agent/src/core/tools/ls.ts new file mode 100644 index 000000000..4876e2155 --- /dev/null +++ b/packages/pi-coding-agent/src/core/tools/ls.ts @@ -0,0 +1,170 @@ +import type { AgentTool } from "@gsd/pi-agent-core"; +import { type Static, Type } from "@sinclair/typebox"; +import { existsSync, readdirSync, statSync } from "fs"; +import nodePath from "path"; +import { resolveToCwd } from "./path-utils.js"; +import { DEFAULT_MAX_BYTES, formatSize, type TruncationResult, truncateHead } from "./truncate.js"; + +const lsSchema = Type.Object({ + path: Type.Optional(Type.String({ description: "Directory to list (default: current directory)" })), + limit: Type.Optional(Type.Number({ description: "Maximum number of entries to return (default: 500)" })), +}); + +export type LsToolInput = Static; + +const DEFAULT_LIMIT = 500; + +export interface LsToolDetails { + truncation?: TruncationResult; + entryLimitReached?: number; +} + +/** + * Pluggable operations for the ls tool. + * Override these to delegate directory listing to remote systems (e.g., SSH). + */ +export interface LsOperations { + /** Check if path exists */ + exists: (absolutePath: string) => Promise | boolean; + /** Get file/directory stats. Throws if not found. 
*/ + stat: (absolutePath: string) => Promise<{ isDirectory: () => boolean }> | { isDirectory: () => boolean }; + /** Read directory entries */ + readdir: (absolutePath: string) => Promise | string[]; +} + +const defaultLsOperations: LsOperations = { + exists: existsSync, + stat: statSync, + readdir: readdirSync, +}; + +export interface LsToolOptions { + /** Custom operations for directory listing. Default: local filesystem */ + operations?: LsOperations; +} + +export function createLsTool(cwd: string, options?: LsToolOptions): AgentTool { + const ops = options?.operations ?? defaultLsOperations; + + return { + name: "ls", + label: "ls", + description: `List directory contents. Returns entries sorted alphabetically, with '/' suffix for directories. Includes dotfiles. Output is truncated to ${DEFAULT_LIMIT} entries or ${DEFAULT_MAX_BYTES / 1024}KB (whichever is hit first).`, + parameters: lsSchema, + execute: async ( + _toolCallId: string, + { path, limit }: { path?: string; limit?: number }, + signal?: AbortSignal, + ) => { + return new Promise((resolve, reject) => { + if (signal?.aborted) { + reject(new Error("Operation aborted")); + return; + } + + const onAbort = () => reject(new Error("Operation aborted")); + signal?.addEventListener("abort", onAbort, { once: true }); + + (async () => { + try { + const dirPath = resolveToCwd(path || ".", cwd); + const effectiveLimit = limit ?? 
DEFAULT_LIMIT; + + // Check if path exists + if (!(await ops.exists(dirPath))) { + reject(new Error(`Path not found: ${dirPath}`)); + return; + } + + // Check if path is a directory + const stat = await ops.stat(dirPath); + if (!stat.isDirectory()) { + reject(new Error(`Not a directory: ${dirPath}`)); + return; + } + + // Read directory entries + let entries: string[]; + try { + entries = await ops.readdir(dirPath); + } catch (e: any) { + reject(new Error(`Cannot read directory: ${e.message}`)); + return; + } + + // Sort alphabetically (case-insensitive) + entries.sort((a, b) => a.toLowerCase().localeCompare(b.toLowerCase())); + + // Format entries with directory indicators + const results: string[] = []; + let entryLimitReached = false; + + for (const entry of entries) { + if (results.length >= effectiveLimit) { + entryLimitReached = true; + break; + } + + const fullPath = nodePath.join(dirPath, entry); + let suffix = ""; + + try { + const entryStat = await ops.stat(fullPath); + if (entryStat.isDirectory()) { + suffix = "/"; + } + } catch { + // Skip entries we can't stat + continue; + } + + results.push(entry + suffix); + } + + signal?.removeEventListener("abort", onAbort); + + if (results.length === 0) { + resolve({ content: [{ type: "text", text: "(empty directory)" }], details: undefined }); + return; + } + + // Apply byte truncation (no line limit since we already have entry limit) + const rawOutput = results.join("\n"); + const truncation = truncateHead(rawOutput, { maxLines: Number.MAX_SAFE_INTEGER }); + + let output = truncation.content; + const details: LsToolDetails = {}; + + // Build notices + const notices: string[] = []; + + if (entryLimitReached) { + notices.push(`${effectiveLimit} entries limit reached. 
Use limit=${effectiveLimit * 2} for more`); + details.entryLimitReached = effectiveLimit; + } + + if (truncation.truncated) { + notices.push(`${formatSize(DEFAULT_MAX_BYTES)} limit reached`); + details.truncation = truncation; + } + + if (notices.length > 0) { + output += `\n\n[${notices.join(". ")}]`; + } + + resolve({ + content: [{ type: "text", text: output }], + details: Object.keys(details).length > 0 ? details : undefined, + }); + } catch (e: any) { + signal?.removeEventListener("abort", onAbort); + reject(e); + } + })(); + }); + }, + }; +} + +/** Default ls tool using process.cwd() - for backwards compatibility */ +export const lsTool = createLsTool(process.cwd()); diff --git a/packages/pi-coding-agent/src/core/tools/path-utils.ts b/packages/pi-coding-agent/src/core/tools/path-utils.ts new file mode 100644 index 000000000..3b5b8e2f2 --- /dev/null +++ b/packages/pi-coding-agent/src/core/tools/path-utils.ts @@ -0,0 +1,94 @@ +import { accessSync, constants } from "node:fs"; +import * as os from "node:os"; +import { isAbsolute, resolve as resolvePath } from "node:path"; + +const UNICODE_SPACES = /[\u00A0\u2000-\u200A\u202F\u205F\u3000]/g; +const NARROW_NO_BREAK_SPACE = "\u202F"; +function normalizeUnicodeSpaces(str: string): string { + return str.replace(UNICODE_SPACES, " "); +} + +function tryMacOSScreenshotPath(filePath: string): string { + return filePath.replace(/ (AM|PM)\./g, `${NARROW_NO_BREAK_SPACE}$1.`); +} + +function tryNFDVariant(filePath: string): string { + // macOS stores filenames in NFD (decomposed) form, try converting user input to NFD + return filePath.normalize("NFD"); +} + +function tryCurlyQuoteVariant(filePath: string): string { + // macOS uses U+2019 (right single quotation mark) in screenshot names like "Capture d'écran" + // Users typically type U+0027 (straight apostrophe) + return filePath.replace(/'/g, "\u2019"); +} + +function fileExists(filePath: string): boolean { + try { + accessSync(filePath, constants.F_OK); + return true; + } 
catch { + return false; + } +} + +function normalizeAtPrefix(filePath: string): string { + return filePath.startsWith("@") ? filePath.slice(1) : filePath; +} + +export function expandPath(filePath: string): string { + const normalized = normalizeUnicodeSpaces(normalizeAtPrefix(filePath)); + if (normalized === "~") { + return os.homedir(); + } + if (normalized.startsWith("~/")) { + return os.homedir() + normalized.slice(1); + } + return normalized; +} + +/** + * Resolve a path relative to the given cwd. + * Handles ~ expansion and absolute paths. + */ +export function resolveToCwd(filePath: string, cwd: string): string { + const expanded = expandPath(filePath); + if (isAbsolute(expanded)) { + return expanded; + } + return resolvePath(cwd, expanded); +} + +export function resolveReadPath(filePath: string, cwd: string): string { + const resolved = resolveToCwd(filePath, cwd); + + if (fileExists(resolved)) { + return resolved; + } + + // Try macOS AM/PM variant (narrow no-break space before AM/PM) + const amPmVariant = tryMacOSScreenshotPath(resolved); + if (amPmVariant !== resolved && fileExists(amPmVariant)) { + return amPmVariant; + } + + // Try NFD variant (macOS stores filenames in NFD form) + const nfdVariant = tryNFDVariant(resolved); + if (nfdVariant !== resolved && fileExists(nfdVariant)) { + return nfdVariant; + } + + // Try curly quote variant (macOS uses U+2019 in screenshot names) + const curlyVariant = tryCurlyQuoteVariant(resolved); + if (curlyVariant !== resolved && fileExists(curlyVariant)) { + return curlyVariant; + } + + // Try combined NFD + curly quote (for French macOS screenshots like "Capture d'écran") + const nfdCurlyVariant = tryCurlyQuoteVariant(nfdVariant); + if (nfdCurlyVariant !== resolved && fileExists(nfdCurlyVariant)) { + return nfdCurlyVariant; + } + + return resolved; +} diff --git a/packages/pi-coding-agent/src/core/tools/read.ts b/packages/pi-coding-agent/src/core/tools/read.ts new file mode 100644 index 000000000..c2f23e60a --- 
// ===== packages/pi-coding-agent/src/core/tools/read.ts =====

import type { AgentTool } from "@gsd/pi-agent-core";
import type { ImageContent, TextContent } from "@gsd/pi-ai";
import { type Static, Type } from "@sinclair/typebox";
import { constants } from "fs";
import { access as fsAccess, readFile as fsReadFile } from "fs/promises";
import { formatDimensionNote, resizeImage } from "../../utils/image-resize.js";
import { detectSupportedImageMimeTypeFromFile } from "../../utils/mime.js";
import { resolveReadPath } from "./path-utils.js";
import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize, type TruncationResult, truncateHead } from "./truncate.js";

const readSchema = Type.Object({
	path: Type.String({ description: "Path to the file to read (relative or absolute)" }),
	offset: Type.Optional(Type.Number({ description: "Line number to start reading from (1-indexed)" })),
	limit: Type.Optional(Type.Number({ description: "Maximum number of lines to read" })),
});

// NOTE(review): generic argument lost in transit; assumed Static<typeof readSchema>.
export type ReadToolInput = Static<typeof readSchema>;

export interface ReadToolDetails {
	// Present when the returned text was truncated by line or byte limits
	truncation?: TruncationResult;
}

/**
 * Pluggable operations for the read tool.
 * Override these to delegate file reading to remote systems (e.g., SSH).
 */
export interface ReadOperations {
	/** Read file contents as a Buffer */
	readFile: (absolutePath: string) => Promise<Buffer>; // NOTE(review): return generic lost in transit; assumed Promise<Buffer>
	/** Check if file is readable (throw if not) */
	access: (absolutePath: string) => Promise<void>; // NOTE(review): assumed Promise<void>
	/** Detect image MIME type, return null/undefined for non-images */
	detectImageMimeType?: (absolutePath: string) => Promise<string | null | undefined>; // NOTE(review): assumed from the doc above — confirm
}

const defaultReadOperations: ReadOperations = {
	readFile: (path) => fsReadFile(path),
	access: (path) => fsAccess(path, constants.R_OK),
	detectImageMimeType: detectSupportedImageMimeTypeFromFile,
};

export interface ReadToolOptions {
	/** Whether to auto-resize images to 2000x2000 max. Default: true */
	autoResizeImages?: boolean;
	/** Custom operations for file reading. Default: local filesystem */
	operations?: ReadOperations;
}

/**
 * Create a `read` tool bound to `cwd`.
 *
 * Text files are returned with head-truncation (line + byte limits) and
 * actionable "[Showing lines X-Y ... Use offset=N]" notices so the model can
 * page through large files. Supported images are returned as attachments,
 * optionally resized. Honors an AbortSignal: rejects immediately on abort and
 * suppresses late results after an abort fired.
 */
export function createReadTool(cwd: string, options?: ReadToolOptions): AgentTool {
	const autoResizeImages = options?.autoResizeImages ?? true;
	const ops = options?.operations ?? defaultReadOperations;

	return {
		name: "read",
		label: "read",
		description: `Read the contents of a file. Supports text files and images (jpg, png, gif, webp). Images are sent as attachments. For text files, output is truncated to ${DEFAULT_MAX_LINES} lines or ${DEFAULT_MAX_BYTES / 1024}KB (whichever is hit first). Use offset/limit for large files. When you need the full file, continue with offset until complete.`,
		parameters: readSchema,
		execute: async (
			_toolCallId: string,
			{ path, offset, limit }: { path: string; offset?: number; limit?: number },
			signal?: AbortSignal,
		) => {
			// Resolve before entering the promise so macOS filename variants are tried once
			const absolutePath = resolveReadPath(path, cwd);

			return new Promise<{ content: (TextContent | ImageContent)[]; details: ReadToolDetails | undefined }>(
				(resolve, reject) => {
					// Check if already aborted
					if (signal?.aborted) {
						reject(new Error("Operation aborted"));
						return;
					}

					let aborted = false;

					// Set up abort handler; the flag lets the async work below bail out
					// quietly after the promise has already been rejected
					const onAbort = () => {
						aborted = true;
						reject(new Error("Operation aborted"));
					};

					if (signal) {
						signal.addEventListener("abort", onAbort, { once: true });
					}

					// Perform the read operation
					(async () => {
						try {
							// Check if file exists
							await ops.access(absolutePath);

							// Check if aborted before reading
							if (aborted) {
								return;
							}

							const mimeType = ops.detectImageMimeType ? await ops.detectImageMimeType(absolutePath) : undefined;

							// Read the file based on type
							let content: (TextContent | ImageContent)[];
							let details: ReadToolDetails | undefined;

							if (mimeType) {
								// Read as image (binary)
								const buffer = await ops.readFile(absolutePath);
								const base64 = buffer.toString("base64");

								if (autoResizeImages) {
									// Resize image if needed
									const resized = await resizeImage({ type: "image", data: base64, mimeType });
									const dimensionNote = formatDimensionNote(resized);

									let textNote = `Read image file [${resized.mimeType}]`;
									if (dimensionNote) {
										textNote += `\n${dimensionNote}`;
									}

									content = [
										{ type: "text", text: textNote },
										{ type: "image", data: resized.data, mimeType: resized.mimeType },
									];
								} else {
									const textNote = `Read image file [${mimeType}]`;
									content = [
										{ type: "text", text: textNote },
										{ type: "image", data: base64, mimeType },
									];
								}
							} else {
								// Read as text
								const buffer = await ops.readFile(absolutePath);
								const textContent = buffer.toString("utf-8");
								const allLines = textContent.split("\n");
								const totalFileLines = allLines.length;

								// Apply offset if specified (1-indexed to 0-indexed)
								const startLine = offset ? Math.max(0, offset - 1) : 0;
								const startLineDisplay = startLine + 1; // For display (1-indexed)

								// Check if offset is out of bounds
								if (startLine >= allLines.length) {
									throw new Error(`Offset ${offset} is beyond end of file (${allLines.length} lines total)`);
								}

								// If limit is specified by user, use it; otherwise we'll let truncateHead decide
								let selectedContent: string;
								let userLimitedLines: number | undefined;
								if (limit !== undefined) {
									const endLine = Math.min(startLine + limit, allLines.length);
									selectedContent = allLines.slice(startLine, endLine).join("\n");
									userLimitedLines = endLine - startLine;
								} else {
									selectedContent = allLines.slice(startLine).join("\n");
								}

								// Apply truncation (respects both line and byte limits)
								const truncation = truncateHead(selectedContent);

								let outputText: string;

								if (truncation.firstLineExceedsLimit) {
									// First line at offset exceeds the byte limit - tell model to use bash
									const firstLineSize = formatSize(Buffer.byteLength(allLines[startLine], "utf-8"));
									outputText = `[Line ${startLineDisplay} is ${firstLineSize}, exceeds ${formatSize(DEFAULT_MAX_BYTES)} limit. Use bash: sed -n '${startLineDisplay}p' ${path} | head -c ${DEFAULT_MAX_BYTES}]`;
									details = { truncation };
								} else if (truncation.truncated) {
									// Truncation occurred - build actionable notice
									const endLineDisplay = startLineDisplay + truncation.outputLines - 1;
									const nextOffset = endLineDisplay + 1;

									outputText = truncation.content;

									if (truncation.truncatedBy === "lines") {
										outputText += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines}. Use offset=${nextOffset} to continue.]`;
									} else {
										outputText += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines} (${formatSize(DEFAULT_MAX_BYTES)} limit). Use offset=${nextOffset} to continue.]`;
									}
									details = { truncation };
								} else if (userLimitedLines !== undefined && startLine + userLimitedLines < allLines.length) {
									// User specified limit, there's more content, but no truncation
									const remaining = allLines.length - (startLine + userLimitedLines);
									const nextOffset = startLine + userLimitedLines + 1;

									outputText = truncation.content;
									outputText += `\n\n[${remaining} more lines in file. Use offset=${nextOffset} to continue.]`;
								} else {
									// No truncation, no user limit exceeded
									outputText = truncation.content;
								}

								content = [{ type: "text", text: outputText }];
							}

							// Check if aborted after reading
							if (aborted) {
								return;
							}

							// Clean up abort handler
							if (signal) {
								signal.removeEventListener("abort", onAbort);
							}

							resolve({ content, details });
						} catch (error: any) {
							// Clean up abort handler
							if (signal) {
								signal.removeEventListener("abort", onAbort);
							}

							// Only reject if the abort handler hasn't already rejected
							if (!aborted) {
								reject(error);
							}
						}
					})();
				},
			);
		},
	};
}

/** Default read tool using process.cwd() - for backwards compatibility */
export const readTool = createReadTool(process.cwd());

// ===== packages/pi-coding-agent/src/core/tools/truncate.ts follows =====
+ */ + +export const DEFAULT_MAX_LINES = 2000; +export const DEFAULT_MAX_BYTES = 50 * 1024; // 50KB +export const GREP_MAX_LINE_LENGTH = 500; // Max chars per grep match line + +export interface TruncationResult { + /** The truncated content */ + content: string; + /** Whether truncation occurred */ + truncated: boolean; + /** Which limit was hit: "lines", "bytes", or null if not truncated */ + truncatedBy: "lines" | "bytes" | null; + /** Total number of lines in the original content */ + totalLines: number; + /** Total number of bytes in the original content */ + totalBytes: number; + /** Number of complete lines in the truncated output */ + outputLines: number; + /** Number of bytes in the truncated output */ + outputBytes: number; + /** Whether the last line was partially truncated (only for tail truncation edge case) */ + lastLinePartial: boolean; + /** Whether the first line exceeded the byte limit (for head truncation) */ + firstLineExceedsLimit: boolean; + /** The max lines limit that was applied */ + maxLines: number; + /** The max bytes limit that was applied */ + maxBytes: number; +} + +export interface TruncationOptions { + /** Maximum number of lines (default: 2000) */ + maxLines?: number; + /** Maximum number of bytes (default: 50KB) */ + maxBytes?: number; +} + +/** + * Format bytes as human-readable size. + */ +export function formatSize(bytes: number): string { + if (bytes < 1024) { + return `${bytes}B`; + } else if (bytes < 1024 * 1024) { + return `${(bytes / 1024).toFixed(1)}KB`; + } else { + return `${(bytes / (1024 * 1024)).toFixed(1)}MB`; + } +} + +/** + * Truncate content from the head (keep first N lines/bytes). + * Suitable for file reads where you want to see the beginning. + * + * Never returns partial lines. If first line exceeds byte limit, + * returns empty content with firstLineExceedsLimit=true. 
+ */ +export function truncateHead(content: string, options: TruncationOptions = {}): TruncationResult { + const maxLines = options.maxLines ?? DEFAULT_MAX_LINES; + const maxBytes = options.maxBytes ?? DEFAULT_MAX_BYTES; + + const totalBytes = Buffer.byteLength(content, "utf-8"); + const lines = content.split("\n"); + const totalLines = lines.length; + + // Check if no truncation needed + if (totalLines <= maxLines && totalBytes <= maxBytes) { + return { + content, + truncated: false, + truncatedBy: null, + totalLines, + totalBytes, + outputLines: totalLines, + outputBytes: totalBytes, + lastLinePartial: false, + firstLineExceedsLimit: false, + maxLines, + maxBytes, + }; + } + + // Check if first line alone exceeds byte limit + const firstLineBytes = Buffer.byteLength(lines[0], "utf-8"); + if (firstLineBytes > maxBytes) { + return { + content: "", + truncated: true, + truncatedBy: "bytes", + totalLines, + totalBytes, + outputLines: 0, + outputBytes: 0, + lastLinePartial: false, + firstLineExceedsLimit: true, + maxLines, + maxBytes, + }; + } + + // Collect complete lines that fit + const outputLinesArr: string[] = []; + let outputBytesCount = 0; + let truncatedBy: "lines" | "bytes" = "lines"; + + for (let i = 0; i < lines.length && i < maxLines; i++) { + const line = lines[i]; + const lineBytes = Buffer.byteLength(line, "utf-8") + (i > 0 ? 
1 : 0); // +1 for newline + + if (outputBytesCount + lineBytes > maxBytes) { + truncatedBy = "bytes"; + break; + } + + outputLinesArr.push(line); + outputBytesCount += lineBytes; + } + + // If we exited due to line limit + if (outputLinesArr.length >= maxLines && outputBytesCount <= maxBytes) { + truncatedBy = "lines"; + } + + const outputContent = outputLinesArr.join("\n"); + const finalOutputBytes = Buffer.byteLength(outputContent, "utf-8"); + + return { + content: outputContent, + truncated: true, + truncatedBy, + totalLines, + totalBytes, + outputLines: outputLinesArr.length, + outputBytes: finalOutputBytes, + lastLinePartial: false, + firstLineExceedsLimit: false, + maxLines, + maxBytes, + }; +} + +/** + * Truncate content from the tail (keep last N lines/bytes). + * Suitable for bash output where you want to see the end (errors, final results). + * + * May return partial first line if the last line of original content exceeds byte limit. + */ +export function truncateTail(content: string, options: TruncationOptions = {}): TruncationResult { + const maxLines = options.maxLines ?? DEFAULT_MAX_LINES; + const maxBytes = options.maxBytes ?? 
DEFAULT_MAX_BYTES; + + const totalBytes = Buffer.byteLength(content, "utf-8"); + const lines = content.split("\n"); + const totalLines = lines.length; + + // Check if no truncation needed + if (totalLines <= maxLines && totalBytes <= maxBytes) { + return { + content, + truncated: false, + truncatedBy: null, + totalLines, + totalBytes, + outputLines: totalLines, + outputBytes: totalBytes, + lastLinePartial: false, + firstLineExceedsLimit: false, + maxLines, + maxBytes, + }; + } + + // Work backwards from the end + const outputLinesArr: string[] = []; + let outputBytesCount = 0; + let truncatedBy: "lines" | "bytes" = "lines"; + let lastLinePartial = false; + + for (let i = lines.length - 1; i >= 0 && outputLinesArr.length < maxLines; i--) { + const line = lines[i]; + const lineBytes = Buffer.byteLength(line, "utf-8") + (outputLinesArr.length > 0 ? 1 : 0); // +1 for newline + + if (outputBytesCount + lineBytes > maxBytes) { + truncatedBy = "bytes"; + // Edge case: if we haven't added ANY lines yet and this line exceeds maxBytes, + // take the end of the line (partial) + if (outputLinesArr.length === 0) { + const truncatedLine = truncateStringToBytesFromEnd(line, maxBytes); + outputLinesArr.unshift(truncatedLine); + outputBytesCount = Buffer.byteLength(truncatedLine, "utf-8"); + lastLinePartial = true; + } + break; + } + + outputLinesArr.unshift(line); + outputBytesCount += lineBytes; + } + + // If we exited due to line limit + if (outputLinesArr.length >= maxLines && outputBytesCount <= maxBytes) { + truncatedBy = "lines"; + } + + const outputContent = outputLinesArr.join("\n"); + const finalOutputBytes = Buffer.byteLength(outputContent, "utf-8"); + + return { + content: outputContent, + truncated: true, + truncatedBy, + totalLines, + totalBytes, + outputLines: outputLinesArr.length, + outputBytes: finalOutputBytes, + lastLinePartial, + firstLineExceedsLimit: false, + maxLines, + maxBytes, + }; +} + +/** + * Truncate a string to fit within a byte limit (from the end). 
/**
 * Truncate a string to fit within a byte limit (from the end).
 * Handles multi-byte UTF-8 characters correctly.
 */
function truncateStringToBytesFromEnd(str: string, maxBytes: number): string {
	const buf = Buffer.from(str, "utf-8");
	if (buf.length <= maxBytes) {
		return str;
	}

	// Start from the end, skip maxBytes back
	let start = buf.length - maxBytes;

	// Find a valid UTF-8 boundary (start of a character): UTF-8 continuation
	// bytes match 0b10xxxxxx, so advance past them to avoid a broken character.
	while (start < buf.length && (buf[start] & 0xc0) === 0x80) {
		start++;
	}

	return buf.slice(start).toString("utf-8");
}

/**
 * Truncate a single line to max characters, adding [truncated] suffix.
 * Used for grep match lines.
 */
export function truncateLine(
	line: string,
	maxChars: number = GREP_MAX_LINE_LENGTH,
): { text: string; wasTruncated: boolean } {
	if (line.length <= maxChars) {
		return { text: line, wasTruncated: false };
	}
	return { text: `${line.slice(0, maxChars)}... [truncated]`, wasTruncated: true };
}

// ===== packages/pi-coding-agent/src/core/tools/write.ts =====

import type { AgentTool } from "@gsd/pi-agent-core";
import { type Static, Type } from "@sinclair/typebox";
import { mkdir as fsMkdir, writeFile as fsWriteFile } from "fs/promises";
import { dirname } from "path";
import { resolveToCwd } from "./path-utils.js";

const writeSchema = Type.Object({
	path: Type.String({ description: "Path to the file to write (relative or absolute)" }),
	content: Type.String({ description: "Content to write to the file" }),
});

// NOTE(review): generic argument lost in transit; assumed Static<typeof writeSchema>.
export type WriteToolInput = Static<typeof writeSchema>;

/**
 * Pluggable operations for the write tool.
 * Override these to delegate file writing to remote systems (e.g., SSH).
 */
+ */ +export interface WriteOperations { + /** Write content to a file */ + writeFile: (absolutePath: string, content: string) => Promise; + /** Create directory (recursively) */ + mkdir: (dir: string) => Promise; +} + +const defaultWriteOperations: WriteOperations = { + writeFile: (path, content) => fsWriteFile(path, content, "utf-8"), + mkdir: (dir) => fsMkdir(dir, { recursive: true }).then(() => {}), +}; + +export interface WriteToolOptions { + /** Custom operations for file writing. Default: local filesystem */ + operations?: WriteOperations; +} + +export function createWriteTool(cwd: string, options?: WriteToolOptions): AgentTool { + const ops = options?.operations ?? defaultWriteOperations; + + return { + name: "write", + label: "write", + description: + "Write content to a file. Creates the file if it doesn't exist, overwrites if it does. Automatically creates parent directories.", + parameters: writeSchema, + execute: async ( + _toolCallId: string, + { path, content }: { path: string; content: string }, + signal?: AbortSignal, + ) => { + const absolutePath = resolveToCwd(path, cwd); + const dir = dirname(absolutePath); + + return new Promise<{ content: Array<{ type: "text"; text: string }>; details: undefined }>( + (resolve, reject) => { + // Check if already aborted + if (signal?.aborted) { + reject(new Error("Operation aborted")); + return; + } + + let aborted = false; + + // Set up abort handler + const onAbort = () => { + aborted = true; + reject(new Error("Operation aborted")); + }; + + if (signal) { + signal.addEventListener("abort", onAbort, { once: true }); + } + + // Perform the write operation + (async () => { + try { + // Create parent directories if needed + await ops.mkdir(dir); + + // Check if aborted before writing + if (aborted) { + return; + } + + // Write the file + await ops.writeFile(absolutePath, content); + + // Check if aborted after writing + if (aborted) { + return; + } + + // Clean up abort handler + if (signal) { + 
signal.removeEventListener("abort", onAbort); + } + + resolve({ + content: [{ type: "text", text: `Successfully wrote ${content.length} bytes to ${path}` }], + details: undefined, + }); + } catch (error: any) { + // Clean up abort handler + if (signal) { + signal.removeEventListener("abort", onAbort); + } + + if (!aborted) { + reject(error); + } + } + })(); + }, + ); + }, + }; +} + +/** Default write tool using process.cwd() - for backwards compatibility */ +export const writeTool = createWriteTool(process.cwd()); diff --git a/packages/pi-coding-agent/src/index.ts b/packages/pi-coding-agent/src/index.ts new file mode 100644 index 000000000..b83c17fe4 --- /dev/null +++ b/packages/pi-coding-agent/src/index.ts @@ -0,0 +1,333 @@ +// Core session management + +// Config paths +export { getAgentDir, VERSION } from "./config.js"; +export { + AgentSession, + type AgentSessionConfig, + type AgentSessionEvent, + type AgentSessionEventListener, + type ModelCycleResult, + type ParsedSkillBlock, + type PromptOptions, + parseSkillBlock, + type SessionStats, +} from "./core/agent-session.js"; +// Auth and model registry +export { + type ApiKeyCredential, + type AuthCredential, + AuthStorage, + type AuthStorageBackend, + FileAuthStorageBackend, + InMemoryAuthStorageBackend, + type OAuthCredential, +} from "./core/auth-storage.js"; +// Compaction +export { + type BranchPreparation, + type BranchSummaryResult, + type CollectEntriesResult, + type CompactionResult, + type CutPointResult, + calculateContextTokens, + collectEntriesForBranchSummary, + compact, + DEFAULT_COMPACTION_SETTINGS, + estimateTokens, + type FileOperations, + findCutPoint, + findTurnStartIndex, + type GenerateBranchSummaryOptions, + generateBranchSummary, + generateSummary, + getLastAssistantUsage, + prepareBranchEntries, + serializeConversation, + shouldCompact, +} from "./core/compaction/index.js"; +export { createEventBus, type EventBus, type EventBusController } from "./core/event-bus.js"; +// Extension system 
+export type { + AgentEndEvent, + AgentStartEvent, + AgentToolResult, + AgentToolUpdateCallback, + AppAction, + BashToolCallEvent, + BeforeAgentStartEvent, + BeforeProviderRequestEvent, + BeforeProviderRequestEventResult, + CompactOptions, + ContextEvent, + ContextUsage, + CustomToolCallEvent, + EditToolCallEvent, + ExecOptions, + ExecResult, + Extension, + ExtensionActions, + ExtensionAPI, + ExtensionCommandContext, + ExtensionCommandContextActions, + ExtensionContext, + ExtensionContextActions, + ExtensionError, + ExtensionEvent, + ExtensionFactory, + ExtensionFlag, + ExtensionHandler, + ExtensionRuntime, + ExtensionShortcut, + ExtensionUIContext, + ExtensionUIDialogOptions, + ExtensionWidgetOptions, + FindToolCallEvent, + GrepToolCallEvent, + InputEvent, + InputEventResult, + InputSource, + KeybindingsManager, + LoadExtensionsResult, + LsToolCallEvent, + MessageRenderer, + MessageRenderOptions, + ProviderConfig, + ProviderModelConfig, + ReadToolCallEvent, + RegisteredCommand, + RegisteredTool, + SessionBeforeCompactEvent, + SessionBeforeForkEvent, + SessionBeforeSwitchEvent, + SessionBeforeTreeEvent, + SessionCompactEvent, + SessionForkEvent, + SessionShutdownEvent, + SessionStartEvent, + SessionSwitchEvent, + SessionTreeEvent, + SlashCommandInfo, + SlashCommandLocation, + SlashCommandSource, + TerminalInputHandler, + ToolCallEvent, + ToolDefinition, + ToolInfo, + ToolRenderResultOptions, + ToolResultEvent, + TurnEndEvent, + TurnStartEvent, + UserBashEvent, + UserBashEventResult, + WidgetPlacement, + WriteToolCallEvent, +} from "./core/extensions/index.js"; +export { + createExtensionRuntime, + discoverAndLoadExtensions, + ExtensionRunner, + isBashToolResult, + isEditToolResult, + isFindToolResult, + isGrepToolResult, + isLsToolResult, + isReadToolResult, + isToolCallEventType, + isWriteToolResult, + wrapRegisteredTool, + wrapRegisteredTools, + wrapToolsWithExtensions, + wrapToolWithExtensions, +} from "./core/extensions/index.js"; +// Footer data provider (git 
branch + extension statuses - data not otherwise available to extensions) +export type { ReadonlyFooterDataProvider } from "./core/footer-data-provider.js"; +export { convertToLlm } from "./core/messages.js"; +export { ModelRegistry } from "./core/model-registry.js"; +export type { + PackageManager, + PathMetadata, + ProgressCallback, + ProgressEvent, + ResolvedPaths, + ResolvedResource, +} from "./core/package-manager.js"; +export { DefaultPackageManager } from "./core/package-manager.js"; +export type { ResourceCollision, ResourceDiagnostic, ResourceLoader } from "./core/resource-loader.js"; +export { DefaultResourceLoader } from "./core/resource-loader.js"; +// SDK for programmatic usage +export { + type CreateAgentSessionOptions, + type CreateAgentSessionResult, + // Factory + createAgentSession, + createBashTool, + // Tool factories (for custom cwd) + createCodingTools, + createEditTool, + createFindTool, + createGrepTool, + createLsTool, + createReadOnlyTools, + createReadTool, + createWriteTool, + type PromptTemplate, + // Pre-built tools (use process.cwd()) + readOnlyTools, +} from "./core/sdk.js"; +export { + type BranchSummaryEntry, + buildSessionContext, + type CompactionEntry, + CURRENT_SESSION_VERSION, + type CustomEntry, + type CustomMessageEntry, + type FileEntry, + getLatestCompactionEntry, + type ModelChangeEntry, + migrateSessionEntries, + type NewSessionOptions, + parseSessionEntries, + type SessionContext, + type SessionEntry, + type SessionEntryBase, + type SessionHeader, + type SessionInfo, + type SessionInfoEntry, + SessionManager, + type SessionMessageEntry, + type ThinkingLevelChangeEntry, +} from "./core/session-manager.js"; +export { + type CompactionSettings, + type ImageSettings, + type PackageSource, + type RetrySettings, + SettingsManager, +} from "./core/settings-manager.js"; +// Skills +export { + formatSkillsForPrompt, + type LoadSkillsFromDirOptions, + type LoadSkillsResult, + loadSkills, + loadSkillsFromDir, + type Skill, + type 
SkillFrontmatter, +} from "./core/skills.js"; +// Tools +export { + type BashOperations, + type BashSpawnContext, + type BashSpawnHook, + type BashToolDetails, + type BashToolInput, + type BashToolOptions, + bashTool, + codingTools, + DEFAULT_MAX_BYTES, + DEFAULT_MAX_LINES, + type EditOperations, + type EditToolDetails, + type EditToolInput, + type EditToolOptions, + editTool, + type FindOperations, + type FindToolDetails, + type FindToolInput, + type FindToolOptions, + findTool, + formatSize, + type GrepOperations, + type GrepToolDetails, + type GrepToolInput, + type GrepToolOptions, + grepTool, + type LsOperations, + type LsToolDetails, + type LsToolInput, + type LsToolOptions, + lsTool, + type ReadOperations, + type ReadToolDetails, + type ReadToolInput, + type ReadToolOptions, + readTool, + type ToolsOptions, + type TruncationOptions, + type TruncationResult, + truncateHead, + truncateLine, + truncateTail, + type WriteOperations, + type WriteToolInput, + type WriteToolOptions, + writeTool, +} from "./core/tools/index.js"; +// Main entry point +export { main } from "./main.js"; +// Run modes for programmatic SDK usage +export { + InteractiveMode, + type InteractiveModeOptions, + type PrintModeOptions, + runPrintMode, + runRpcMode, +} from "./modes/index.js"; +// UI components for extensions +export { + ArminComponent, + AssistantMessageComponent, + appKey, + appKeyHint, + BashExecutionComponent, + BorderedLoader, + BranchSummaryMessageComponent, + CompactionSummaryMessageComponent, + CustomEditor, + CustomMessageComponent, + DynamicBorder, + ExtensionEditorComponent, + ExtensionInputComponent, + ExtensionSelectorComponent, + editorKey, + FooterComponent, + keyHint, + LoginDialogComponent, + ModelSelectorComponent, + OAuthSelectorComponent, + type RenderDiffOptions, + rawKeyHint, + renderDiff, + SessionSelectorComponent, + type SettingsCallbacks, + type SettingsConfig, + SettingsSelectorComponent, + ShowImagesSelectorComponent, + SkillInvocationMessageComponent, 
+ ThemeSelectorComponent, + ThinkingSelectorComponent, + ToolExecutionComponent, + type ToolExecutionOptions, + TreeSelectorComponent, + truncateToVisualLines, + UserMessageComponent, + UserMessageSelectorComponent, + type VisualTruncateResult, +} from "./modes/interactive/components/index.js"; +// Theme utilities for custom tools and extensions +export { + getLanguageFromPath, + getMarkdownTheme, + getSelectListTheme, + getSettingsListTheme, + highlightCode, + initTheme, + Theme, + type ThemeColor, +} from "./modes/interactive/theme/theme.js"; +// Clipboard utilities +export { copyToClipboard } from "./utils/clipboard.js"; +export { parseFrontmatter, stripFrontmatter } from "./utils/frontmatter.js"; +// Shell utilities +export { getShellConfig } from "./utils/shell.js"; diff --git a/packages/pi-coding-agent/src/main.ts b/packages/pi-coding-agent/src/main.ts new file mode 100644 index 000000000..5c39de898 --- /dev/null +++ b/packages/pi-coding-agent/src/main.ts @@ -0,0 +1,821 @@ +/** + * Main entry point for the coding agent CLI. + * + * This file handles CLI argument parsing and translates them into + * createAgentSession() options. The SDK does the heavy lifting. 
+ */ + +import { type ImageContent, modelsAreEqual, supportsXhigh } from "@gsd/pi-ai"; +import chalk from "chalk"; +import { createInterface } from "readline"; +import { type Args, parseArgs, printHelp } from "./cli/args.js"; +import { selectConfig } from "./cli/config-selector.js"; +import { processFileArguments } from "./cli/file-processor.js"; +import { listModels } from "./cli/list-models.js"; +import { selectSession } from "./cli/session-picker.js"; +import { APP_NAME, getAgentDir, getModelsPath, VERSION } from "./config.js"; +import { AuthStorage } from "./core/auth-storage.js"; +import { exportFromFile } from "./core/export-html/index.js"; +import type { LoadExtensionsResult } from "./core/extensions/index.js"; +import { KeybindingsManager } from "./core/keybindings.js"; +import { ModelRegistry } from "./core/model-registry.js"; +import { resolveCliModel, resolveModelScope, type ScopedModel } from "./core/model-resolver.js"; +import { DefaultPackageManager } from "./core/package-manager.js"; +import { DefaultResourceLoader } from "./core/resource-loader.js"; +import { type CreateAgentSessionOptions, createAgentSession } from "./core/sdk.js"; +import { SessionManager } from "./core/session-manager.js"; +import { SettingsManager } from "./core/settings-manager.js"; +import { printTimings, time } from "./core/timings.js"; +import { allTools } from "./core/tools/index.js"; +import { runMigrations, showDeprecationWarnings } from "./migrations.js"; +import { InteractiveMode, runPrintMode, runRpcMode } from "./modes/index.js"; +import { initTheme, stopThemeWatcher } from "./modes/interactive/theme/theme.js"; + +/** + * Read all content from piped stdin. + * Returns undefined if stdin is a TTY (interactive terminal). 
+ */ +async function readPipedStdin(): Promise { + // If stdin is a TTY, we're running interactively - don't read stdin + if (process.stdin.isTTY) { + return undefined; + } + + return new Promise((resolve) => { + let data = ""; + process.stdin.setEncoding("utf8"); + process.stdin.on("data", (chunk) => { + data += chunk; + }); + process.stdin.on("end", () => { + resolve(data.trim() || undefined); + }); + process.stdin.resume(); + }); +} + +function reportSettingsErrors(settingsManager: SettingsManager, context: string): void { + const errors = settingsManager.drainErrors(); + for (const { scope, error } of errors) { + console.error(chalk.yellow(`Warning (${context}, ${scope} settings): ${error.message}`)); + if (error.stack) { + console.error(chalk.dim(error.stack)); + } + } +} + +function isTruthyEnvFlag(value: string | undefined): boolean { + if (!value) return false; + return value === "1" || value.toLowerCase() === "true" || value.toLowerCase() === "yes"; +} + +type PackageCommand = "install" | "remove" | "update" | "list"; + +interface PackageCommandOptions { + command: PackageCommand; + source?: string; + local: boolean; + help: boolean; + invalidOption?: string; +} + +function getPackageCommandUsage(command: PackageCommand): string { + switch (command) { + case "install": + return `${APP_NAME} install [-l]`; + case "remove": + return `${APP_NAME} remove [-l]`; + case "update": + return `${APP_NAME} update [source]`; + case "list": + return `${APP_NAME} list`; + } +} + +function printPackageCommandHelp(command: PackageCommand): void { + switch (command) { + case "install": + console.log(`${chalk.bold("Usage:")} + ${getPackageCommandUsage("install")} + +Install a package and add it to settings. 
+ +Options: + -l, --local Install project-locally (.pi/settings.json) + +Examples: + ${APP_NAME} install npm:@foo/bar + ${APP_NAME} install git:github.com/user/repo + ${APP_NAME} install git:git@github.com:user/repo + ${APP_NAME} install https://github.com/user/repo + ${APP_NAME} install ssh://git@github.com/user/repo + ${APP_NAME} install ./local/path +`); + return; + + case "remove": + console.log(`${chalk.bold("Usage:")} + ${getPackageCommandUsage("remove")} + +Remove a package and its source from settings. + +Options: + -l, --local Remove from project settings (.pi/settings.json) + +Example: + ${APP_NAME} remove npm:@foo/bar +`); + return; + + case "update": + console.log(`${chalk.bold("Usage:")} + ${getPackageCommandUsage("update")} + +Update installed packages. +If is provided, only that package is updated. +`); + return; + + case "list": + console.log(`${chalk.bold("Usage:")} + ${getPackageCommandUsage("list")} + +List installed packages from user and project settings. +`); + return; + } +} + +function parsePackageCommand(args: string[]): PackageCommandOptions | undefined { + const [command, ...rest] = args; + if (command !== "install" && command !== "remove" && command !== "update" && command !== "list") { + return undefined; + } + + let local = false; + let help = false; + let invalidOption: string | undefined; + let source: string | undefined; + + for (const arg of rest) { + if (arg === "-h" || arg === "--help") { + help = true; + continue; + } + + if (arg === "-l" || arg === "--local") { + if (command === "install" || command === "remove") { + local = true; + } else { + invalidOption = invalidOption ?? arg; + } + continue; + } + + if (arg.startsWith("-")) { + invalidOption = invalidOption ?? 
arg; + continue; + } + + if (!source) { + source = arg; + } + } + + return { command, source, local, help, invalidOption }; +} + +async function handlePackageCommand(args: string[]): Promise { + const options = parsePackageCommand(args); + if (!options) { + return false; + } + + if (options.help) { + printPackageCommandHelp(options.command); + return true; + } + + if (options.invalidOption) { + console.error(chalk.red(`Unknown option ${options.invalidOption} for "${options.command}".`)); + console.error(chalk.dim(`Use "${APP_NAME} --help" or "${getPackageCommandUsage(options.command)}".`)); + process.exitCode = 1; + return true; + } + + const source = options.source; + if ((options.command === "install" || options.command === "remove") && !source) { + console.error(chalk.red(`Missing ${options.command} source.`)); + console.error(chalk.dim(`Usage: ${getPackageCommandUsage(options.command)}`)); + process.exitCode = 1; + return true; + } + + const cwd = process.cwd(); + const agentDir = getAgentDir(); + const settingsManager = SettingsManager.create(cwd, agentDir); + reportSettingsErrors(settingsManager, "package command"); + const packageManager = new DefaultPackageManager({ cwd, agentDir, settingsManager }); + + packageManager.setProgressCallback((event) => { + if (event.type === "start") { + process.stdout.write(chalk.dim(`${event.message}\n`)); + } + }); + + try { + switch (options.command) { + case "install": + await packageManager.install(source!, { local: options.local }); + packageManager.addSourceToSettings(source!, { local: options.local }); + console.log(chalk.green(`Installed ${source}`)); + return true; + + case "remove": { + await packageManager.remove(source!, { local: options.local }); + const removed = packageManager.removeSourceFromSettings(source!, { local: options.local }); + if (!removed) { + console.error(chalk.red(`No matching package found for ${source}`)); + process.exitCode = 1; + return true; + } + console.log(chalk.green(`Removed 
${source}`)); + return true; + } + + case "list": { + const globalSettings = settingsManager.getGlobalSettings(); + const projectSettings = settingsManager.getProjectSettings(); + const globalPackages = globalSettings.packages ?? []; + const projectPackages = projectSettings.packages ?? []; + + if (globalPackages.length === 0 && projectPackages.length === 0) { + console.log(chalk.dim("No packages installed.")); + return true; + } + + const formatPackage = (pkg: (typeof globalPackages)[number], scope: "user" | "project") => { + const source = typeof pkg === "string" ? pkg : pkg.source; + const filtered = typeof pkg === "object"; + const display = filtered ? `${source} (filtered)` : source; + console.log(` ${display}`); + const path = packageManager.getInstalledPath(source, scope); + if (path) { + console.log(chalk.dim(` ${path}`)); + } + }; + + if (globalPackages.length > 0) { + console.log(chalk.bold("User packages:")); + for (const pkg of globalPackages) { + formatPackage(pkg, "user"); + } + } + + if (projectPackages.length > 0) { + if (globalPackages.length > 0) console.log(); + console.log(chalk.bold("Project packages:")); + for (const pkg of projectPackages) { + formatPackage(pkg, "project"); + } + } + + return true; + } + + case "update": + await packageManager.update(source); + if (source) { + console.log(chalk.green(`Updated ${source}`)); + } else { + console.log(chalk.green("Updated packages")); + } + return true; + } + } catch (error: unknown) { + const message = error instanceof Error ? 
error.message : "Unknown package command error"; + console.error(chalk.red(`Error: ${message}`)); + process.exitCode = 1; + return true; + } +} + +async function prepareInitialMessage( + parsed: Args, + autoResizeImages: boolean, +): Promise<{ + initialMessage?: string; + initialImages?: ImageContent[]; +}> { + if (parsed.fileArgs.length === 0) { + return {}; + } + + const { text, images } = await processFileArguments(parsed.fileArgs, { autoResizeImages }); + + let initialMessage: string; + if (parsed.messages.length > 0) { + initialMessage = text + parsed.messages[0]; + parsed.messages.shift(); + } else { + initialMessage = text; + } + + return { + initialMessage, + initialImages: images.length > 0 ? images : undefined, + }; +} + +/** Result from resolving a session argument */ +type ResolvedSession = + | { type: "path"; path: string } // Direct file path + | { type: "local"; path: string } // Found in current project + | { type: "global"; path: string; cwd: string } // Found in different project + | { type: "not_found"; arg: string }; // Not found anywhere + +/** + * Resolve a session argument to a file path. + * If it looks like a path, use as-is. Otherwise try to match as session ID prefix. 
+ */ +async function resolveSessionPath(sessionArg: string, cwd: string, sessionDir?: string): Promise { + // If it looks like a file path, use as-is + if (sessionArg.includes("/") || sessionArg.includes("\\") || sessionArg.endsWith(".jsonl")) { + return { type: "path", path: sessionArg }; + } + + // Try to match as session ID in current project first + const localSessions = await SessionManager.list(cwd, sessionDir); + const localMatches = localSessions.filter((s) => s.id.startsWith(sessionArg)); + + if (localMatches.length >= 1) { + return { type: "local", path: localMatches[0].path }; + } + + // Try global search across all projects + const allSessions = await SessionManager.listAll(); + const globalMatches = allSessions.filter((s) => s.id.startsWith(sessionArg)); + + if (globalMatches.length >= 1) { + const match = globalMatches[0]; + return { type: "global", path: match.path, cwd: match.cwd }; + } + + // Not found anywhere + return { type: "not_found", arg: sessionArg }; +} + +/** Prompt user for yes/no confirmation */ +async function promptConfirm(message: string): Promise { + return new Promise((resolve) => { + const rl = createInterface({ + input: process.stdin, + output: process.stdout, + }); + rl.question(`${message} [y/N] `, (answer) => { + rl.close(); + resolve(answer.toLowerCase() === "y" || answer.toLowerCase() === "yes"); + }); + }); +} + +/** Helper to call CLI-only session_directory handlers before the initial session manager is created */ +async function callSessionDirectoryHook(extensions: LoadExtensionsResult, cwd: string): Promise { + let customSessionDir: string | undefined; + + for (const ext of extensions.extensions) { + const handlers = ext.handlers.get("session_directory"); + if (!handlers || handlers.length === 0) continue; + + for (const handler of handlers) { + try { + const event = { type: "session_directory" as const, cwd }; + const result = (await handler(event)) as { sessionDir?: string } | undefined; + + if (result?.sessionDir) { + 
customSessionDir = result.sessionDir; + } + } catch (err) { + const message = err instanceof Error ? err.message : String(err); + console.error(chalk.red(`Extension "${ext.path}" session_directory handler failed: ${message}`)); + } + } + } + + return customSessionDir; +} + +async function createSessionManager( + parsed: Args, + cwd: string, + extensions: LoadExtensionsResult, +): Promise { + if (parsed.noSession) { + return SessionManager.inMemory(); + } + + // CLI flag takes precedence, otherwise ask extensions for custom session directory + let effectiveSessionDir = parsed.sessionDir; + if (!effectiveSessionDir) { + effectiveSessionDir = await callSessionDirectoryHook(extensions, cwd); + } + + if (parsed.session) { + const resolved = await resolveSessionPath(parsed.session, cwd, effectiveSessionDir); + + switch (resolved.type) { + case "path": + case "local": + return SessionManager.open(resolved.path, effectiveSessionDir); + + case "global": { + // Session found in different project - ask user if they want to fork + console.log(chalk.yellow(`Session found in different project: ${resolved.cwd}`)); + const shouldFork = await promptConfirm("Fork this session into current directory?"); + if (!shouldFork) { + console.log(chalk.dim("Aborted.")); + process.exit(0); + } + return SessionManager.forkFrom(resolved.path, cwd, effectiveSessionDir); + } + + case "not_found": + console.error(chalk.red(`No session found matching '${resolved.arg}'`)); + process.exit(1); + } + } + if (parsed.continue) { + return SessionManager.continueRecent(cwd, effectiveSessionDir); + } + // --resume is handled separately (needs picker UI) + // If effective session dir is set, create new session there + if (effectiveSessionDir) { + return SessionManager.create(cwd, effectiveSessionDir); + } + // Default case (new session) returns undefined, SDK will create one + return undefined; +} + +function buildSessionOptions( + parsed: Args, + scopedModels: ScopedModel[], + sessionManager: SessionManager 
| undefined, + modelRegistry: ModelRegistry, + settingsManager: SettingsManager, +): { options: CreateAgentSessionOptions; cliThinkingFromModel: boolean } { + const options: CreateAgentSessionOptions = {}; + let cliThinkingFromModel = false; + + if (sessionManager) { + options.sessionManager = sessionManager; + } + + // Model from CLI + // - supports --provider --model + // - supports --model / + if (parsed.model) { + const resolved = resolveCliModel({ + cliProvider: parsed.provider, + cliModel: parsed.model, + modelRegistry, + }); + if (resolved.warning) { + console.warn(chalk.yellow(`Warning: ${resolved.warning}`)); + } + if (resolved.error) { + console.error(chalk.red(resolved.error)); + process.exit(1); + } + if (resolved.model) { + options.model = resolved.model; + // Allow "--model :" as a shorthand. + // Explicit --thinking still takes precedence (applied later). + if (!parsed.thinking && resolved.thinkingLevel) { + options.thinkingLevel = resolved.thinkingLevel; + cliThinkingFromModel = true; + } + } + } + + if (!options.model && scopedModels.length > 0 && !parsed.continue && !parsed.resume) { + // Check if saved default is in scoped models - use it if so, otherwise first scoped model + const savedProvider = settingsManager.getDefaultProvider(); + const savedModelId = settingsManager.getDefaultModel(); + const savedModel = savedProvider && savedModelId ? modelRegistry.find(savedProvider, savedModelId) : undefined; + const savedInScope = savedModel ? 
scopedModels.find((sm) => modelsAreEqual(sm.model, savedModel)) : undefined; + + if (savedInScope) { + options.model = savedInScope.model; + // Use thinking level from scoped model config if explicitly set + if (!parsed.thinking && savedInScope.thinkingLevel) { + options.thinkingLevel = savedInScope.thinkingLevel; + } + } else { + options.model = scopedModels[0].model; + // Use thinking level from first scoped model if explicitly set + if (!parsed.thinking && scopedModels[0].thinkingLevel) { + options.thinkingLevel = scopedModels[0].thinkingLevel; + } + } + } + + // Thinking level from CLI (takes precedence over scoped model thinking levels set above) + if (parsed.thinking) { + options.thinkingLevel = parsed.thinking; + } + + // Scoped models for Ctrl+P cycling + // Keep thinking level undefined when not explicitly set in the model pattern. + // Undefined means "inherit current session thinking level" during cycling. + if (scopedModels.length > 0) { + options.scopedModels = scopedModels.map((sm) => ({ + model: sm.model, + thinkingLevel: sm.thinkingLevel, + })); + } + + // API key from CLI - set in authStorage + // (handled by caller before createAgentSession) + + // Tools + if (parsed.noTools) { + // --no-tools: start with no built-in tools + // --tools can still add specific ones back + if (parsed.tools && parsed.tools.length > 0) { + options.tools = parsed.tools.map((name) => allTools[name]); + } else { + options.tools = []; + } + } else if (parsed.tools) { + options.tools = parsed.tools.map((name) => allTools[name]); + } + + return { options, cliThinkingFromModel }; +} + +async function handleConfigCommand(args: string[]): Promise { + if (args[0] !== "config") { + return false; + } + + const cwd = process.cwd(); + const agentDir = getAgentDir(); + const settingsManager = SettingsManager.create(cwd, agentDir); + reportSettingsErrors(settingsManager, "config command"); + const packageManager = new DefaultPackageManager({ cwd, agentDir, settingsManager }); + + 
const resolvedPaths = await packageManager.resolve(); + + await selectConfig({ + resolvedPaths, + settingsManager, + cwd, + agentDir, + }); + + process.exit(0); +} + +export async function main(args: string[]) { + const offlineMode = args.includes("--offline") || isTruthyEnvFlag(process.env.PI_OFFLINE); + if (offlineMode) { + process.env.PI_OFFLINE = "1"; + process.env.PI_SKIP_VERSION_CHECK = "1"; + } + + if (await handlePackageCommand(args)) { + return; + } + + if (await handleConfigCommand(args)) { + return; + } + + // Run migrations (pass cwd for project-local migrations) + const { migratedAuthProviders: migratedProviders, deprecationWarnings } = runMigrations(process.cwd()); + + // First pass: parse args to get --extension paths + const firstPass = parseArgs(args); + + // Early load extensions to discover their CLI flags + const cwd = process.cwd(); + const agentDir = getAgentDir(); + const settingsManager = SettingsManager.create(cwd, agentDir); + reportSettingsErrors(settingsManager, "startup"); + const authStorage = AuthStorage.create(); + const modelRegistry = new ModelRegistry(authStorage, getModelsPath()); + + const resourceLoader = new DefaultResourceLoader({ + cwd, + agentDir, + settingsManager, + additionalExtensionPaths: firstPass.extensions, + additionalSkillPaths: firstPass.skills, + additionalPromptTemplatePaths: firstPass.promptTemplates, + additionalThemePaths: firstPass.themes, + noExtensions: firstPass.noExtensions, + noSkills: firstPass.noSkills, + noPromptTemplates: firstPass.noPromptTemplates, + noThemes: firstPass.noThemes, + systemPrompt: firstPass.systemPrompt, + appendSystemPrompt: firstPass.appendSystemPrompt, + }); + await resourceLoader.reload(); + time("resourceLoader.reload"); + + const extensionsResult: LoadExtensionsResult = resourceLoader.getExtensions(); + for (const { path, error } of extensionsResult.errors) { + console.error(chalk.red(`Failed to load extension "${path}": ${error}`)); + } + + // Apply pending provider 
registrations from extensions immediately + // so they're available for model resolution before AgentSession is created + for (const { name, config } of extensionsResult.runtime.pendingProviderRegistrations) { + modelRegistry.registerProvider(name, config); + } + extensionsResult.runtime.pendingProviderRegistrations = []; + + const extensionFlags = new Map(); + for (const ext of extensionsResult.extensions) { + for (const [name, flag] of ext.flags) { + extensionFlags.set(name, { type: flag.type }); + } + } + + // Second pass: parse args with extension flags + const parsed = parseArgs(args, extensionFlags); + + // Pass flag values to extensions via runtime + for (const [name, value] of parsed.unknownFlags) { + extensionsResult.runtime.flagValues.set(name, value); + } + + if (parsed.version) { + console.log(VERSION); + process.exit(0); + } + + if (parsed.help) { + printHelp(); + process.exit(0); + } + + if (parsed.listModels !== undefined) { + const searchPattern = typeof parsed.listModels === "string" ? parsed.listModels : undefined; + await listModels(modelRegistry, searchPattern); + process.exit(0); + } + + // Read piped stdin content (if any) - skip for RPC mode which uses stdin for JSON-RPC + if (parsed.mode !== "rpc") { + const stdinContent = await readPipedStdin(); + if (stdinContent !== undefined) { + // Force print mode since interactive mode requires a TTY for keyboard input + parsed.print = true; + // Prepend stdin content to messages + parsed.messages.unshift(stdinContent); + } + } + + if (parsed.export) { + let result: string; + try { + const outputPath = parsed.messages.length > 0 ? parsed.messages[0] : undefined; + result = await exportFromFile(parsed.export, outputPath); + } catch (error: unknown) { + const message = error instanceof Error ? 
error.message : "Failed to export session"; + console.error(chalk.red(`Error: ${message}`)); + process.exit(1); + } + console.log(`Exported to: ${result}`); + process.exit(0); + } + + if (parsed.mode === "rpc" && parsed.fileArgs.length > 0) { + console.error(chalk.red("Error: @file arguments are not supported in RPC mode")); + process.exit(1); + } + + const { initialMessage, initialImages } = await prepareInitialMessage(parsed, settingsManager.getImageAutoResize()); + const isInteractive = !parsed.print && parsed.mode === undefined; + const mode = parsed.mode || "text"; + initTheme(settingsManager.getTheme(), isInteractive); + + // Show deprecation warnings in interactive mode + if (isInteractive && deprecationWarnings.length > 0) { + await showDeprecationWarnings(deprecationWarnings); + } + + let scopedModels: ScopedModel[] = []; + const modelPatterns = parsed.models ?? settingsManager.getEnabledModels(); + if (modelPatterns && modelPatterns.length > 0) { + scopedModels = await resolveModelScope(modelPatterns, modelRegistry); + } + + // Create session manager based on CLI flags + let sessionManager = await createSessionManager(parsed, cwd, extensionsResult); + + // Handle --resume: show session picker + if (parsed.resume) { + // Initialize keybindings so session picker respects user config + KeybindingsManager.create(); + + // Compute effective session dir for resume (same logic as createSessionManager) + const effectiveSessionDir = parsed.sessionDir || (await callSessionDirectoryHook(extensionsResult, cwd)); + + const selectedPath = await selectSession( + (onProgress) => SessionManager.list(cwd, effectiveSessionDir, onProgress), + SessionManager.listAll, + ); + if (!selectedPath) { + console.log(chalk.dim("No session selected")); + stopThemeWatcher(); + process.exit(0); + } + sessionManager = SessionManager.open(selectedPath, effectiveSessionDir); + } + + const { options: sessionOptions, cliThinkingFromModel } = buildSessionOptions( + parsed, + scopedModels, + 
sessionManager, + modelRegistry, + settingsManager, + ); + sessionOptions.authStorage = authStorage; + sessionOptions.modelRegistry = modelRegistry; + sessionOptions.resourceLoader = resourceLoader; + + // Handle CLI --api-key as runtime override (not persisted) + if (parsed.apiKey) { + if (!sessionOptions.model) { + console.error( + chalk.red("--api-key requires a model to be specified via --model, --provider/--model, or --models"), + ); + process.exit(1); + } + authStorage.setRuntimeApiKey(sessionOptions.model.provider, parsed.apiKey); + } + + const { session, modelFallbackMessage } = await createAgentSession(sessionOptions); + + if (!isInteractive && !session.model) { + console.error(chalk.red("No models available.")); + console.error(chalk.yellow("\nSet an API key environment variable:")); + console.error(" ANTHROPIC_API_KEY, OPENAI_API_KEY, GEMINI_API_KEY, etc."); + console.error(chalk.yellow(`\nOr create ${getModelsPath()}`)); + process.exit(1); + } + + // Clamp thinking level to model capabilities for CLI-provided thinking levels. + // This covers both --thinking and --model :. + const cliThinkingOverride = parsed.thinking !== undefined || cliThinkingFromModel; + if (session.model && cliThinkingOverride) { + let effectiveThinking = session.thinkingLevel; + if (!session.model.reasoning) { + effectiveThinking = "off"; + } else if (effectiveThinking === "xhigh" && !supportsXhigh(session.model)) { + effectiveThinking = "high"; + } + if (effectiveThinking !== session.thinkingLevel) { + session.setThinkingLevel(effectiveThinking); + } + } + + if (mode === "rpc") { + await runRpcMode(session); + } else if (isInteractive) { + if (scopedModels.length > 0 && (parsed.verbose || !settingsManager.getQuietStartup())) { + const modelList = scopedModels + .map((sm) => { + const thinkingStr = sm.thinkingLevel ? 
`:${sm.thinkingLevel}` : ""; + return `${sm.model.id}${thinkingStr}`; + }) + .join(", "); + console.log(chalk.dim(`Model scope: ${modelList} ${chalk.gray("(Ctrl+P to cycle)")}`)); + } + + printTimings(); + const mode = new InteractiveMode(session, { + migratedProviders, + modelFallbackMessage, + initialMessage, + initialImages, + initialMessages: parsed.messages, + verbose: parsed.verbose, + }); + await mode.run(); + } else { + await runPrintMode(session, { + mode, + messages: parsed.messages, + initialMessage, + initialImages, + }); + stopThemeWatcher(); + if (process.stdout.writableLength > 0) { + await new Promise((resolve) => process.stdout.once("drain", resolve)); + } + process.exit(0); + } +} diff --git a/packages/pi-coding-agent/src/migrations.ts b/packages/pi-coding-agent/src/migrations.ts new file mode 100644 index 000000000..2721ed8d2 --- /dev/null +++ b/packages/pi-coding-agent/src/migrations.ts @@ -0,0 +1,295 @@ +/** + * One-time migrations that run on startup. + */ + +import chalk from "chalk"; +import { existsSync, mkdirSync, readdirSync, readFileSync, renameSync, rmSync, writeFileSync } from "fs"; +import { dirname, join } from "path"; +import { CONFIG_DIR_NAME, getAgentDir, getBinDir } from "./config.js"; + +const MIGRATION_GUIDE_URL = + "https://github.com/badlogic/pi-mono/blob/main/packages/coding-agent/CHANGELOG.md#extensions-migration"; +const EXTENSIONS_DOC_URL = "https://github.com/badlogic/pi-mono/blob/main/packages/coding-agent/docs/extensions.md"; + +/** + * Migrate legacy oauth.json and settings.json apiKeys to auth.json. 
+ * + * @returns Array of provider names that were migrated + */ +export function migrateAuthToAuthJson(): string[] { + const agentDir = getAgentDir(); + const authPath = join(agentDir, "auth.json"); + const oauthPath = join(agentDir, "oauth.json"); + const settingsPath = join(agentDir, "settings.json"); + + // Skip if auth.json already exists + if (existsSync(authPath)) return []; + + const migrated: Record = {}; + const providers: string[] = []; + + // Migrate oauth.json + if (existsSync(oauthPath)) { + try { + const oauth = JSON.parse(readFileSync(oauthPath, "utf-8")); + for (const [provider, cred] of Object.entries(oauth)) { + migrated[provider] = { type: "oauth", ...(cred as object) }; + providers.push(provider); + } + renameSync(oauthPath, `${oauthPath}.migrated`); + } catch { + // Skip on error + } + } + + // Migrate settings.json apiKeys + if (existsSync(settingsPath)) { + try { + const content = readFileSync(settingsPath, "utf-8"); + const settings = JSON.parse(content); + if (settings.apiKeys && typeof settings.apiKeys === "object") { + for (const [provider, key] of Object.entries(settings.apiKeys)) { + if (!migrated[provider] && typeof key === "string") { + migrated[provider] = { type: "api_key", key }; + providers.push(provider); + } + } + delete settings.apiKeys; + writeFileSync(settingsPath, JSON.stringify(settings, null, 2)); + } + } catch { + // Skip on error + } + } + + if (Object.keys(migrated).length > 0) { + mkdirSync(dirname(authPath), { recursive: true }); + writeFileSync(authPath, JSON.stringify(migrated, null, 2), { mode: 0o600 }); + } + + return providers; +} + +/** + * Migrate sessions from ~/.pi/agent/*.jsonl to proper session directories. + * + * Bug in v0.30.0: Sessions were saved to ~/.pi/agent/ instead of + * ~/.pi/agent/sessions//. This migration moves them + * to the correct location based on the cwd in their session header. 
+ * + * See: https://github.com/badlogic/pi-mono/issues/320 + */ +export function migrateSessionsFromAgentRoot(): void { + const agentDir = getAgentDir(); + + // Find all .jsonl files directly in agentDir (not in subdirectories) + let files: string[]; + try { + files = readdirSync(agentDir) + .filter((f) => f.endsWith(".jsonl")) + .map((f) => join(agentDir, f)); + } catch { + return; + } + + if (files.length === 0) return; + + for (const file of files) { + try { + // Read first line to get session header + const content = readFileSync(file, "utf8"); + const firstLine = content.split("\n")[0]; + if (!firstLine?.trim()) continue; + + const header = JSON.parse(firstLine); + if (header.type !== "session" || !header.cwd) continue; + + const cwd: string = header.cwd; + + // Compute the correct session directory (same encoding as session-manager.ts) + const safePath = `--${cwd.replace(/^[/\\]/, "").replace(/[/\\:]/g, "-")}--`; + const correctDir = join(agentDir, "sessions", safePath); + + // Create directory if needed + if (!existsSync(correctDir)) { + mkdirSync(correctDir, { recursive: true }); + } + + // Move the file + const fileName = file.split("/").pop() || file.split("\\").pop(); + const newPath = join(correctDir, fileName!); + + if (existsSync(newPath)) continue; // Skip if target exists + + renameSync(file, newPath); + } catch { + // Skip files that can't be migrated + } + } +} + +/** + * Migrate commands/ to prompts/ if needed. + * Works for both regular directories and symlinks. 
+ */ +function migrateCommandsToPrompts(baseDir: string, label: string): boolean { + const commandsDir = join(baseDir, "commands"); + const promptsDir = join(baseDir, "prompts"); + + if (existsSync(commandsDir) && !existsSync(promptsDir)) { + try { + renameSync(commandsDir, promptsDir); + console.log(chalk.green(`Migrated ${label} commands/ → prompts/`)); + return true; + } catch (err) { + console.log( + chalk.yellow( + `Warning: Could not migrate ${label} commands/ to prompts/: ${err instanceof Error ? err.message : err}`, + ), + ); + } + } + return false; +} + +/** + * Move fd/rg binaries from tools/ to bin/ if they exist. + */ +function migrateToolsToBin(): void { + const agentDir = getAgentDir(); + const toolsDir = join(agentDir, "tools"); + const binDir = getBinDir(); + + if (!existsSync(toolsDir)) return; + + const binaries = ["fd", "rg", "fd.exe", "rg.exe"]; + let movedAny = false; + + for (const bin of binaries) { + const oldPath = join(toolsDir, bin); + const newPath = join(binDir, bin); + + if (existsSync(oldPath)) { + if (!existsSync(binDir)) { + mkdirSync(binDir, { recursive: true }); + } + if (!existsSync(newPath)) { + try { + renameSync(oldPath, newPath); + movedAny = true; + } catch { + // Ignore errors + } + } else { + // Target exists, just delete the old one + try { + rmSync?.(oldPath, { force: true }); + } catch { + // Ignore + } + } + } + } + + if (movedAny) { + console.log(chalk.green(`Migrated managed binaries tools/ → bin/`)); + } +} + +/** + * Check for deprecated hooks/ and tools/ directories. + * Note: tools/ may contain fd/rg binaries extracted by pi, so only warn if it has other files. + */ +function checkDeprecatedExtensionDirs(baseDir: string, label: string): string[] { + const hooksDir = join(baseDir, "hooks"); + const toolsDir = join(baseDir, "tools"); + const warnings: string[] = []; + + if (existsSync(hooksDir)) { + warnings.push(`${label} hooks/ directory found. 
Hooks have been renamed to extensions.`); + } + + if (existsSync(toolsDir)) { + // Check if tools/ contains anything other than fd/rg (which are auto-extracted binaries) + try { + const entries = readdirSync(toolsDir); + const customTools = entries.filter((e) => { + const lower = e.toLowerCase(); + return ( + lower !== "fd" && lower !== "rg" && lower !== "fd.exe" && lower !== "rg.exe" && !e.startsWith(".") // Ignore .DS_Store and other hidden files + ); + }); + if (customTools.length > 0) { + warnings.push( + `${label} tools/ directory contains custom tools. Custom tools have been merged into extensions.`, + ); + } + } catch { + // Ignore read errors + } + } + + return warnings; +} + +/** + * Run extension system migrations (commands→prompts) and collect warnings about deprecated directories. + */ +function migrateExtensionSystem(cwd: string): string[] { + const agentDir = getAgentDir(); + const projectDir = join(cwd, CONFIG_DIR_NAME); + + // Migrate commands/ to prompts/ + migrateCommandsToPrompts(agentDir, "Global"); + migrateCommandsToPrompts(projectDir, "Project"); + + // Check for deprecated directories + const warnings = [ + ...checkDeprecatedExtensionDirs(agentDir, "Global"), + ...checkDeprecatedExtensionDirs(projectDir, "Project"), + ]; + + return warnings; +} + +/** + * Print deprecation warnings and wait for keypress. 
+ */ +export async function showDeprecationWarnings(warnings: string[]): Promise { + if (warnings.length === 0) return; + + for (const warning of warnings) { + console.log(chalk.yellow(`Warning: ${warning}`)); + } + console.log(chalk.yellow(`\nMove your extensions to the extensions/ directory.`)); + console.log(chalk.yellow(`Migration guide: ${MIGRATION_GUIDE_URL}`)); + console.log(chalk.yellow(`Documentation: ${EXTENSIONS_DOC_URL}`)); + console.log(chalk.dim(`\nPress any key to continue...`)); + + await new Promise((resolve) => { + process.stdin.setRawMode?.(true); + process.stdin.resume(); + process.stdin.once("data", () => { + process.stdin.setRawMode?.(false); + process.stdin.pause(); + resolve(); + }); + }); + console.log(); +} + +/** + * Run all migrations. Called once on startup. + * + * @returns Object with migration results and deprecation warnings + */ +export function runMigrations(cwd: string = process.cwd()): { + migratedAuthProviders: string[]; + deprecationWarnings: string[]; +} { + const migratedAuthProviders = migrateAuthToAuthJson(); + migrateSessionsFromAgentRoot(); + migrateToolsToBin(); + const deprecationWarnings = migrateExtensionSystem(cwd); + return { migratedAuthProviders, deprecationWarnings }; +} diff --git a/packages/pi-coding-agent/src/modes/index.ts b/packages/pi-coding-agent/src/modes/index.ts new file mode 100644 index 000000000..205e9f54c --- /dev/null +++ b/packages/pi-coding-agent/src/modes/index.ts @@ -0,0 +1,9 @@ +/** + * Run modes for the coding agent. 
+ */ + +export { InteractiveMode, type InteractiveModeOptions } from "./interactive/interactive-mode.js"; +export { type PrintModeOptions, runPrintMode } from "./print-mode.js"; +export { type ModelInfo, RpcClient, type RpcClientOptions, type RpcEventListener } from "./rpc/rpc-client.js"; +export { runRpcMode } from "./rpc/rpc-mode.js"; +export type { RpcCommand, RpcResponse, RpcSessionState } from "./rpc/rpc-types.js"; diff --git a/packages/pi-coding-agent/src/modes/interactive/components/armin.ts b/packages/pi-coding-agent/src/modes/interactive/components/armin.ts new file mode 100644 index 000000000..afa0d780a --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/armin.ts @@ -0,0 +1,382 @@ +/** + * Armin says hi! A fun easter egg with animated XBM art. + */ + +import type { Component, TUI } from "@gsd/pi-tui"; +import { theme } from "../theme/theme.js"; + +// XBM image: 31x36 pixels, LSB first, 1=background, 0=foreground +const WIDTH = 31; +const HEIGHT = 36; +const BITS = [ + 0xff, 0xff, 0xff, 0x7f, 0xff, 0xf0, 0xff, 0x7f, 0xff, 0xed, 0xff, 0x7f, 0xff, 0xdb, 0xff, 0x7f, 0xff, 0xb7, 0xff, + 0x7f, 0xff, 0x77, 0xfe, 0x7f, 0x3f, 0xf8, 0xfe, 0x7f, 0xdf, 0xff, 0xfe, 0x7f, 0xdf, 0x3f, 0xfc, 0x7f, 0x9f, 0xc3, + 0xfb, 0x7f, 0x6f, 0xfc, 0xf4, 0x7f, 0xf7, 0x0f, 0xf7, 0x7f, 0xf7, 0xff, 0xf7, 0x7f, 0xf7, 0xff, 0xe3, 0x7f, 0xf7, + 0x07, 0xe8, 0x7f, 0xef, 0xf8, 0x67, 0x70, 0x0f, 0xff, 0xbb, 0x6f, 0xf1, 0x00, 0xd0, 0x5b, 0xfd, 0x3f, 0xec, 0x53, + 0xc1, 0xff, 0xef, 0x57, 0x9f, 0xfd, 0xee, 0x5f, 0x9f, 0xfc, 0xae, 0x5f, 0x1f, 0x78, 0xac, 0x5f, 0x3f, 0x00, 0x50, + 0x6c, 0x7f, 0x00, 0xdc, 0x77, 0xff, 0xc0, 0x3f, 0x78, 0xff, 0x01, 0xf8, 0x7f, 0xff, 0x03, 0x9c, 0x78, 0xff, 0x07, + 0x8c, 0x7c, 0xff, 0x0f, 0xce, 0x78, 0xff, 0xff, 0xcf, 0x7f, 0xff, 0xff, 0xcf, 0x78, 0xff, 0xff, 0xdf, 0x78, 0xff, + 0xff, 0xdf, 0x7d, 0xff, 0xff, 0x3f, 0x7e, 0xff, 0xff, 0xff, 0x7f, +]; + +const BYTES_PER_ROW = Math.ceil(WIDTH / 8); +const DISPLAY_HEIGHT = Math.ceil(HEIGHT / 2); // 
Half-block rendering + +type Effect = "typewriter" | "scanline" | "rain" | "fade" | "crt" | "glitch" | "dissolve"; + +const EFFECTS: Effect[] = ["typewriter", "scanline", "rain", "fade", "crt", "glitch", "dissolve"]; + +// Get pixel at (x, y): true = foreground, false = background +function getPixel(x: number, y: number): boolean { + if (y >= HEIGHT) return false; + const byteIndex = y * BYTES_PER_ROW + Math.floor(x / 8); + const bitIndex = x % 8; + return ((BITS[byteIndex] >> bitIndex) & 1) === 0; +} + +// Get the character for a cell (2 vertical pixels packed) +function getChar(x: number, row: number): string { + const upper = getPixel(x, row * 2); + const lower = getPixel(x, row * 2 + 1); + if (upper && lower) return "█"; + if (upper) return "▀"; + if (lower) return "▄"; + return " "; +} + +// Build the final image grid +function buildFinalGrid(): string[][] { + const grid: string[][] = []; + for (let row = 0; row < DISPLAY_HEIGHT; row++) { + const line: string[] = []; + for (let x = 0; x < WIDTH; x++) { + line.push(getChar(x, row)); + } + grid.push(line); + } + return grid; +} + +export class ArminComponent implements Component { + private ui: TUI; + private interval: ReturnType | null = null; + private effect: Effect; + private finalGrid: string[][]; + private currentGrid: string[][]; + private effectState: Record = {}; + private cachedLines: string[] = []; + private cachedWidth = 0; + private gridVersion = 0; + private cachedVersion = -1; + + constructor(ui: TUI) { + this.ui = ui; + this.effect = EFFECTS[Math.floor(Math.random() * EFFECTS.length)]; + this.finalGrid = buildFinalGrid(); + this.currentGrid = this.createEmptyGrid(); + + this.initEffect(); + this.startAnimation(); + } + + invalidate(): void { + this.cachedWidth = 0; + } + + render(width: number): string[] { + if (width === this.cachedWidth && this.cachedVersion === this.gridVersion) { + return this.cachedLines; + } + + const padding = 1; + const availableWidth = width - padding; + + 
this.cachedLines = this.currentGrid.map((row) => { + // Clip row to available width before applying color + const clipped = row.slice(0, availableWidth).join(""); + const padRight = Math.max(0, width - padding - clipped.length); + return ` ${theme.fg("accent", clipped)}${" ".repeat(padRight)}`; + }); + + // Add "ARMIN SAYS HI" at the end + const message = "ARMIN SAYS HI"; + const msgPadRight = Math.max(0, width - padding - message.length); + this.cachedLines.push(` ${theme.fg("accent", message)}${" ".repeat(msgPadRight)}`); + + this.cachedWidth = width; + this.cachedVersion = this.gridVersion; + + return this.cachedLines; + } + + private createEmptyGrid(): string[][] { + return Array.from({ length: DISPLAY_HEIGHT }, () => Array(WIDTH).fill(" ")); + } + + private initEffect(): void { + switch (this.effect) { + case "typewriter": + this.effectState = { pos: 0 }; + break; + case "scanline": + this.effectState = { row: 0 }; + break; + case "rain": + // Track falling position for each column + this.effectState = { + drops: Array.from({ length: WIDTH }, () => ({ + y: -Math.floor(Math.random() * DISPLAY_HEIGHT * 2), + settled: 0, + })), + }; + break; + case "fade": { + // Shuffle all pixel positions + const positions: [number, number][] = []; + for (let row = 0; row < DISPLAY_HEIGHT; row++) { + for (let x = 0; x < WIDTH; x++) { + positions.push([row, x]); + } + } + // Fisher-Yates shuffle + for (let i = positions.length - 1; i > 0; i--) { + const j = Math.floor(Math.random() * (i + 1)); + [positions[i], positions[j]] = [positions[j], positions[i]]; + } + this.effectState = { positions, idx: 0 }; + break; + } + case "crt": + this.effectState = { expansion: 0 }; + break; + case "glitch": + this.effectState = { phase: 0, glitchFrames: 8 }; + break; + case "dissolve": { + // Start with random noise + this.currentGrid = Array.from({ length: DISPLAY_HEIGHT }, () => + Array.from({ length: WIDTH }, () => { + const chars = [" ", "░", "▒", "▓", "█", "▀", "▄"]; + return 
chars[Math.floor(Math.random() * chars.length)]; + }), + ); + // Shuffle positions for gradual resolve + const dissolvePositions: [number, number][] = []; + for (let row = 0; row < DISPLAY_HEIGHT; row++) { + for (let x = 0; x < WIDTH; x++) { + dissolvePositions.push([row, x]); + } + } + for (let i = dissolvePositions.length - 1; i > 0; i--) { + const j = Math.floor(Math.random() * (i + 1)); + [dissolvePositions[i], dissolvePositions[j]] = [dissolvePositions[j], dissolvePositions[i]]; + } + this.effectState = { positions: dissolvePositions, idx: 0 }; + break; + } + } + } + + private startAnimation(): void { + const fps = this.effect === "glitch" ? 60 : 30; + this.interval = setInterval(() => { + const done = this.tickEffect(); + this.updateDisplay(); + this.ui.requestRender(); + if (done) { + this.stopAnimation(); + } + }, 1000 / fps); + } + + private stopAnimation(): void { + if (this.interval) { + clearInterval(this.interval); + this.interval = null; + } + } + + private tickEffect(): boolean { + switch (this.effect) { + case "typewriter": + return this.tickTypewriter(); + case "scanline": + return this.tickScanline(); + case "rain": + return this.tickRain(); + case "fade": + return this.tickFade(); + case "crt": + return this.tickCrt(); + case "glitch": + return this.tickGlitch(); + case "dissolve": + return this.tickDissolve(); + default: + return true; + } + } + + private tickTypewriter(): boolean { + const state = this.effectState as { pos: number }; + const pixelsPerFrame = 3; + + for (let i = 0; i < pixelsPerFrame; i++) { + const row = Math.floor(state.pos / WIDTH); + const x = state.pos % WIDTH; + if (row >= DISPLAY_HEIGHT) return true; + this.currentGrid[row][x] = this.finalGrid[row][x]; + state.pos++; + } + return false; + } + + private tickScanline(): boolean { + const state = this.effectState as { row: number }; + if (state.row >= DISPLAY_HEIGHT) return true; + + // Copy row + for (let x = 0; x < WIDTH; x++) { + this.currentGrid[state.row][x] = 
this.finalGrid[state.row][x]; + } + state.row++; + return false; + } + + private tickRain(): boolean { + const state = this.effectState as { + drops: { y: number; settled: number }[]; + }; + + let allSettled = true; + this.currentGrid = this.createEmptyGrid(); + + for (let x = 0; x < WIDTH; x++) { + const drop = state.drops[x]; + + // Draw settled pixels + for (let row = DISPLAY_HEIGHT - 1; row >= DISPLAY_HEIGHT - drop.settled; row--) { + if (row >= 0) { + this.currentGrid[row][x] = this.finalGrid[row][x]; + } + } + + // Check if this column is done + if (drop.settled >= DISPLAY_HEIGHT) continue; + + allSettled = false; + + // Find the target row for this column (lowest non-space pixel) + let targetRow = -1; + for (let row = DISPLAY_HEIGHT - 1 - drop.settled; row >= 0; row--) { + if (this.finalGrid[row][x] !== " ") { + targetRow = row; + break; + } + } + + // Move drop down + drop.y++; + + // Draw falling drop + if (drop.y >= 0 && drop.y < DISPLAY_HEIGHT) { + if (targetRow >= 0 && drop.y >= targetRow) { + // Settle + drop.settled = DISPLAY_HEIGHT - targetRow; + drop.y = -Math.floor(Math.random() * 5) - 1; + } else { + // Still falling + this.currentGrid[drop.y][x] = "▓"; + } + } + } + + return allSettled; + } + + private tickFade(): boolean { + const state = this.effectState as { positions: [number, number][]; idx: number }; + const pixelsPerFrame = 15; + + for (let i = 0; i < pixelsPerFrame; i++) { + if (state.idx >= state.positions.length) return true; + const [row, x] = state.positions[state.idx]; + this.currentGrid[row][x] = this.finalGrid[row][x]; + state.idx++; + } + return false; + } + + private tickCrt(): boolean { + const state = this.effectState as { expansion: number }; + const midRow = Math.floor(DISPLAY_HEIGHT / 2); + + this.currentGrid = this.createEmptyGrid(); + + // Draw from middle expanding outward + const top = midRow - state.expansion; + const bottom = midRow + state.expansion; + + for (let row = Math.max(0, top); row <= Math.min(DISPLAY_HEIGHT 
- 1, bottom); row++) { + for (let x = 0; x < WIDTH; x++) { + this.currentGrid[row][x] = this.finalGrid[row][x]; + } + } + + state.expansion++; + return state.expansion > DISPLAY_HEIGHT; + } + + private tickGlitch(): boolean { + const state = this.effectState as { phase: number; glitchFrames: number }; + + if (state.phase < state.glitchFrames) { + // Glitch phase: show corrupted version + this.currentGrid = this.finalGrid.map((row) => { + const offset = Math.floor(Math.random() * 7) - 3; + const glitchRow = [...row]; + + // Random horizontal offset + if (Math.random() < 0.3) { + const shifted = glitchRow.slice(offset).concat(glitchRow.slice(0, offset)); + return shifted.slice(0, WIDTH); + } + + // Random vertical swap + if (Math.random() < 0.2) { + const swapRow = Math.floor(Math.random() * DISPLAY_HEIGHT); + return [...this.finalGrid[swapRow]]; + } + + return glitchRow; + }); + state.phase++; + return false; + } + + // Final frame: show clean image + this.currentGrid = this.finalGrid.map((row) => [...row]); + return true; + } + + private tickDissolve(): boolean { + const state = this.effectState as { positions: [number, number][]; idx: number }; + const pixelsPerFrame = 20; + + for (let i = 0; i < pixelsPerFrame; i++) { + if (state.idx >= state.positions.length) return true; + const [row, x] = state.positions[state.idx]; + this.currentGrid[row][x] = this.finalGrid[row][x]; + state.idx++; + } + return false; + } + + private updateDisplay(): void { + this.gridVersion++; + } + + dispose(): void { + this.stopAnimation(); + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/assistant-message.ts b/packages/pi-coding-agent/src/modes/interactive/components/assistant-message.ts new file mode 100644 index 000000000..fe78c54e9 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/assistant-message.ts @@ -0,0 +1,115 @@ +import type { AssistantMessage } from "@gsd/pi-ai"; +import { Container, Markdown, type MarkdownTheme, Spacer, 
Text } from "@gsd/pi-tui"; +import { getMarkdownTheme, theme } from "../theme/theme.js"; + +/** + * Component that renders a complete assistant message + */ +export class AssistantMessageComponent extends Container { + private contentContainer: Container; + private hideThinkingBlock: boolean; + private markdownTheme: MarkdownTheme; + private lastMessage?: AssistantMessage; + + constructor( + message?: AssistantMessage, + hideThinkingBlock = false, + markdownTheme: MarkdownTheme = getMarkdownTheme(), + ) { + super(); + + this.hideThinkingBlock = hideThinkingBlock; + this.markdownTheme = markdownTheme; + + // Container for text/thinking content + this.contentContainer = new Container(); + this.addChild(this.contentContainer); + + if (message) { + this.updateContent(message); + } + } + + override invalidate(): void { + super.invalidate(); + if (this.lastMessage) { + this.updateContent(this.lastMessage); + } + } + + setHideThinkingBlock(hide: boolean): void { + this.hideThinkingBlock = hide; + } + + updateContent(message: AssistantMessage): void { + this.lastMessage = message; + + // Clear content container + this.contentContainer.clear(); + + const hasVisibleContent = message.content.some( + (c) => (c.type === "text" && c.text.trim()) || (c.type === "thinking" && c.thinking.trim()), + ); + + if (hasVisibleContent) { + this.contentContainer.addChild(new Spacer(1)); + } + + // Render content in order + for (let i = 0; i < message.content.length; i++) { + const content = message.content[i]; + if (content.type === "text" && content.text.trim()) { + // Assistant text messages with no background - trim the text + // Set paddingY=0 to avoid extra spacing before tool executions + this.contentContainer.addChild(new Markdown(content.text.trim(), 1, 0, this.markdownTheme)); + } else if (content.type === "thinking" && content.thinking.trim()) { + // Add spacing only when another visible assistant content block follows. 
+ // This avoids a superfluous blank line before separately-rendered tool execution blocks. + const hasVisibleContentAfter = message.content + .slice(i + 1) + .some((c) => (c.type === "text" && c.text.trim()) || (c.type === "thinking" && c.thinking.trim())); + + if (this.hideThinkingBlock) { + // Show static "Thinking..." label when hidden + this.contentContainer.addChild(new Text(theme.italic(theme.fg("thinkingText", "Thinking...")), 1, 0)); + if (hasVisibleContentAfter) { + this.contentContainer.addChild(new Spacer(1)); + } + } else { + // Thinking traces in thinkingText color, italic + this.contentContainer.addChild( + new Markdown(content.thinking.trim(), 1, 0, this.markdownTheme, { + color: (text: string) => theme.fg("thinkingText", text), + italic: true, + }), + ); + if (hasVisibleContentAfter) { + this.contentContainer.addChild(new Spacer(1)); + } + } + } + } + + // Check if aborted - show after partial content + // But only if there are no tool calls (tool execution components will show the error) + const hasToolCalls = message.content.some((c) => c.type === "toolCall"); + if (!hasToolCalls) { + if (message.stopReason === "aborted") { + const abortMessage = + message.errorMessage && message.errorMessage !== "Request was aborted" + ? 
message.errorMessage + : "Operation aborted"; + if (hasVisibleContent) { + this.contentContainer.addChild(new Spacer(1)); + } else { + this.contentContainer.addChild(new Spacer(1)); + } + this.contentContainer.addChild(new Text(theme.fg("error", abortMessage), 1, 0)); + } else if (message.stopReason === "error") { + const errorMsg = message.errorMessage || "Unknown error"; + this.contentContainer.addChild(new Spacer(1)); + this.contentContainer.addChild(new Text(theme.fg("error", `Error: ${errorMsg}`), 1, 0)); + } + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/bash-execution.ts b/packages/pi-coding-agent/src/modes/interactive/components/bash-execution.ts new file mode 100644 index 000000000..cec80e097 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/bash-execution.ts @@ -0,0 +1,210 @@ +/** + * Component for displaying bash command execution with streaming output. + */ + +import { Container, Loader, Spacer, Text, type TUI } from "@gsd/pi-tui"; +import stripAnsi from "strip-ansi"; +import { + DEFAULT_MAX_BYTES, + DEFAULT_MAX_LINES, + type TruncationResult, + truncateTail, +} from "../../../core/tools/truncate.js"; +import { theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { editorKey, keyHint } from "./keybinding-hints.js"; +import { truncateToVisualLines } from "./visual-truncate.js"; + +// Preview line limit when not expanded (matches tool execution behavior) +const PREVIEW_LINES = 20; + +export class BashExecutionComponent extends Container { + private command: string; + private outputLines: string[] = []; + private status: "running" | "complete" | "cancelled" | "error" = "running"; + private exitCode: number | undefined = undefined; + private loader: Loader; + private truncationResult?: TruncationResult; + private fullOutputPath?: string; + private expanded = false; + private contentContainer: Container; + private ui: TUI; + + constructor(command: 
string, ui: TUI, excludeFromContext = false) { + super(); + this.command = command; + this.ui = ui; + + // Use dim border for excluded-from-context commands (!! prefix) + const colorKey = excludeFromContext ? "dim" : "bashMode"; + const borderColor = (str: string) => theme.fg(colorKey, str); + + // Add spacer + this.addChild(new Spacer(1)); + + // Top border + this.addChild(new DynamicBorder(borderColor)); + + // Content container (holds dynamic content between borders) + this.contentContainer = new Container(); + this.addChild(this.contentContainer); + + // Command header + const header = new Text(theme.fg(colorKey, theme.bold(`$ ${command}`)), 1, 0); + this.contentContainer.addChild(header); + + // Loader + this.loader = new Loader( + ui, + (spinner) => theme.fg(colorKey, spinner), + (text) => theme.fg("muted", text), + `Running... (${editorKey("selectCancel")} to cancel)`, // Plain text for loader + ); + this.contentContainer.addChild(this.loader); + + // Bottom border + this.addChild(new DynamicBorder(borderColor)); + } + + /** + * Set whether the output is expanded (shows full output) or collapsed (preview only). 
+ */ + setExpanded(expanded: boolean): void { + this.expanded = expanded; + this.updateDisplay(); + } + + override invalidate(): void { + super.invalidate(); + this.updateDisplay(); + } + + appendOutput(chunk: string): void { + // Strip ANSI codes and normalize line endings + // Note: binary data is already sanitized in tui-renderer.ts executeBashCommand + const clean = stripAnsi(chunk).replace(/\r\n/g, "\n").replace(/\r/g, "\n"); + + // Append to output lines + const newLines = clean.split("\n"); + if (this.outputLines.length > 0 && newLines.length > 0) { + // Append first chunk to last line (incomplete line continuation) + this.outputLines[this.outputLines.length - 1] += newLines[0]; + this.outputLines.push(...newLines.slice(1)); + } else { + this.outputLines.push(...newLines); + } + + this.updateDisplay(); + } + + setComplete( + exitCode: number | undefined, + cancelled: boolean, + truncationResult?: TruncationResult, + fullOutputPath?: string, + ): void { + this.exitCode = exitCode; + this.status = cancelled + ? "cancelled" + : exitCode !== 0 && exitCode !== undefined && exitCode !== null + ? "error" + : "complete"; + this.truncationResult = truncationResult; + this.fullOutputPath = fullOutputPath; + + // Stop loader + this.loader.stop(); + + this.updateDisplay(); + } + + private updateDisplay(): void { + // Apply truncation for LLM context limits (same limits as bash tool) + const fullOutput = this.outputLines.join("\n"); + const contextTruncation = truncateTail(fullOutput, { + maxLines: DEFAULT_MAX_LINES, + maxBytes: DEFAULT_MAX_BYTES, + }); + + // Get the lines to potentially display (after context truncation) + const availableLines = contextTruncation.content ? 
contextTruncation.content.split("\n") : []; + + // Apply preview truncation based on expanded state + const previewLogicalLines = availableLines.slice(-PREVIEW_LINES); + const hiddenLineCount = availableLines.length - previewLogicalLines.length; + + // Rebuild content container + this.contentContainer.clear(); + + // Command header + const header = new Text(theme.fg("bashMode", theme.bold(`$ ${this.command}`)), 1, 0); + this.contentContainer.addChild(header); + + // Output + if (availableLines.length > 0) { + if (this.expanded) { + // Show all lines + const displayText = availableLines.map((line) => theme.fg("muted", line)).join("\n"); + this.contentContainer.addChild(new Text(`\n${displayText}`, 1, 0)); + } else { + // Use shared visual truncation utility + const styledOutput = previewLogicalLines.map((line) => theme.fg("muted", line)).join("\n"); + const { visualLines } = truncateToVisualLines( + `\n${styledOutput}`, + PREVIEW_LINES, + this.ui.terminal.columns, + 1, // padding + ); + this.contentContainer.addChild({ render: () => visualLines, invalidate: () => {} }); + } + } + + // Loader or status + if (this.status === "running") { + this.contentContainer.addChild(this.loader); + } else { + const statusParts: string[] = []; + + // Show how many lines are hidden (collapsed preview) + if (hiddenLineCount > 0) { + if (this.expanded) { + statusParts.push(`(${keyHint("expandTools", "to collapse")})`); + } else { + statusParts.push( + `${theme.fg("muted", `... 
${hiddenLineCount} more lines`)} (${keyHint("expandTools", "to expand")})`, + ); + } + } + + if (this.status === "cancelled") { + statusParts.push(theme.fg("warning", "(cancelled)")); + } else if (this.status === "error") { + statusParts.push(theme.fg("error", `(exit ${this.exitCode})`)); + } + + // Add truncation warning (context truncation, not preview truncation) + const wasTruncated = this.truncationResult?.truncated || contextTruncation.truncated; + if (wasTruncated && this.fullOutputPath) { + statusParts.push(theme.fg("warning", `Output truncated. Full output: ${this.fullOutputPath}`)); + } + + if (statusParts.length > 0) { + this.contentContainer.addChild(new Text(`\n${statusParts.join("\n")}`, 1, 0)); + } + } + } + + /** + * Get the raw output for creating BashExecutionMessage. + */ + getOutput(): string { + return this.outputLines.join("\n"); + } + + /** + * Get the command that was executed. + */ + getCommand(): string { + return this.command; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/bordered-loader.ts b/packages/pi-coding-agent/src/modes/interactive/components/bordered-loader.ts new file mode 100644 index 000000000..d2610da96 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/bordered-loader.ts @@ -0,0 +1,66 @@ +import { CancellableLoader, Container, Loader, Spacer, Text, type TUI } from "@gsd/pi-tui"; +import type { Theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { keyHint } from "./keybinding-hints.js"; + +/** Loader wrapped with borders for extension UI */ +export class BorderedLoader extends Container { + private loader: CancellableLoader | Loader; + private cancellable: boolean; + private signalController?: AbortController; + + constructor(tui: TUI, theme: Theme, message: string, options?: { cancellable?: boolean }) { + super(); + this.cancellable = options?.cancellable ?? 
true; + const borderColor = (s: string) => theme.fg("border", s); + this.addChild(new DynamicBorder(borderColor)); + if (this.cancellable) { + this.loader = new CancellableLoader( + tui, + (s) => theme.fg("accent", s), + (s) => theme.fg("muted", s), + message, + ); + } else { + this.signalController = new AbortController(); + this.loader = new Loader( + tui, + (s) => theme.fg("accent", s), + (s) => theme.fg("muted", s), + message, + ); + } + this.addChild(this.loader); + if (this.cancellable) { + this.addChild(new Spacer(1)); + this.addChild(new Text(keyHint("selectCancel", "cancel"), 1, 0)); + } + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder(borderColor)); + } + + get signal(): AbortSignal { + if (this.cancellable) { + return (this.loader as CancellableLoader).signal; + } + return this.signalController?.signal ?? new AbortController().signal; + } + + set onAbort(fn: (() => void) | undefined) { + if (this.cancellable) { + (this.loader as CancellableLoader).onAbort = fn; + } + } + + handleInput(data: string): void { + if (this.cancellable) { + (this.loader as CancellableLoader).handleInput(data); + } + } + + dispose(): void { + if ("dispose" in this.loader && typeof this.loader.dispose === "function") { + this.loader.dispose(); + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/branch-summary-message.ts b/packages/pi-coding-agent/src/modes/interactive/components/branch-summary-message.ts new file mode 100644 index 000000000..c7b666a2f --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/branch-summary-message.ts @@ -0,0 +1,58 @@ +import { Box, Markdown, type MarkdownTheme, Spacer, Text } from "@gsd/pi-tui"; +import type { BranchSummaryMessage } from "../../../core/messages.js"; +import { getMarkdownTheme, theme } from "../theme/theme.js"; +import { editorKey } from "./keybinding-hints.js"; + +/** + * Component that renders a branch summary message with collapsed/expanded state. 
+ * Uses same background color as custom messages for visual consistency. + */ +export class BranchSummaryMessageComponent extends Box { + private expanded = false; + private message: BranchSummaryMessage; + private markdownTheme: MarkdownTheme; + + constructor(message: BranchSummaryMessage, markdownTheme: MarkdownTheme = getMarkdownTheme()) { + super(1, 1, (t) => theme.bg("customMessageBg", t)); + this.message = message; + this.markdownTheme = markdownTheme; + this.updateDisplay(); + } + + setExpanded(expanded: boolean): void { + this.expanded = expanded; + this.updateDisplay(); + } + + override invalidate(): void { + super.invalidate(); + this.updateDisplay(); + } + + private updateDisplay(): void { + this.clear(); + + const label = theme.fg("customMessageLabel", `\x1b[1m[branch]\x1b[22m`); + this.addChild(new Text(label, 0, 0)); + this.addChild(new Spacer(1)); + + if (this.expanded) { + const header = "**Branch Summary**\n\n"; + this.addChild( + new Markdown(header + this.message.summary, 0, 0, this.markdownTheme, { + color: (text: string) => theme.fg("customMessageText", text), + }), + ); + } else { + this.addChild( + new Text( + theme.fg("customMessageText", "Branch summary (") + + theme.fg("dim", editorKey("expandTools")) + + theme.fg("customMessageText", " to expand)"), + 0, + 0, + ), + ); + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/compaction-summary-message.ts b/packages/pi-coding-agent/src/modes/interactive/components/compaction-summary-message.ts new file mode 100644 index 000000000..ace738406 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/compaction-summary-message.ts @@ -0,0 +1,59 @@ +import { Box, Markdown, type MarkdownTheme, Spacer, Text } from "@gsd/pi-tui"; +import type { CompactionSummaryMessage } from "../../../core/messages.js"; +import { getMarkdownTheme, theme } from "../theme/theme.js"; +import { editorKey } from "./keybinding-hints.js"; + +/** + * Component that renders a 
compaction message with collapsed/expanded state. + * Uses same background color as custom messages for visual consistency. + */ +export class CompactionSummaryMessageComponent extends Box { + private expanded = false; + private message: CompactionSummaryMessage; + private markdownTheme: MarkdownTheme; + + constructor(message: CompactionSummaryMessage, markdownTheme: MarkdownTheme = getMarkdownTheme()) { + super(1, 1, (t) => theme.bg("customMessageBg", t)); + this.message = message; + this.markdownTheme = markdownTheme; + this.updateDisplay(); + } + + setExpanded(expanded: boolean): void { + this.expanded = expanded; + this.updateDisplay(); + } + + override invalidate(): void { + super.invalidate(); + this.updateDisplay(); + } + + private updateDisplay(): void { + this.clear(); + + const tokenStr = this.message.tokensBefore.toLocaleString(); + const label = theme.fg("customMessageLabel", `\x1b[1m[compaction]\x1b[22m`); + this.addChild(new Text(label, 0, 0)); + this.addChild(new Spacer(1)); + + if (this.expanded) { + const header = `**Compacted from ${tokenStr} tokens**\n\n`; + this.addChild( + new Markdown(header + this.message.summary, 0, 0, this.markdownTheme, { + color: (text: string) => theme.fg("customMessageText", text), + }), + ); + } else { + this.addChild( + new Text( + theme.fg("customMessageText", `Compacted from ${tokenStr} tokens (`) + + theme.fg("dim", editorKey("expandTools")) + + theme.fg("customMessageText", " to expand)"), + 0, + 0, + ), + ); + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/config-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/config-selector.ts new file mode 100644 index 000000000..61f6d57dd --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/config-selector.ts @@ -0,0 +1,592 @@ +/** + * TUI component for managing package resources (enable/disable) + */ + +import { basename, dirname, join, relative } from "node:path"; +import { + type Component, + 
Container, + type Focusable, + getEditorKeybindings, + Input, + matchesKey, + Spacer, + truncateToWidth, + visibleWidth, +} from "@gsd/pi-tui"; +import { CONFIG_DIR_NAME } from "../../../config.js"; +import type { PathMetadata, ResolvedPaths, ResolvedResource } from "../../../core/package-manager.js"; +import type { PackageSource, SettingsManager } from "../../../core/settings-manager.js"; +import { theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { rawKeyHint } from "./keybinding-hints.js"; + +type ResourceType = "extensions" | "skills" | "prompts" | "themes"; + +const RESOURCE_TYPE_LABELS: Record = { + extensions: "Extensions", + skills: "Skills", + prompts: "Prompts", + themes: "Themes", +}; + +interface ResourceItem { + path: string; + enabled: boolean; + metadata: PathMetadata; + resourceType: ResourceType; + displayName: string; + groupKey: string; + subgroupKey: string; +} + +interface ResourceSubgroup { + type: ResourceType; + label: string; + items: ResourceItem[]; +} + +interface ResourceGroup { + key: string; + label: string; + scope: "user" | "project" | "temporary"; + origin: "package" | "top-level"; + source: string; + subgroups: ResourceSubgroup[]; +} + +function getGroupLabel(metadata: PathMetadata): string { + if (metadata.origin === "package") { + return `${metadata.source} (${metadata.scope})`; + } + // Top-level resources + if (metadata.source === "auto") { + return metadata.scope === "user" ? "User (~/.pi/agent/)" : "Project (.pi/)"; + } + return metadata.scope === "user" ? 
"User settings" : "Project settings"; +} + +function buildGroups(resolved: ResolvedPaths): ResourceGroup[] { + const groupMap = new Map(); + + const addToGroup = (resources: ResolvedResource[], resourceType: ResourceType) => { + for (const res of resources) { + const { path, enabled, metadata } = res; + const groupKey = `${metadata.origin}:${metadata.scope}:${metadata.source}`; + + if (!groupMap.has(groupKey)) { + groupMap.set(groupKey, { + key: groupKey, + label: getGroupLabel(metadata), + scope: metadata.scope, + origin: metadata.origin, + source: metadata.source, + subgroups: [], + }); + } + + const group = groupMap.get(groupKey)!; + const subgroupKey = `${groupKey}:${resourceType}`; + + let subgroup = group.subgroups.find((sg) => sg.type === resourceType); + if (!subgroup) { + subgroup = { + type: resourceType, + label: RESOURCE_TYPE_LABELS[resourceType], + items: [], + }; + group.subgroups.push(subgroup); + } + + const fileName = basename(path); + const parentFolder = basename(dirname(path)); + let displayName: string; + if (resourceType === "extensions" && parentFolder !== "extensions") { + displayName = `${parentFolder}/${fileName}`; + } else if (resourceType === "skills" && fileName === "SKILL.md") { + displayName = parentFolder; + } else { + displayName = fileName; + } + subgroup.items.push({ + path, + enabled, + metadata, + resourceType, + displayName, + groupKey, + subgroupKey, + }); + } + }; + + addToGroup(resolved.extensions, "extensions"); + addToGroup(resolved.skills, "skills"); + addToGroup(resolved.prompts, "prompts"); + addToGroup(resolved.themes, "themes"); + + // Sort groups: packages first, then top-level; user before project + const groups = Array.from(groupMap.values()); + groups.sort((a, b) => { + if (a.origin !== b.origin) { + return a.origin === "package" ? -1 : 1; + } + if (a.scope !== b.scope) { + return a.scope === "user" ? 
-1 : 1; + } + return a.source.localeCompare(b.source); + }); + + // Sort subgroups within each group by type order, and items by name + const typeOrder: Record = { extensions: 0, skills: 1, prompts: 2, themes: 3 }; + for (const group of groups) { + group.subgroups.sort((a, b) => typeOrder[a.type] - typeOrder[b.type]); + for (const subgroup of group.subgroups) { + subgroup.items.sort((a, b) => a.displayName.localeCompare(b.displayName)); + } + } + + return groups; +} + +type FlatEntry = + | { type: "group"; group: ResourceGroup } + | { type: "subgroup"; subgroup: ResourceSubgroup; group: ResourceGroup } + | { type: "item"; item: ResourceItem }; + +class ConfigSelectorHeader implements Component { + invalidate(): void {} + + render(width: number): string[] { + const title = theme.bold("Resource Configuration"); + const sep = theme.fg("muted", " · "); + const hint = rawKeyHint("space", "toggle") + sep + rawKeyHint("esc", "close"); + const hintWidth = visibleWidth(hint); + const titleWidth = visibleWidth(title); + const spacing = Math.max(1, width - titleWidth - hintWidth); + + return [ + truncateToWidth(`${title}${" ".repeat(spacing)}${hint}`, width, ""), + theme.fg("muted", "Type to filter resources"), + ]; + } +} + +class ResourceList implements Component, Focusable { + private groups: ResourceGroup[]; + private flatItems: FlatEntry[] = []; + private filteredItems: FlatEntry[] = []; + private selectedIndex = 0; + private searchInput: Input; + private maxVisible = 15; + private settingsManager: SettingsManager; + private cwd: string; + private agentDir: string; + + public onCancel?: () => void; + public onExit?: () => void; + public onToggle?: (item: ResourceItem, newEnabled: boolean) => void; + + private _focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.searchInput.focused = value; + } + + constructor(groups: ResourceGroup[], settingsManager: SettingsManager, cwd: string, 
agentDir: string) { + this.groups = groups; + this.settingsManager = settingsManager; + this.cwd = cwd; + this.agentDir = agentDir; + this.searchInput = new Input(); + this.buildFlatList(); + this.filteredItems = [...this.flatItems]; + } + + private buildFlatList(): void { + this.flatItems = []; + for (const group of this.groups) { + this.flatItems.push({ type: "group", group }); + for (const subgroup of group.subgroups) { + this.flatItems.push({ type: "subgroup", subgroup, group }); + for (const item of subgroup.items) { + this.flatItems.push({ type: "item", item }); + } + } + } + // Start selection on first item (not header) + this.selectedIndex = this.flatItems.findIndex((e) => e.type === "item"); + if (this.selectedIndex < 0) this.selectedIndex = 0; + } + + private findNextItem(fromIndex: number, direction: 1 | -1): number { + let idx = fromIndex + direction; + while (idx >= 0 && idx < this.filteredItems.length) { + if (this.filteredItems[idx].type === "item") { + return idx; + } + idx += direction; + } + return fromIndex; // Stay at current if no item found + } + + private filterItems(query: string): void { + if (!query.trim()) { + this.filteredItems = [...this.flatItems]; + this.selectFirstItem(); + return; + } + + const lowerQuery = query.toLowerCase(); + const matchingItems = new Set(); + const matchingSubgroups = new Set(); + const matchingGroups = new Set(); + + for (const entry of this.flatItems) { + if (entry.type === "item") { + const item = entry.item; + if ( + item.displayName.toLowerCase().includes(lowerQuery) || + item.resourceType.toLowerCase().includes(lowerQuery) || + item.path.toLowerCase().includes(lowerQuery) + ) { + matchingItems.add(item); + } + } + } + + // Find which subgroups and groups contain matching items + for (const group of this.groups) { + for (const subgroup of group.subgroups) { + for (const item of subgroup.items) { + if (matchingItems.has(item)) { + matchingSubgroups.add(subgroup); + matchingGroups.add(group); + } + } + } + } 
+ + this.filteredItems = []; + for (const entry of this.flatItems) { + if (entry.type === "group" && matchingGroups.has(entry.group)) { + this.filteredItems.push(entry); + } else if (entry.type === "subgroup" && matchingSubgroups.has(entry.subgroup)) { + this.filteredItems.push(entry); + } else if (entry.type === "item" && matchingItems.has(entry.item)) { + this.filteredItems.push(entry); + } + } + + this.selectFirstItem(); + } + + private selectFirstItem(): void { + const firstItemIndex = this.filteredItems.findIndex((e) => e.type === "item"); + this.selectedIndex = firstItemIndex >= 0 ? firstItemIndex : 0; + } + + updateItem(item: ResourceItem, enabled: boolean): void { + item.enabled = enabled; + // Update in groups too + for (const group of this.groups) { + for (const subgroup of group.subgroups) { + const found = subgroup.items.find((i) => i.path === item.path && i.resourceType === item.resourceType); + if (found) { + found.enabled = enabled; + return; + } + } + } + } + + invalidate(): void {} + + render(width: number): string[] { + const lines: string[] = []; + + // Search input + lines.push(...this.searchInput.render(width)); + lines.push(""); + + if (this.filteredItems.length === 0) { + lines.push(theme.fg("muted", " No resources found")); + return lines; + } + + // Calculate visible range + const startIndex = Math.max( + 0, + Math.min(this.selectedIndex - Math.floor(this.maxVisible / 2), this.filteredItems.length - this.maxVisible), + ); + const endIndex = Math.min(startIndex + this.maxVisible, this.filteredItems.length); + + for (let i = startIndex; i < endIndex; i++) { + const entry = this.filteredItems[i]; + const isSelected = i === this.selectedIndex; + + if (entry.type === "group") { + // Main group header (no cursor) + const groupLine = theme.fg("accent", theme.bold(entry.group.label)); + lines.push(truncateToWidth(` ${groupLine}`, width, "")); + } else if (entry.type === "subgroup") { + // Subgroup header (indented, no cursor) + const subgroupLine = 
theme.fg("muted", entry.subgroup.label); + lines.push(truncateToWidth(` ${subgroupLine}`, width, "")); + } else { + // Resource item (cursor only on items) + const item = entry.item; + const cursor = isSelected ? "> " : " "; + const checkbox = item.enabled ? theme.fg("success", "[x]") : theme.fg("dim", "[ ]"); + const name = isSelected ? theme.bold(item.displayName) : item.displayName; + lines.push(truncateToWidth(`${cursor} ${checkbox} ${name}`, width, "...")); + } + } + + // Scroll indicator + if (startIndex > 0 || endIndex < this.filteredItems.length) { + lines.push(theme.fg("dim", ` (${this.selectedIndex + 1}/${this.filteredItems.length})`)); + } + + return lines; + } + + handleInput(data: string): void { + const kb = getEditorKeybindings(); + + if (kb.matches(data, "selectUp")) { + this.selectedIndex = this.findNextItem(this.selectedIndex, -1); + return; + } + if (kb.matches(data, "selectDown")) { + this.selectedIndex = this.findNextItem(this.selectedIndex, 1); + return; + } + if (kb.matches(data, "selectPageUp")) { + // Jump up by maxVisible, then find nearest item + let target = Math.max(0, this.selectedIndex - this.maxVisible); + while (target < this.filteredItems.length && this.filteredItems[target].type !== "item") { + target++; + } + if (target < this.filteredItems.length) { + this.selectedIndex = target; + } + return; + } + if (kb.matches(data, "selectPageDown")) { + // Jump down by maxVisible, then find nearest item + let target = Math.min(this.filteredItems.length - 1, this.selectedIndex + this.maxVisible); + while (target >= 0 && this.filteredItems[target].type !== "item") { + target--; + } + if (target >= 0) { + this.selectedIndex = target; + } + return; + } + if (kb.matches(data, "selectCancel")) { + this.onCancel?.(); + return; + } + if (matchesKey(data, "ctrl+c")) { + this.onExit?.(); + return; + } + if (data === " " || kb.matches(data, "selectConfirm")) { + const entry = this.filteredItems[this.selectedIndex]; + if (entry?.type === "item") { + 
const newEnabled = !entry.item.enabled; + this.toggleResource(entry.item, newEnabled); + this.updateItem(entry.item, newEnabled); + this.onToggle?.(entry.item, newEnabled); + } + return; + } + + // Pass to search input + this.searchInput.handleInput(data); + this.filterItems(this.searchInput.getValue()); + } + + private toggleResource(item: ResourceItem, enabled: boolean): void { + if (item.metadata.origin === "top-level") { + this.toggleTopLevelResource(item, enabled); + } else { + this.togglePackageResource(item, enabled); + } + } + + private toggleTopLevelResource(item: ResourceItem, enabled: boolean): void { + const scope = item.metadata.scope as "user" | "project"; + const settings = + scope === "project" ? this.settingsManager.getProjectSettings() : this.settingsManager.getGlobalSettings(); + + const arrayKey = item.resourceType as "extensions" | "skills" | "prompts" | "themes"; + const current = (settings[arrayKey] ?? []) as string[]; + + // Generate pattern for this resource + const pattern = this.getResourcePattern(item); + const disablePattern = `-${pattern}`; + const enablePattern = `+${pattern}`; + + // Filter out existing patterns for this resource + const updated = current.filter((p) => { + const stripped = p.startsWith("!") || p.startsWith("+") || p.startsWith("-") ? 
p.slice(1) : p; + return stripped !== pattern; + }); + + if (enabled) { + updated.push(enablePattern); + } else { + updated.push(disablePattern); + } + + if (scope === "project") { + if (arrayKey === "extensions") { + this.settingsManager.setProjectExtensionPaths(updated); + } else if (arrayKey === "skills") { + this.settingsManager.setProjectSkillPaths(updated); + } else if (arrayKey === "prompts") { + this.settingsManager.setProjectPromptTemplatePaths(updated); + } else if (arrayKey === "themes") { + this.settingsManager.setProjectThemePaths(updated); + } + } else { + if (arrayKey === "extensions") { + this.settingsManager.setExtensionPaths(updated); + } else if (arrayKey === "skills") { + this.settingsManager.setSkillPaths(updated); + } else if (arrayKey === "prompts") { + this.settingsManager.setPromptTemplatePaths(updated); + } else if (arrayKey === "themes") { + this.settingsManager.setThemePaths(updated); + } + } + } + + private togglePackageResource(item: ResourceItem, enabled: boolean): void { + const scope = item.metadata.scope as "user" | "project"; + const settings = + scope === "project" ? this.settingsManager.getProjectSettings() : this.settingsManager.getGlobalSettings(); + + const packages = [...(settings.packages ?? [])] as PackageSource[]; + const pkgIndex = packages.findIndex((pkg) => { + const source = typeof pkg === "string" ? pkg : pkg.source; + return source === item.metadata.source; + }); + + if (pkgIndex === -1) return; + + let pkg = packages[pkgIndex]; + + // Convert string to object form if needed + if (typeof pkg === "string") { + pkg = { source: pkg }; + packages[pkgIndex] = pkg; + } + + // Get the resource array for this type + const arrayKey = item.resourceType as "extensions" | "skills" | "prompts" | "themes"; + const current = (pkg[arrayKey] ?? 
[]) as string[]; + + // Generate pattern relative to package root + const pattern = this.getPackageResourcePattern(item); + const disablePattern = `-${pattern}`; + const enablePattern = `+${pattern}`; + + // Filter out existing patterns for this resource + const updated = current.filter((p) => { + const stripped = p.startsWith("!") || p.startsWith("+") || p.startsWith("-") ? p.slice(1) : p; + return stripped !== pattern; + }); + + if (enabled) { + updated.push(enablePattern); + } else { + updated.push(disablePattern); + } + + (pkg as Record)[arrayKey] = updated.length > 0 ? updated : undefined; + + // Clean up empty filter object + const hasFilters = ["extensions", "skills", "prompts", "themes"].some( + (k) => (pkg as Record)[k] !== undefined, + ); + if (!hasFilters) { + packages[pkgIndex] = (pkg as { source: string }).source; + } + + if (scope === "project") { + this.settingsManager.setProjectPackages(packages); + } else { + this.settingsManager.setPackages(packages); + } + } + + private getTopLevelBaseDir(scope: "user" | "project"): string { + return scope === "project" ? join(this.cwd, CONFIG_DIR_NAME) : this.agentDir; + } + + private getResourcePattern(item: ResourceItem): string { + const scope = item.metadata.scope as "user" | "project"; + const baseDir = this.getTopLevelBaseDir(scope); + return relative(baseDir, item.path); + } + + private getPackageResourcePattern(item: ResourceItem): string { + const baseDir = item.metadata.baseDir ?? 
dirname(item.path); + return relative(baseDir, item.path); + } +} + +export class ConfigSelectorComponent extends Container implements Focusable { + private resourceList: ResourceList; + + private _focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.resourceList.focused = value; + } + + constructor( + resolvedPaths: ResolvedPaths, + settingsManager: SettingsManager, + cwd: string, + agentDir: string, + onClose: () => void, + onExit: () => void, + requestRender: () => void, + ) { + super(); + + const groups = buildGroups(resolvedPaths); + + // Add header + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder()); + this.addChild(new Spacer(1)); + this.addChild(new ConfigSelectorHeader()); + this.addChild(new Spacer(1)); + + // Resource list + this.resourceList = new ResourceList(groups, settingsManager, cwd, agentDir); + this.resourceList.onCancel = onClose; + this.resourceList.onExit = onExit; + this.resourceList.onToggle = () => requestRender(); + this.addChild(this.resourceList); + + // Bottom border + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder()); + } + + getResourceList(): ResourceList { + return this.resourceList; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/countdown-timer.ts b/packages/pi-coding-agent/src/modes/interactive/components/countdown-timer.ts new file mode 100644 index 000000000..0f051c2f6 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/countdown-timer.ts @@ -0,0 +1,38 @@ +/** + * Reusable countdown timer for dialog components. 
+ */ + +import type { TUI } from "@gsd/pi-tui"; + +export class CountdownTimer { + private intervalId: ReturnType | undefined; + private remainingSeconds: number; + + constructor( + timeoutMs: number, + private tui: TUI | undefined, + private onTick: (seconds: number) => void, + private onExpire: () => void, + ) { + this.remainingSeconds = Math.ceil(timeoutMs / 1000); + this.onTick(this.remainingSeconds); + + this.intervalId = setInterval(() => { + this.remainingSeconds--; + this.onTick(this.remainingSeconds); + this.tui?.requestRender(); + + if (this.remainingSeconds <= 0) { + this.dispose(); + this.onExpire(); + } + }, 1000); + } + + dispose(): void { + if (this.intervalId) { + clearInterval(this.intervalId); + this.intervalId = undefined; + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/custom-editor.ts b/packages/pi-coding-agent/src/modes/interactive/components/custom-editor.ts new file mode 100644 index 000000000..74fcc7767 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/custom-editor.ts @@ -0,0 +1,80 @@ +import { Editor, type EditorOptions, type EditorTheme, type TUI } from "@gsd/pi-tui"; +import type { AppAction, KeybindingsManager } from "../../../core/keybindings.js"; + +/** + * Custom editor that handles app-level keybindings for coding-agent. + */ +export class CustomEditor extends Editor { + private keybindings: KeybindingsManager; + public actionHandlers: Map void> = new Map(); + + // Special handlers that can be dynamically replaced + public onEscape?: () => void; + public onCtrlD?: () => void; + public onPasteImage?: () => void; + /** Handler for extension-registered shortcuts. Returns true if handled. */ + public onExtensionShortcut?: (data: string) => boolean; + + constructor(tui: TUI, theme: EditorTheme, keybindings: KeybindingsManager, options?: EditorOptions) { + super(tui, theme, options); + this.keybindings = keybindings; + } + + /** + * Register a handler for an app action. 
+ */ + onAction(action: AppAction, handler: () => void): void { + this.actionHandlers.set(action, handler); + } + + handleInput(data: string): void { + // Check extension-registered shortcuts first + if (this.onExtensionShortcut?.(data)) { + return; + } + + // Check for paste image keybinding + if (this.keybindings.matches(data, "pasteImage")) { + this.onPasteImage?.(); + return; + } + + // Check app keybindings first + + // Escape/interrupt - only if autocomplete is NOT active + if (this.keybindings.matches(data, "interrupt")) { + if (!this.isShowingAutocomplete()) { + // Use dynamic onEscape if set, otherwise registered handler + const handler = this.onEscape ?? this.actionHandlers.get("interrupt"); + if (handler) { + handler(); + return; + } + } + // Let parent handle escape for autocomplete cancellation + super.handleInput(data); + return; + } + + // Exit (Ctrl+D) - only when editor is empty + if (this.keybindings.matches(data, "exit")) { + if (this.getText().length === 0) { + const handler = this.onCtrlD ?? 
this.actionHandlers.get("exit"); + if (handler) handler(); + return; + } + // Fall through to editor handling for delete-char-forward when not empty + } + + // Check all other app actions + for (const [action, handler] of this.actionHandlers) { + if (action !== "interrupt" && action !== "exit" && this.keybindings.matches(data, action)) { + handler(); + return; + } + } + + // Pass to parent for editor handling + super.handleInput(data); + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/custom-message.ts b/packages/pi-coding-agent/src/modes/interactive/components/custom-message.ts new file mode 100644 index 000000000..f3f6455fb --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/custom-message.ts @@ -0,0 +1,99 @@ +import type { TextContent } from "@gsd/pi-ai"; +import type { Component } from "@gsd/pi-tui"; +import { Box, Container, Markdown, type MarkdownTheme, Spacer, Text } from "@gsd/pi-tui"; +import type { MessageRenderer } from "../../../core/extensions/types.js"; +import type { CustomMessage } from "../../../core/messages.js"; +import { getMarkdownTheme, theme } from "../theme/theme.js"; + +/** + * Component that renders a custom message entry from extensions. + * Uses distinct styling to differentiate from user messages. 
+ */ +export class CustomMessageComponent extends Container { + private message: CustomMessage; + private customRenderer?: MessageRenderer; + private box: Box; + private customComponent?: Component; + private markdownTheme: MarkdownTheme; + private _expanded = false; + + constructor( + message: CustomMessage, + customRenderer?: MessageRenderer, + markdownTheme: MarkdownTheme = getMarkdownTheme(), + ) { + super(); + this.message = message; + this.customRenderer = customRenderer; + this.markdownTheme = markdownTheme; + + this.addChild(new Spacer(1)); + + // Create box with purple background (used for default rendering) + this.box = new Box(1, 1, (t) => theme.bg("customMessageBg", t)); + + this.rebuild(); + } + + setExpanded(expanded: boolean): void { + if (this._expanded !== expanded) { + this._expanded = expanded; + this.rebuild(); + } + } + + override invalidate(): void { + super.invalidate(); + this.rebuild(); + } + + private rebuild(): void { + // Remove previous content component + if (this.customComponent) { + this.removeChild(this.customComponent); + this.customComponent = undefined; + } + this.removeChild(this.box); + + // Try custom renderer first - it handles its own styling + if (this.customRenderer) { + try { + const component = this.customRenderer(this.message, { expanded: this._expanded }, theme); + if (component) { + // Custom renderer provides its own styled component + this.customComponent = component; + this.addChild(component); + return; + } + } catch { + // Fall through to default rendering + } + } + + // Default rendering uses our box + this.addChild(this.box); + this.box.clear(); + + // Default rendering: label + content + const label = theme.fg("customMessageLabel", `\x1b[1m[${this.message.customType}]\x1b[22m`); + this.box.addChild(new Text(label, 0, 0)); + this.box.addChild(new Spacer(1)); + + // Extract text content + let text: string; + if (typeof this.message.content === "string") { + text = this.message.content; + } else { + text = 
this.message.content + .filter((c): c is TextContent => c.type === "text") + .map((c) => c.text) + .join("\n"); + } + + this.box.addChild( + new Markdown(text, 0, 0, this.markdownTheme, { + color: (text: string) => theme.fg("customMessageText", text), + }), + ); + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/daxnuts.ts b/packages/pi-coding-agent/src/modes/interactive/components/daxnuts.ts new file mode 100644 index 000000000..e501cd435 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/daxnuts.ts @@ -0,0 +1,164 @@ +/** + * POWERED BY DAXNUTS - Easter egg for OpenCode + Kimi K2.5 + * + * A heartfelt tribute to dax (@thdxr) for providing free Kimi K2.5 access via OpenCode. + */ + +import type { Component, TUI } from "@gsd/pi-tui"; +import { theme } from "../theme/theme.js"; + +// 32x32 RGB image of dax, hex encoded (3 bytes per pixel) +const DAX_HEX = + "bbbab8b9b9b6b9b8b5bcbbb8b8b7b4b7b5b2b6b5b2b8b7b4b7b6b3b6b4b1bdbcb8bab8b6bbb8b5b8b5b1bbb8b4c2bebbc1bebac0bdbabfbcb9c1bebabfbebbc0bfbcc0bdbabbb8b5c1bfbcbfbcb8bbb9b6bfbcb8c2bfbcc1bfbcbfbbb8bdb9b6b8b7b5b9b8b5b8b8b5b5b5b2b6b5b2b8b7b4b9b8b5b9b8b5b6b5b3bab8b5bcbab7bbb9b6bbb8b5bfb9b5bdb2abbcb0a8beb2aabeb5afbfbab6bebab7c0bfbcbebdbabebbb8c0bdbabfbebbc2bebbbdbab7c3c0bdc3c0bdc1bebbc2bebabfbcb8bab9b6b7b6b3b2b1aeb6b5b2b5b4b1b5b4b2b6b5b2b7b6b4b9b8b6b7b6b3bbbab7b2afaba5988fb49e90b09481b79a88b39683b09583b7a395bfb6b0c0bdbabdbbb8bebcb9c1bfbcc0bebbbdbab7bebbb8c2bfbcc0bdbac0bcb9bdb9b6c0bcb8b5b4b2b4b3b0bab9b6b9b9b6b5b4b1b5b4b1b6b5b3b9b8b5b9b8b6b9b8b6b2aeaa968174a6836eaa856eab846eaf8973ac8973b08f79b18f7ab39786b7a89dbbb3aebfbab6c2c0bdbebcb9bfbdbac3c1bdc2bebbc0bcb9bdb9b6c1bdbabfbbb8b4b3b0b9b8b5b8b7b5b4b3b1b5b4b1b8b7b4b8b7b5bab9b6bbbab7b1afad8c7a719d735ca47860a87d65a98069ae8972ae8c75af8d77aa826ba98067aa8974b39e90b6a79dbbb2adc0bdbac1bfbdbfbbb8c1bdb9bebab6c0bdb9bfbbb8c1bdbab4b2b0b7b6b4b7b6b3b4b2b0bab9b7b6b5b2b6b5b2bab9b6bab9b6958c87977663aa836bac8772b08f7aad8c77b2917db0917db0907cac8971a77
d64a87f67ac8972b29887b8a89dbfbab5bfbdbac1bebac0bcb9c0bcb9c0bcb9c1bebabebab7b8b7b4b7b6b4b5b4b1b5b4b2b7b6b3b5b4b2bab9b7bab9b6b4b1ada88f7fad8973ae8d78b19684b19685b29786b69a89b29582b1917daa856ea87e66a97e66ad866ea9826baf9280b8ada6bdbbb8bebab7bfbbb8c1bdbabfbbb8bcb8b4bcb8b5b6b4b2b7b5b3b6b5b2b8b7b4b3b2afb8b7b4b6b5b2b3b2b0b3a59aab856fad8d78b0917eb19886b49b8bb49a89b39785b0917eaf8f7cab866fa77d65a77a61a87d64a9816ab08f79b5a296c1bcb8c3bfbcc2bebbbebab7bfbbb7bdbab6c2bebab8b7b4b7b6b4b6b5b3b7b6b3b6b5b2b9b8b6b4b3b1b6b1acac8f7ca9826bae8f7aaf9583b49c8cb49c8bb79d8cb59987b19380ad8e79ae8c77af8e78ac8771a3775faa826bae8972b39888bbb6b2bebbb8bfbbb8bfbbb8c0bdb9bebbb7c0bdb9b6b5b2b9b8b5b4b3b1b8b7b5b4b3b0b7b6b4b6b5b3b1a7a0aa8772a77d65a88570b49887b19b8d9c887c907a6d987f71aa907faf917daf8e7aad8c78ac8b77a8836ca9836cac8770b49b8abdb6b2c0bcb9c0bdb9bfbbb8bebab7bfbcb9bebab7b9b8b6b5b4b2b9b8b5b8b7b5b8b7b4b7b6b4b5b4b2b3a9a2ad8973a1755da9856fb398858c776a65544b776358725d526e594d9c7f6eb1907ba68672ad8e7aab8771ac856db18f79b3a092beb9b5c1bdbabdb9b5bebab7bfbbb7bebab7bcb9b6b7b6b4b6b6b3b8b7b4b5b4b2b8b6b4b7b6b3b4b3b0b4aba4a6826ba3775fb08e79b19584a88e7daa8e7db29481ad8f7c997e6da38674ac8d79ac8e7aae917f9a7c6a896a599a7c6ab3a398c1bdbabdb9b6bcb8b5bebab6bebab7bdb9b5bdb9b6b5b4b1b7b5b3b5b4b2b7b6b3b7b6b4b3b3b0b3b2b0b4aca5a7846fa97f68ae8f7bae9383b59c8bb2937fae8e79ac8b76af927eaf927eb29683b39885b2988891786a72594c6e594d978d86bdbab7bab7b3c0bcb9c0bcb9bebab7bebbb7bdb9b6b3b2b0b4b3b0b5b4b2b4b4b1b4b3b1b4b3b1b4b3b0b6ada5aa8670a57a62ad8e7ab29b8cb69d8dab856fa9826aa88069ab8771af907db49987b19684b29886b59987b39480b09787b5a9a1bcb8b5bebab7bdb9b5bebab7bfbbb8bfbbb7bbb7b4b3b2afb8b7b5b8b7b5b3b2b0b5b4b2b6b5b3b6b4b1afa299a98975a9826baf907cb39988b49a89af8e7aac8973aa856eaf8c74b1917dae907dac907db39988b29785b49785b7a090b9aca3bfbab7bcb8b5bdb9b6bcb8b4bcb8b5bdb9b5bcb8b4b5b4b2b6b5b3b4b3b0b4b3b0b9b8b5b8b6b4908b88887467aa8f7ea78976ad8973b08b74b59885b69e8eb29888b1917cb1917db1937fae907cb19686b39a8ab29886b59b8ab8a192b6aaa3b7b2afbcb8b4bcb8b5bbb7b4c0bcb9bebab7c0bcb9b6b5b
2b6b5b3b4b3b0bab9b7b7b6b4b1b0ae7b716ba083709b806f716158967764b08870b29481b69b8ab69f8fb39a89b69f90b49d8db39a89b29988b49c8cb6a090b8a496baa49593867f8f8986bfbbb7bdb9b5bcb7b4bab6b3b9b5b2bab6b2b4b3b1b3b3b0b6b5b3b8b7b5b4b2b0a7a5a38f837dae917ea084725a504c63544da28370b39784b59e8db2a093a698909b918b998e8790857e95877dad998bb39c8cb5a091b9a2938d827c95908dbebab6bbb7b3bdbab7bbb7b4bdb9b6bbb7b4b4b3b0b5b4b1b8b7b5b6b5b3b8b8b5b4b2af968f8ab29a8bab9485544b483a323073655d96887f70655f61595547403e453e3c453f3d57504f655e5b90847db39c8db7a090b6a09189807aaba6a3bdb9b6c0bcb9bebab7bcb7b4bebab7bbb7b4b3b2b0b6b5b3b2b1afb7b6b4b8b7b4b5b4b1aeaba8b5a89fac998d4d44412d25244d46444e4744322b293a3230423937433a37352d2a59504c534b48524a48988a81b59f8fb19c8d827974b2afacbdb9b5bcb8b4bdb9b5bcb8b5bdb9b6bab6b2b8b7b5b5b4b2b6b6b3b9b8b5b7b6b3b6b5b2b8b6b3b9b4b1b2a9a26c64612d25242d2625312a28352d2c453d3a78675c8d7a6ea09792aea6a0615854332b29524a479f8e82b09d90a49b96c1bdb9bebab7bfbbb8bbb8b4b9b5b1b8b4b0b9b4b0b7b6b4b8b7b5b8b7b4b6b5b3b8b6b3bab9b6b9b8b5b4b3b0b7b5b2a5a29f453d3b261e1d261f1e2e2625413936857268977865b19482b5a69caca5a07c7572453d3b746963a0948cc5bfbbc0bbb8beb9b6bbb7b3bbb6b3b7b3afb8b4b0b9b5b1b7b6b3b6b5b3b5b4b2b5b4b2b7b6b3b7b6b3b8b6b3b4b2afb7b6b3b3b1ae6d6765251f1e1e18172a22212d2523443b3971625ab19888b09482a89182877e792c25243e3634766d6abeb9b5bfbbb7bebab6bcb7b3bbb6b3b9b5b1b7b3afb8b4b0b4b3b0b5b4b1b5b4b1b4b3b1b5b4b2b8b6b4b5b3b0b9b6b4b5b4b1b6b4b27f79762a2322221c1b2d2524221b1a443e3c47413f6f676281766f867971675e5a3e37352a222166605dbab7b3bdb9b5beb9b5bcb7b3bcb7b3b9b4b0bab6b2bab6b2b5b3b0b6b4b2b3b2afb7b6b3b4b4b1b4b3b0b6b4b1b5b4b1b4b3b0b9b6b29a8c8252474230292828201f181212322c2c231e1d1c16162c26252923222d26252d2523332b2a8e8885bcb8b5bcb7b3bbb6b2bcb7b3b9b4b1b9b5b1b7b2afb7b2ae7a838e9b9b9caeadacb3b2b0b3b2afb7b7b4b6b5b3b6b6b3b7b6b3b9ada4a991808e7b6f50453f2b24231a14142923221f19181d17161f18182620201d17162a22215d5654b7b3b0bbb7b3bbb6b2b8b4b0bab5b1bbb6b2bab5b1b8b4b0bab6b22c496b4c5d735f68766e727a828285929090adaba8b7b2aeb6a59ab39682a28470a387748e76674e403a1
a14141d1716181211221c1c1f1918221c1b2f2827342d2c8d8884bab6b3b9b5b2bab5b1bab5b1b9b4b0bab6b2b8b4b0b9b4b0b7b2ae325e8b365f8a3a5d833f5b7a545f70646469706b6aa08f84b08e78b18e769f7e689e7f6b9e816d907766584940362d2a1c1615201b1a1a1413201a1a251e1d393331a39e9bbab5b1bcb7b3bab6b2b8b3afb8b4b0b9b4b0b9b4b1bab5b2b5b0ac3d6c9843729d44719c426e98415f805a64716f6a699d8677b1927eb3947faa89749d7a649f7f6ba487749e837186716454463f2c25231e181837302e3a33317a7471beb9b6bcb8b4bbb6b2b6b2aebab5b1b9b5b1b8b3afbab6b2b6b1adb5aeaa4877a14c7aa44e7ba345719a3a5d80586b7f767475927b6eb1927faf8e79b08e78a78169a07861a17f6aa58570a688749b83738270666f66618a8480a49e99b7b2aebab6b2bcb8b4b9b5b1b7b2aebab5b1b9b4b0b6b1aeb6b1adb2aca8b2aca84876a04a78a2517fa74771973a5d80405c7a6161677c695fac8a75b08d77b4917aaf8971ad876fa5816aa6846ea78670a98a76ac9484ab9f96b2aca8bdb8b4bcb7b3bcb8b4bcb8b4b8b3afb7b2aeb9b4b0b8b3afb8b2aeb6afabb3aeaab2aeaa4878a14b7aa34c7ba44a759b3d63873b5f825b67766f5f569c7e6caf8c77b18f79b28f78b5927caf8e78a98872aa8a76a98a76ac917fada199b7b0acb9b3afbfb9b5c1bab6bdb6b2b8b3afbab5b1b9b4b0b6afabb7b1adb3ada9b3aeaab0aba8"; + +const WIDTH = 32; +const HEIGHT = 32; + +function parseImage(): number[][][] { + const pixels: number[][][] = []; + for (let y = 0; y < HEIGHT; y++) { + const row: number[][] = []; + for (let x = 0; x < WIDTH; x++) { + const idx = (y * WIDTH + x) * 6; + const r = parseInt(DAX_HEX.slice(idx, idx + 2), 16); + const g = parseInt(DAX_HEX.slice(idx + 2, idx + 4), 16); + const b = parseInt(DAX_HEX.slice(idx + 4, idx + 6), 16); + row.push([r, g, b]); + } + pixels.push(row); + } + return pixels; +} + +function rgb(r: number, g: number, b: number, bg = false): string { + return `\x1b[${bg ? 
48 : 38};2;${r};${g};${b}m`; +} + +const RESET = "\x1b[0m"; + +function buildImage(): string[] { + const pixels = parseImage(); + const lines: string[] = []; + + // Use half-block chars: ▄ with bg=top pixel, fg=bottom pixel + for (let row = 0; row < HEIGHT; row += 2) { + let line = ""; + for (let x = 0; x < WIDTH; x++) { + const top = pixels[row][x]; + const bottom = pixels[row + 1]?.[x] ?? top; + line += `${rgb(bottom[0], bottom[1], bottom[2])}${rgb(top[0], top[1], top[2], true)}▄`; + } + line += RESET; + lines.push(line); + } + return lines; +} + +export class DaxnutsComponent implements Component { + private ui: TUI; + private image: string[]; + private interval: ReturnType | null = null; + private tick = 0; + private maxTicks = 25; // ~2 seconds at 80ms + private cachedLines: string[] = []; + private cachedWidth = 0; + private cachedTick = -1; + + constructor(ui: TUI) { + this.ui = ui; + this.image = buildImage(); + this.startAnimation(); + } + + invalidate(): void { + this.cachedWidth = 0; + } + + private startAnimation(): void { + this.interval = setInterval(() => { + this.tick++; + if (this.tick >= this.maxTicks) { + this.stopAnimation(); + } + this.cachedWidth = 0; + this.ui.requestRender(); + }, 80); + } + + private stopAnimation(): void { + if (this.interval) { + clearInterval(this.interval); + this.interval = null; + } + } + + render(width: number): string[] { + if (width === this.cachedWidth && this.cachedTick === this.tick) { + return this.cachedLines; + } + + const t = theme; + const lines: string[] = []; + + const center = (s: string) => { + const visible = s.replace(/\x1b\[[0-9;]*m/g, "").length; + const left = Math.max(0, Math.floor((width - visible) / 2)); + return " ".repeat(left) + s; + }; + + lines.push(""); + + // Scanline reveal effect: show rows progressively + const revealedRows = Math.min( + this.image.length, + Math.floor((this.tick / this.maxTicks) * (this.image.length + 3)), + ); + + for (let i = 0; i < this.image.length; i++) { + if (i 
< revealedRows) { + lines.push(center(this.image[i])); + } else { + // Show scan line + if (i === revealedRows) { + const scanline = "▓".repeat(WIDTH); + lines.push(center(rgb(100, 200, 255) + scanline + RESET)); + } else { + lines.push(center(" ".repeat(WIDTH))); + } + } + } + + lines.push(""); + + // Fade in text after image is revealed + const textPhase = Math.max(0, this.tick - this.maxTicks * 0.6); + if (textPhase > 0 || this.tick >= this.maxTicks) { + lines.push(center(t.fg("accent", "Free Kimi K2.5 via OpenCode Zen"))); + lines.push(center(t.fg("success", '"Powered by daxnuts"'))); + lines.push(center(t.fg("muted", "— @thdxr"))); + } else { + lines.push(""); + lines.push(""); + lines.push(""); + } + + lines.push(""); + if (textPhase > 2 || this.tick >= this.maxTicks) { + lines.push(center(t.fg("dim", "Try OpenCode"))); + lines.push(center(t.fg("mdLink", "https://mistral.ai/news/mistral-vibe-2-0"))); + } else { + lines.push(""); + lines.push(""); + } + lines.push(""); + + this.cachedLines = lines; + this.cachedWidth = width; + this.cachedTick = this.tick; + return lines; + } + + dispose(): void { + this.stopAnimation(); + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/diff.ts b/packages/pi-coding-agent/src/modes/interactive/components/diff.ts new file mode 100644 index 000000000..d575d63e3 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/diff.ts @@ -0,0 +1,147 @@ +import * as Diff from "diff"; +import { theme } from "../theme/theme.js"; + +/** + * Parse diff line to extract prefix, line number, and content. + * Format: "+123 content" or "-123 content" or " 123 content" or " ..." + */ +function parseDiffLine(line: string): { prefix: string; lineNum: string; content: string } | null { + const match = line.match(/^([+-\s])(\s*\d*)\s(.*)$/); + if (!match) return null; + return { prefix: match[1], lineNum: match[2], content: match[3] }; +} + +/** + * Replace tabs with spaces for consistent rendering. 
+ */ +function replaceTabs(text: string): string { + return text.replace(/\t/g, " "); +} + +/** + * Compute word-level diff and render with inverse on changed parts. + * Uses diffWords which groups whitespace with adjacent words for cleaner highlighting. + * Strips leading whitespace from inverse to avoid highlighting indentation. + */ +function renderIntraLineDiff(oldContent: string, newContent: string): { removedLine: string; addedLine: string } { + const wordDiff = Diff.diffWords(oldContent, newContent); + + let removedLine = ""; + let addedLine = ""; + let isFirstRemoved = true; + let isFirstAdded = true; + + for (const part of wordDiff) { + if (part.removed) { + let value = part.value; + // Strip leading whitespace from the first removed part + if (isFirstRemoved) { + const leadingWs = value.match(/^(\s*)/)?.[1] || ""; + value = value.slice(leadingWs.length); + removedLine += leadingWs; + isFirstRemoved = false; + } + if (value) { + removedLine += theme.inverse(value); + } + } else if (part.added) { + let value = part.value; + // Strip leading whitespace from the first added part + if (isFirstAdded) { + const leadingWs = value.match(/^(\s*)/)?.[1] || ""; + value = value.slice(leadingWs.length); + addedLine += leadingWs; + isFirstAdded = false; + } + if (value) { + addedLine += theme.inverse(value); + } + } else { + removedLine += part.value; + addedLine += part.value; + } + } + + return { removedLine, addedLine }; +} + +export interface RenderDiffOptions { + /** File path (unused, kept for API compatibility) */ + filePath?: string; +} + +/** + * Render a diff string with colored lines and intra-line change highlighting. 
+ * - Context lines: dim/gray + * - Removed lines: red, with inverse on changed tokens + * - Added lines: green, with inverse on changed tokens + */ +export function renderDiff(diffText: string, _options: RenderDiffOptions = {}): string { + const lines = diffText.split("\n"); + const result: string[] = []; + + let i = 0; + while (i < lines.length) { + const line = lines[i]; + const parsed = parseDiffLine(line); + + if (!parsed) { + result.push(theme.fg("toolDiffContext", line)); + i++; + continue; + } + + if (parsed.prefix === "-") { + // Collect consecutive removed lines + const removedLines: { lineNum: string; content: string }[] = []; + while (i < lines.length) { + const p = parseDiffLine(lines[i]); + if (!p || p.prefix !== "-") break; + removedLines.push({ lineNum: p.lineNum, content: p.content }); + i++; + } + + // Collect consecutive added lines + const addedLines: { lineNum: string; content: string }[] = []; + while (i < lines.length) { + const p = parseDiffLine(lines[i]); + if (!p || p.prefix !== "+") break; + addedLines.push({ lineNum: p.lineNum, content: p.content }); + i++; + } + + // Only do intra-line diffing when there's exactly one removed and one added line + // (indicating a single line modification). Otherwise, show lines as-is. 
+ if (removedLines.length === 1 && addedLines.length === 1) { + const removed = removedLines[0]; + const added = addedLines[0]; + + const { removedLine, addedLine } = renderIntraLineDiff( + replaceTabs(removed.content), + replaceTabs(added.content), + ); + + result.push(theme.fg("toolDiffRemoved", `-${removed.lineNum} ${removedLine}`)); + result.push(theme.fg("toolDiffAdded", `+${added.lineNum} ${addedLine}`)); + } else { + // Show all removed lines first, then all added lines + for (const removed of removedLines) { + result.push(theme.fg("toolDiffRemoved", `-${removed.lineNum} ${replaceTabs(removed.content)}`)); + } + for (const added of addedLines) { + result.push(theme.fg("toolDiffAdded", `+${added.lineNum} ${replaceTabs(added.content)}`)); + } + } + } else if (parsed.prefix === "+") { + // Standalone added line + result.push(theme.fg("toolDiffAdded", `+${parsed.lineNum} ${replaceTabs(parsed.content)}`)); + i++; + } else { + // Context line + result.push(theme.fg("toolDiffContext", ` ${parsed.lineNum} ${replaceTabs(parsed.content)}`)); + i++; + } + } + + return result.join("\n"); +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/dynamic-border.ts b/packages/pi-coding-agent/src/modes/interactive/components/dynamic-border.ts new file mode 100644 index 000000000..60d2da9e3 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/dynamic-border.ts @@ -0,0 +1,25 @@ +import type { Component } from "@gsd/pi-tui"; +import { theme } from "../theme/theme.js"; + +/** + * Dynamic border component that adjusts to viewport width. + * + * Note: When used from extensions loaded via jiti, the global `theme` may be undefined + * because jiti creates a separate module cache. Always pass an explicit color + * function when using DynamicBorder in components exported for extension use. 
+ */ +export class DynamicBorder implements Component { + private color: (str: string) => string; + + constructor(color: (str: string) => string = (str) => theme.fg("border", str)) { + this.color = color; + } + + invalidate(): void { + // No cached state to invalidate currently + } + + render(width: number): string[] { + return [this.color("─".repeat(Math.max(1, width)))]; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/extension-editor.ts b/packages/pi-coding-agent/src/modes/interactive/components/extension-editor.ts new file mode 100644 index 000000000..f0a9eae8b --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/extension-editor.ts @@ -0,0 +1,147 @@ +/** + * Multi-line editor component for extensions. + * Supports Ctrl+G for external editor. + */ + +import { spawnSync } from "node:child_process"; +import * as fs from "node:fs"; +import * as os from "node:os"; +import * as path from "node:path"; +import { + Container, + Editor, + type EditorOptions, + type Focusable, + getEditorKeybindings, + Spacer, + Text, + type TUI, +} from "@gsd/pi-tui"; +import type { KeybindingsManager } from "../../../core/keybindings.js"; +import { getEditorTheme, theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { appKeyHint, keyHint } from "./keybinding-hints.js"; + +export class ExtensionEditorComponent extends Container implements Focusable { + private editor: Editor; + private onSubmitCallback: (value: string) => void; + private onCancelCallback: () => void; + private tui: TUI; + private keybindings: KeybindingsManager; + + private _focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.editor.focused = value; + } + + constructor( + tui: TUI, + keybindings: KeybindingsManager, + title: string, + prefill: string | undefined, + onSubmit: (value: string) => void, + onCancel: () => void, + options?: 
EditorOptions, + ) { + super(); + + this.tui = tui; + this.keybindings = keybindings; + this.onSubmitCallback = onSubmit; + this.onCancelCallback = onCancel; + + // Add top border + this.addChild(new DynamicBorder()); + this.addChild(new Spacer(1)); + + // Add title + this.addChild(new Text(theme.fg("accent", title), 1, 0)); + this.addChild(new Spacer(1)); + + // Create editor + this.editor = new Editor(tui, getEditorTheme(), options); + if (prefill) { + this.editor.setText(prefill); + } + // Wire up Enter to submit (Shift+Enter for newlines, like the main editor) + this.editor.onSubmit = (text: string) => { + this.onSubmitCallback(text); + }; + this.addChild(this.editor); + + this.addChild(new Spacer(1)); + + // Add hint + const hasExternalEditor = !!(process.env.VISUAL || process.env.EDITOR); + const hint = + keyHint("selectConfirm", "submit") + + " " + + keyHint("newLine", "newline") + + " " + + keyHint("selectCancel", "cancel") + + (hasExternalEditor ? ` ${appKeyHint(this.keybindings, "externalEditor", "external editor")}` : ""); + this.addChild(new Text(hint, 1, 0)); + + this.addChild(new Spacer(1)); + + // Add bottom border + this.addChild(new DynamicBorder()); + } + + handleInput(keyData: string): void { + const kb = getEditorKeybindings(); + // Escape or Ctrl+C to cancel + if (kb.matches(keyData, "selectCancel")) { + this.onCancelCallback(); + return; + } + + // External editor (app keybinding) + if (this.keybindings.matches(keyData, "externalEditor")) { + this.openExternalEditor(); + return; + } + + // Forward to editor + this.editor.handleInput(keyData); + } + + private openExternalEditor(): void { + const editorCmd = process.env.VISUAL || process.env.EDITOR; + if (!editorCmd) { + return; + } + + const currentText = this.editor.getText(); + const tmpFile = path.join(os.tmpdir(), `pi-extension-editor-${Date.now()}.md`); + + try { + fs.writeFileSync(tmpFile, currentText, "utf-8"); + this.tui.stop(); + + const [editor, ...editorArgs] = editorCmd.split(" "); 
+ const result = spawnSync(editor, [...editorArgs, tmpFile], { + stdio: "inherit", + shell: process.platform === "win32", + }); + + if (result.status === 0) { + const newContent = fs.readFileSync(tmpFile, "utf-8").replace(/\n$/, ""); + this.editor.setText(newContent); + } + } finally { + try { + fs.unlinkSync(tmpFile); + } catch { + // Ignore cleanup errors + } + this.tui.start(); + // Force full re-render since external editor uses alternate screen + this.tui.requestRender(true); + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/extension-input.ts b/packages/pi-coding-agent/src/modes/interactive/components/extension-input.ts new file mode 100644 index 000000000..4c0e816bd --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/extension-input.ts @@ -0,0 +1,85 @@ +/** + * Simple text input component for extensions. + */ + +import { Container, type Focusable, getEditorKeybindings, Input, Spacer, Text, type TUI } from "@gsd/pi-tui"; +import { theme } from "../theme/theme.js"; +import { CountdownTimer } from "./countdown-timer.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { keyHint } from "./keybinding-hints.js"; + +export interface ExtensionInputOptions { + tui?: TUI; + timeout?: number; +} + +export class ExtensionInputComponent extends Container implements Focusable { + private input: Input; + private onSubmitCallback: (value: string) => void; + private onCancelCallback: () => void; + private titleText: Text; + private baseTitle: string; + private countdown: CountdownTimer | undefined; + + // Focusable implementation - propagate to input for IME cursor positioning + private _focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.input.focused = value; + } + + constructor( + title: string, + _placeholder: string | undefined, + onSubmit: (value: string) => void, + onCancel: () => void, + opts?: 
ExtensionInputOptions, + ) { + super(); + + this.onSubmitCallback = onSubmit; + this.onCancelCallback = onCancel; + this.baseTitle = title; + + this.addChild(new DynamicBorder()); + this.addChild(new Spacer(1)); + + this.titleText = new Text(theme.fg("accent", title), 1, 0); + this.addChild(this.titleText); + this.addChild(new Spacer(1)); + + if (opts?.timeout && opts.timeout > 0 && opts.tui) { + this.countdown = new CountdownTimer( + opts.timeout, + opts.tui, + (s) => this.titleText.setText(theme.fg("accent", `${this.baseTitle} (${s}s)`)), + () => this.onCancelCallback(), + ); + } + + this.input = new Input(); + this.addChild(this.input); + this.addChild(new Spacer(1)); + this.addChild(new Text(`${keyHint("selectConfirm", "submit")} ${keyHint("selectCancel", "cancel")}`, 1, 0)); + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder()); + } + + handleInput(keyData: string): void { + const kb = getEditorKeybindings(); + if (kb.matches(keyData, "selectConfirm") || keyData === "\n") { + this.onSubmitCallback(this.input.getValue()); + } else if (kb.matches(keyData, "selectCancel")) { + this.onCancelCallback(); + } else { + this.input.handleInput(keyData); + } + } + + dispose(): void { + this.countdown?.dispose(); + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/extension-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/extension-selector.ts new file mode 100644 index 000000000..b925b7f5b --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/extension-selector.ts @@ -0,0 +1,107 @@ +/** + * Generic selector component for extensions. + * Displays a list of string options with keyboard navigation. 
+ */ + +import { Container, getEditorKeybindings, Spacer, Text, type TUI } from "@gsd/pi-tui"; +import { theme } from "../theme/theme.js"; +import { CountdownTimer } from "./countdown-timer.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { keyHint, rawKeyHint } from "./keybinding-hints.js"; + +export interface ExtensionSelectorOptions { + tui?: TUI; + timeout?: number; +} + +export class ExtensionSelectorComponent extends Container { + private options: string[]; + private selectedIndex = 0; + private listContainer: Container; + private onSelectCallback: (option: string) => void; + private onCancelCallback: () => void; + private titleText: Text; + private baseTitle: string; + private countdown: CountdownTimer | undefined; + + constructor( + title: string, + options: string[], + onSelect: (option: string) => void, + onCancel: () => void, + opts?: ExtensionSelectorOptions, + ) { + super(); + + this.options = options; + this.onSelectCallback = onSelect; + this.onCancelCallback = onCancel; + this.baseTitle = title; + + this.addChild(new DynamicBorder()); + this.addChild(new Spacer(1)); + + this.titleText = new Text(theme.fg("accent", title), 1, 0); + this.addChild(this.titleText); + this.addChild(new Spacer(1)); + + if (opts?.timeout && opts.timeout > 0 && opts.tui) { + this.countdown = new CountdownTimer( + opts.timeout, + opts.tui, + (s) => this.titleText.setText(theme.fg("accent", `${this.baseTitle} (${s}s)`)), + () => this.onCancelCallback(), + ); + } + + this.listContainer = new Container(); + this.addChild(this.listContainer); + this.addChild(new Spacer(1)); + this.addChild( + new Text( + rawKeyHint("↑↓", "navigate") + + " " + + keyHint("selectConfirm", "select") + + " " + + keyHint("selectCancel", "cancel"), + 1, + 0, + ), + ); + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder()); + + this.updateList(); + } + + private updateList(): void { + this.listContainer.clear(); + for (let i = 0; i < this.options.length; i++) { + const 
isSelected = i === this.selectedIndex; + const text = isSelected + ? theme.fg("accent", "→ ") + theme.fg("accent", this.options[i]) + : ` ${theme.fg("text", this.options[i])}`; + this.listContainer.addChild(new Text(text, 1, 0)); + } + } + + handleInput(keyData: string): void { + const kb = getEditorKeybindings(); + if (kb.matches(keyData, "selectUp") || keyData === "k") { + this.selectedIndex = Math.max(0, this.selectedIndex - 1); + this.updateList(); + } else if (kb.matches(keyData, "selectDown") || keyData === "j") { + this.selectedIndex = Math.min(this.options.length - 1, this.selectedIndex + 1); + this.updateList(); + } else if (kb.matches(keyData, "selectConfirm") || keyData === "\n") { + const selected = this.options[this.selectedIndex]; + if (selected) this.onSelectCallback(selected); + } else if (kb.matches(keyData, "selectCancel")) { + this.onCancelCallback(); + } + } + + dispose(): void { + this.countdown?.dispose(); + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/footer.ts b/packages/pi-coding-agent/src/modes/interactive/components/footer.ts new file mode 100644 index 000000000..d80f0e635 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/footer.ts @@ -0,0 +1,216 @@ +import { type Component, truncateToWidth, visibleWidth } from "@gsd/pi-tui"; +import type { AgentSession } from "../../../core/agent-session.js"; +import type { ReadonlyFooterDataProvider } from "../../../core/footer-data-provider.js"; +import { theme } from "../theme/theme.js"; + +/** + * Sanitize text for display in a single-line status. + * Removes newlines, tabs, carriage returns, and other control characters. 
+ */ +function sanitizeStatusText(text: string): string { + // Replace newlines, tabs, carriage returns with space, then collapse multiple spaces + return text + .replace(/[\r\n\t]/g, " ") + .replace(/ +/g, " ") + .trim(); +} + +/** + * Format token counts (similar to web-ui) + */ +function formatTokens(count: number): string { + if (count < 1000) return count.toString(); + if (count < 10000) return `${(count / 1000).toFixed(1)}k`; + if (count < 1000000) return `${Math.round(count / 1000)}k`; + if (count < 10000000) return `${(count / 1000000).toFixed(1)}M`; + return `${Math.round(count / 1000000)}M`; +} + +/** + * Footer component that shows pwd, token stats, and context usage. + * Computes token/context stats from session, gets git branch and extension statuses from provider. + */ +export class FooterComponent implements Component { + private autoCompactEnabled = true; + + constructor( + private session: AgentSession, + private footerData: ReadonlyFooterDataProvider, + ) {} + + setAutoCompactEnabled(enabled: boolean): void { + this.autoCompactEnabled = enabled; + } + + /** + * No-op: git branch caching now handled by provider. + * Kept for compatibility with existing call sites in interactive-mode. + */ + invalidate(): void { + // No-op: git branch is cached/invalidated by provider + } + + /** + * Clean up resources. + * Git watcher cleanup now handled by provider. 
+ */ + dispose(): void { + // Git watcher cleanup handled by provider + } + + render(width: number): string[] { + const state = this.session.state; + + // Calculate cumulative usage from ALL session entries (not just post-compaction messages) + let totalInput = 0; + let totalOutput = 0; + let totalCacheRead = 0; + let totalCacheWrite = 0; + let totalCost = 0; + + for (const entry of this.session.sessionManager.getEntries()) { + if (entry.type === "message" && entry.message.role === "assistant") { + totalInput += entry.message.usage.input; + totalOutput += entry.message.usage.output; + totalCacheRead += entry.message.usage.cacheRead; + totalCacheWrite += entry.message.usage.cacheWrite; + totalCost += entry.message.usage.cost.total; + } + } + + // Calculate context usage from session (handles compaction correctly). + // After compaction, tokens are unknown until the next LLM response. + const contextUsage = this.session.getContextUsage(); + const contextWindow = contextUsage?.contextWindow ?? state.model?.contextWindow ?? 0; + const contextPercentValue = contextUsage?.percent ?? 0; + const contextPercent = contextUsage?.percent !== null ? 
contextPercentValue.toFixed(1) : "?"; + + // Replace home directory with ~ + let pwd = process.cwd(); + const home = process.env.HOME || process.env.USERPROFILE; + if (home && pwd.startsWith(home)) { + pwd = `~${pwd.slice(home.length)}`; + } + + // Add git branch if available + const branch = this.footerData.getGitBranch(); + if (branch) { + pwd = `${pwd} (${branch})`; + } + + // Add session name if set + const sessionName = this.session.sessionManager.getSessionName(); + if (sessionName) { + pwd = `${pwd} • ${sessionName}`; + } + + // Build stats line + const statsParts = []; + if (totalInput) statsParts.push(`↑${formatTokens(totalInput)}`); + if (totalOutput) statsParts.push(`↓${formatTokens(totalOutput)}`); + if (totalCacheRead) statsParts.push(`R${formatTokens(totalCacheRead)}`); + if (totalCacheWrite) statsParts.push(`W${formatTokens(totalCacheWrite)}`); + + // Show cost with "(sub)" indicator if using OAuth subscription + const usingSubscription = state.model ? this.session.modelRegistry.isUsingOAuth(state.model) : false; + if (totalCost || usingSubscription) { + const costStr = `$${totalCost.toFixed(3)}${usingSubscription ? " (sub)" : ""}`; + statsParts.push(costStr); + } + + // Colorize context percentage based on usage + let contextPercentStr: string; + const autoIndicator = this.autoCompactEnabled ? " (auto)" : ""; + const contextPercentDisplay = + contextPercent === "?" + ? 
`?/${formatTokens(contextWindow)}${autoIndicator}` + : `${contextPercent}%/${formatTokens(contextWindow)}${autoIndicator}`; + if (contextPercentValue > 90) { + contextPercentStr = theme.fg("error", contextPercentDisplay); + } else if (contextPercentValue > 70) { + contextPercentStr = theme.fg("warning", contextPercentDisplay); + } else { + contextPercentStr = contextPercentDisplay; + } + statsParts.push(contextPercentStr); + + let statsLeft = statsParts.join(" "); + + // Add model name on the right side, plus thinking level if model supports it + const modelName = state.model?.id || "no-model"; + + let statsLeftWidth = visibleWidth(statsLeft); + + // If statsLeft is too wide, truncate it + if (statsLeftWidth > width) { + statsLeft = truncateToWidth(statsLeft, width, "..."); + statsLeftWidth = visibleWidth(statsLeft); + } + + // Calculate available space for padding (minimum 2 spaces between stats and model) + const minPadding = 2; + + // Add thinking level indicator if model supports reasoning + let rightSideWithoutProvider = modelName; + if (state.model?.reasoning) { + const thinkingLevel = state.thinkingLevel || "off"; + rightSideWithoutProvider = + thinkingLevel === "off" ? 
`${modelName} • thinking off` : `${modelName} • ${thinkingLevel}`; + } + + // Prepend the provider in parentheses if there are multiple providers and there's enough room + let rightSide = rightSideWithoutProvider; + if (this.footerData.getAvailableProviderCount() > 1 && state.model) { + rightSide = `(${state.model!.provider}) ${rightSideWithoutProvider}`; + if (statsLeftWidth + minPadding + visibleWidth(rightSide) > width) { + // Too wide, fall back + rightSide = rightSideWithoutProvider; + } + } + + const rightSideWidth = visibleWidth(rightSide); + const totalNeeded = statsLeftWidth + minPadding + rightSideWidth; + + let statsLine: string; + if (totalNeeded <= width) { + // Both fit - add padding to right-align model + const padding = " ".repeat(width - statsLeftWidth - rightSideWidth); + statsLine = statsLeft + padding + rightSide; + } else { + // Need to truncate right side + const availableForRight = width - statsLeftWidth - minPadding; + if (availableForRight > 0) { + const truncatedRight = truncateToWidth(rightSide, availableForRight, ""); + const truncatedRightWidth = visibleWidth(truncatedRight); + const padding = " ".repeat(Math.max(0, width - statsLeftWidth - truncatedRightWidth)); + statsLine = statsLeft + padding + truncatedRight; + } else { + // Not enough space for right side at all + statsLine = statsLeft; + } + } + + // Apply dim to each part separately. statsLeft may contain color codes (for context %) + // that end with a reset, which would clear an outer dim wrapper. So we dim the parts + // before and after the colored section independently. 
+ const dimStatsLeft = theme.fg("dim", statsLeft); + const remainder = statsLine.slice(statsLeft.length); // padding + rightSide + const dimRemainder = theme.fg("dim", remainder); + + const pwdLine = truncateToWidth(theme.fg("dim", pwd), width, theme.fg("dim", "...")); + const lines = [pwdLine, dimStatsLeft + dimRemainder]; + + // Add extension statuses on a single line, sorted by key alphabetically + const extensionStatuses = this.footerData.getExtensionStatuses(); + if (extensionStatuses.size > 0) { + const sortedStatuses = Array.from(extensionStatuses.entries()) + .sort(([a], [b]) => a.localeCompare(b)) + .map(([, text]) => sanitizeStatusText(text)); + const statusLine = sortedStatuses.join(" "); + // Truncate to terminal width with dim ellipsis for consistency with footer style + lines.push(truncateToWidth(statusLine, width, theme.fg("dim", "..."))); + } + + return lines; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/index.ts b/packages/pi-coding-agent/src/modes/interactive/components/index.ts new file mode 100644 index 000000000..78200f36c --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/index.ts @@ -0,0 +1,32 @@ +// UI Components for extensions +export { ArminComponent } from "./armin.js"; +export { AssistantMessageComponent } from "./assistant-message.js"; +export { BashExecutionComponent } from "./bash-execution.js"; +export { BorderedLoader } from "./bordered-loader.js"; +export { BranchSummaryMessageComponent } from "./branch-summary-message.js"; +export { CompactionSummaryMessageComponent } from "./compaction-summary-message.js"; +export { CustomEditor } from "./custom-editor.js"; +export { CustomMessageComponent } from "./custom-message.js"; +export { DaxnutsComponent } from "./daxnuts.js"; +export { type RenderDiffOptions, renderDiff } from "./diff.js"; +export { DynamicBorder } from "./dynamic-border.js"; +export { ExtensionEditorComponent } from "./extension-editor.js"; +export { 
ExtensionInputComponent } from "./extension-input.js"; +export { ExtensionSelectorComponent } from "./extension-selector.js"; +export { FooterComponent } from "./footer.js"; +export { appKey, appKeyHint, editorKey, keyHint, rawKeyHint } from "./keybinding-hints.js"; +export { LoginDialogComponent } from "./login-dialog.js"; +export { ModelSelectorComponent } from "./model-selector.js"; +export { OAuthSelectorComponent } from "./oauth-selector.js"; +export { type ModelsCallbacks, type ModelsConfig, ScopedModelsSelectorComponent } from "./scoped-models-selector.js"; +export { SessionSelectorComponent } from "./session-selector.js"; +export { type SettingsCallbacks, type SettingsConfig, SettingsSelectorComponent } from "./settings-selector.js"; +export { ShowImagesSelectorComponent } from "./show-images-selector.js"; +export { SkillInvocationMessageComponent } from "./skill-invocation-message.js"; +export { ThemeSelectorComponent } from "./theme-selector.js"; +export { ThinkingSelectorComponent } from "./thinking-selector.js"; +export { ToolExecutionComponent, type ToolExecutionOptions } from "./tool-execution.js"; +export { TreeSelectorComponent } from "./tree-selector.js"; +export { UserMessageComponent } from "./user-message.js"; +export { UserMessageSelectorComponent } from "./user-message-selector.js"; +export { truncateToVisualLines, type VisualTruncateResult } from "./visual-truncate.js"; diff --git a/packages/pi-coding-agent/src/modes/interactive/components/keybinding-hints.ts b/packages/pi-coding-agent/src/modes/interactive/components/keybinding-hints.ts new file mode 100644 index 000000000..6d546a712 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/keybinding-hints.ts @@ -0,0 +1,66 @@ +/** + * Utilities for formatting keybinding hints in the UI. 
+ */ + +import { type EditorAction, getEditorKeybindings, type KeyId } from "@gsd/pi-tui"; +import type { AppAction, KeybindingsManager } from "../../../core/keybindings.js"; +import { theme } from "../theme/theme.js"; + +/** + * Format keys array as display string (e.g., ["ctrl+c", "escape"] -> "ctrl+c/escape"). + */ +function formatKeys(keys: KeyId[]): string { + if (keys.length === 0) return ""; + if (keys.length === 1) return keys[0]!; + return keys.join("/"); +} + +/** + * Get display string for an editor action. + */ +export function editorKey(action: EditorAction): string { + return formatKeys(getEditorKeybindings().getKeys(action)); +} + +/** + * Get display string for an app action. + */ +export function appKey(keybindings: KeybindingsManager, action: AppAction): string { + return formatKeys(keybindings.getKeys(action)); +} + +/** + * Format a keybinding hint with consistent styling: dim key, muted description. + * Looks up the key from editor keybindings automatically. + * + * @param action - Editor action name (e.g., "selectConfirm", "expandTools") + * @param description - Description text (e.g., "to expand", "cancel") + * @returns Formatted string with dim key and muted description + */ +export function keyHint(action: EditorAction, description: string): string { + return theme.fg("dim", editorKey(action)) + theme.fg("muted", ` ${description}`); +} + +/** + * Format a keybinding hint for app-level actions. + * Requires the KeybindingsManager instance. 
+ * + * @param keybindings - KeybindingsManager instance + * @param action - App action name (e.g., "interrupt", "externalEditor") + * @param description - Description text + * @returns Formatted string with dim key and muted description + */ +export function appKeyHint(keybindings: KeybindingsManager, action: AppAction, description: string): string { + return theme.fg("dim", appKey(keybindings, action)) + theme.fg("muted", ` ${description}`); +} + +/** + * Format a raw key string with description (for non-configurable keys like ↑↓). + * + * @param key - Raw key string + * @param description - Description text + * @returns Formatted string with dim key and muted description + */ +export function rawKeyHint(key: string, description: string): string { + return theme.fg("dim", key) + theme.fg("muted", ` ${description}`); +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/login-dialog.ts b/packages/pi-coding-agent/src/modes/interactive/components/login-dialog.ts new file mode 100644 index 000000000..8e26afe7d --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/login-dialog.ts @@ -0,0 +1,174 @@ +import { getOAuthProviders } from "@gsd/pi-ai/oauth"; +import { Container, type Focusable, getEditorKeybindings, Input, Spacer, Text, type TUI } from "@gsd/pi-tui"; +import { exec } from "child_process"; +import { theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { keyHint } from "./keybinding-hints.js"; + +/** + * Login dialog component - replaces editor during OAuth login flow + */ +export class LoginDialogComponent extends Container implements Focusable { + private contentContainer: Container; + private input: Input; + private tui: TUI; + private abortController = new AbortController(); + private inputResolver?: (value: string) => void; + private inputRejecter?: (error: Error) => void; + + // Focusable implementation - propagate to input for IME cursor positioning + private _focused 
= false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.input.focused = value; + } + + constructor( + tui: TUI, + providerId: string, + private onComplete: (success: boolean, message?: string) => void, + ) { + super(); + this.tui = tui; + + const providerInfo = getOAuthProviders().find((p) => p.id === providerId); + const providerName = providerInfo?.name || providerId; + + // Top border + this.addChild(new DynamicBorder()); + + // Title + this.addChild(new Text(theme.fg("warning", `Login to ${providerName}`), 1, 0)); + + // Dynamic content area + this.contentContainer = new Container(); + this.addChild(this.contentContainer); + + // Input (always present, used when needed) + this.input = new Input(); + this.input.onSubmit = () => { + if (this.inputResolver) { + this.inputResolver(this.input.getValue()); + this.inputResolver = undefined; + this.inputRejecter = undefined; + } + }; + this.input.onEscape = () => { + this.cancel(); + }; + + // Bottom border + this.addChild(new DynamicBorder()); + } + + get signal(): AbortSignal { + return this.abortController.signal; + } + + private cancel(): void { + this.abortController.abort(); + if (this.inputRejecter) { + this.inputRejecter(new Error("Login cancelled")); + this.inputResolver = undefined; + this.inputRejecter = undefined; + } + this.onComplete(false, "Login cancelled"); + } + + /** + * Called by onAuth callback - show URL and optional instructions + */ + showAuth(url: string, instructions?: string): void { + this.contentContainer.clear(); + this.contentContainer.addChild(new Spacer(1)); + this.contentContainer.addChild(new Text(theme.fg("accent", url), 1, 0)); + + const clickHint = process.platform === "darwin" ? 
"Cmd+click to open" : "Ctrl+click to open"; + const hyperlink = `\x1b]8;;${url}\x07${clickHint}\x1b]8;;\x07`; + this.contentContainer.addChild(new Text(theme.fg("dim", hyperlink), 1, 0)); + + if (instructions) { + this.contentContainer.addChild(new Spacer(1)); + this.contentContainer.addChild(new Text(theme.fg("warning", instructions), 1, 0)); + } + + // Try to open browser + const openCmd = process.platform === "darwin" ? "open" : process.platform === "win32" ? "start" : "xdg-open"; + exec(`${openCmd} "${url}"`); + + this.tui.requestRender(); + } + + /** + * Show input for manual code/URL entry (for callback server providers) + */ + showManualInput(prompt: string): Promise { + this.contentContainer.addChild(new Spacer(1)); + this.contentContainer.addChild(new Text(theme.fg("dim", prompt), 1, 0)); + this.contentContainer.addChild(this.input); + this.contentContainer.addChild(new Text(`(${keyHint("selectCancel", "to cancel")})`, 1, 0)); + this.tui.requestRender(); + + return new Promise((resolve, reject) => { + this.inputResolver = resolve; + this.inputRejecter = reject; + }); + } + + /** + * Called by onPrompt callback - show prompt and wait for input + * Note: Does NOT clear content, appends to existing (preserves URL from showAuth) + */ + showPrompt(message: string, placeholder?: string): Promise { + this.contentContainer.addChild(new Spacer(1)); + this.contentContainer.addChild(new Text(theme.fg("text", message), 1, 0)); + if (placeholder) { + this.contentContainer.addChild(new Text(theme.fg("dim", `e.g., ${placeholder}`), 1, 0)); + } + this.contentContainer.addChild(this.input); + this.contentContainer.addChild( + new Text(`(${keyHint("selectCancel", "to cancel,")} ${keyHint("selectConfirm", "to submit")})`, 1, 0), + ); + + this.input.setValue(""); + this.tui.requestRender(); + + return new Promise((resolve, reject) => { + this.inputResolver = resolve; + this.inputRejecter = reject; + }); + } + + /** + * Show waiting message (for polling flows like GitHub 
Copilot) + */ + showWaiting(message: string): void { + this.contentContainer.addChild(new Spacer(1)); + this.contentContainer.addChild(new Text(theme.fg("dim", message), 1, 0)); + this.contentContainer.addChild(new Text(`(${keyHint("selectCancel", "to cancel")})`, 1, 0)); + this.tui.requestRender(); + } + + /** + * Called by onProgress callback + */ + showProgress(message: string): void { + this.contentContainer.addChild(new Text(theme.fg("dim", message), 1, 0)); + this.tui.requestRender(); + } + + handleInput(data: string): void { + const kb = getEditorKeybindings(); + + if (kb.matches(data, "selectCancel")) { + this.cancel(); + return; + } + + // Pass to input + this.input.handleInput(data); + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/model-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/model-selector.ts new file mode 100644 index 000000000..3ec0fae26 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/model-selector.ts @@ -0,0 +1,329 @@ +import { type Model, modelsAreEqual } from "@gsd/pi-ai"; +import { + Container, + type Focusable, + fuzzyFilter, + getEditorKeybindings, + Input, + Spacer, + Text, + type TUI, +} from "@gsd/pi-tui"; +import type { ModelRegistry } from "../../../core/model-registry.js"; +import type { SettingsManager } from "../../../core/settings-manager.js"; +import { theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { keyHint } from "./keybinding-hints.js"; + +interface ModelItem { + provider: string; + id: string; + model: Model; +} + +interface ScopedModelItem { + model: Model; + thinkingLevel?: string; +} + +type ModelScope = "all" | "scoped"; + +/** + * Component that renders a model selector with search + */ +export class ModelSelectorComponent extends Container implements Focusable { + private searchInput: Input; + + // Focusable implementation - propagate to searchInput for IME cursor positioning + private 
_focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.searchInput.focused = value; + } + private listContainer: Container; + private allModels: ModelItem[] = []; + private scopedModelItems: ModelItem[] = []; + private activeModels: ModelItem[] = []; + private filteredModels: ModelItem[] = []; + private selectedIndex: number = 0; + private currentModel?: Model; + private settingsManager: SettingsManager; + private modelRegistry: ModelRegistry; + private onSelectCallback: (model: Model) => void; + private onCancelCallback: () => void; + private errorMessage?: string; + private tui: TUI; + private scopedModels: ReadonlyArray; + private scope: ModelScope = "all"; + private scopeText?: Text; + private scopeHintText?: Text; + + constructor( + tui: TUI, + currentModel: Model | undefined, + settingsManager: SettingsManager, + modelRegistry: ModelRegistry, + scopedModels: ReadonlyArray, + onSelect: (model: Model) => void, + onCancel: () => void, + initialSearchInput?: string, + ) { + super(); + + this.tui = tui; + this.currentModel = currentModel; + this.settingsManager = settingsManager; + this.modelRegistry = modelRegistry; + this.scopedModels = scopedModels; + this.scope = scopedModels.length > 0 ? 
"scoped" : "all"; + this.onSelectCallback = onSelect; + this.onCancelCallback = onCancel; + + // Add top border + this.addChild(new DynamicBorder()); + this.addChild(new Spacer(1)); + + // Add hint about model filtering + if (scopedModels.length > 0) { + this.scopeText = new Text(this.getScopeText(), 0, 0); + this.addChild(this.scopeText); + this.scopeHintText = new Text(this.getScopeHintText(), 0, 0); + this.addChild(this.scopeHintText); + } else { + const hintText = "Only showing models with configured API keys (see README for details)"; + this.addChild(new Text(theme.fg("warning", hintText), 0, 0)); + } + this.addChild(new Spacer(1)); + + // Create search input + this.searchInput = new Input(); + if (initialSearchInput) { + this.searchInput.setValue(initialSearchInput); + } + this.searchInput.onSubmit = () => { + // Enter on search input selects the first filtered item + if (this.filteredModels[this.selectedIndex]) { + this.handleSelect(this.filteredModels[this.selectedIndex].model); + } + }; + this.addChild(this.searchInput); + + this.addChild(new Spacer(1)); + + // Create list container + this.listContainer = new Container(); + this.addChild(this.listContainer); + + this.addChild(new Spacer(1)); + + // Add bottom border + this.addChild(new DynamicBorder()); + + // Load models and do initial render + this.loadModels().then(() => { + if (initialSearchInput) { + this.filterModels(initialSearchInput); + } else { + this.updateList(); + } + // Request re-render after models are loaded + this.tui.requestRender(); + }); + } + + private async loadModels(): Promise { + let models: ModelItem[]; + + // Refresh to pick up any changes to models.json + this.modelRegistry.refresh(); + + // Check for models.json errors + const loadError = this.modelRegistry.getError(); + if (loadError) { + this.errorMessage = loadError; + } + + // Load available models (built-in models still work even if models.json failed) + try { + const availableModels = await 
this.modelRegistry.getAvailable(); + models = availableModels.map((model: Model) => ({ + provider: model.provider, + id: model.id, + model, + })); + } catch (error) { + this.allModels = []; + this.scopedModelItems = []; + this.activeModels = []; + this.filteredModels = []; + this.errorMessage = error instanceof Error ? error.message : String(error); + return; + } + + this.allModels = this.sortModels(models); + this.scopedModelItems = this.sortModels( + this.scopedModels.map((scoped) => ({ + provider: scoped.model.provider, + id: scoped.model.id, + model: scoped.model, + })), + ); + this.activeModels = this.scope === "scoped" ? this.scopedModelItems : this.allModels; + this.filteredModels = this.activeModels; + this.selectedIndex = Math.min(this.selectedIndex, Math.max(0, this.filteredModels.length - 1)); + } + + private sortModels(models: ModelItem[]): ModelItem[] { + const sorted = [...models]; + // Sort: current model first, then by provider + sorted.sort((a, b) => { + const aIsCurrent = modelsAreEqual(this.currentModel, a.model); + const bIsCurrent = modelsAreEqual(this.currentModel, b.model); + if (aIsCurrent && !bIsCurrent) return -1; + if (!aIsCurrent && bIsCurrent) return 1; + return a.provider.localeCompare(b.provider); + }); + return sorted; + } + + private getScopeText(): string { + const allText = this.scope === "all" ? theme.fg("accent", "all") : theme.fg("muted", "all"); + const scopedText = this.scope === "scoped" ? theme.fg("accent", "scoped") : theme.fg("muted", "scoped"); + return `${theme.fg("muted", "Scope: ")}${allText}${theme.fg("muted", " | ")}${scopedText}`; + } + + private getScopeHintText(): string { + return keyHint("tab", "scope") + theme.fg("muted", " (all/scoped)"); + } + + private setScope(scope: ModelScope): void { + if (this.scope === scope) return; + this.scope = scope; + this.activeModels = this.scope === "scoped" ? 
this.scopedModelItems : this.allModels; + this.selectedIndex = 0; + this.filterModels(this.searchInput.getValue()); + if (this.scopeText) { + this.scopeText.setText(this.getScopeText()); + } + } + + private filterModels(query: string): void { + this.filteredModels = query + ? fuzzyFilter(this.activeModels, query, ({ id, provider }) => `${id} ${provider}`) + : this.activeModels; + this.selectedIndex = Math.min(this.selectedIndex, Math.max(0, this.filteredModels.length - 1)); + this.updateList(); + } + + private updateList(): void { + this.listContainer.clear(); + + const maxVisible = 10; + const startIndex = Math.max( + 0, + Math.min(this.selectedIndex - Math.floor(maxVisible / 2), this.filteredModels.length - maxVisible), + ); + const endIndex = Math.min(startIndex + maxVisible, this.filteredModels.length); + + // Show visible slice of filtered models + for (let i = startIndex; i < endIndex; i++) { + const item = this.filteredModels[i]; + if (!item) continue; + + const isSelected = i === this.selectedIndex; + const isCurrent = modelsAreEqual(this.currentModel, item.model); + + let line = ""; + if (isSelected) { + const prefix = theme.fg("accent", "→ "); + const modelText = `${item.id}`; + const providerBadge = theme.fg("muted", `[${item.provider}]`); + const checkmark = isCurrent ? theme.fg("success", " ✓") : ""; + line = `${prefix + theme.fg("accent", modelText)} ${providerBadge}${checkmark}`; + } else { + const modelText = ` ${item.id}`; + const providerBadge = theme.fg("muted", `[${item.provider}]`); + const checkmark = isCurrent ? 
theme.fg("success", " ✓") : ""; + line = `${modelText} ${providerBadge}${checkmark}`; + } + + this.listContainer.addChild(new Text(line, 0, 0)); + } + + // Add scroll indicator if needed + if (startIndex > 0 || endIndex < this.filteredModels.length) { + const scrollInfo = theme.fg("muted", ` (${this.selectedIndex + 1}/${this.filteredModels.length})`); + this.listContainer.addChild(new Text(scrollInfo, 0, 0)); + } + + // Show error message or "no results" if empty + if (this.errorMessage) { + // Show error in red + const errorLines = this.errorMessage.split("\n"); + for (const line of errorLines) { + this.listContainer.addChild(new Text(theme.fg("error", line), 0, 0)); + } + } else if (this.filteredModels.length === 0) { + this.listContainer.addChild(new Text(theme.fg("muted", " No matching models"), 0, 0)); + } else { + const selected = this.filteredModels[this.selectedIndex]; + this.listContainer.addChild(new Spacer(1)); + this.listContainer.addChild(new Text(theme.fg("muted", ` Model Name: ${selected.model.name}`), 0, 0)); + } + } + + handleInput(keyData: string): void { + const kb = getEditorKeybindings(); + if (kb.matches(keyData, "tab")) { + if (this.scopedModelItems.length > 0) { + const nextScope: ModelScope = this.scope === "all" ? "scoped" : "all"; + this.setScope(nextScope); + if (this.scopeHintText) { + this.scopeHintText.setText(this.getScopeHintText()); + } + } + return; + } + // Up arrow - wrap to bottom when at top + if (kb.matches(keyData, "selectUp")) { + if (this.filteredModels.length === 0) return; + this.selectedIndex = this.selectedIndex === 0 ? this.filteredModels.length - 1 : this.selectedIndex - 1; + this.updateList(); + } + // Down arrow - wrap to top when at bottom + else if (kb.matches(keyData, "selectDown")) { + if (this.filteredModels.length === 0) return; + this.selectedIndex = this.selectedIndex === this.filteredModels.length - 1 ? 
0 : this.selectedIndex + 1; + this.updateList(); + } + // Enter + else if (kb.matches(keyData, "selectConfirm")) { + const selectedModel = this.filteredModels[this.selectedIndex]; + if (selectedModel) { + this.handleSelect(selectedModel.model); + } + } + // Escape or Ctrl+C + else if (kb.matches(keyData, "selectCancel")) { + this.onCancelCallback(); + } + // Pass everything else to search input + else { + this.searchInput.handleInput(keyData); + this.filterModels(this.searchInput.getValue()); + } + } + + private handleSelect(model: Model): void { + // Save as new default + this.settingsManager.setDefaultModelAndProvider(model.provider, model.id); + this.onSelectCallback(model); + } + + getSearchInput(): Input { + return this.searchInput; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/oauth-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/oauth-selector.ts new file mode 100644 index 000000000..17844be07 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/oauth-selector.ts @@ -0,0 +1,121 @@ +import type { OAuthProviderInterface } from "@gsd/pi-ai"; +import { getOAuthProviders } from "@gsd/pi-ai/oauth"; +import { Container, getEditorKeybindings, Spacer, TruncatedText } from "@gsd/pi-tui"; +import type { AuthStorage } from "../../../core/auth-storage.js"; +import { theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; + +/** + * Component that renders an OAuth provider selector + */ +export class OAuthSelectorComponent extends Container { + private listContainer: Container; + private allProviders: OAuthProviderInterface[] = []; + private selectedIndex: number = 0; + private mode: "login" | "logout"; + private authStorage: AuthStorage; + private onSelectCallback: (providerId: string) => void; + private onCancelCallback: () => void; + + constructor( + mode: "login" | "logout", + authStorage: AuthStorage, + onSelect: (providerId: string) => void, + onCancel: 
() => void, + ) { + super(); + + this.mode = mode; + this.authStorage = authStorage; + this.onSelectCallback = onSelect; + this.onCancelCallback = onCancel; + + // Load all OAuth providers + this.loadProviders(); + + // Add top border + this.addChild(new DynamicBorder()); + this.addChild(new Spacer(1)); + + // Add title + const title = mode === "login" ? "Select provider to login:" : "Select provider to logout:"; + this.addChild(new TruncatedText(theme.bold(title))); + this.addChild(new Spacer(1)); + + // Create list container + this.listContainer = new Container(); + this.addChild(this.listContainer); + + this.addChild(new Spacer(1)); + + // Add bottom border + this.addChild(new DynamicBorder()); + + // Initial render + this.updateList(); + } + + private loadProviders(): void { + this.allProviders = getOAuthProviders(); + } + + private updateList(): void { + this.listContainer.clear(); + + for (let i = 0; i < this.allProviders.length; i++) { + const provider = this.allProviders[i]; + if (!provider) continue; + + const isSelected = i === this.selectedIndex; + + // Check if user is logged in for this provider + const credentials = this.authStorage.get(provider.id); + const isLoggedIn = credentials?.type === "oauth"; + const statusIndicator = isLoggedIn ? theme.fg("success", " ✓ logged in") : ""; + + let line = ""; + if (isSelected) { + const prefix = theme.fg("accent", "→ "); + const text = theme.fg("accent", provider.name); + line = prefix + text + statusIndicator; + } else { + const text = ` ${provider.name}`; + line = text + statusIndicator; + } + + this.listContainer.addChild(new TruncatedText(line, 0, 0)); + } + + // Show "no providers" if empty + if (this.allProviders.length === 0) { + const message = + this.mode === "login" ? "No OAuth providers available" : "No OAuth providers logged in. 
Use /login first."; + this.listContainer.addChild(new TruncatedText(theme.fg("muted", ` ${message}`), 0, 0)); + } + } + + handleInput(keyData: string): void { + const kb = getEditorKeybindings(); + // Up arrow + if (kb.matches(keyData, "selectUp")) { + this.selectedIndex = Math.max(0, this.selectedIndex - 1); + this.updateList(); + } + // Down arrow + else if (kb.matches(keyData, "selectDown")) { + this.selectedIndex = Math.min(this.allProviders.length - 1, this.selectedIndex + 1); + this.updateList(); + } + // Enter + else if (kb.matches(keyData, "selectConfirm")) { + const selectedProvider = this.allProviders[this.selectedIndex]; + if (selectedProvider) { + this.onSelectCallback(selectedProvider.id); + } + } + // Escape or Ctrl+C + else if (kb.matches(keyData, "selectCancel")) { + this.onCancelCallback(); + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/scoped-models-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/scoped-models-selector.ts new file mode 100644 index 000000000..37d334cca --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/scoped-models-selector.ts @@ -0,0 +1,346 @@ +import type { Model } from "@gsd/pi-ai"; +import { + Container, + type Focusable, + fuzzyFilter, + getEditorKeybindings, + Input, + Key, + matchesKey, + Spacer, + Text, +} from "@gsd/pi-tui"; +import { theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; + +// EnabledIds: null = all enabled (no filter), string[] = explicit ordered list +type EnabledIds = string[] | null; + +function isEnabled(enabledIds: EnabledIds, id: string): boolean { + return enabledIds === null || enabledIds.includes(id); +} + +function toggle(enabledIds: EnabledIds, id: string): EnabledIds { + if (enabledIds === null) return [id]; // First toggle: start with only this one + const index = enabledIds.indexOf(id); + if (index >= 0) return [...enabledIds.slice(0, index), ...enabledIds.slice(index + 
1)]; + return [...enabledIds, id]; +} + +function enableAll(enabledIds: EnabledIds, allIds: string[], targetIds?: string[]): EnabledIds { + if (enabledIds === null) return null; // Already all enabled + const targets = targetIds ?? allIds; + const result = [...enabledIds]; + for (const id of targets) { + if (!result.includes(id)) result.push(id); + } + return result.length === allIds.length ? null : result; +} + +function clearAll(enabledIds: EnabledIds, allIds: string[], targetIds?: string[]): EnabledIds { + if (enabledIds === null) { + return targetIds ? allIds.filter((id) => !targetIds.includes(id)) : []; + } + const targets = new Set(targetIds ?? enabledIds); + return enabledIds.filter((id) => !targets.has(id)); +} + +function move(enabledIds: EnabledIds, allIds: string[], id: string, delta: number): EnabledIds { + const list = enabledIds ?? [...allIds]; + const index = list.indexOf(id); + if (index < 0) return list; + const newIndex = index + delta; + if (newIndex < 0 || newIndex >= list.length) return list; + const result = [...list]; + [result[index], result[newIndex]] = [result[newIndex], result[index]]; + return result; +} + +function getSortedIds(enabledIds: EnabledIds, allIds: string[]): string[] { + if (enabledIds === null) return allIds; + const enabledSet = new Set(enabledIds); + return [...enabledIds, ...allIds.filter((id) => !enabledSet.has(id))]; +} + +interface ModelItem { + fullId: string; + model: Model; + enabled: boolean; +} + +export interface ModelsConfig { + allModels: Model[]; + enabledModelIds: Set; + /** true if enabledModels setting is defined (empty = all enabled) */ + hasEnabledModelsFilter: boolean; +} + +export interface ModelsCallbacks { + /** Called when a model is toggled (session-only, no persist) */ + onModelToggle: (modelId: string, enabled: boolean) => void; + /** Called when user wants to persist current selection to settings */ + onPersist: (enabledModelIds: string[]) => void; + /** Called when user enables all models. 
Returns list of all model IDs. */ + onEnableAll: (allModelIds: string[]) => void; + /** Called when user clears all models */ + onClearAll: () => void; + /** Called when user toggles all models for a provider. Returns affected model IDs. */ + onToggleProvider: (provider: string, modelIds: string[], enabled: boolean) => void; + onCancel: () => void; +} + +/** + * Component for enabling/disabling models for Ctrl+P cycling. + * Changes are session-only until explicitly persisted with Ctrl+S. + */ +export class ScopedModelsSelectorComponent extends Container implements Focusable { + private modelsById: Map> = new Map(); + private allIds: string[] = []; + private enabledIds: EnabledIds = null; + private filteredItems: ModelItem[] = []; + private selectedIndex = 0; + private searchInput: Input; + + // Focusable implementation - propagate to searchInput for IME cursor positioning + private _focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.searchInput.focused = value; + } + private listContainer: Container; + private footerText: Text; + private callbacks: ModelsCallbacks; + private maxVisible = 15; + private isDirty = false; + + constructor(config: ModelsConfig, callbacks: ModelsCallbacks) { + super(); + this.callbacks = callbacks; + + for (const model of config.allModels) { + const fullId = `${model.provider}/${model.id}`; + this.modelsById.set(fullId, model); + this.allIds.push(fullId); + } + + this.enabledIds = config.hasEnabledModelsFilter ? [...config.enabledModelIds] : null; + this.filteredItems = this.buildItems(); + + // Header + this.addChild(new DynamicBorder()); + this.addChild(new Spacer(1)); + this.addChild(new Text(theme.fg("accent", theme.bold("Model Configuration")), 0, 0)); + this.addChild(new Text(theme.fg("muted", "Session-only. 
Ctrl+S to save to settings."), 0, 0)); + this.addChild(new Spacer(1)); + + // Search input + this.searchInput = new Input(); + this.addChild(this.searchInput); + this.addChild(new Spacer(1)); + + // List container + this.listContainer = new Container(); + this.addChild(this.listContainer); + + // Footer hint + this.addChild(new Spacer(1)); + this.footerText = new Text(this.getFooterText(), 0, 0); + this.addChild(this.footerText); + + this.addChild(new DynamicBorder()); + this.updateList(); + } + + private buildItems(): ModelItem[] { + // Filter out IDs that no longer have a corresponding model (e.g., after logout) + return getSortedIds(this.enabledIds, this.allIds) + .filter((id) => this.modelsById.has(id)) + .map((id) => ({ + fullId: id, + model: this.modelsById.get(id)!, + enabled: isEnabled(this.enabledIds, id), + })); + } + + private getFooterText(): string { + const enabledCount = this.enabledIds?.length ?? this.allIds.length; + const allEnabled = this.enabledIds === null; + const countText = allEnabled ? "all enabled" : `${enabledCount}/${this.allIds.length} enabled`; + const parts = ["Enter toggle", "^A all", "^X clear", "^P provider", "Alt+↑↓ reorder", "^S save", countText]; + return this.isDirty + ? theme.fg("dim", ` ${parts.join(" · ")} `) + theme.fg("warning", "(unsaved)") + : theme.fg("dim", ` ${parts.join(" · ")}`); + } + + private refresh(): void { + const query = this.searchInput.getValue(); + const items = this.buildItems(); + this.filteredItems = query ? 
fuzzyFilter(items, query, (i) => `${i.model.id} ${i.model.provider}`) : items; + this.selectedIndex = Math.min(this.selectedIndex, Math.max(0, this.filteredItems.length - 1)); + this.updateList(); + this.footerText.setText(this.getFooterText()); + } + + private updateList(): void { + this.listContainer.clear(); + + if (this.filteredItems.length === 0) { + this.listContainer.addChild(new Text(theme.fg("muted", " No matching models"), 0, 0)); + return; + } + + const startIndex = Math.max( + 0, + Math.min(this.selectedIndex - Math.floor(this.maxVisible / 2), this.filteredItems.length - this.maxVisible), + ); + const endIndex = Math.min(startIndex + this.maxVisible, this.filteredItems.length); + const allEnabled = this.enabledIds === null; + + for (let i = startIndex; i < endIndex; i++) { + const item = this.filteredItems[i]!; + const isSelected = i === this.selectedIndex; + const prefix = isSelected ? theme.fg("accent", "→ ") : " "; + const modelText = isSelected ? theme.fg("accent", item.model.id) : item.model.id; + const providerBadge = theme.fg("muted", ` [${item.model.provider}]`); + const status = allEnabled ? "" : item.enabled ? 
theme.fg("success", " ✓") : theme.fg("dim", " ✗"); + this.listContainer.addChild(new Text(`${prefix}${modelText}${providerBadge}${status}`, 0, 0)); + } + + // Add scroll indicator if needed + if (startIndex > 0 || endIndex < this.filteredItems.length) { + this.listContainer.addChild( + new Text(theme.fg("muted", ` (${this.selectedIndex + 1}/${this.filteredItems.length})`), 0, 0), + ); + } + + if (this.filteredItems.length > 0) { + const selected = this.filteredItems[this.selectedIndex]; + this.listContainer.addChild(new Spacer(1)); + this.listContainer.addChild(new Text(theme.fg("muted", ` Model Name: ${selected.model.name}`), 0, 0)); + } + } + + handleInput(data: string): void { + const kb = getEditorKeybindings(); + + // Navigation + if (kb.matches(data, "selectUp")) { + if (this.filteredItems.length === 0) return; + this.selectedIndex = this.selectedIndex === 0 ? this.filteredItems.length - 1 : this.selectedIndex - 1; + this.updateList(); + return; + } + if (kb.matches(data, "selectDown")) { + if (this.filteredItems.length === 0) return; + this.selectedIndex = this.selectedIndex === this.filteredItems.length - 1 ? 0 : this.selectedIndex + 1; + this.updateList(); + return; + } + + // Alt+Up/Down - Reorder enabled models + if (matchesKey(data, Key.alt("up")) || matchesKey(data, Key.alt("down"))) { + const item = this.filteredItems[this.selectedIndex]; + if (item && isEnabled(this.enabledIds, item.fullId)) { + const delta = matchesKey(data, Key.alt("up")) ? -1 : 1; + const enabledList = this.enabledIds ?? 
this.allIds; + const currentIndex = enabledList.indexOf(item.fullId); + const newIndex = currentIndex + delta; + // Only move if within bounds + if (newIndex >= 0 && newIndex < enabledList.length) { + this.enabledIds = move(this.enabledIds, this.allIds, item.fullId, delta); + this.isDirty = true; + this.selectedIndex += delta; + this.refresh(); + } + } + return; + } + + // Toggle on Enter + if (matchesKey(data, Key.enter)) { + const item = this.filteredItems[this.selectedIndex]; + if (item) { + const wasAllEnabled = this.enabledIds === null; + this.enabledIds = toggle(this.enabledIds, item.fullId); + this.isDirty = true; + if (wasAllEnabled) this.callbacks.onClearAll(); + this.callbacks.onModelToggle(item.fullId, isEnabled(this.enabledIds, item.fullId)); + this.refresh(); + } + return; + } + + // Ctrl+A - Enable all (filtered if search active, otherwise all) + if (matchesKey(data, Key.ctrl("a"))) { + const targetIds = this.searchInput.getValue() ? this.filteredItems.map((i) => i.fullId) : undefined; + this.enabledIds = enableAll(this.enabledIds, this.allIds, targetIds); + this.isDirty = true; + this.callbacks.onEnableAll(targetIds ?? this.allIds); + this.refresh(); + return; + } + + // Ctrl+X - Clear all (filtered if search active, otherwise all) + if (matchesKey(data, Key.ctrl("x"))) { + const targetIds = this.searchInput.getValue() ? this.filteredItems.map((i) => i.fullId) : undefined; + this.enabledIds = clearAll(this.enabledIds, this.allIds, targetIds); + this.isDirty = true; + this.callbacks.onClearAll(); + this.refresh(); + return; + } + + // Ctrl+P - Toggle provider of current item + if (matchesKey(data, Key.ctrl("p"))) { + const item = this.filteredItems[this.selectedIndex]; + if (item) { + const provider = item.model.provider; + const providerIds = this.allIds.filter((id) => this.modelsById.get(id)!.provider === provider); + const allEnabled = providerIds.every((id) => isEnabled(this.enabledIds, id)); + this.enabledIds = allEnabled + ? 
clearAll(this.enabledIds, this.allIds, providerIds) + : enableAll(this.enabledIds, this.allIds, providerIds); + this.isDirty = true; + this.callbacks.onToggleProvider(provider, providerIds, !allEnabled); + this.refresh(); + } + return; + } + + // Ctrl+S - Save/persist to settings + if (matchesKey(data, Key.ctrl("s"))) { + this.callbacks.onPersist(this.enabledIds ?? [...this.allIds]); + this.isDirty = false; + this.footerText.setText(this.getFooterText()); + return; + } + + // Ctrl+C - clear search or cancel if empty + if (matchesKey(data, Key.ctrl("c"))) { + if (this.searchInput.getValue()) { + this.searchInput.setValue(""); + this.refresh(); + } else { + this.callbacks.onCancel(); + } + return; + } + + // Escape - cancel + if (matchesKey(data, Key.escape)) { + this.callbacks.onCancel(); + return; + } + + // Pass everything else to search input + this.searchInput.handleInput(data); + this.refresh(); + } + + getSearchInput(): Input { + return this.searchInput; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/session-selector-search.ts b/packages/pi-coding-agent/src/modes/interactive/components/session-selector-search.ts new file mode 100644 index 000000000..b780d92e5 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/session-selector-search.ts @@ -0,0 +1,194 @@ +import { fuzzyMatch } from "@gsd/pi-tui"; +import type { SessionInfo } from "../../../core/session-manager.js"; + +export type SortMode = "threaded" | "recent" | "relevance"; + +export type NameFilter = "all" | "named"; + +export interface ParsedSearchQuery { + mode: "tokens" | "regex"; + tokens: { kind: "fuzzy" | "phrase"; value: string }[]; + regex: RegExp | null; + /** If set, parsing failed and we should treat query as non-matching. 
*/ + error?: string; +} + +export interface MatchResult { + matches: boolean; + /** Lower is better; only meaningful when matches === true */ + score: number; +} + +function normalizeWhitespaceLower(text: string): string { + return text.toLowerCase().replace(/\s+/g, " ").trim(); +} + +function getSessionSearchText(session: SessionInfo): string { + return `${session.id} ${session.name ?? ""} ${session.allMessagesText} ${session.cwd}`; +} + +export function hasSessionName(session: SessionInfo): boolean { + return Boolean(session.name?.trim()); +} + +function matchesNameFilter(session: SessionInfo, filter: NameFilter): boolean { + if (filter === "all") return true; + return hasSessionName(session); +} + +export function parseSearchQuery(query: string): ParsedSearchQuery { + const trimmed = query.trim(); + if (!trimmed) { + return { mode: "tokens", tokens: [], regex: null }; + } + + // Regex mode: re: + if (trimmed.startsWith("re:")) { + const pattern = trimmed.slice(3).trim(); + if (!pattern) { + return { mode: "regex", tokens: [], regex: null, error: "Empty regex" }; + } + try { + return { mode: "regex", tokens: [], regex: new RegExp(pattern, "i") }; + } catch (err) { + const msg = err instanceof Error ? err.message : String(err); + return { mode: "regex", tokens: [], regex: null, error: msg }; + } + } + + // Token mode with quote support. 
+ // Example: foo "node cve" bar + const tokens: { kind: "fuzzy" | "phrase"; value: string }[] = []; + let buf = ""; + let inQuote = false; + let hadUnclosedQuote = false; + + const flush = (kind: "fuzzy" | "phrase"): void => { + const v = buf.trim(); + buf = ""; + if (!v) return; + tokens.push({ kind, value: v }); + }; + + for (let i = 0; i < trimmed.length; i++) { + const ch = trimmed[i]!; + if (ch === '"') { + if (inQuote) { + flush("phrase"); + inQuote = false; + } else { + flush("fuzzy"); + inQuote = true; + } + continue; + } + + if (!inQuote && /\s/.test(ch)) { + flush("fuzzy"); + continue; + } + + buf += ch; + } + + if (inQuote) { + hadUnclosedQuote = true; + } + + // If quotes were unbalanced, fall back to plain whitespace tokenization. + if (hadUnclosedQuote) { + return { + mode: "tokens", + tokens: trimmed + .split(/\s+/) + .map((t) => t.trim()) + .filter((t) => t.length > 0) + .map((t) => ({ kind: "fuzzy" as const, value: t })), + regex: null, + }; + } + + flush(inQuote ? "phrase" : "fuzzy"); + + return { mode: "tokens", tokens, regex: null }; +} + +export function matchSession(session: SessionInfo, parsed: ParsedSearchQuery): MatchResult { + const text = getSessionSearchText(session); + + if (parsed.mode === "regex") { + if (!parsed.regex) { + return { matches: false, score: 0 }; + } + const idx = text.search(parsed.regex); + if (idx < 0) return { matches: false, score: 0 }; + return { matches: true, score: idx * 0.1 }; + } + + if (parsed.tokens.length === 0) { + return { matches: true, score: 0 }; + } + + let totalScore = 0; + let normalizedText: string | null = null; + + for (const token of parsed.tokens) { + if (token.kind === "phrase") { + if (normalizedText === null) { + normalizedText = normalizeWhitespaceLower(text); + } + const phrase = normalizeWhitespaceLower(token.value); + if (!phrase) continue; + const idx = normalizedText.indexOf(phrase); + if (idx < 0) return { matches: false, score: 0 }; + totalScore += idx * 0.1; + continue; + } + + 
const m = fuzzyMatch(token.value, text); + if (!m.matches) return { matches: false, score: 0 }; + totalScore += m.score; + } + + return { matches: true, score: totalScore }; +} + +export function filterAndSortSessions( + sessions: SessionInfo[], + query: string, + sortMode: SortMode, + nameFilter: NameFilter = "all", +): SessionInfo[] { + const nameFiltered = + nameFilter === "all" ? sessions : sessions.filter((session) => matchesNameFilter(session, nameFilter)); + const trimmed = query.trim(); + if (!trimmed) return nameFiltered; + + const parsed = parseSearchQuery(query); + if (parsed.error) return []; + + // Recent mode: filter only, keep incoming order. + if (sortMode === "recent") { + const filtered: SessionInfo[] = []; + for (const s of nameFiltered) { + const res = matchSession(s, parsed); + if (res.matches) filtered.push(s); + } + return filtered; + } + + // Relevance mode: sort by score, tie-break by modified desc. + const scored: { session: SessionInfo; score: number }[] = []; + for (const s of nameFiltered) { + const res = matchSession(s, parsed); + if (!res.matches) continue; + scored.push({ session: s, score: res.score }); + } + + scored.sort((a, b) => { + if (a.score !== b.score) return a.score - b.score; + return b.session.modified.getTime() - a.session.modified.getTime(); + }); + + return scored.map((r) => r.session); +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/session-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/session-selector.ts new file mode 100644 index 000000000..f38de288f --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/session-selector.ts @@ -0,0 +1,1019 @@ +import { spawnSync } from "node:child_process"; +import { existsSync } from "node:fs"; +import { unlink } from "node:fs/promises"; +import * as os from "node:os"; +import { + type Component, + Container, + type Focusable, + getEditorKeybindings, + Input, + matchesKey, + Spacer, + Text, + 
truncateToWidth, + visibleWidth, +} from "@gsd/pi-tui"; +import { KeybindingsManager } from "../../../core/keybindings.js"; +import type { SessionInfo, SessionListProgress } from "../../../core/session-manager.js"; +import { theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { appKey, appKeyHint, keyHint } from "./keybinding-hints.js"; +import { filterAndSortSessions, hasSessionName, type NameFilter, type SortMode } from "./session-selector-search.js"; + +type SessionScope = "current" | "all"; + +function shortenPath(path: string): string { + const home = os.homedir(); + if (!path) return path; + if (path.startsWith(home)) { + return `~${path.slice(home.length)}`; + } + return path; +} + +function formatSessionDate(date: Date): string { + const now = new Date(); + const diffMs = now.getTime() - date.getTime(); + const diffMins = Math.floor(diffMs / 60000); + const diffHours = Math.floor(diffMs / 3600000); + const diffDays = Math.floor(diffMs / 86400000); + + if (diffMins < 1) return "now"; + if (diffMins < 60) return `${diffMins}m`; + if (diffHours < 24) return `${diffHours}h`; + if (diffDays < 7) return `${diffDays}d`; + if (diffDays < 30) return `${Math.floor(diffDays / 7)}w`; + if (diffDays < 365) return `${Math.floor(diffDays / 30)}mo`; + return `${Math.floor(diffDays / 365)}y`; +} + +class SessionSelectorHeader implements Component { + private scope: SessionScope; + private sortMode: SortMode; + private nameFilter: NameFilter; + private keybindings: KeybindingsManager; + private requestRender: () => void; + private loading = false; + private loadProgress: { loaded: number; total: number } | null = null; + private showPath = false; + private confirmingDeletePath: string | null = null; + private statusMessage: { type: "info" | "error"; message: string } | null = null; + private statusTimeout: ReturnType | null = null; + private showRenameHint = false; + + constructor( + scope: SessionScope, + sortMode: SortMode, + 
nameFilter: NameFilter, + keybindings: KeybindingsManager, + requestRender: () => void, + ) { + this.scope = scope; + this.sortMode = sortMode; + this.nameFilter = nameFilter; + this.keybindings = keybindings; + this.requestRender = requestRender; + } + + setScope(scope: SessionScope): void { + this.scope = scope; + } + + setSortMode(sortMode: SortMode): void { + this.sortMode = sortMode; + } + + setNameFilter(nameFilter: NameFilter): void { + this.nameFilter = nameFilter; + } + + setLoading(loading: boolean): void { + this.loading = loading; + // Progress is scoped to the current load; clear whenever the loading state is set + this.loadProgress = null; + } + + setProgress(loaded: number, total: number): void { + this.loadProgress = { loaded, total }; + } + + setShowPath(showPath: boolean): void { + this.showPath = showPath; + } + + setShowRenameHint(show: boolean): void { + this.showRenameHint = show; + } + + setConfirmingDeletePath(path: string | null): void { + this.confirmingDeletePath = path; + } + + private clearStatusTimeout(): void { + if (!this.statusTimeout) return; + clearTimeout(this.statusTimeout); + this.statusTimeout = null; + } + + setStatusMessage(msg: { type: "info" | "error"; message: string } | null, autoHideMs?: number): void { + this.clearStatusTimeout(); + this.statusMessage = msg; + if (!msg || !autoHideMs) return; + + this.statusTimeout = setTimeout(() => { + this.statusMessage = null; + this.statusTimeout = null; + this.requestRender(); + }, autoHideMs); + } + + invalidate(): void {} + + render(width: number): string[] { + const title = this.scope === "current" ? "Resume Session (Current Folder)" : "Resume Session (All)"; + const leftText = theme.bold(title); + + const sortLabel = this.sortMode === "threaded" ? "Threaded" : this.sortMode === "recent" ? "Recent" : "Fuzzy"; + const sortText = theme.fg("muted", "Sort: ") + theme.fg("accent", sortLabel); + + const nameLabel = this.nameFilter === "all" ? 
"All" : "Named"; + const nameText = theme.fg("muted", "Name: ") + theme.fg("accent", nameLabel); + + let scopeText: string; + if (this.loading) { + const progressText = this.loadProgress ? `${this.loadProgress.loaded}/${this.loadProgress.total}` : "..."; + scopeText = `${theme.fg("muted", "○ Current Folder | ")}${theme.fg("accent", `Loading ${progressText}`)}`; + } else if (this.scope === "current") { + scopeText = `${theme.fg("accent", "◉ Current Folder")}${theme.fg("muted", " | ○ All")}`; + } else { + scopeText = `${theme.fg("muted", "○ Current Folder | ")}${theme.fg("accent", "◉ All")}`; + } + + const rightText = truncateToWidth(`${scopeText} ${nameText} ${sortText}`, width, ""); + const availableLeft = Math.max(0, width - visibleWidth(rightText) - 1); + const left = truncateToWidth(leftText, availableLeft, ""); + const spacing = Math.max(0, width - visibleWidth(left) - visibleWidth(rightText)); + + // Build hint lines - changes based on state (all branches truncate to width) + let hintLine1: string; + let hintLine2: string; + if (this.confirmingDeletePath !== null) { + const confirmHint = "Delete session? [Enter] confirm · [Esc/Ctrl+C] cancel"; + hintLine1 = theme.fg("error", truncateToWidth(confirmHint, width, "…")); + hintLine2 = ""; + } else if (this.statusMessage) { + const color = this.statusMessage.type === "error" ? "error" : "accent"; + hintLine1 = theme.fg(color, truncateToWidth(this.statusMessage.message, width, "…")); + hintLine2 = ""; + } else { + const pathState = this.showPath ? 
"(on)" : "(off)"; + const sep = theme.fg("muted", " · "); + const hint1 = keyHint("tab", "scope") + sep + theme.fg("muted", 're: regex · "phrase" exact'); + const hint2Parts = [ + keyHint("toggleSessionSort", "sort"), + appKeyHint(this.keybindings, "toggleSessionNamedFilter", "named"), + keyHint("deleteSession", "delete"), + keyHint("toggleSessionPath", `path ${pathState}`), + ]; + if (this.showRenameHint) { + hint2Parts.push(keyHint("renameSession", "rename")); + } + const hint2 = hint2Parts.join(sep); + hintLine1 = truncateToWidth(hint1, width, "…"); + hintLine2 = truncateToWidth(hint2, width, "…"); + } + + return [`${left}${" ".repeat(spacing)}${rightText}`, hintLine1, hintLine2]; + } +} + +/** A session tree node for hierarchical display */ +interface SessionTreeNode { + session: SessionInfo; + children: SessionTreeNode[]; +} + +/** Flattened node for display with tree structure info */ +interface FlatSessionNode { + session: SessionInfo; + depth: number; + isLast: boolean; + /** For each ancestor level, whether there are more siblings after it */ + ancestorContinues: boolean[]; +} + +/** + * Build a tree structure from sessions based on parentSessionPath. + * Returns root nodes sorted by modified date (descending). 
+ */ +function buildSessionTree(sessions: SessionInfo[]): SessionTreeNode[] { + const byPath = new Map(); + + for (const session of sessions) { + byPath.set(session.path, { session, children: [] }); + } + + const roots: SessionTreeNode[] = []; + + for (const session of sessions) { + const node = byPath.get(session.path)!; + const parentPath = session.parentSessionPath; + + if (parentPath && byPath.has(parentPath)) { + byPath.get(parentPath)!.children.push(node); + } else { + roots.push(node); + } + } + + // Sort children and roots by modified date (descending) + const sortNodes = (nodes: SessionTreeNode[]): void => { + nodes.sort((a, b) => b.session.modified.getTime() - a.session.modified.getTime()); + for (const node of nodes) { + sortNodes(node.children); + } + }; + sortNodes(roots); + + return roots; +} + +/** + * Flatten tree into display list with tree structure metadata. + */ +function flattenSessionTree(roots: SessionTreeNode[]): FlatSessionNode[] { + const result: FlatSessionNode[] = []; + + const walk = (node: SessionTreeNode, depth: number, ancestorContinues: boolean[], isLast: boolean): void => { + result.push({ session: node.session, depth, isLast, ancestorContinues }); + + for (let i = 0; i < node.children.length; i++) { + const childIsLast = i === node.children.length - 1; + // Only show continuation line for non-root ancestors + const continues = depth > 0 ? 
!isLast : false; + walk(node.children[i]!, depth + 1, [...ancestorContinues, continues], childIsLast); + } + }; + + for (let i = 0; i < roots.length; i++) { + walk(roots[i]!, 0, [], i === roots.length - 1); + } + + return result; +} + +/** + * Custom session list component with multi-line items and search + */ +class SessionList implements Component, Focusable { + public getSelectedSessionPath(): string | undefined { + const selected = this.filteredSessions[this.selectedIndex]; + return selected?.session.path; + } + private allSessions: SessionInfo[] = []; + private filteredSessions: FlatSessionNode[] = []; + private selectedIndex: number = 0; + private searchInput: Input; + private showCwd = false; + private sortMode: SortMode = "threaded"; + private nameFilter: NameFilter = "all"; + private keybindings: KeybindingsManager; + private showPath = false; + private confirmingDeletePath: string | null = null; + private currentSessionFilePath?: string; + public onSelect?: (sessionPath: string) => void; + public onCancel?: () => void; + public onExit: () => void = () => {}; + public onToggleScope?: () => void; + public onToggleSort?: () => void; + public onToggleNameFilter?: () => void; + public onTogglePath?: (showPath: boolean) => void; + public onDeleteConfirmationChange?: (path: string | null) => void; + public onDeleteSession?: (sessionPath: string) => Promise; + public onRenameSession?: (sessionPath: string) => void; + public onError?: (message: string) => void; + private maxVisible: number = 10; // Max sessions visible (one line each) + + // Focusable implementation - propagate to searchInput for IME cursor positioning + private _focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.searchInput.focused = value; + } + + constructor( + sessions: SessionInfo[], + showCwd: boolean, + sortMode: SortMode, + nameFilter: NameFilter, + keybindings: KeybindingsManager, + 
currentSessionFilePath?: string, + ) { + this.allSessions = sessions; + this.filteredSessions = []; + this.searchInput = new Input(); + this.showCwd = showCwd; + this.sortMode = sortMode; + this.nameFilter = nameFilter; + this.keybindings = keybindings; + this.currentSessionFilePath = currentSessionFilePath; + this.filterSessions(""); + + // Handle Enter in search input - select current item + this.searchInput.onSubmit = () => { + if (this.filteredSessions[this.selectedIndex]) { + const selected = this.filteredSessions[this.selectedIndex]; + if (this.onSelect) { + this.onSelect(selected.session.path); + } + } + }; + } + + setSortMode(sortMode: SortMode): void { + this.sortMode = sortMode; + this.filterSessions(this.searchInput.getValue()); + } + + setNameFilter(nameFilter: NameFilter): void { + this.nameFilter = nameFilter; + this.filterSessions(this.searchInput.getValue()); + } + + setSessions(sessions: SessionInfo[], showCwd: boolean): void { + this.allSessions = sessions; + this.showCwd = showCwd; + this.filterSessions(this.searchInput.getValue()); + } + + private filterSessions(query: string): void { + const trimmed = query.trim(); + const nameFiltered = + this.nameFilter === "all" ? 
this.allSessions : this.allSessions.filter((session) => hasSessionName(session)); + + if (this.sortMode === "threaded" && !trimmed) { + // Threaded mode without search: show tree structure + const roots = buildSessionTree(nameFiltered); + this.filteredSessions = flattenSessionTree(roots); + } else { + // Other modes or with search: flat list + const filtered = filterAndSortSessions(nameFiltered, query, this.sortMode, "all"); + this.filteredSessions = filtered.map((session) => ({ + session, + depth: 0, + isLast: true, + ancestorContinues: [], + })); + } + this.selectedIndex = Math.min(this.selectedIndex, Math.max(0, this.filteredSessions.length - 1)); + } + + private setConfirmingDeletePath(path: string | null): void { + this.confirmingDeletePath = path; + this.onDeleteConfirmationChange?.(path); + } + + private startDeleteConfirmationForSelectedSession(): void { + const selected = this.filteredSessions[this.selectedIndex]; + if (!selected) return; + + // Prevent deleting current session + if (this.currentSessionFilePath && selected.session.path === this.currentSessionFilePath) { + this.onError?.("Cannot delete the currently active session"); + return; + } + + this.setConfirmingDeletePath(selected.session.path); + } + + invalidate(): void {} + + render(width: number): string[] { + const lines: string[] = []; + + // Render search input + lines.push(...this.searchInput.render(width)); + lines.push(""); // Blank line after search + + if (this.filteredSessions.length === 0) { + let emptyMessage: string; + if (this.nameFilter === "named") { + const toggleKey = appKey(this.keybindings, "toggleSessionNamedFilter"); + if (this.showCwd) { + emptyMessage = ` No named sessions found. Press ${toggleKey} to show all.`; + } else { + emptyMessage = ` No named sessions in current folder. 
Press ${toggleKey} to show all, or Tab to view all.`; + } + } else if (this.showCwd) { + // "All" scope - no sessions anywhere that match filter + emptyMessage = " No sessions found"; + } else { + // "Current folder" scope - hint to try "all" + emptyMessage = " No sessions in current folder. Press Tab to view all."; + } + lines.push(theme.fg("muted", truncateToWidth(emptyMessage, width, "…"))); + return lines; + } + + // Calculate visible range with scrolling + const startIndex = Math.max( + 0, + Math.min(this.selectedIndex - Math.floor(this.maxVisible / 2), this.filteredSessions.length - this.maxVisible), + ); + const endIndex = Math.min(startIndex + this.maxVisible, this.filteredSessions.length); + + // Render visible sessions (one line each with tree structure) + for (let i = startIndex; i < endIndex; i++) { + const node = this.filteredSessions[i]!; + const session = node.session; + const isSelected = i === this.selectedIndex; + const isConfirmingDelete = session.path === this.confirmingDeletePath; + const isCurrent = this.currentSessionFilePath === session.path; + + // Build tree prefix + const prefix = this.buildTreePrefix(node); + + // Session display text (name or first message) + const hasName = !!session.name; + const displayText = session.name ?? session.firstMessage; + const normalizedMessage = displayText.replace(/[\x00-\x1f\x7f]/g, " ").trim(); + + // Right side: message count and age + const age = formatSessionDate(session.modified); + const msgCount = String(session.messageCount); + let rightPart = `${msgCount} ${age}`; + if (this.showCwd && session.cwd) { + rightPart = `${shortenPath(session.cwd)} ${rightPart}`; + } + if (this.showPath) { + rightPart = `${shortenPath(session.path)} ${rightPart}`; + } + + // Cursor + const cursor = isSelected ? 
theme.fg("accent", "› ") : " "; + + // Calculate available width for message + const prefixWidth = visibleWidth(prefix); + const rightWidth = visibleWidth(rightPart) + 2; // +2 for spacing + const availableForMsg = width - 2 - prefixWidth - rightWidth; // -2 for cursor + + const truncatedMsg = truncateToWidth(normalizedMessage, Math.max(10, availableForMsg), "…"); + + // Style message + let messageColor: "error" | "warning" | "accent" | null = null; + if (isConfirmingDelete) { + messageColor = "error"; + } else if (isCurrent) { + messageColor = "accent"; + } else if (hasName) { + messageColor = "warning"; + } + let styledMsg = messageColor ? theme.fg(messageColor, truncatedMsg) : truncatedMsg; + if (isSelected) { + styledMsg = theme.bold(styledMsg); + } + + // Build line + const leftPart = cursor + theme.fg("dim", prefix) + styledMsg; + const leftWidth = visibleWidth(leftPart); + const spacing = Math.max(1, width - leftWidth - visibleWidth(rightPart)); + const styledRight = theme.fg(isConfirmingDelete ? "error" : "dim", rightPart); + + let line = leftPart + " ".repeat(spacing) + styledRight; + if (isSelected) { + line = theme.bg("selectedBg", line); + } + lines.push(truncateToWidth(line, width)); + } + + // Add scroll indicator if needed + if (startIndex > 0 || endIndex < this.filteredSessions.length) { + const scrollText = ` (${this.selectedIndex + 1}/${this.filteredSessions.length})`; + const scrollInfo = theme.fg("muted", truncateToWidth(scrollText, width, "")); + lines.push(scrollInfo); + } + + return lines; + } + + private buildTreePrefix(node: FlatSessionNode): string { + if (node.depth === 0) { + return ""; + } + + const parts = node.ancestorContinues.map((continues) => (continues ? "│ " : " ")); + const branch = node.isLast ? 
"└─ " : "├─ "; + return parts.join("") + branch; + } + + handleInput(keyData: string): void { + const kb = getEditorKeybindings(); + + // Handle delete confirmation state first - intercept all keys + if (this.confirmingDeletePath !== null) { + if (kb.matches(keyData, "selectConfirm")) { + const pathToDelete = this.confirmingDeletePath; + this.setConfirmingDeletePath(null); + void this.onDeleteSession?.(pathToDelete); + return; + } + // Allow both Escape and Ctrl+C to cancel (consistent with pi UX) + if (kb.matches(keyData, "selectCancel") || matchesKey(keyData, "ctrl+c")) { + this.setConfirmingDeletePath(null); + return; + } + // Ignore all other keys while confirming + return; + } + + if (kb.matches(keyData, "tab")) { + if (this.onToggleScope) { + this.onToggleScope(); + } + return; + } + + if (kb.matches(keyData, "toggleSessionSort")) { + this.onToggleSort?.(); + return; + } + + if (this.keybindings.matches(keyData, "toggleSessionNamedFilter")) { + this.onToggleNameFilter?.(); + return; + } + + // Ctrl+P: toggle path display + if (kb.matches(keyData, "toggleSessionPath")) { + this.showPath = !this.showPath; + this.onTogglePath?.(this.showPath); + return; + } + + // Ctrl+D: initiate delete confirmation (useful on terminals that don't distinguish Ctrl+Backspace from Backspace) + if (kb.matches(keyData, "deleteSession")) { + this.startDeleteConfirmationForSelectedSession(); + return; + } + + // Ctrl+R: rename selected session + if (matchesKey(keyData, "ctrl+r")) { + const selected = this.filteredSessions[this.selectedIndex]; + if (selected) { + this.onRenameSession?.(selected.session.path); + } + return; + } + + // Ctrl+Backspace: non-invasive convenience alias for delete + // Only triggers deletion when the query is empty; otherwise it is forwarded to the input + if (kb.matches(keyData, "deleteSessionNoninvasive")) { + if (this.searchInput.getValue().length > 0) { + this.searchInput.handleInput(keyData); + this.filterSessions(this.searchInput.getValue()); + return; 
+ } + + this.startDeleteConfirmationForSelectedSession(); + return; + } + + // Up arrow + if (kb.matches(keyData, "selectUp")) { + this.selectedIndex = Math.max(0, this.selectedIndex - 1); + } + // Down arrow + else if (kb.matches(keyData, "selectDown")) { + this.selectedIndex = Math.min(this.filteredSessions.length - 1, this.selectedIndex + 1); + } + // Page up - jump up by maxVisible items + else if (kb.matches(keyData, "selectPageUp")) { + this.selectedIndex = Math.max(0, this.selectedIndex - this.maxVisible); + } + // Page down - jump down by maxVisible items + else if (kb.matches(keyData, "selectPageDown")) { + this.selectedIndex = Math.min(this.filteredSessions.length - 1, this.selectedIndex + this.maxVisible); + } + // Enter + else if (kb.matches(keyData, "selectConfirm")) { + const selected = this.filteredSessions[this.selectedIndex]; + if (selected && this.onSelect) { + this.onSelect(selected.session.path); + } + } + // Escape - cancel + else if (kb.matches(keyData, "selectCancel")) { + if (this.onCancel) { + this.onCancel(); + } + } + // Pass everything else to search input + else { + this.searchInput.handleInput(keyData); + this.filterSessions(this.searchInput.getValue()); + } + } +} + +type SessionsLoader = (onProgress?: SessionListProgress) => Promise; + +/** + * Delete a session file, trying the `trash` CLI first, then falling back to unlink + */ +async function deleteSessionFile( + sessionPath: string, +): Promise<{ ok: boolean; method: "trash" | "unlink"; error?: string }> { + // Try `trash` first (if installed) + const trashArgs = sessionPath.startsWith("-") ? ["--", sessionPath] : [sessionPath]; + const trashResult = spawnSync("trash", trashArgs, { encoding: "utf-8" }); + + const getTrashErrorHint = (): string | null => { + const parts: string[] = []; + if (trashResult.error) { + parts.push(trashResult.error.message); + } + const stderr = trashResult.stderr?.trim(); + if (stderr) { + parts.push(stderr.split("\n")[0] ?? 
stderr); + } + if (parts.length === 0) return null; + return `trash: ${parts.join(" · ").slice(0, 200)}`; + }; + + // If trash reports success, or the file is gone afterwards, treat it as successful + if (trashResult.status === 0 || !existsSync(sessionPath)) { + return { ok: true, method: "trash" }; + } + + // Fallback to permanent deletion + try { + await unlink(sessionPath); + return { ok: true, method: "unlink" }; + } catch (err) { + const unlinkError = err instanceof Error ? err.message : String(err); + const trashErrorHint = getTrashErrorHint(); + const error = trashErrorHint ? `${unlinkError} (${trashErrorHint})` : unlinkError; + return { ok: false, method: "unlink", error }; + } +} + +/** + * Component that renders a session selector + */ +export class SessionSelectorComponent extends Container implements Focusable { + handleInput(data: string): void { + if (this.mode === "rename") { + const kb = getEditorKeybindings(); + if (kb.matches(data, "selectCancel") || matchesKey(data, "ctrl+c")) { + this.exitRenameMode(); + return; + } + this.renameInput.handleInput(data); + return; + } + + this.sessionList.handleInput(data); + } + + private canRename = true; + private sessionList: SessionList; + private header: SessionSelectorHeader; + private keybindings: KeybindingsManager; + private scope: SessionScope = "current"; + private sortMode: SortMode = "threaded"; + private nameFilter: NameFilter = "all"; + private currentSessions: SessionInfo[] | null = null; + private allSessions: SessionInfo[] | null = null; + private currentSessionsLoader: SessionsLoader; + private allSessionsLoader: SessionsLoader; + private onCancel: () => void; + private requestRender: () => void; + private renameSession?: (sessionPath: string, currentName: string | undefined) => Promise; + private currentLoading = false; + private allLoading = false; + private allLoadSeq = 0; + + private mode: "list" | "rename" = "list"; + private renameInput = new Input(); + private renameTargetPath: string | 
null = null; + + // Focusable implementation - propagate to sessionList for IME cursor positioning + private _focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.sessionList.focused = value; + this.renameInput.focused = value; + if (value && this.mode === "rename") { + this.renameInput.focused = true; + } + } + + private buildBaseLayout(content: Component, options?: { showHeader?: boolean }): void { + this.clear(); + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder((s) => theme.fg("accent", s))); + this.addChild(new Spacer(1)); + if (options?.showHeader ?? true) { + this.addChild(this.header); + this.addChild(new Spacer(1)); + } + this.addChild(content); + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder((s) => theme.fg("accent", s))); + } + + constructor( + currentSessionsLoader: SessionsLoader, + allSessionsLoader: SessionsLoader, + onSelect: (sessionPath: string) => void, + onCancel: () => void, + onExit: () => void, + requestRender: () => void, + options?: { + renameSession?: (sessionPath: string, currentName: string | undefined) => Promise; + showRenameHint?: boolean; + keybindings?: KeybindingsManager; + }, + currentSessionFilePath?: string, + ) { + super(); + this.keybindings = options?.keybindings ?? KeybindingsManager.create(); + this.currentSessionsLoader = currentSessionsLoader; + this.allSessionsLoader = allSessionsLoader; + this.onCancel = onCancel; + this.requestRender = requestRender; + this.header = new SessionSelectorHeader( + this.scope, + this.sortMode, + this.nameFilter, + this.keybindings, + this.requestRender, + ); + const renameSession = options?.renameSession; + this.renameSession = renameSession; + this.canRename = !!renameSession; + this.header.setShowRenameHint(options?.showRenameHint ?? 
this.canRename); + + // Create session list (starts empty, will be populated after load) + this.sessionList = new SessionList( + [], + false, + this.sortMode, + this.nameFilter, + this.keybindings, + currentSessionFilePath, + ); + + this.buildBaseLayout(this.sessionList); + + this.renameInput.onSubmit = (value) => { + void this.confirmRename(value); + }; + + // Ensure header status timeouts are cleared when leaving the selector + const clearStatusMessage = () => this.header.setStatusMessage(null); + this.sessionList.onSelect = (sessionPath) => { + clearStatusMessage(); + onSelect(sessionPath); + }; + this.sessionList.onCancel = () => { + clearStatusMessage(); + onCancel(); + }; + this.sessionList.onExit = () => { + clearStatusMessage(); + onExit(); + }; + this.sessionList.onToggleScope = () => this.toggleScope(); + this.sessionList.onToggleSort = () => this.toggleSortMode(); + this.sessionList.onToggleNameFilter = () => this.toggleNameFilter(); + this.sessionList.onRenameSession = (sessionPath) => { + if (!renameSession) return; + if (this.scope === "current" && this.currentLoading) return; + if (this.scope === "all" && this.allLoading) return; + + const sessions = this.scope === "all" ? (this.allSessions ?? []) : (this.currentSessions ?? 
[]); + const session = sessions.find((s) => s.path === sessionPath); + this.enterRenameMode(sessionPath, session?.name); + }; + + // Sync list events to header + this.sessionList.onTogglePath = (showPath) => { + this.header.setShowPath(showPath); + this.requestRender(); + }; + this.sessionList.onDeleteConfirmationChange = (path) => { + this.header.setConfirmingDeletePath(path); + this.requestRender(); + }; + this.sessionList.onError = (msg) => { + this.header.setStatusMessage({ type: "error", message: msg }, 3000); + this.requestRender(); + }; + + // Handle session deletion + this.sessionList.onDeleteSession = async (sessionPath: string) => { + const result = await deleteSessionFile(sessionPath); + + if (result.ok) { + if (this.currentSessions) { + this.currentSessions = this.currentSessions.filter((s) => s.path !== sessionPath); + } + if (this.allSessions) { + this.allSessions = this.allSessions.filter((s) => s.path !== sessionPath); + } + + const sessions = this.scope === "all" ? (this.allSessions ?? []) : (this.currentSessions ?? []); + const showCwd = this.scope === "all"; + this.sessionList.setSessions(sessions, showCwd); + + const msg = result.method === "trash" ? "Session moved to trash" : "Session deleted"; + this.header.setStatusMessage({ type: "info", message: msg }, 2000); + await this.refreshSessionsAfterMutation(); + } else { + const errorMessage = result.error ?? "Unknown error"; + this.header.setStatusMessage({ type: "error", message: `Failed to delete: ${errorMessage}` }, 3000); + } + + this.requestRender(); + }; + + // Start loading current sessions immediately + this.loadCurrentSessions(); + } + + private loadCurrentSessions(): void { + void this.loadScope("current", "initial"); + } + + private enterRenameMode(sessionPath: string, currentName: string | undefined): void { + this.mode = "rename"; + this.renameTargetPath = sessionPath; + this.renameInput.setValue(currentName ?? 
""); + this.renameInput.focused = true; + + const panel = new Container(); + panel.addChild(new Text(theme.bold("Rename Session"), 1, 0)); + panel.addChild(new Spacer(1)); + panel.addChild(this.renameInput); + panel.addChild(new Spacer(1)); + panel.addChild(new Text(theme.fg("muted", "Enter to save · Esc/Ctrl+C to cancel"), 1, 0)); + + this.buildBaseLayout(panel, { showHeader: false }); + this.requestRender(); + } + + private exitRenameMode(): void { + this.mode = "list"; + this.renameTargetPath = null; + + this.buildBaseLayout(this.sessionList); + + this.requestRender(); + } + + private async confirmRename(value: string): Promise { + const next = value.trim(); + if (!next) return; + const target = this.renameTargetPath; + if (!target) { + this.exitRenameMode(); + return; + } + + // Find current name for callback + const renameSession = this.renameSession; + if (!renameSession) { + this.exitRenameMode(); + return; + } + + try { + await renameSession(target, next); + await this.refreshSessionsAfterMutation(); + } finally { + this.exitRenameMode(); + } + } + + private async loadScope(scope: SessionScope, reason: "initial" | "refresh" | "toggle"): Promise { + const showCwd = scope === "all"; + + // Mark loading + if (scope === "current") { + this.currentLoading = true; + } else { + this.allLoading = true; + } + + const seq = scope === "all" ? ++this.allLoadSeq : undefined; + this.header.setScope(scope); + this.header.setLoading(true); + this.requestRender(); + + const onProgress = (loaded: number, total: number) => { + if (scope !== this.scope) return; + if (seq !== undefined && seq !== this.allLoadSeq) return; + this.header.setProgress(loaded, total); + this.requestRender(); + }; + + try { + const sessions = await (scope === "current" + ? 
this.currentSessionsLoader(onProgress) + : this.allSessionsLoader(onProgress)); + + if (scope === "current") { + this.currentSessions = sessions; + this.currentLoading = false; + } else { + this.allSessions = sessions; + this.allLoading = false; + } + + if (scope !== this.scope) return; + if (seq !== undefined && seq !== this.allLoadSeq) return; + + this.header.setLoading(false); + this.sessionList.setSessions(sessions, showCwd); + this.requestRender(); + + if (scope === "all" && sessions.length === 0 && (this.currentSessions?.length ?? 0) === 0) { + this.onCancel(); + } + } catch (err) { + if (scope === "current") { + this.currentLoading = false; + } else { + this.allLoading = false; + } + + if (scope !== this.scope) return; + if (seq !== undefined && seq !== this.allLoadSeq) return; + + const message = err instanceof Error ? err.message : String(err); + this.header.setLoading(false); + this.header.setStatusMessage({ type: "error", message: `Failed to load sessions: ${message}` }, 4000); + + if (reason === "initial") { + this.sessionList.setSessions([], showCwd); + } + this.requestRender(); + } + } + + private toggleSortMode(): void { + // Cycle: threaded -> recent -> relevance -> threaded + this.sortMode = this.sortMode === "threaded" ? "recent" : this.sortMode === "recent" ? "relevance" : "threaded"; + this.header.setSortMode(this.sortMode); + this.sessionList.setSortMode(this.sortMode); + this.requestRender(); + } + + private toggleNameFilter(): void { + this.nameFilter = this.nameFilter === "all" ? 
"named" : "all"; + this.header.setNameFilter(this.nameFilter); + this.sessionList.setNameFilter(this.nameFilter); + this.requestRender(); + } + + private async refreshSessionsAfterMutation(): Promise { + await this.loadScope(this.scope, "refresh"); + } + + private toggleScope(): void { + if (this.scope === "current") { + this.scope = "all"; + this.header.setScope(this.scope); + + if (this.allSessions !== null) { + this.header.setLoading(false); + this.sessionList.setSessions(this.allSessions, true); + this.requestRender(); + return; + } + + if (!this.allLoading) { + void this.loadScope("all", "toggle"); + } + return; + } + + this.scope = "current"; + this.header.setScope(this.scope); + this.header.setLoading(this.currentLoading); + this.sessionList.setSessions(this.currentSessions ?? [], false); + this.requestRender(); + } + + getSessionList(): SessionList { + return this.sessionList; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/settings-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/settings-selector.ts new file mode 100644 index 000000000..ae0b36748 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/settings-selector.ts @@ -0,0 +1,421 @@ +import type { ThinkingLevel } from "@gsd/pi-agent-core"; +import type { Transport } from "@gsd/pi-ai"; +import { + Container, + getCapabilities, + type SelectItem, + SelectList, + type SettingItem, + SettingsList, + Spacer, + Text, +} from "@gsd/pi-tui"; +import { getSelectListTheme, getSettingsListTheme, theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; + +const THINKING_DESCRIPTIONS: Record = { + off: "No reasoning", + minimal: "Very brief reasoning (~1k tokens)", + low: "Light reasoning (~2k tokens)", + medium: "Moderate reasoning (~8k tokens)", + high: "Deep reasoning (~16k tokens)", + xhigh: "Maximum reasoning (~32k tokens)", +}; + +export interface SettingsConfig { + autoCompact: boolean; + showImages: 
boolean; + autoResizeImages: boolean; + blockImages: boolean; + enableSkillCommands: boolean; + steeringMode: "all" | "one-at-a-time"; + followUpMode: "all" | "one-at-a-time"; + transport: Transport; + thinkingLevel: ThinkingLevel; + availableThinkingLevels: ThinkingLevel[]; + currentTheme: string; + availableThemes: string[]; + hideThinkingBlock: boolean; + collapseChangelog: boolean; + doubleEscapeAction: "fork" | "tree" | "none"; + treeFilterMode: "default" | "no-tools" | "user-only" | "labeled-only" | "all"; + showHardwareCursor: boolean; + editorPaddingX: number; + autocompleteMaxVisible: number; + quietStartup: boolean; + clearOnShrink: boolean; +} + +export interface SettingsCallbacks { + onAutoCompactChange: (enabled: boolean) => void; + onShowImagesChange: (enabled: boolean) => void; + onAutoResizeImagesChange: (enabled: boolean) => void; + onBlockImagesChange: (blocked: boolean) => void; + onEnableSkillCommandsChange: (enabled: boolean) => void; + onSteeringModeChange: (mode: "all" | "one-at-a-time") => void; + onFollowUpModeChange: (mode: "all" | "one-at-a-time") => void; + onTransportChange: (transport: Transport) => void; + onThinkingLevelChange: (level: ThinkingLevel) => void; + onThemeChange: (theme: string) => void; + onThemePreview?: (theme: string) => void; + onHideThinkingBlockChange: (hidden: boolean) => void; + onCollapseChangelogChange: (collapsed: boolean) => void; + onDoubleEscapeActionChange: (action: "fork" | "tree" | "none") => void; + onTreeFilterModeChange: (mode: "default" | "no-tools" | "user-only" | "labeled-only" | "all") => void; + onShowHardwareCursorChange: (enabled: boolean) => void; + onEditorPaddingXChange: (padding: number) => void; + onAutocompleteMaxVisibleChange: (maxVisible: number) => void; + onQuietStartupChange: (enabled: boolean) => void; + onClearOnShrinkChange: (enabled: boolean) => void; + onCancel: () => void; +} + +/** + * A submenu component for selecting from a list of options. 
+ */ +class SelectSubmenu extends Container { + private selectList: SelectList; + + constructor( + title: string, + description: string, + options: SelectItem[], + currentValue: string, + onSelect: (value: string) => void, + onCancel: () => void, + onSelectionChange?: (value: string) => void, + ) { + super(); + + // Title + this.addChild(new Text(theme.bold(theme.fg("accent", title)), 0, 0)); + + // Description + if (description) { + this.addChild(new Spacer(1)); + this.addChild(new Text(theme.fg("muted", description), 0, 0)); + } + + // Spacer + this.addChild(new Spacer(1)); + + // Select list + this.selectList = new SelectList(options, Math.min(options.length, 10), getSelectListTheme()); + + // Pre-select current value + const currentIndex = options.findIndex((o) => o.value === currentValue); + if (currentIndex !== -1) { + this.selectList.setSelectedIndex(currentIndex); + } + + this.selectList.onSelect = (item) => { + onSelect(item.value); + }; + + this.selectList.onCancel = onCancel; + + if (onSelectionChange) { + this.selectList.onSelectionChange = (item) => { + onSelectionChange(item.value); + }; + } + + this.addChild(this.selectList); + + // Hint + this.addChild(new Spacer(1)); + this.addChild(new Text(theme.fg("dim", " Enter to select · Esc to go back"), 0, 0)); + } + + handleInput(data: string): void { + this.selectList.handleInput(data); + } +} + +/** + * Main settings selector component. + */ +export class SettingsSelectorComponent extends Container { + private settingsList: SettingsList; + + constructor(config: SettingsConfig, callbacks: SettingsCallbacks) { + super(); + + const supportsImages = getCapabilities().images; + + const items: SettingItem[] = [ + { + id: "autocompact", + label: "Auto-compact", + description: "Automatically compact context when it gets too large", + currentValue: config.autoCompact ? 
"true" : "false", + values: ["true", "false"], + }, + { + id: "steering-mode", + label: "Steering mode", + description: + "Enter while streaming queues steering messages. 'one-at-a-time': deliver one, wait for response. 'all': deliver all at once.", + currentValue: config.steeringMode, + values: ["one-at-a-time", "all"], + }, + { + id: "follow-up-mode", + label: "Follow-up mode", + description: + "Alt+Enter queues follow-up messages until agent stops. 'one-at-a-time': deliver one, wait for response. 'all': deliver all at once.", + currentValue: config.followUpMode, + values: ["one-at-a-time", "all"], + }, + { + id: "transport", + label: "Transport", + description: "Preferred transport for providers that support multiple transports", + currentValue: config.transport, + values: ["sse", "websocket", "auto"], + }, + { + id: "hide-thinking", + label: "Hide thinking", + description: "Hide thinking blocks in assistant responses", + currentValue: config.hideThinkingBlock ? "true" : "false", + values: ["true", "false"], + }, + { + id: "collapse-changelog", + label: "Collapse changelog", + description: "Show condensed changelog after updates", + currentValue: config.collapseChangelog ? "true" : "false", + values: ["true", "false"], + }, + { + id: "quiet-startup", + label: "Quiet startup", + description: "Disable verbose printing at startup", + currentValue: config.quietStartup ? 
"true" : "false", + values: ["true", "false"], + }, + { + id: "double-escape-action", + label: "Double-escape action", + description: "Action when pressing Escape twice with empty editor", + currentValue: config.doubleEscapeAction, + values: ["tree", "fork", "none"], + }, + { + id: "tree-filter-mode", + label: "Tree filter mode", + description: "Default filter when opening /tree", + currentValue: config.treeFilterMode, + values: ["default", "no-tools", "user-only", "labeled-only", "all"], + }, + { + id: "thinking", + label: "Thinking level", + description: "Reasoning depth for thinking-capable models", + currentValue: config.thinkingLevel, + submenu: (currentValue, done) => + new SelectSubmenu( + "Thinking Level", + "Select reasoning depth for thinking-capable models", + config.availableThinkingLevels.map((level) => ({ + value: level, + label: level, + description: THINKING_DESCRIPTIONS[level], + })), + currentValue, + (value) => { + callbacks.onThinkingLevelChange(value as ThinkingLevel); + done(value); + }, + () => done(), + ), + }, + { + id: "theme", + label: "Theme", + description: "Color theme for the interface", + currentValue: config.currentTheme, + submenu: (currentValue, done) => + new SelectSubmenu( + "Theme", + "Select color theme", + config.availableThemes.map((t) => ({ + value: t, + label: t, + })), + currentValue, + (value) => { + callbacks.onThemeChange(value); + done(value); + }, + () => { + // Restore original theme on cancel + callbacks.onThemePreview?.(currentValue); + done(); + }, + (value) => { + // Preview theme on selection change + callbacks.onThemePreview?.(value); + }, + ), + }, + ]; + + // Only show image toggle if terminal supports it + if (supportsImages) { + // Insert after autocompact + items.splice(1, 0, { + id: "show-images", + label: "Show images", + description: "Render images inline in terminal", + currentValue: config.showImages ? 
"true" : "false", + values: ["true", "false"], + }); + } + + // Image auto-resize toggle (always available, affects both attached and read images) + items.splice(supportsImages ? 2 : 1, 0, { + id: "auto-resize-images", + label: "Auto-resize images", + description: "Resize large images to 2000x2000 max for better model compatibility", + currentValue: config.autoResizeImages ? "true" : "false", + values: ["true", "false"], + }); + + // Block images toggle (always available, insert after auto-resize-images) + const autoResizeIndex = items.findIndex((item) => item.id === "auto-resize-images"); + items.splice(autoResizeIndex + 1, 0, { + id: "block-images", + label: "Block images", + description: "Prevent images from being sent to LLM providers", + currentValue: config.blockImages ? "true" : "false", + values: ["true", "false"], + }); + + // Skill commands toggle (insert after block-images) + const blockImagesIndex = items.findIndex((item) => item.id === "block-images"); + items.splice(blockImagesIndex + 1, 0, { + id: "skill-commands", + label: "Skill commands", + description: "Register skills as /skill:name commands", + currentValue: config.enableSkillCommands ? "true" : "false", + values: ["true", "false"], + }); + + // Hardware cursor toggle (insert after skill-commands) + const skillCommandsIndex = items.findIndex((item) => item.id === "skill-commands"); + items.splice(skillCommandsIndex + 1, 0, { + id: "show-hardware-cursor", + label: "Show hardware cursor", + description: "Show the terminal cursor while still positioning it for IME support", + currentValue: config.showHardwareCursor ? 
"true" : "false", + values: ["true", "false"], + }); + + // Editor padding toggle (insert after show-hardware-cursor) + const hardwareCursorIndex = items.findIndex((item) => item.id === "show-hardware-cursor"); + items.splice(hardwareCursorIndex + 1, 0, { + id: "editor-padding", + label: "Editor padding", + description: "Horizontal padding for input editor (0-3)", + currentValue: String(config.editorPaddingX), + values: ["0", "1", "2", "3"], + }); + + // Autocomplete max visible toggle (insert after editor-padding) + const editorPaddingIndex = items.findIndex((item) => item.id === "editor-padding"); + items.splice(editorPaddingIndex + 1, 0, { + id: "autocomplete-max-visible", + label: "Autocomplete max items", + description: "Max visible items in autocomplete dropdown (3-20)", + currentValue: String(config.autocompleteMaxVisible), + values: ["3", "5", "7", "10", "15", "20"], + }); + + // Clear on shrink toggle (insert after autocomplete-max-visible) + const autocompleteIndex = items.findIndex((item) => item.id === "autocomplete-max-visible"); + items.splice(autocompleteIndex + 1, 0, { + id: "clear-on-shrink", + label: "Clear on shrink", + description: "Clear empty rows when content shrinks (may cause flicker)", + currentValue: config.clearOnShrink ? 
"true" : "false", + values: ["true", "false"], + }); + + // Add borders + this.addChild(new DynamicBorder()); + + this.settingsList = new SettingsList( + items, + 10, + getSettingsListTheme(), + (id, newValue) => { + switch (id) { + case "autocompact": + callbacks.onAutoCompactChange(newValue === "true"); + break; + case "show-images": + callbacks.onShowImagesChange(newValue === "true"); + break; + case "auto-resize-images": + callbacks.onAutoResizeImagesChange(newValue === "true"); + break; + case "block-images": + callbacks.onBlockImagesChange(newValue === "true"); + break; + case "skill-commands": + callbacks.onEnableSkillCommandsChange(newValue === "true"); + break; + case "steering-mode": + callbacks.onSteeringModeChange(newValue as "all" | "one-at-a-time"); + break; + case "follow-up-mode": + callbacks.onFollowUpModeChange(newValue as "all" | "one-at-a-time"); + break; + case "transport": + callbacks.onTransportChange(newValue as Transport); + break; + case "hide-thinking": + callbacks.onHideThinkingBlockChange(newValue === "true"); + break; + case "collapse-changelog": + callbacks.onCollapseChangelogChange(newValue === "true"); + break; + case "quiet-startup": + callbacks.onQuietStartupChange(newValue === "true"); + break; + case "double-escape-action": + callbacks.onDoubleEscapeActionChange(newValue as "fork" | "tree"); + break; + case "tree-filter-mode": + callbacks.onTreeFilterModeChange( + newValue as "default" | "no-tools" | "user-only" | "labeled-only" | "all", + ); + break; + case "show-hardware-cursor": + callbacks.onShowHardwareCursorChange(newValue === "true"); + break; + case "editor-padding": + callbacks.onEditorPaddingXChange(parseInt(newValue, 10)); + break; + case "autocomplete-max-visible": + callbacks.onAutocompleteMaxVisibleChange(parseInt(newValue, 10)); + break; + case "clear-on-shrink": + callbacks.onClearOnShrinkChange(newValue === "true"); + break; + } + }, + callbacks.onCancel, + { enableSearch: true }, + ); + + 
this.addChild(this.settingsList); + this.addChild(new DynamicBorder()); + } + + getSettingsList(): SettingsList { + return this.settingsList; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/show-images-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/show-images-selector.ts new file mode 100644 index 000000000..7303d42e6 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/show-images-selector.ts @@ -0,0 +1,45 @@ +import { Container, type SelectItem, SelectList } from "@gsd/pi-tui"; +import { getSelectListTheme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; + +/** + * Component that renders a show images selector with borders + */ +export class ShowImagesSelectorComponent extends Container { + private selectList: SelectList; + + constructor(currentValue: boolean, onSelect: (show: boolean) => void, onCancel: () => void) { + super(); + + const items: SelectItem[] = [ + { value: "yes", label: "Yes", description: "Show images inline in terminal" }, + { value: "no", label: "No", description: "Show text placeholder instead" }, + ]; + + // Add top border + this.addChild(new DynamicBorder()); + + // Create selector + this.selectList = new SelectList(items, 5, getSelectListTheme()); + + // Preselect current value + this.selectList.setSelectedIndex(currentValue ? 
0 : 1); + + this.selectList.onSelect = (item) => { + onSelect(item.value === "yes"); + }; + + this.selectList.onCancel = () => { + onCancel(); + }; + + this.addChild(this.selectList); + + // Add bottom border + this.addChild(new DynamicBorder()); + } + + getSelectList(): SelectList { + return this.selectList; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/skill-invocation-message.ts b/packages/pi-coding-agent/src/modes/interactive/components/skill-invocation-message.ts new file mode 100644 index 000000000..adbf71fd9 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/skill-invocation-message.ts @@ -0,0 +1,55 @@ +import { Box, Markdown, type MarkdownTheme, Text } from "@gsd/pi-tui"; +import type { ParsedSkillBlock } from "../../../core/agent-session.js"; +import { getMarkdownTheme, theme } from "../theme/theme.js"; +import { editorKey } from "./keybinding-hints.js"; + +/** + * Component that renders a skill invocation message with collapsed/expanded state. + * Uses same background color as custom messages for visual consistency. + * Only renders the skill block itself - user message is rendered separately. 
+ */ +export class SkillInvocationMessageComponent extends Box { + private expanded = false; + private skillBlock: ParsedSkillBlock; + private markdownTheme: MarkdownTheme; + + constructor(skillBlock: ParsedSkillBlock, markdownTheme: MarkdownTheme = getMarkdownTheme()) { + super(1, 1, (t) => theme.bg("customMessageBg", t)); + this.skillBlock = skillBlock; + this.markdownTheme = markdownTheme; + this.updateDisplay(); + } + + setExpanded(expanded: boolean): void { + this.expanded = expanded; + this.updateDisplay(); + } + + override invalidate(): void { + super.invalidate(); + this.updateDisplay(); + } + + private updateDisplay(): void { + this.clear(); + + if (this.expanded) { + // Expanded: label + skill name header + full content + const label = theme.fg("customMessageLabel", `\x1b[1m[skill]\x1b[22m`); + this.addChild(new Text(label, 0, 0)); + const header = `**${this.skillBlock.name}**\n\n`; + this.addChild( + new Markdown(header + this.skillBlock.content, 0, 0, this.markdownTheme, { + color: (text: string) => theme.fg("customMessageText", text), + }), + ); + } else { + // Collapsed: single line - [skill] name (hint to expand) + const line = + theme.fg("customMessageLabel", `\x1b[1m[skill]\x1b[22m `) + + theme.fg("customMessageText", this.skillBlock.name) + + theme.fg("dim", ` (${editorKey("expandTools")} to expand)`); + this.addChild(new Text(line, 0, 0)); + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/theme-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/theme-selector.ts new file mode 100644 index 000000000..caad68689 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/theme-selector.ts @@ -0,0 +1,62 @@ +import { Container, type SelectItem, SelectList } from "@gsd/pi-tui"; +import { getAvailableThemes, getSelectListTheme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; + +/** + * Component that renders a theme selector + */ +export class 
ThemeSelectorComponent extends Container { + private selectList: SelectList; + private onPreview: (themeName: string) => void; + + constructor( + currentTheme: string, + onSelect: (themeName: string) => void, + onCancel: () => void, + onPreview: (themeName: string) => void, + ) { + super(); + this.onPreview = onPreview; + + // Get available themes and create select items + const themes = getAvailableThemes(); + const themeItems: SelectItem[] = themes.map((name) => ({ + value: name, + label: name, + description: name === currentTheme ? "(current)" : undefined, + })); + + // Add top border + this.addChild(new DynamicBorder()); + + // Create selector + this.selectList = new SelectList(themeItems, 10, getSelectListTheme()); + + // Preselect current theme + const currentIndex = themes.indexOf(currentTheme); + if (currentIndex !== -1) { + this.selectList.setSelectedIndex(currentIndex); + } + + this.selectList.onSelect = (item) => { + onSelect(item.value); + }; + + this.selectList.onCancel = () => { + onCancel(); + }; + + this.selectList.onSelectionChange = (item) => { + this.onPreview(item.value); + }; + + this.addChild(this.selectList); + + // Add bottom border + this.addChild(new DynamicBorder()); + } + + getSelectList(): SelectList { + return this.selectList; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/thinking-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/thinking-selector.ts new file mode 100644 index 000000000..1f70c25bf --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/thinking-selector.ts @@ -0,0 +1,64 @@ +import type { ThinkingLevel } from "@gsd/pi-agent-core"; +import { Container, type SelectItem, SelectList } from "@gsd/pi-tui"; +import { getSelectListTheme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; + +const LEVEL_DESCRIPTIONS: Record = { + off: "No reasoning", + minimal: "Very brief reasoning (~1k tokens)", + low: "Light reasoning 
(~2k tokens)", + medium: "Moderate reasoning (~8k tokens)", + high: "Deep reasoning (~16k tokens)", + xhigh: "Maximum reasoning (~32k tokens)", +}; + +/** + * Component that renders a thinking level selector with borders + */ +export class ThinkingSelectorComponent extends Container { + private selectList: SelectList; + + constructor( + currentLevel: ThinkingLevel, + availableLevels: ThinkingLevel[], + onSelect: (level: ThinkingLevel) => void, + onCancel: () => void, + ) { + super(); + + const thinkingLevels: SelectItem[] = availableLevels.map((level) => ({ + value: level, + label: level, + description: LEVEL_DESCRIPTIONS[level], + })); + + // Add top border + this.addChild(new DynamicBorder()); + + // Create selector + this.selectList = new SelectList(thinkingLevels, thinkingLevels.length, getSelectListTheme()); + + // Preselect current level + const currentIndex = thinkingLevels.findIndex((item) => item.value === currentLevel); + if (currentIndex !== -1) { + this.selectList.setSelectedIndex(currentIndex); + } + + this.selectList.onSelect = (item) => { + onSelect(item.value as ThinkingLevel); + }; + + this.selectList.onCancel = () => { + onCancel(); + }; + + this.addChild(this.selectList); + + // Add bottom border + this.addChild(new DynamicBorder()); + } + + getSelectList(): SelectList { + return this.selectList; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/tool-execution.ts b/packages/pi-coding-agent/src/modes/interactive/components/tool-execution.ts new file mode 100644 index 000000000..efb4a2f3c --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/tool-execution.ts @@ -0,0 +1,916 @@ +import * as os from "node:os"; +import { + Box, + Container, + getCapabilities, + getImageDimensions, + Image, + imageFallback, + Spacer, + Text, + type TUI, + truncateToWidth, +} from "@gsd/pi-tui"; +import stripAnsi from "strip-ansi"; +import type { ToolDefinition } from "../../../core/extensions/types.js"; +import { 
computeEditDiff, type EditDiffError, type EditDiffResult } from "../../../core/tools/edit-diff.js"; +import { allTools } from "../../../core/tools/index.js"; +import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize } from "../../../core/tools/truncate.js"; +import { convertToPng } from "../../../utils/image-convert.js"; +import { sanitizeBinaryOutput } from "../../../utils/shell.js"; +import { getLanguageFromPath, highlightCode, theme } from "../theme/theme.js"; +import { renderDiff } from "./diff.js"; +import { keyHint } from "./keybinding-hints.js"; +import { truncateToVisualLines } from "./visual-truncate.js"; + +// Preview line limit for bash when not expanded +const BASH_PREVIEW_LINES = 5; +// During partial write tool-call streaming, re-highlight the first N lines fully +// to keep multiline tokenization mostly correct without re-highlighting the full file. +const WRITE_PARTIAL_FULL_HIGHLIGHT_LINES = 50; + +/** + * Convert absolute path to tilde notation if it's in home directory + */ +function shortenPath(path: unknown): string { + if (typeof path !== "string") return ""; + const home = os.homedir(); + if (path.startsWith(home)) { + return `~${path.slice(home.length)}`; + } + return path; +} + +/** + * Replace tabs with spaces for consistent rendering + */ +function replaceTabs(text: string): string { + return text.replace(/\t/g, " "); +} + +/** + * Normalize control characters for terminal preview rendering. + * Keep tool arguments unchanged, sanitize only display text. + */ +function normalizeDisplayText(text: string): string { + return text.replace(/\r/g, ""); +} + +/** Safely coerce value to string for display. Returns null if invalid type. 
*/ +function str(value: unknown): string | null { + if (typeof value === "string") return value; + if (value == null) return ""; + return null; // Invalid type +} + +export interface ToolExecutionOptions { + showImages?: boolean; // default: true (only used if terminal supports images) +} + +type WriteHighlightCache = { + rawPath: string | null; + lang: string; + rawContent: string; + normalizedLines: string[]; + highlightedLines: string[]; +}; + +/** + * Component that renders a tool call with its result (updateable) + */ +export class ToolExecutionComponent extends Container { + private contentBox: Box; // Used for custom tools and bash visual truncation + private contentText: Text; // For built-in tools (with its own padding/bg) + private imageComponents: Image[] = []; + private imageSpacers: Spacer[] = []; + private toolName: string; + private args: any; + private expanded = false; + private showImages: boolean; + private isPartial = true; + private toolDefinition?: ToolDefinition; + private ui: TUI; + private cwd: string; + private result?: { + content: Array<{ type: string; text?: string; data?: string; mimeType?: string }>; + isError: boolean; + details?: any; + }; + // Cached edit diff preview (computed when args arrive, before tool executes) + private editDiffPreview?: EditDiffResult | EditDiffError; + private editDiffArgsKey?: string; // Track which args the preview is for + // Cached converted images for Kitty protocol (which requires PNG), keyed by index + private convertedImages: Map = new Map(); + // Incremental syntax highlighting cache for write tool call args + private writeHighlightCache?: WriteHighlightCache; + // When true, this component intentionally renders no lines + private hideComponent = false; + + constructor( + toolName: string, + args: any, + options: ToolExecutionOptions = {}, + toolDefinition: ToolDefinition | undefined, + ui: TUI, + cwd: string = process.cwd(), + ) { + super(); + this.toolName = toolName; + this.args = args; + 
this.showImages = options.showImages ?? true; + this.toolDefinition = toolDefinition; + this.ui = ui; + this.cwd = cwd; + + this.addChild(new Spacer(1)); + + // Always create both - contentBox for custom tools/bash, contentText for other built-ins + this.contentBox = new Box(1, 1, (text: string) => theme.bg("toolPendingBg", text)); + this.contentText = new Text("", 1, 1, (text: string) => theme.bg("toolPendingBg", text)); + + // Use contentBox for bash (visual truncation) or custom tools with custom renderers + // Use contentText for built-in tools (including overrides without custom renderers) + if (toolName === "bash" || (toolDefinition && !this.shouldUseBuiltInRenderer())) { + this.addChild(this.contentBox); + } else { + this.addChild(this.contentText); + } + + this.updateDisplay(); + } + + /** + * Check if we should use built-in rendering for this tool. + * Returns true if the tool name is a built-in AND either there's no toolDefinition + * or the toolDefinition doesn't provide custom renderers. + */ + private shouldUseBuiltInRenderer(): boolean { + const isBuiltInName = this.toolName in allTools; + const hasCustomRenderers = this.toolDefinition?.renderCall || this.toolDefinition?.renderResult; + return isBuiltInName && !hasCustomRenderers; + } + + updateArgs(args: any): void { + this.args = args; + if (this.toolName === "write" && this.isPartial) { + this.updateWriteHighlightCacheIncremental(); + } + this.updateDisplay(); + } + + private highlightSingleLine(line: string, lang: string): string { + const highlighted = highlightCode(line, lang); + return highlighted[0] ?? 
""; + } + + private refreshWriteHighlightPrefix(cache: WriteHighlightCache): void { + const prefixCount = Math.min(WRITE_PARTIAL_FULL_HIGHLIGHT_LINES, cache.normalizedLines.length); + if (prefixCount === 0) return; + + const prefixSource = cache.normalizedLines.slice(0, prefixCount).join("\n"); + const prefixHighlighted = highlightCode(prefixSource, cache.lang); + for (let i = 0; i < prefixCount; i++) { + cache.highlightedLines[i] = + prefixHighlighted[i] ?? this.highlightSingleLine(cache.normalizedLines[i] ?? "", cache.lang); + } + } + + private rebuildWriteHighlightCacheFull(rawPath: string | null, fileContent: string): void { + const lang = rawPath ? getLanguageFromPath(rawPath) : undefined; + if (!lang) { + this.writeHighlightCache = undefined; + return; + } + + const displayContent = normalizeDisplayText(fileContent); + const normalized = replaceTabs(displayContent); + this.writeHighlightCache = { + rawPath, + lang, + rawContent: fileContent, + normalizedLines: normalized.split("\n"), + highlightedLines: highlightCode(normalized, lang), + }; + } + + private updateWriteHighlightCacheIncremental(): void { + const rawPath = str(this.args?.file_path ?? this.args?.path); + const fileContent = str(this.args?.content); + if (rawPath === null || fileContent === null) { + this.writeHighlightCache = undefined; + return; + } + + const lang = rawPath ? 
getLanguageFromPath(rawPath) : undefined; + if (!lang) { + this.writeHighlightCache = undefined; + return; + } + + if (!this.writeHighlightCache) { + this.rebuildWriteHighlightCacheFull(rawPath, fileContent); + return; + } + + const cache = this.writeHighlightCache; + if (cache.lang !== lang || cache.rawPath !== rawPath) { + this.rebuildWriteHighlightCacheFull(rawPath, fileContent); + return; + } + + if (!fileContent.startsWith(cache.rawContent)) { + this.rebuildWriteHighlightCacheFull(rawPath, fileContent); + return; + } + + if (fileContent.length === cache.rawContent.length) { + return; + } + + const deltaRaw = fileContent.slice(cache.rawContent.length); + const deltaDisplay = normalizeDisplayText(deltaRaw); + const deltaNormalized = replaceTabs(deltaDisplay); + cache.rawContent = fileContent; + + if (cache.normalizedLines.length === 0) { + cache.normalizedLines.push(""); + cache.highlightedLines.push(""); + } + + const segments = deltaNormalized.split("\n"); + const lastIndex = cache.normalizedLines.length - 1; + cache.normalizedLines[lastIndex] += segments[0]; + cache.highlightedLines[lastIndex] = this.highlightSingleLine(cache.normalizedLines[lastIndex], cache.lang); + + for (let i = 1; i < segments.length; i++) { + cache.normalizedLines.push(segments[i]); + cache.highlightedLines.push(this.highlightSingleLine(segments[i], cache.lang)); + } + + this.refreshWriteHighlightPrefix(cache); + } + + /** + * Signal that args are complete (tool is about to execute). + * This triggers diff computation for edit tool. + */ + setArgsComplete(): void { + if (this.toolName === "write") { + const rawPath = str(this.args?.file_path ?? this.args?.path); + const fileContent = str(this.args?.content); + if (rawPath !== null && fileContent !== null) { + this.rebuildWriteHighlightCacheFull(rawPath, fileContent); + } + } + this.maybeComputeEditDiff(); + } + + /** + * Compute edit diff preview when we have complete args. + * This runs async and updates display when done. 
+ */ + private maybeComputeEditDiff(): void { + if (this.toolName !== "edit") return; + + const path = this.args?.path; + const oldText = this.args?.oldText; + const newText = this.args?.newText; + + // Need all three params to compute diff + if (!path || oldText === undefined || newText === undefined) return; + + // Create a key to track which args this computation is for + const argsKey = JSON.stringify({ path, oldText, newText }); + + // Skip if we already computed for these exact args + if (this.editDiffArgsKey === argsKey) return; + + this.editDiffArgsKey = argsKey; + + // Compute diff async + computeEditDiff(path, oldText, newText, this.cwd).then((result) => { + // Only update if args haven't changed since we started + if (this.editDiffArgsKey === argsKey) { + this.editDiffPreview = result; + this.updateDisplay(); + this.ui.requestRender(); + } + }); + } + + updateResult( + result: { + content: Array<{ type: string; text?: string; data?: string; mimeType?: string }>; + details?: any; + isError: boolean; + }, + isPartial = false, + ): void { + this.result = result; + this.isPartial = isPartial; + if (this.toolName === "write" && !isPartial) { + const rawPath = str(this.args?.file_path ?? this.args?.path); + const fileContent = str(this.args?.content); + if (rawPath !== null && fileContent !== null) { + this.rebuildWriteHighlightCacheFull(rawPath, fileContent); + } + } + this.updateDisplay(); + // Convert non-PNG images to PNG for Kitty protocol (async) + this.maybeConvertImagesForKitty(); + } + + /** + * Convert non-PNG images to PNG for Kitty graphics protocol. + * Kitty requires PNG format (f=100), so JPEG/GIF/WebP won't display. 
+ */ + private maybeConvertImagesForKitty(): void { + const caps = getCapabilities(); + // Only needed for Kitty protocol + if (caps.images !== "kitty") return; + if (!this.result) return; + + const imageBlocks = this.result.content?.filter((c: any) => c.type === "image") || []; + + for (let i = 0; i < imageBlocks.length; i++) { + const img = imageBlocks[i]; + if (!img.data || !img.mimeType) continue; + // Skip if already PNG or already converted + if (img.mimeType === "image/png") continue; + if (this.convertedImages.has(i)) continue; + + // Convert async + const index = i; + convertToPng(img.data, img.mimeType).then((converted) => { + if (converted) { + this.convertedImages.set(index, converted); + this.updateDisplay(); + this.ui.requestRender(); + } + }); + } + } + + setExpanded(expanded: boolean): void { + this.expanded = expanded; + this.updateDisplay(); + } + + setShowImages(show: boolean): void { + this.showImages = show; + this.updateDisplay(); + } + + override invalidate(): void { + super.invalidate(); + this.updateDisplay(); + } + + override render(width: number): string[] { + if (this.hideComponent) { + return []; + } + return super.render(width); + } + + private updateDisplay(): void { + // Set background based on state + const bgFn = this.isPartial + ? (text: string) => theme.bg("toolPendingBg", text) + : this.result?.isError + ? 
(text: string) => theme.bg("toolErrorBg", text) + : (text: string) => theme.bg("toolSuccessBg", text); + + const useBuiltInRenderer = this.shouldUseBuiltInRenderer(); + let customRendererHasContent = false; + this.hideComponent = false; + + // Use built-in rendering for built-in tools (or overrides without custom renderers) + if (useBuiltInRenderer) { + if (this.toolName === "bash") { + // Bash uses Box with visual line truncation + this.contentBox.setBgFn(bgFn); + this.contentBox.clear(); + this.renderBashContent(); + } else { + // Other built-in tools: use Text directly with caching + this.contentText.setCustomBgFn(bgFn); + this.contentText.setText(this.formatToolExecution()); + } + } else if (this.toolDefinition) { + // Custom tools use Box for flexible component rendering + this.contentBox.setBgFn(bgFn); + this.contentBox.clear(); + + // Render call component + if (this.toolDefinition.renderCall) { + try { + const callComponent = this.toolDefinition.renderCall(this.args, theme); + if (callComponent !== undefined) { + this.contentBox.addChild(callComponent); + customRendererHasContent = true; + } + } catch { + // Fall back to default on error + this.contentBox.addChild(new Text(theme.fg("toolTitle", theme.bold(this.toolName)), 0, 0)); + customRendererHasContent = true; + } + } else { + // No custom renderCall, show tool name + this.contentBox.addChild(new Text(theme.fg("toolTitle", theme.bold(this.toolName)), 0, 0)); + customRendererHasContent = true; + } + + // Render result component if we have a result + if (this.result && this.toolDefinition.renderResult) { + try { + const resultComponent = this.toolDefinition.renderResult( + { content: this.result.content as any, details: this.result.details }, + { expanded: this.expanded, isPartial: this.isPartial }, + theme, + ); + if (resultComponent !== undefined) { + this.contentBox.addChild(resultComponent); + customRendererHasContent = true; + } + } catch { + // Fall back to showing raw output on error + const output 
= this.getTextOutput(); + if (output) { + this.contentBox.addChild(new Text(theme.fg("toolOutput", output), 0, 0)); + customRendererHasContent = true; + } + } + } else if (this.result) { + // Has result but no custom renderResult + const output = this.getTextOutput(); + if (output) { + this.contentBox.addChild(new Text(theme.fg("toolOutput", output), 0, 0)); + customRendererHasContent = true; + } + } + } else { + // Unknown tool with no registered definition - show generic fallback + this.contentText.setCustomBgFn(bgFn); + this.contentText.setText(this.formatToolExecution()); + } + + // Handle images (same for both custom and built-in) + for (const img of this.imageComponents) { + this.removeChild(img); + } + this.imageComponents = []; + for (const spacer of this.imageSpacers) { + this.removeChild(spacer); + } + this.imageSpacers = []; + + if (this.result) { + const imageBlocks = this.result.content?.filter((c: any) => c.type === "image") || []; + const caps = getCapabilities(); + + for (let i = 0; i < imageBlocks.length; i++) { + const img = imageBlocks[i]; + if (caps.images && this.showImages && img.data && img.mimeType) { + // Use converted PNG for Kitty protocol if available + const converted = this.convertedImages.get(i); + const imageData = converted?.data ?? img.data; + const imageMimeType = converted?.mimeType ?? 
img.mimeType; + + // For Kitty, skip non-PNG images that haven't been converted yet + if (caps.images === "kitty" && imageMimeType !== "image/png") { + continue; + } + + const spacer = new Spacer(1); + this.addChild(spacer); + this.imageSpacers.push(spacer); + const imageComponent = new Image( + imageData, + imageMimeType, + { fallbackColor: (s: string) => theme.fg("toolOutput", s) }, + { maxWidthCells: 60 }, + ); + this.imageComponents.push(imageComponent); + this.addChild(imageComponent); + } + } + } + + if (!useBuiltInRenderer && this.toolDefinition) { + this.hideComponent = !customRendererHasContent && this.imageComponents.length === 0; + } + } + + /** + * Render bash content using visual line truncation (like bash-execution.ts) + */ + private renderBashContent(): void { + const command = str(this.args?.command); + const timeout = this.args?.timeout as number | undefined; + + // Header + const timeoutSuffix = timeout ? theme.fg("muted", ` (timeout ${timeout}s)`) : ""; + const commandDisplay = + command === null ? theme.fg("error", "[invalid arg]") : command ? 
command : theme.fg("toolOutput", "..."); + this.contentBox.addChild( + new Text(theme.fg("toolTitle", theme.bold(`$ ${commandDisplay}`)) + timeoutSuffix, 0, 0), + ); + + if (this.result) { + const output = this.getTextOutput().trim(); + + if (output) { + // Style each line for the output + const styledOutput = output + .split("\n") + .map((line) => theme.fg("toolOutput", line)) + .join("\n"); + + if (this.expanded) { + // Show all lines when expanded + this.contentBox.addChild(new Text(`\n${styledOutput}`, 0, 0)); + } else { + // Use visual line truncation when collapsed with width-aware caching + let cachedWidth: number | undefined; + let cachedLines: string[] | undefined; + let cachedSkipped: number | undefined; + + this.contentBox.addChild({ + render: (width: number) => { + if (cachedLines === undefined || cachedWidth !== width) { + const result = truncateToVisualLines(styledOutput, BASH_PREVIEW_LINES, width); + cachedLines = result.visualLines; + cachedSkipped = result.skippedCount; + cachedWidth = width; + } + if (cachedSkipped && cachedSkipped > 0) { + const hint = + theme.fg("muted", `... 
(${cachedSkipped} earlier lines,`) + + ` ${keyHint("expandTools", "to expand")})`; + return ["", truncateToWidth(hint, width, "..."), ...cachedLines]; + } + // Add blank line for spacing (matches expanded case) + return ["", ...cachedLines]; + }, + invalidate: () => { + cachedWidth = undefined; + cachedLines = undefined; + cachedSkipped = undefined; + }, + }); + } + } + + // Truncation warnings + const truncation = this.result.details?.truncation; + const fullOutputPath = this.result.details?.fullOutputPath; + if (truncation?.truncated || fullOutputPath) { + const warnings: string[] = []; + if (fullOutputPath) { + warnings.push(`Full output: ${fullOutputPath}`); + } + if (truncation?.truncated) { + if (truncation.truncatedBy === "lines") { + warnings.push(`Truncated: showing ${truncation.outputLines} of ${truncation.totalLines} lines`); + } else { + warnings.push( + `Truncated: ${truncation.outputLines} lines shown (${formatSize(truncation.maxBytes ?? DEFAULT_MAX_BYTES)} limit)`, + ); + } + } + this.contentBox.addChild(new Text(`\n${theme.fg("warning", `[${warnings.join(". ")}]`)}`, 0, 0)); + } + } + } + + private getTextOutput(): string { + if (!this.result) return ""; + + const textBlocks = this.result.content?.filter((c: any) => c.type === "text") || []; + const imageBlocks = this.result.content?.filter((c: any) => c.type === "image") || []; + + let output = textBlocks + .map((c: any) => { + // Use sanitizeBinaryOutput to handle binary data that crashes string-width + return sanitizeBinaryOutput(stripAnsi(c.text || "")).replace(/\r/g, ""); + }) + .join("\n"); + + const caps = getCapabilities(); + if (imageBlocks.length > 0 && (!caps.images || !this.showImages)) { + const imageIndicators = imageBlocks + .map((img: any) => { + const dims = img.data ? (getImageDimensions(img.data, img.mimeType) ?? undefined) : undefined; + return imageFallback(img.mimeType, dims); + }) + .join("\n"); + output = output ? 
`${output}\n${imageIndicators}` : imageIndicators; + } + + return output; + } + + private formatToolExecution(): string { + let text = ""; + const invalidArg = theme.fg("error", "[invalid arg]"); + + if (this.toolName === "read") { + const rawPath = str(this.args?.file_path ?? this.args?.path); + const path = rawPath !== null ? shortenPath(rawPath) : null; + const offset = this.args?.offset; + const limit = this.args?.limit; + + let pathDisplay = path === null ? invalidArg : path ? theme.fg("accent", path) : theme.fg("toolOutput", "..."); + if (offset !== undefined || limit !== undefined) { + const startLine = offset ?? 1; + const endLine = limit !== undefined ? startLine + limit - 1 : ""; + pathDisplay += theme.fg("warning", `:${startLine}${endLine ? `-${endLine}` : ""}`); + } + + text = `${theme.fg("toolTitle", theme.bold("read"))} ${pathDisplay}`; + + if (this.result) { + const output = this.getTextOutput(); + const rawPath = str(this.args?.file_path ?? this.args?.path); + const lang = rawPath ? getLanguageFromPath(rawPath) : undefined; + const lines = lang ? highlightCode(replaceTabs(output), lang) : output.split("\n"); + + const maxLines = this.expanded ? lines.length : 10; + const displayLines = lines.slice(0, maxLines); + const remaining = lines.length - maxLines; + + text += + "\n\n" + + displayLines + .map((line: string) => (lang ? replaceTabs(line) : theme.fg("toolOutput", replaceTabs(line)))) + .join("\n"); + if (remaining > 0) { + text += `${theme.fg("muted", `\n... (${remaining} more lines,`)} ${keyHint("expandTools", "to expand")})`; + } + + const truncation = this.result.details?.truncation; + if (truncation?.truncated) { + if (truncation.firstLineExceedsLimit) { + text += + "\n" + + theme.fg( + "warning", + `[First line exceeds ${formatSize(truncation.maxBytes ?? 
DEFAULT_MAX_BYTES)} limit]`, + ); + } else if (truncation.truncatedBy === "lines") { + text += + "\n" + + theme.fg( + "warning", + `[Truncated: showing ${truncation.outputLines} of ${truncation.totalLines} lines (${truncation.maxLines ?? DEFAULT_MAX_LINES} line limit)]`, + ); + } else { + text += + "\n" + + theme.fg( + "warning", + `[Truncated: ${truncation.outputLines} lines shown (${formatSize(truncation.maxBytes ?? DEFAULT_MAX_BYTES)} limit)]`, + ); + } + } + } + } else if (this.toolName === "write") { + const rawPath = str(this.args?.file_path ?? this.args?.path); + const fileContent = str(this.args?.content); + const path = rawPath !== null ? shortenPath(rawPath) : null; + + text = + theme.fg("toolTitle", theme.bold("write")) + + " " + + (path === null ? invalidArg : path ? theme.fg("accent", path) : theme.fg("toolOutput", "...")); + + if (fileContent === null) { + text += `\n\n${theme.fg("error", "[invalid content arg - expected string]")}`; + } else if (fileContent) { + const lang = rawPath ? getLanguageFromPath(rawPath) : undefined; + + let lines: string[]; + if (lang) { + const cache = this.writeHighlightCache; + if (cache && cache.lang === lang && cache.rawPath === rawPath && cache.rawContent === fileContent) { + lines = cache.highlightedLines; + } else { + const displayContent = normalizeDisplayText(fileContent); + const normalized = replaceTabs(displayContent); + lines = highlightCode(normalized, lang); + this.writeHighlightCache = { + rawPath, + lang, + rawContent: fileContent, + normalizedLines: normalized.split("\n"), + highlightedLines: lines, + }; + } + } else { + lines = normalizeDisplayText(fileContent).split("\n"); + this.writeHighlightCache = undefined; + } + + const totalLines = lines.length; + const maxLines = this.expanded ? lines.length : 10; + const displayLines = lines.slice(0, maxLines); + const remaining = lines.length - maxLines; + + text += + "\n\n" + + displayLines.map((line: string) => (lang ? 
line : theme.fg("toolOutput", replaceTabs(line)))).join("\n"); + if (remaining > 0) { + text += + theme.fg("muted", `\n... (${remaining} more lines, ${totalLines} total,`) + + ` ${keyHint("expandTools", "to expand")})`; + } + } + + // Show error if tool execution failed + if (this.result?.isError) { + const errorText = this.getTextOutput(); + if (errorText) { + text += `\n\n${theme.fg("error", errorText)}`; + } + } + } else if (this.toolName === "edit") { + const rawPath = str(this.args?.file_path ?? this.args?.path); + const path = rawPath !== null ? shortenPath(rawPath) : null; + + // Build path display, appending :line if we have diff info + let pathDisplay = path === null ? invalidArg : path ? theme.fg("accent", path) : theme.fg("toolOutput", "..."); + const firstChangedLine = + (this.editDiffPreview && "firstChangedLine" in this.editDiffPreview + ? this.editDiffPreview.firstChangedLine + : undefined) || + (this.result && !this.result.isError ? this.result.details?.firstChangedLine : undefined); + if (firstChangedLine) { + pathDisplay += theme.fg("warning", `:${firstChangedLine}`); + } + + text = `${theme.fg("toolTitle", theme.bold("edit"))} ${pathDisplay}`; + + if (this.result?.isError) { + // Show error from result + const errorText = this.getTextOutput(); + if (errorText) { + text += `\n\n${theme.fg("error", errorText)}`; + } + } else if (this.result?.details?.diff) { + // Tool executed successfully - use the diff from result + // This takes priority over editDiffPreview which may have a stale error + // due to race condition (async preview computed after file was modified) + text += `\n\n${renderDiff(this.result.details.diff, { filePath: rawPath ?? 
undefined })}`; + } else if (this.editDiffPreview) { + // Use cached diff preview (before tool executes) + if ("error" in this.editDiffPreview) { + text += `\n\n${theme.fg("error", this.editDiffPreview.error)}`; + } else if (this.editDiffPreview.diff) { + text += `\n\n${renderDiff(this.editDiffPreview.diff, { filePath: rawPath ?? undefined })}`; + } + } + } else if (this.toolName === "ls") { + const rawPath = str(this.args?.path); + const path = rawPath !== null ? shortenPath(rawPath || ".") : null; + const limit = this.args?.limit; + + text = `${theme.fg("toolTitle", theme.bold("ls"))} ${path === null ? invalidArg : theme.fg("accent", path)}`; + if (limit !== undefined) { + text += theme.fg("toolOutput", ` (limit ${limit})`); + } + + if (this.result) { + const output = this.getTextOutput().trim(); + if (output) { + const lines = output.split("\n"); + const maxLines = this.expanded ? lines.length : 20; + const displayLines = lines.slice(0, maxLines); + const remaining = lines.length - maxLines; + + text += `\n\n${displayLines.map((line: string) => theme.fg("toolOutput", line)).join("\n")}`; + if (remaining > 0) { + text += `${theme.fg("muted", `\n... (${remaining} more lines,`)} ${keyHint("expandTools", "to expand")})`; + } + } + + const entryLimit = this.result.details?.entryLimitReached; + const truncation = this.result.details?.truncation; + if (entryLimit || truncation?.truncated) { + const warnings: string[] = []; + if (entryLimit) { + warnings.push(`${entryLimit} entries limit`); + } + if (truncation?.truncated) { + warnings.push(`${formatSize(truncation.maxBytes ?? DEFAULT_MAX_BYTES)} limit`); + } + text += `\n${theme.fg("warning", `[Truncated: ${warnings.join(", ")}]`)}`; + } + } + } else if (this.toolName === "find") { + const pattern = str(this.args?.pattern); + const rawPath = str(this.args?.path); + const path = rawPath !== null ? 
shortenPath(rawPath || ".") : null; + const limit = this.args?.limit; + + text = + theme.fg("toolTitle", theme.bold("find")) + + " " + + (pattern === null ? invalidArg : theme.fg("accent", pattern || "")) + + theme.fg("toolOutput", ` in ${path === null ? invalidArg : path}`); + if (limit !== undefined) { + text += theme.fg("toolOutput", ` (limit ${limit})`); + } + + if (this.result) { + const output = this.getTextOutput().trim(); + if (output) { + const lines = output.split("\n"); + const maxLines = this.expanded ? lines.length : 20; + const displayLines = lines.slice(0, maxLines); + const remaining = lines.length - maxLines; + + text += `\n\n${displayLines.map((line: string) => theme.fg("toolOutput", line)).join("\n")}`; + if (remaining > 0) { + text += `${theme.fg("muted", `\n... (${remaining} more lines,`)} ${keyHint("expandTools", "to expand")})`; + } + } + + const resultLimit = this.result.details?.resultLimitReached; + const truncation = this.result.details?.truncation; + if (resultLimit || truncation?.truncated) { + const warnings: string[] = []; + if (resultLimit) { + warnings.push(`${resultLimit} results limit`); + } + if (truncation?.truncated) { + warnings.push(`${formatSize(truncation.maxBytes ?? DEFAULT_MAX_BYTES)} limit`); + } + text += `\n${theme.fg("warning", `[Truncated: ${warnings.join(", ")}]`)}`; + } + } + } else if (this.toolName === "grep") { + const pattern = str(this.args?.pattern); + const rawPath = str(this.args?.path); + const path = rawPath !== null ? shortenPath(rawPath || ".") : null; + const glob = str(this.args?.glob); + const limit = this.args?.limit; + + text = + theme.fg("toolTitle", theme.bold("grep")) + + " " + + (pattern === null ? invalidArg : theme.fg("accent", `/${pattern || ""}/`)) + + theme.fg("toolOutput", ` in ${path === null ? 
invalidArg : path}`); + if (glob) { + text += theme.fg("toolOutput", ` (${glob})`); + } + if (limit !== undefined) { + text += theme.fg("toolOutput", ` limit ${limit}`); + } + + if (this.result) { + const output = this.getTextOutput().trim(); + if (output) { + const lines = output.split("\n"); + const maxLines = this.expanded ? lines.length : 15; + const displayLines = lines.slice(0, maxLines); + const remaining = lines.length - maxLines; + + text += `\n\n${displayLines.map((line: string) => theme.fg("toolOutput", line)).join("\n")}`; + if (remaining > 0) { + text += `${theme.fg("muted", `\n... (${remaining} more lines,`)} ${keyHint("expandTools", "to expand")})`; + } + } + + const matchLimit = this.result.details?.matchLimitReached; + const truncation = this.result.details?.truncation; + const linesTruncated = this.result.details?.linesTruncated; + if (matchLimit || truncation?.truncated || linesTruncated) { + const warnings: string[] = []; + if (matchLimit) { + warnings.push(`${matchLimit} matches limit`); + } + if (truncation?.truncated) { + warnings.push(`${formatSize(truncation.maxBytes ?? 
DEFAULT_MAX_BYTES)} limit`); + } + if (linesTruncated) { + warnings.push("some lines truncated"); + } + text += `\n${theme.fg("warning", `[Truncated: ${warnings.join(", ")}]`)}`; + } + } + } else { + // Generic tool (shouldn't reach here for custom tools) + text = theme.fg("toolTitle", theme.bold(this.toolName)); + + const content = JSON.stringify(this.args, null, 2); + text += `\n\n${content}`; + const output = this.getTextOutput(); + if (output) { + text += `\n${output}`; + } + } + + return text; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/tree-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/tree-selector.ts new file mode 100644 index 000000000..86e36f362 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/tree-selector.ts @@ -0,0 +1,1184 @@ +import { + type Component, + Container, + type Focusable, + getEditorKeybindings, + Input, + matchesKey, + Spacer, + Text, + TruncatedText, + truncateToWidth, +} from "@gsd/pi-tui"; +import type { SessionTreeNode } from "../../../core/session-manager.js"; +import { theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; +import { keyHint } from "./keybinding-hints.js"; + +/** Gutter info: position (displayIndent where connector was) and whether to show │ */ +interface GutterInfo { + position: number; // displayIndent level where the connector was shown + show: boolean; // true = show │, false = show spaces +} + +/** Flattened tree node for navigation */ +interface FlatNode { + node: SessionTreeNode; + /** Indentation level (each level = 3 chars) */ + indent: number; + /** Whether to show connector (├─ or └─) - true if parent has multiple children */ + showConnector: boolean; + /** If showConnector, true = last sibling (└─), false = not last (├─) */ + isLast: boolean; + /** Gutter info for each ancestor branch point */ + gutters: GutterInfo[]; + /** True if this node is a root under a virtual branching root 
(multiple roots) */ + isVirtualRootChild: boolean; +} + +/** Filter mode for tree display */ +export type FilterMode = "default" | "no-tools" | "user-only" | "labeled-only" | "all"; + +/** + * Tree list component with selection and ASCII art visualization + */ +/** Tool call info for lookup */ +interface ToolCallInfo { + name: string; + arguments: Record; +} + +class TreeList implements Component { + private flatNodes: FlatNode[] = []; + private filteredNodes: FlatNode[] = []; + private selectedIndex = 0; + private currentLeafId: string | null; + private maxVisibleLines: number; + private filterMode: FilterMode = "default"; + private searchQuery = ""; + private toolCallMap: Map = new Map(); + private multipleRoots = false; + private activePathIds: Set = new Set(); + private visibleParentMap: Map = new Map(); + private visibleChildrenMap: Map = new Map(); + private lastSelectedId: string | null = null; + private foldedNodes: Set = new Set(); + + public onSelect?: (entryId: string) => void; + public onCancel?: () => void; + public onLabelEdit?: (entryId: string, currentLabel: string | undefined) => void; + + constructor( + tree: SessionTreeNode[], + currentLeafId: string | null, + maxVisibleLines: number, + initialSelectedId?: string, + initialFilterMode?: FilterMode, + ) { + this.currentLeafId = currentLeafId; + this.maxVisibleLines = maxVisibleLines; + this.filterMode = initialFilterMode ?? "default"; + this.multipleRoots = tree.length > 1; + this.flatNodes = this.flattenTree(tree); + this.buildActivePath(); + this.applyFilter(); + + // Start with initialSelectedId if provided, otherwise current leaf + const targetId = initialSelectedId ?? currentLeafId; + this.selectedIndex = this.findNearestVisibleIndex(targetId); + this.lastSelectedId = this.filteredNodes[this.selectedIndex]?.node.entry.id ?? null; + } + + /** + * Find the index of the nearest visible entry, walking up the parent chain if needed. 
+ * Returns the index in filteredNodes, or the last index as fallback. + */ + private findNearestVisibleIndex(entryId: string | null): number { + if (this.filteredNodes.length === 0) return 0; + + // Build a map for parent lookup + const entryMap = new Map(); + for (const flatNode of this.flatNodes) { + entryMap.set(flatNode.node.entry.id, flatNode); + } + + // Build a map of visible entry IDs to their indices in filteredNodes + const visibleIdToIndex = new Map(this.filteredNodes.map((node, i) => [node.node.entry.id, i])); + + // Walk from entryId up to root, looking for a visible entry + let currentId = entryId; + while (currentId !== null) { + const index = visibleIdToIndex.get(currentId); + if (index !== undefined) return index; + const node = entryMap.get(currentId); + if (!node) break; + currentId = node.node.entry.parentId ?? null; + } + + // Fallback: last visible entry + return this.filteredNodes.length - 1; + } + + /** Build the set of entry IDs on the path from root to current leaf */ + private buildActivePath(): void { + this.activePathIds.clear(); + if (!this.currentLeafId) return; + + // Build a map of id -> entry for parent lookup + const entryMap = new Map(); + for (const flatNode of this.flatNodes) { + entryMap.set(flatNode.node.entry.id, flatNode); + } + + // Walk from leaf to root + let currentId: string | null = this.currentLeafId; + while (currentId) { + this.activePathIds.add(currentId); + const node = entryMap.get(currentId); + if (!node) break; + currentId = node.node.entry.parentId ?? 
null; + } + } + + private flattenTree(roots: SessionTreeNode[]): FlatNode[] { + const result: FlatNode[] = []; + this.toolCallMap.clear(); + + // Indentation rules: + // - At indent 0: stay at 0 unless parent has >1 children (then +1) + // - At indent 1: children always go to indent 2 (visual grouping of subtree) + // - At indent 2+: stay flat for single-child chains, +1 only if parent branches + + // Stack items: [node, indent, justBranched, showConnector, isLast, gutters, isVirtualRootChild] + type StackItem = [SessionTreeNode, number, boolean, boolean, boolean, GutterInfo[], boolean]; + const stack: StackItem[] = []; + + // Determine which subtrees contain the active leaf (to sort current branch first) + // Use iterative post-order traversal to avoid stack overflow + const containsActive = new Map(); + const leafId = this.currentLeafId; + { + // Build list in pre-order, then process in reverse for post-order effect + const allNodes: SessionTreeNode[] = []; + const preOrderStack: SessionTreeNode[] = [...roots]; + while (preOrderStack.length > 0) { + const node = preOrderStack.pop()!; + allNodes.push(node); + // Push children in reverse so they're processed left-to-right + for (let i = node.children.length - 1; i >= 0; i--) { + preOrderStack.push(node.children[i]); + } + } + // Process in reverse (post-order): children before parents + for (let i = allNodes.length - 1; i >= 0; i--) { + const node = allNodes[i]; + let has = leafId !== null && node.entry.id === leafId; + for (const child of node.children) { + if (containsActive.get(child)) { + has = true; + } + } + containsActive.set(node, has); + } + } + + // Add roots in reverse order, prioritizing the one containing the active leaf + // If multiple roots, treat them as children of a virtual root that branches + const multipleRoots = roots.length > 1; + const orderedRoots = [...roots].sort((a, b) => Number(containsActive.get(b)) - Number(containsActive.get(a))); + for (let i = orderedRoots.length - 1; i >= 0; i--) 
{ + const isLast = i === orderedRoots.length - 1; + stack.push([orderedRoots[i], multipleRoots ? 1 : 0, multipleRoots, multipleRoots, isLast, [], multipleRoots]); + } + + while (stack.length > 0) { + const [node, indent, justBranched, showConnector, isLast, gutters, isVirtualRootChild] = stack.pop()!; + + // Extract tool calls from assistant messages for later lookup + const entry = node.entry; + if (entry.type === "message" && entry.message.role === "assistant") { + const content = (entry.message as { content?: unknown }).content; + if (Array.isArray(content)) { + for (const block of content) { + if (typeof block === "object" && block !== null && "type" in block && block.type === "toolCall") { + const tc = block as { id: string; name: string; arguments: Record }; + this.toolCallMap.set(tc.id, { name: tc.name, arguments: tc.arguments }); + } + } + } + } + + result.push({ node, indent, showConnector, isLast, gutters, isVirtualRootChild }); + + const children = node.children; + const multipleChildren = children.length > 1; + + // Order children so the branch containing the active leaf comes first + const orderedChildren = (() => { + const prioritized: SessionTreeNode[] = []; + const rest: SessionTreeNode[] = []; + for (const child of children) { + if (containsActive.get(child)) { + prioritized.push(child); + } else { + rest.push(child); + } + } + return [...prioritized, ...rest]; + })(); + + // Calculate child indent + let childIndent: number; + if (multipleChildren) { + // Parent branches: children get +1 + childIndent = indent + 1; + } else if (justBranched && indent > 0) { + // First generation after a branch: +1 for visual grouping + childIndent = indent + 1; + } else { + // Single-child chain: stay flat + childIndent = indent; + } + + // Build gutters for children + // If this node showed a connector, add a gutter entry for descendants + // Only add gutter if connector is actually displayed (not suppressed for virtual root children) + const connectorDisplayed = 
showConnector && !isVirtualRootChild; + // When connector is displayed, add a gutter entry at the connector's position + // Connector is at position (displayIndent - 1), so gutter should be there too + const currentDisplayIndent = this.multipleRoots ? Math.max(0, indent - 1) : indent; + const connectorPosition = Math.max(0, currentDisplayIndent - 1); + const childGutters: GutterInfo[] = connectorDisplayed + ? [...gutters, { position: connectorPosition, show: !isLast }] + : gutters; + + // Add children in reverse order + for (let i = orderedChildren.length - 1; i >= 0; i--) { + const childIsLast = i === orderedChildren.length - 1; + stack.push([ + orderedChildren[i], + childIndent, + multipleChildren, + multipleChildren, + childIsLast, + childGutters, + false, + ]); + } + } + + return result; + } + + private applyFilter(): void { + // Update lastSelectedId only when we have a valid selection (non-empty list) + // This preserves the selection when switching through empty filter results + if (this.filteredNodes.length > 0) { + this.lastSelectedId = this.filteredNodes[this.selectedIndex]?.node.entry.id ?? 
this.lastSelectedId; + } + + const searchTokens = this.searchQuery.toLowerCase().split(/\s+/).filter(Boolean); + + this.filteredNodes = this.flatNodes.filter((flatNode) => { + const entry = flatNode.node.entry; + const isCurrentLeaf = entry.id === this.currentLeafId; + + // Skip assistant messages with only tool calls (no text) unless error/aborted + // Always show current leaf so active position is visible + if (entry.type === "message" && entry.message.role === "assistant" && !isCurrentLeaf) { + const msg = entry.message as { stopReason?: string; content?: unknown }; + const hasText = this.hasTextContent(msg.content); + const isErrorOrAborted = msg.stopReason && msg.stopReason !== "stop" && msg.stopReason !== "toolUse"; + // Only hide if no text AND not an error/aborted message + if (!hasText && !isErrorOrAborted) { + return false; + } + } + + // Apply filter mode + let passesFilter = true; + // Entry types hidden in default view (settings/bookkeeping) + const isSettingsEntry = + entry.type === "label" || + entry.type === "custom" || + entry.type === "model_change" || + entry.type === "thinking_level_change"; + + switch (this.filterMode) { + case "user-only": + // Just user messages + passesFilter = entry.type === "message" && entry.message.role === "user"; + break; + case "no-tools": + // Default minus tool results + passesFilter = !isSettingsEntry && !(entry.type === "message" && entry.message.role === "toolResult"); + break; + case "labeled-only": + // Just labeled entries + passesFilter = flatNode.node.label !== undefined; + break; + case "all": + // Show everything + passesFilter = true; + break; + default: + // Default mode: hide settings/bookkeeping entries + passesFilter = !isSettingsEntry; + break; + } + + if (!passesFilter) return false; + + // Apply search filter + if (searchTokens.length > 0) { + const nodeText = this.getSearchableText(flatNode.node).toLowerCase(); + return searchTokens.every((token) => nodeText.includes(token)); + } + + return true; 
+ }); + + // Filter out descendants of folded nodes. + if (this.foldedNodes.size > 0) { + const skipSet = new Set(); + for (const flatNode of this.flatNodes) { + const { id, parentId } = flatNode.node.entry; + if (parentId != null && (this.foldedNodes.has(parentId) || skipSet.has(parentId))) { + skipSet.add(id); + } + } + this.filteredNodes = this.filteredNodes.filter((flatNode) => !skipSet.has(flatNode.node.entry.id)); + } + + // Recalculate visual structure (indent, connectors, gutters) based on visible tree + this.recalculateVisualStructure(); + + // Try to preserve cursor on the same node, or find nearest visible ancestor + if (this.lastSelectedId) { + this.selectedIndex = this.findNearestVisibleIndex(this.lastSelectedId); + } else if (this.selectedIndex >= this.filteredNodes.length) { + // Clamp index if out of bounds + this.selectedIndex = Math.max(0, this.filteredNodes.length - 1); + } + + // Update lastSelectedId to the actual selection (may have changed due to parent walk) + if (this.filteredNodes.length > 0) { + this.lastSelectedId = this.filteredNodes[this.selectedIndex]?.node.entry.id ?? this.lastSelectedId; + } + } + + /** + * Recompute indentation/connectors for the filtered view + * + * Filtering can hide intermediate entries; descendants attach to the nearest visible ancestor. + * Keep indentation semantics aligned with flattenTree() so single-child chains don't drift right. + */ + private recalculateVisualStructure(): void { + if (this.filteredNodes.length === 0) return; + + const visibleIds = new Set(this.filteredNodes.map((n) => n.node.entry.id)); + + // Build entry map for efficient parent lookup (using full tree) + const entryMap = new Map(); + for (const flatNode of this.flatNodes) { + entryMap.set(flatNode.node.entry.id, flatNode); + } + + // Find nearest visible ancestor for a node + const findVisibleAncestor = (nodeId: string): string | null => { + let currentId = entryMap.get(nodeId)?.node.entry.parentId ?? 
null; + while (currentId !== null) { + if (visibleIds.has(currentId)) { + return currentId; + } + currentId = entryMap.get(currentId)?.node.entry.parentId ?? null; + } + return null; + }; + + // Build visible tree structure: + // - visibleParent: nodeId → nearest visible ancestor (or null for roots) + // - visibleChildren: parentId → list of visible children (in filteredNodes order) + const visibleParent = new Map(); + const visibleChildren = new Map(); + visibleChildren.set(null, []); // root-level nodes + + for (const flatNode of this.filteredNodes) { + const nodeId = flatNode.node.entry.id; + const ancestorId = findVisibleAncestor(nodeId); + visibleParent.set(nodeId, ancestorId); + + if (!visibleChildren.has(ancestorId)) { + visibleChildren.set(ancestorId, []); + } + visibleChildren.get(ancestorId)!.push(nodeId); + } + + // Update multipleRoots based on visible roots + const visibleRootIds = visibleChildren.get(null)!; + this.multipleRoots = visibleRootIds.length > 1; + + // Build a map for quick lookup: nodeId → FlatNode + const filteredNodeMap = new Map(); + for (const flatNode of this.filteredNodes) { + filteredNodeMap.set(flatNode.node.entry.id, flatNode); + } + + // DFS over the visible tree using flattenTree() indentation semantics + // Stack items: [nodeId, indent, justBranched, showConnector, isLast, gutters, isVirtualRootChild] + type StackItem = [string, number, boolean, boolean, boolean, GutterInfo[], boolean]; + const stack: StackItem[] = []; + + // Add visible roots in reverse order (to process in forward order via stack) + for (let i = visibleRootIds.length - 1; i >= 0; i--) { + const isLast = i === visibleRootIds.length - 1; + stack.push([ + visibleRootIds[i], + this.multipleRoots ? 
1 : 0, + this.multipleRoots, + this.multipleRoots, + isLast, + [], + this.multipleRoots, + ]); + } + + while (stack.length > 0) { + const [nodeId, indent, justBranched, showConnector, isLast, gutters, isVirtualRootChild] = stack.pop()!; + + const flatNode = filteredNodeMap.get(nodeId); + if (!flatNode) continue; + + // Update this node's visual properties + flatNode.indent = indent; + flatNode.showConnector = showConnector; + flatNode.isLast = isLast; + flatNode.gutters = gutters; + flatNode.isVirtualRootChild = isVirtualRootChild; + + // Get visible children of this node + const children = visibleChildren.get(nodeId) || []; + const multipleChildren = children.length > 1; + + // Child indent follows flattenTree(): branch points (and first generation after a branch) shift +1 + let childIndent: number; + if (multipleChildren) { + childIndent = indent + 1; + } else if (justBranched && indent > 0) { + childIndent = indent + 1; + } else { + childIndent = indent; + } + + // Child gutters follow flattenTree() connector/gutter rules + const connectorDisplayed = showConnector && !isVirtualRootChild; + const currentDisplayIndent = this.multipleRoots ? Math.max(0, indent - 1) : indent; + const connectorPosition = Math.max(0, currentDisplayIndent - 1); + const childGutters: GutterInfo[] = connectorDisplayed + ? 
[...gutters, { position: connectorPosition, show: !isLast }] + : gutters; + + // Add children in reverse order (to process in forward order via stack) + for (let i = children.length - 1; i >= 0; i--) { + const childIsLast = i === children.length - 1; + stack.push([ + children[i], + childIndent, + multipleChildren, + multipleChildren, + childIsLast, + childGutters, + false, + ]); + } + } + + // Store visible tree maps for ancestor/descendant lookups in navigation + this.visibleParentMap = visibleParent; + this.visibleChildrenMap = visibleChildren; + } + + /** Get searchable text content from a node */ + private getSearchableText(node: SessionTreeNode): string { + const entry = node.entry; + const parts: string[] = []; + + if (node.label) { + parts.push(node.label); + } + + switch (entry.type) { + case "message": { + const msg = entry.message; + parts.push(msg.role); + if ("content" in msg && msg.content) { + parts.push(this.extractContent(msg.content)); + } + if (msg.role === "bashExecution") { + const bashMsg = msg as { command?: string }; + if (bashMsg.command) parts.push(bashMsg.command); + } + break; + } + case "custom_message": { + parts.push(entry.customType); + if (typeof entry.content === "string") { + parts.push(entry.content); + } else { + parts.push(this.extractContent(entry.content)); + } + break; + } + case "compaction": + parts.push("compaction"); + break; + case "branch_summary": + parts.push("branch summary", entry.summary); + break; + case "model_change": + parts.push("model", entry.modelId); + break; + case "thinking_level_change": + parts.push("thinking", entry.thinkingLevel); + break; + case "custom": + parts.push("custom", entry.customType); + break; + case "label": + parts.push("label", entry.label ?? 
""); + break; + } + + return parts.join(" "); + } + + invalidate(): void {} + + getSearchQuery(): string { + return this.searchQuery; + } + + getSelectedNode(): SessionTreeNode | undefined { + return this.filteredNodes[this.selectedIndex]?.node; + } + + updateNodeLabel(entryId: string, label: string | undefined): void { + for (const flatNode of this.flatNodes) { + if (flatNode.node.entry.id === entryId) { + flatNode.node.label = label; + break; + } + } + } + + private getFilterLabel(): string { + switch (this.filterMode) { + case "no-tools": + return " [no-tools]"; + case "user-only": + return " [user]"; + case "labeled-only": + return " [labeled]"; + case "all": + return " [all]"; + default: + return ""; + } + } + + render(width: number): string[] { + const lines: string[] = []; + + if (this.filteredNodes.length === 0) { + lines.push(truncateToWidth(theme.fg("muted", " No entries found"), width)); + lines.push(truncateToWidth(theme.fg("muted", ` (0/0)${this.getFilterLabel()}`), width)); + return lines; + } + + const startIndex = Math.max( + 0, + Math.min( + this.selectedIndex - Math.floor(this.maxVisibleLines / 2), + this.filteredNodes.length - this.maxVisibleLines, + ), + ); + const endIndex = Math.min(startIndex + this.maxVisibleLines, this.filteredNodes.length); + + for (let i = startIndex; i < endIndex; i++) { + const flatNode = this.filteredNodes[i]; + const entry = flatNode.node.entry; + const isSelected = i === this.selectedIndex; + + // Build line: cursor + prefix + path marker + label + content + const cursor = isSelected ? theme.fg("accent", "› ") : " "; + + // If multiple roots, shift display (roots at 0, not 1) + const displayIndent = this.multipleRoots ? Math.max(0, flatNode.indent - 1) : flatNode.indent; + + // Build prefix with gutters at their correct positions + // Each gutter has a position (displayIndent where its connector was shown) + const connector = + flatNode.showConnector && !flatNode.isVirtualRootChild ? (flatNode.isLast ? 
"└─ " : "├─ ") : ""; + const connectorPosition = connector ? displayIndent - 1 : -1; + + // Build prefix char by char, placing gutters and connector at their positions + const totalChars = displayIndent * 3; + const prefixChars: string[] = []; + const isFolded = this.foldedNodes.has(entry.id); + for (let i = 0; i < totalChars; i++) { + const level = Math.floor(i / 3); + const posInLevel = i % 3; + + // Check if there's a gutter at this level + const gutter = flatNode.gutters.find((g) => g.position === level); + if (gutter) { + if (posInLevel === 0) { + prefixChars.push(gutter.show ? "│" : " "); + } else { + prefixChars.push(" "); + } + } else if (connector && level === connectorPosition) { + // Connector at this level, with fold indicator + if (posInLevel === 0) { + prefixChars.push(flatNode.isLast ? "└" : "├"); + } else if (posInLevel === 1) { + const foldable = this.isFoldable(entry.id); + prefixChars.push(isFolded ? "⊞" : foldable ? "⊟" : "─"); + } else { + prefixChars.push(" "); + } + } else { + prefixChars.push(" "); + } + } + const prefix = prefixChars.join(""); + + // Fold marker for nodes without connectors (roots) + const showsFoldInConnector = flatNode.showConnector && !flatNode.isVirtualRootChild; + const foldMarker = isFolded && !showsFoldInConnector ? theme.fg("accent", "⊞ ") : ""; + + // Active path marker - shown right before the entry text + const isOnActivePath = this.activePathIds.has(entry.id); + const pathMarker = isOnActivePath ? theme.fg("accent", "• ") : ""; + + const label = flatNode.node.label ? 
theme.fg("warning", `[${flatNode.node.label}] `) : ""; + const content = this.getEntryDisplayText(flatNode.node, isSelected); + + let line = cursor + theme.fg("dim", prefix) + foldMarker + pathMarker + label + content; + if (isSelected) { + line = theme.bg("selectedBg", line); + } + lines.push(truncateToWidth(line, width)); + } + + lines.push( + truncateToWidth( + theme.fg("muted", ` (${this.selectedIndex + 1}/${this.filteredNodes.length})${this.getFilterLabel()}`), + width, + ), + ); + + return lines; + } + + private getEntryDisplayText(node: SessionTreeNode, isSelected: boolean): string { + const entry = node.entry; + let result: string; + + const normalize = (s: string) => s.replace(/[\n\t]/g, " ").trim(); + + switch (entry.type) { + case "message": { + const msg = entry.message; + const role = msg.role; + if (role === "user") { + const msgWithContent = msg as { content?: unknown }; + const content = normalize(this.extractContent(msgWithContent.content)); + result = theme.fg("accent", "user: ") + content; + } else if (role === "assistant") { + const msgWithContent = msg as { content?: unknown; stopReason?: string; errorMessage?: string }; + const textContent = normalize(this.extractContent(msgWithContent.content)); + if (textContent) { + result = theme.fg("success", "assistant: ") + textContent; + } else if (msgWithContent.stopReason === "aborted") { + result = theme.fg("success", "assistant: ") + theme.fg("muted", "(aborted)"); + } else if (msgWithContent.errorMessage) { + const errMsg = normalize(msgWithContent.errorMessage).slice(0, 80); + result = theme.fg("success", "assistant: ") + theme.fg("error", errMsg); + } else { + result = theme.fg("success", "assistant: ") + theme.fg("muted", "(no content)"); + } + } else if (role === "toolResult") { + const toolMsg = msg as { toolCallId?: string; toolName?: string }; + const toolCall = toolMsg.toolCallId ? 
this.toolCallMap.get(toolMsg.toolCallId) : undefined; + if (toolCall) { + result = theme.fg("muted", this.formatToolCall(toolCall.name, toolCall.arguments)); + } else { + result = theme.fg("muted", `[${toolMsg.toolName ?? "tool"}]`); + } + } else if (role === "bashExecution") { + const bashMsg = msg as { command?: string }; + result = theme.fg("dim", `[bash]: ${normalize(bashMsg.command ?? "")}`); + } else { + result = theme.fg("dim", `[${role}]`); + } + break; + } + case "custom_message": { + const content = + typeof entry.content === "string" + ? entry.content + : entry.content + .filter((c): c is { type: "text"; text: string } => c.type === "text") + .map((c) => c.text) + .join(""); + result = theme.fg("customMessageLabel", `[${entry.customType}]: `) + normalize(content); + break; + } + case "compaction": { + const tokens = Math.round(entry.tokensBefore / 1000); + result = theme.fg("borderAccent", `[compaction: ${tokens}k tokens]`); + break; + } + case "branch_summary": + result = theme.fg("warning", `[branch summary]: `) + normalize(entry.summary); + break; + case "model_change": + result = theme.fg("dim", `[model: ${entry.modelId}]`); + break; + case "thinking_level_change": + result = theme.fg("dim", `[thinking: ${entry.thinkingLevel}]`); + break; + case "custom": + result = theme.fg("dim", `[custom: ${entry.customType}]`); + break; + case "label": + result = theme.fg("dim", `[label: ${entry.label ?? "(cleared)"}]`); + break; + default: + result = ""; + } + + return isSelected ? 
theme.bold(result) : result; + } + + private extractContent(content: unknown): string { + const maxLen = 200; + if (typeof content === "string") return content.slice(0, maxLen); + if (Array.isArray(content)) { + let result = ""; + for (const c of content) { + if (typeof c === "object" && c !== null && "type" in c && c.type === "text") { + result += (c as { text: string }).text; + if (result.length >= maxLen) return result.slice(0, maxLen); + } + } + return result; + } + return ""; + } + + private hasTextContent(content: unknown): boolean { + if (typeof content === "string") return content.trim().length > 0; + if (Array.isArray(content)) { + for (const c of content) { + if (typeof c === "object" && c !== null && "type" in c && c.type === "text") { + const text = (c as { text?: string }).text; + if (text && text.trim().length > 0) return true; + } + } + } + return false; + } + + private formatToolCall(name: string, args: Record): string { + const shortenPath = (p: string): string => { + const home = process.env.HOME || process.env.USERPROFILE || ""; + if (home && p.startsWith(home)) return `~${p.slice(home.length)}`; + return p; + }; + + switch (name) { + case "read": { + const path = shortenPath(String(args.path || args.file_path || "")); + const offset = args.offset as number | undefined; + const limit = args.limit as number | undefined; + let display = path; + if (offset !== undefined || limit !== undefined) { + const start = offset ?? 1; + const end = limit !== undefined ? start + limit - 1 : ""; + display += `:${start}${end ? 
`-${end}` : ""}`; + } + return `[read: ${display}]`; + } + case "write": { + const path = shortenPath(String(args.path || args.file_path || "")); + return `[write: ${path}]`; + } + case "edit": { + const path = shortenPath(String(args.path || args.file_path || "")); + return `[edit: ${path}]`; + } + case "bash": { + const rawCmd = String(args.command || ""); + const cmd = rawCmd + .replace(/[\n\t]/g, " ") + .trim() + .slice(0, 50); + return `[bash: ${cmd}${rawCmd.length > 50 ? "..." : ""}]`; + } + case "grep": { + const pattern = String(args.pattern || ""); + const path = shortenPath(String(args.path || ".")); + return `[grep: /${pattern}/ in ${path}]`; + } + case "find": { + const pattern = String(args.pattern || ""); + const path = shortenPath(String(args.path || ".")); + return `[find: ${pattern} in ${path}]`; + } + case "ls": { + const path = shortenPath(String(args.path || ".")); + return `[ls: ${path}]`; + } + default: { + // Custom tool - show name and truncated JSON args + const argsStr = JSON.stringify(args).slice(0, 40); + return `[${name}: ${argsStr}${JSON.stringify(args).length > 40 ? "..." : ""}]`; + } + } + } + + handleInput(keyData: string): void { + const kb = getEditorKeybindings(); + if (kb.matches(keyData, "selectUp")) { + this.selectedIndex = this.selectedIndex === 0 ? this.filteredNodes.length - 1 : this.selectedIndex - 1; + } else if (kb.matches(keyData, "selectDown")) { + this.selectedIndex = this.selectedIndex === this.filteredNodes.length - 1 ? 
0 : this.selectedIndex + 1; + } else if (kb.matches(keyData, "treeFoldOrUp")) { + const currentId = this.filteredNodes[this.selectedIndex]?.node.entry.id; + if (currentId && this.isFoldable(currentId) && !this.foldedNodes.has(currentId)) { + this.foldedNodes.add(currentId); + this.applyFilter(); + } else { + this.selectedIndex = this.findBranchSegmentStart("up"); + } + } else if (kb.matches(keyData, "treeUnfoldOrDown")) { + const currentId = this.filteredNodes[this.selectedIndex]?.node.entry.id; + if (currentId && this.foldedNodes.has(currentId)) { + this.foldedNodes.delete(currentId); + this.applyFilter(); + } else { + this.selectedIndex = this.findBranchSegmentStart("down"); + } + } else if (kb.matches(keyData, "cursorLeft") || kb.matches(keyData, "selectPageUp")) { + // Page up + this.selectedIndex = Math.max(0, this.selectedIndex - this.maxVisibleLines); + } else if (kb.matches(keyData, "cursorRight") || kb.matches(keyData, "selectPageDown")) { + // Page down + this.selectedIndex = Math.min(this.filteredNodes.length - 1, this.selectedIndex + this.maxVisibleLines); + } else if (kb.matches(keyData, "selectConfirm")) { + const selected = this.filteredNodes[this.selectedIndex]; + if (selected && this.onSelect) { + this.onSelect(selected.node.entry.id); + } + } else if (kb.matches(keyData, "selectCancel")) { + if (this.searchQuery) { + this.searchQuery = ""; + this.foldedNodes.clear(); + this.applyFilter(); + } else { + this.onCancel?.(); + } + } else if (matchesKey(keyData, "ctrl+d")) { + // Direct filter: default + this.filterMode = "default"; + this.foldedNodes.clear(); + this.applyFilter(); + } else if (matchesKey(keyData, "ctrl+t")) { + // Toggle filter: no-tools ↔ default + this.filterMode = this.filterMode === "no-tools" ? "default" : "no-tools"; + this.foldedNodes.clear(); + this.applyFilter(); + } else if (matchesKey(keyData, "ctrl+u")) { + // Toggle filter: user-only ↔ default + this.filterMode = this.filterMode === "user-only" ? 
"default" : "user-only"; + this.foldedNodes.clear(); + this.applyFilter(); + } else if (matchesKey(keyData, "ctrl+l")) { + // Toggle filter: labeled-only ↔ default + this.filterMode = this.filterMode === "labeled-only" ? "default" : "labeled-only"; + this.foldedNodes.clear(); + this.applyFilter(); + } else if (matchesKey(keyData, "ctrl+a")) { + // Toggle filter: all ↔ default + this.filterMode = this.filterMode === "all" ? "default" : "all"; + this.foldedNodes.clear(); + this.applyFilter(); + } else if (matchesKey(keyData, "shift+ctrl+o")) { + // Cycle filter backwards + const modes: FilterMode[] = ["default", "no-tools", "user-only", "labeled-only", "all"]; + const currentIndex = modes.indexOf(this.filterMode); + this.filterMode = modes[(currentIndex - 1 + modes.length) % modes.length]; + this.foldedNodes.clear(); + this.applyFilter(); + } else if (matchesKey(keyData, "ctrl+o")) { + // Cycle filter forwards: default → no-tools → user-only → labeled-only → all → default + const modes: FilterMode[] = ["default", "no-tools", "user-only", "labeled-only", "all"]; + const currentIndex = modes.indexOf(this.filterMode); + this.filterMode = modes[(currentIndex + 1) % modes.length]; + this.foldedNodes.clear(); + this.applyFilter(); + } else if (kb.matches(keyData, "deleteCharBackward")) { + if (this.searchQuery.length > 0) { + this.searchQuery = this.searchQuery.slice(0, -1); + this.foldedNodes.clear(); + this.applyFilter(); + } + } else if (matchesKey(keyData, "shift+l")) { + const selected = this.filteredNodes[this.selectedIndex]; + if (selected && this.onLabelEdit) { + this.onLabelEdit(selected.node.entry.id, selected.node.label); + } + } else { + const hasControlChars = [...keyData].some((ch) => { + const code = ch.charCodeAt(0); + return code < 32 || code === 0x7f || (code >= 0x80 && code <= 0x9f); + }); + if (!hasControlChars && keyData.length > 0) { + this.searchQuery += keyData; + this.foldedNodes.clear(); + this.applyFilter(); + } + } + } + + /** + * Whether a node 
can be folded. A node is foldable if it has visible children + * and is either a root (no visible parent) or a segment start (visible parent + * has multiple visible children). + */ + private isFoldable(entryId: string): boolean { + const children = this.visibleChildrenMap.get(entryId); + if (!children || children.length === 0) return false; + const parentId = this.visibleParentMap.get(entryId); + if (parentId === null || parentId === undefined) return true; + const siblings = this.visibleChildrenMap.get(parentId); + return siblings !== undefined && siblings.length > 1; + } + + /** + * Find the index of the next branch segment start in the given direction. + * A segment start is the first child of a branch point. + * + * "up" walks the visible parent chain; "down" walks visible children + * (always following the first child). + */ + private findBranchSegmentStart(direction: "up" | "down"): number { + const selectedId = this.filteredNodes[this.selectedIndex]?.node.entry.id; + if (!selectedId) return this.selectedIndex; + + const indexByEntryId = new Map(this.filteredNodes.map((node, i) => [node.node.entry.id, i])); + let currentId: string = selectedId; + if (direction === "down") { + while (true) { + const children: string[] = this.visibleChildrenMap.get(currentId) ?? []; + if (children.length === 0) return indexByEntryId.get(currentId)!; + if (children.length > 1) return indexByEntryId.get(children[0])!; + currentId = children[0]; + } + } + + // direction === "up" + while (true) { + const parentId: string | null = this.visibleParentMap.get(currentId) ?? null; + if (parentId === null) return indexByEntryId.get(currentId)!; + const children = this.visibleChildrenMap.get(parentId) ?? 
[]; + if (children.length > 1) { + const segmentStart = indexByEntryId.get(currentId)!; + if (segmentStart < this.selectedIndex) { + return segmentStart; + } + } + currentId = parentId; + } + } +} + +/** Component that displays the current search query */ +class SearchLine implements Component { + constructor(private treeList: TreeList) {} + + invalidate(): void {} + + render(width: number): string[] { + const query = this.treeList.getSearchQuery(); + if (query) { + return [truncateToWidth(` ${theme.fg("muted", "Type to search:")} ${theme.fg("accent", query)}`, width)]; + } + return [truncateToWidth(` ${theme.fg("muted", "Type to search:")}`, width)]; + } + + handleInput(_keyData: string): void {} +} + +/** Label input component shown when editing a label */ +class LabelInput implements Component, Focusable { + private input: Input; + private entryId: string; + public onSubmit?: (entryId: string, label: string | undefined) => void; + public onCancel?: () => void; + + // Focusable implementation - propagate to input for IME cursor positioning + private _focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + this.input.focused = value; + } + + constructor(entryId: string, currentLabel: string | undefined) { + this.entryId = entryId; + this.input = new Input(); + if (currentLabel) { + this.input.setValue(currentLabel); + } + } + + invalidate(): void {} + + render(width: number): string[] { + const lines: string[] = []; + const indent = " "; + const availableWidth = width - indent.length; + lines.push(truncateToWidth(`${indent}${theme.fg("muted", "Label (empty to remove):")}`, width)); + lines.push(...this.input.render(availableWidth).map((line) => truncateToWidth(`${indent}${line}`, width))); + lines.push( + truncateToWidth(`${indent}${keyHint("selectConfirm", "save")} ${keyHint("selectCancel", "cancel")}`, width), + ); + return lines; + } + + handleInput(keyData: string): void { + const kb = 
getEditorKeybindings(); + if (kb.matches(keyData, "selectConfirm")) { + const value = this.input.getValue().trim(); + this.onSubmit?.(this.entryId, value || undefined); + } else if (kb.matches(keyData, "selectCancel")) { + this.onCancel?.(); + } else { + this.input.handleInput(keyData); + } + } +} + +/** + * Component that renders a session tree selector for navigation + */ +export class TreeSelectorComponent extends Container implements Focusable { + private treeList: TreeList; + private labelInput: LabelInput | null = null; + private labelInputContainer: Container; + private treeContainer: Container; + private onLabelChangeCallback?: (entryId: string, label: string | undefined) => void; + + // Focusable implementation - propagate to labelInput when active for IME cursor positioning + private _focused = false; + get focused(): boolean { + return this._focused; + } + set focused(value: boolean) { + this._focused = value; + // Propagate to labelInput when it's active + if (this.labelInput) { + this.labelInput.focused = value; + } + } + + constructor( + tree: SessionTreeNode[], + currentLeafId: string | null, + terminalHeight: number, + onSelect: (entryId: string) => void, + onCancel: () => void, + onLabelChange?: (entryId: string, label: string | undefined) => void, + initialSelectedId?: string, + initialFilterMode?: FilterMode, + ) { + super(); + + this.onLabelChangeCallback = onLabelChange; + const maxVisibleLines = Math.max(5, Math.floor(terminalHeight / 2)); + + this.treeList = new TreeList(tree, currentLeafId, maxVisibleLines, initialSelectedId, initialFilterMode); + this.treeList.onSelect = onSelect; + this.treeList.onCancel = onCancel; + this.treeList.onLabelEdit = (entryId, currentLabel) => this.showLabelInput(entryId, currentLabel); + + this.treeContainer = new Container(); + this.treeContainer.addChild(this.treeList); + + this.labelInputContainer = new Container(); + + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder()); + this.addChild(new 
Text(theme.bold(" Session Tree"), 1, 0)); + this.addChild( + new TruncatedText( + theme.fg("muted", " ↑/↓: move. ←/→: page. ^←/^→ or Alt+←/Alt+→: fold/branch. Shift+L: label. ") + + theme.fg("muted", "^D/^T/^U/^L/^A: filters (^O/⇧^O cycle)"), + 0, + 0, + ), + ); + this.addChild(new SearchLine(this.treeList)); + this.addChild(new DynamicBorder()); + this.addChild(new Spacer(1)); + this.addChild(this.treeContainer); + this.addChild(this.labelInputContainer); + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder()); + + if (tree.length === 0) { + setTimeout(() => onCancel(), 100); + } + } + + private showLabelInput(entryId: string, currentLabel: string | undefined): void { + this.labelInput = new LabelInput(entryId, currentLabel); + this.labelInput.onSubmit = (id, label) => { + this.treeList.updateNodeLabel(id, label); + this.onLabelChangeCallback?.(id, label); + this.hideLabelInput(); + }; + this.labelInput.onCancel = () => this.hideLabelInput(); + + // Propagate current focused state to the new labelInput + this.labelInput.focused = this._focused; + + this.treeContainer.clear(); + this.labelInputContainer.clear(); + this.labelInputContainer.addChild(this.labelInput); + } + + private hideLabelInput(): void { + this.labelInput = null; + this.labelInputContainer.clear(); + this.treeContainer.clear(); + this.treeContainer.addChild(this.treeList); + } + + handleInput(keyData: string): void { + if (this.labelInput) { + this.labelInput.handleInput(keyData); + } else { + this.treeList.handleInput(keyData); + } + } + + getTreeList(): TreeList { + return this.treeList; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/user-message-selector.ts b/packages/pi-coding-agent/src/modes/interactive/components/user-message-selector.ts new file mode 100644 index 000000000..94ccf93df --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/user-message-selector.ts @@ -0,0 +1,143 @@ +import { type Component, Container, 
getEditorKeybindings, Spacer, Text, truncateToWidth } from "@gsd/pi-tui"; +import { theme } from "../theme/theme.js"; +import { DynamicBorder } from "./dynamic-border.js"; + +interface UserMessageItem { + id: string; // Entry ID in the session + text: string; // The message text + timestamp?: string; // Optional timestamp if available +} + +/** + * Custom user message list component with selection + */ +class UserMessageList implements Component { + private messages: UserMessageItem[] = []; + private selectedIndex: number = 0; + public onSelect?: (entryId: string) => void; + public onCancel?: () => void; + private maxVisible: number = 10; // Max messages visible + + constructor(messages: UserMessageItem[]) { + // Store messages in chronological order (oldest to newest) + this.messages = messages; + // Start with the last (most recent) message selected + this.selectedIndex = Math.max(0, messages.length - 1); + } + + invalidate(): void { + // No cached state to invalidate currently + } + + render(width: number): string[] { + const lines: string[] = []; + + if (this.messages.length === 0) { + lines.push(theme.fg("muted", " No user messages found")); + return lines; + } + + // Calculate visible range with scrolling + const startIndex = Math.max( + 0, + Math.min(this.selectedIndex - Math.floor(this.maxVisible / 2), this.messages.length - this.maxVisible), + ); + const endIndex = Math.min(startIndex + this.maxVisible, this.messages.length); + + // Render visible messages (2 lines per message + blank line) + for (let i = startIndex; i < endIndex; i++) { + const message = this.messages[i]; + const isSelected = i === this.selectedIndex; + + // Normalize message to single line + const normalizedMessage = message.text.replace(/\n/g, " ").trim(); + + // First line: cursor + message + const cursor = isSelected ? 
theme.fg("accent", "› ") : " "; + const maxMsgWidth = width - 2; // Account for cursor (2 chars) + const truncatedMsg = truncateToWidth(normalizedMessage, maxMsgWidth); + const messageLine = cursor + (isSelected ? theme.bold(truncatedMsg) : truncatedMsg); + + lines.push(messageLine); + + // Second line: metadata (position in history) + const position = i + 1; + const metadata = ` Message ${position} of ${this.messages.length}`; + const metadataLine = theme.fg("muted", metadata); + lines.push(metadataLine); + lines.push(""); // Blank line between messages + } + + // Add scroll indicator if needed + if (startIndex > 0 || endIndex < this.messages.length) { + const scrollInfo = theme.fg("muted", ` (${this.selectedIndex + 1}/${this.messages.length})`); + lines.push(scrollInfo); + } + + return lines; + } + + handleInput(keyData: string): void { + const kb = getEditorKeybindings(); + // Up arrow - go to previous (older) message, wrap to bottom when at top + if (kb.matches(keyData, "selectUp")) { + this.selectedIndex = this.selectedIndex === 0 ? this.messages.length - 1 : this.selectedIndex - 1; + } + // Down arrow - go to next (newer) message, wrap to top when at bottom + else if (kb.matches(keyData, "selectDown")) { + this.selectedIndex = this.selectedIndex === this.messages.length - 1 ? 
0 : this.selectedIndex + 1; + } + // Enter - select message and branch + else if (kb.matches(keyData, "selectConfirm")) { + const selected = this.messages[this.selectedIndex]; + if (selected && this.onSelect) { + this.onSelect(selected.id); + } + } + // Escape - cancel + else if (kb.matches(keyData, "selectCancel")) { + if (this.onCancel) { + this.onCancel(); + } + } + } +} + +/** + * Component that renders a user message selector for branching + */ +export class UserMessageSelectorComponent extends Container { + private messageList: UserMessageList; + + constructor(messages: UserMessageItem[], onSelect: (entryId: string) => void, onCancel: () => void) { + super(); + + // Add header + this.addChild(new Spacer(1)); + this.addChild(new Text(theme.bold("Branch from Message"), 1, 0)); + this.addChild(new Text(theme.fg("muted", "Select a message to create a new branch from that point"), 1, 0)); + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder()); + this.addChild(new Spacer(1)); + + // Create message list + this.messageList = new UserMessageList(messages); + this.messageList.onSelect = onSelect; + this.messageList.onCancel = onCancel; + + this.addChild(this.messageList); + + // Add bottom border + this.addChild(new Spacer(1)); + this.addChild(new DynamicBorder()); + + // Auto-cancel if no messages + if (messages.length === 0) { + setTimeout(() => onCancel(), 100); + } + } + + getMessageList(): UserMessageList { + return this.messageList; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/user-message.ts b/packages/pi-coding-agent/src/modes/interactive/components/user-message.ts new file mode 100644 index 000000000..a6de30a62 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/user-message.ts @@ -0,0 +1,32 @@ +import { Container, Markdown, type MarkdownTheme, Spacer } from "@gsd/pi-tui"; +import { getMarkdownTheme, theme } from "../theme/theme.js"; + +const OSC133_ZONE_START = "\x1b]133;A\x07"; +const 
OSC133_ZONE_END = "\x1b]133;B\x07"; + +/** + * Component that renders a user message + */ +export class UserMessageComponent extends Container { + constructor(text: string, markdownTheme: MarkdownTheme = getMarkdownTheme()) { + super(); + this.addChild(new Spacer(1)); + this.addChild( + new Markdown(text, 1, 1, markdownTheme, { + bgColor: (text: string) => theme.bg("userMessageBg", text), + color: (text: string) => theme.fg("userMessageText", text), + }), + ); + } + + override render(width: number): string[] { + const lines = super.render(width); + if (lines.length === 0) { + return lines; + } + + lines[0] = OSC133_ZONE_START + lines[0]; + lines[lines.length - 1] = lines[lines.length - 1] + OSC133_ZONE_END; + return lines; + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/components/visual-truncate.ts b/packages/pi-coding-agent/src/modes/interactive/components/visual-truncate.ts new file mode 100644 index 000000000..c609badfe --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/components/visual-truncate.ts @@ -0,0 +1,50 @@ +/** + * Shared utility for truncating text to visual lines (accounting for line wrapping). + * Used by both tool-execution.ts and bash-execution.ts for consistent behavior. + */ + +import { Text } from "@gsd/pi-tui"; + +export interface VisualTruncateResult { + /** The visual lines to display */ + visualLines: string[]; + /** Number of visual lines that were skipped (hidden) */ + skippedCount: number; +} + +/** + * Truncate text to a maximum number of visual lines (from the end). + * This accounts for line wrapping based on terminal width. + * + * @param text - The text content (may contain newlines) + * @param maxVisualLines - Maximum number of visual lines to show + * @param width - Terminal/render width + * @param paddingX - Horizontal padding for Text component (default 0). + * Use 0 when result will be placed in a Box (Box adds its own padding). + * Use 1 when result will be placed in a plain Container. 
+ * @returns The truncated visual lines and count of skipped lines + */ +export function truncateToVisualLines( + text: string, + maxVisualLines: number, + width: number, + paddingX: number = 0, +): VisualTruncateResult { + if (!text) { + return { visualLines: [], skippedCount: 0 }; + } + + // Create a temporary Text component to render and get visual lines + const tempText = new Text(text, paddingX, 0); + const allVisualLines = tempText.render(width); + + if (allVisualLines.length <= maxVisualLines) { + return { visualLines: allVisualLines, skippedCount: 0 }; + } + + // Take the last N visual lines + const truncatedLines = allVisualLines.slice(-maxVisualLines); + const skippedCount = allVisualLines.length - maxVisualLines; + + return { visualLines: truncatedLines, skippedCount }; +} diff --git a/packages/pi-coding-agent/src/modes/interactive/interactive-mode.ts b/packages/pi-coding-agent/src/modes/interactive/interactive-mode.ts new file mode 100644 index 000000000..30ec1a693 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/interactive-mode.ts @@ -0,0 +1,4464 @@ +/** + * Interactive mode for the coding agent. + * Handles TUI rendering and user interaction, delegating business logic to AgentSession. 
+ */ + +import * as crypto from "node:crypto"; +import * as fs from "node:fs"; +import * as os from "node:os"; +import * as path from "node:path"; +import type { AgentMessage } from "@gsd/pi-agent-core"; +import type { AssistantMessage, ImageContent, Message, Model, OAuthProviderId } from "@gsd/pi-ai"; +import type { + AutocompleteItem, + EditorAction, + EditorComponent, + EditorTheme, + KeyId, + MarkdownTheme, + OverlayHandle, + OverlayOptions, + SlashCommand, +} from "@gsd/pi-tui"; +import { + CombinedAutocompleteProvider, + type Component, + Container, + fuzzyFilter, + Loader, + Markdown, + matchesKey, + ProcessTerminal, + Spacer, + Text, + TruncatedText, + TUI, + visibleWidth, +} from "@gsd/pi-tui"; +import { spawn, spawnSync } from "child_process"; +import { + APP_NAME, + getAuthPath, + getDebugLogPath, + getShareViewerUrl, + getUpdateInstruction, + VERSION, +} from "../../config.js"; +import { type AgentSession, type AgentSessionEvent, parseSkillBlock } from "../../core/agent-session.js"; +import type { CompactionResult } from "../../core/compaction/index.js"; +import type { + ExtensionContext, + ExtensionRunner, + ExtensionUIContext, + ExtensionUIDialogOptions, + ExtensionWidgetOptions, +} from "../../core/extensions/index.js"; +import { FooterDataProvider, type ReadonlyFooterDataProvider } from "../../core/footer-data-provider.js"; +import { type AppAction, KeybindingsManager } from "../../core/keybindings.js"; +import { createCompactionSummaryMessage } from "../../core/messages.js"; +import { resolveModelScope } from "../../core/model-resolver.js"; +import type { ResourceDiagnostic } from "../../core/resource-loader.js"; +import { type SessionContext, SessionManager } from "../../core/session-manager.js"; +import { BUILTIN_SLASH_COMMANDS } from "../../core/slash-commands.js"; +import type { TruncationResult } from "../../core/tools/truncate.js"; +import { getChangelogPath, getNewEntries, parseChangelog } from "../../utils/changelog.js"; +import { 
copyToClipboard } from "../../utils/clipboard.js"; +import { extensionForImageMimeType, readClipboardImage } from "../../utils/clipboard-image.js"; +import { ensureTool } from "../../utils/tools-manager.js"; +import { ArminComponent } from "./components/armin.js"; +import { AssistantMessageComponent } from "./components/assistant-message.js"; +import { BashExecutionComponent } from "./components/bash-execution.js"; +import { BorderedLoader } from "./components/bordered-loader.js"; +import { BranchSummaryMessageComponent } from "./components/branch-summary-message.js"; +import { CompactionSummaryMessageComponent } from "./components/compaction-summary-message.js"; +import { CustomEditor } from "./components/custom-editor.js"; +import { CustomMessageComponent } from "./components/custom-message.js"; +import { DaxnutsComponent } from "./components/daxnuts.js"; +import { DynamicBorder } from "./components/dynamic-border.js"; +import { ExtensionEditorComponent } from "./components/extension-editor.js"; +import { ExtensionInputComponent } from "./components/extension-input.js"; +import { ExtensionSelectorComponent } from "./components/extension-selector.js"; +import { FooterComponent } from "./components/footer.js"; +import { appKey, appKeyHint, editorKey, keyHint, rawKeyHint } from "./components/keybinding-hints.js"; +import { LoginDialogComponent } from "./components/login-dialog.js"; +import { ModelSelectorComponent } from "./components/model-selector.js"; +import { OAuthSelectorComponent } from "./components/oauth-selector.js"; +import { ScopedModelsSelectorComponent } from "./components/scoped-models-selector.js"; +import { SessionSelectorComponent } from "./components/session-selector.js"; +import { SettingsSelectorComponent } from "./components/settings-selector.js"; +import { SkillInvocationMessageComponent } from "./components/skill-invocation-message.js"; +import { ToolExecutionComponent } from "./components/tool-execution.js"; +import { TreeSelectorComponent } 
from "./components/tree-selector.js"; +import { UserMessageComponent } from "./components/user-message.js"; +import { UserMessageSelectorComponent } from "./components/user-message-selector.js"; +import { + getAvailableThemes, + getAvailableThemesWithPaths, + getEditorTheme, + getMarkdownTheme, + getThemeByName, + initTheme, + onThemeChange, + setRegisteredThemes, + setTheme, + setThemeInstance, + Theme, + type ThemeColor, + theme, +} from "./theme/theme.js"; + +/** Interface for components that can be expanded/collapsed */ +interface Expandable { + setExpanded(expanded: boolean): void; +} + +function isExpandable(obj: unknown): obj is Expandable { + return typeof obj === "object" && obj !== null && "setExpanded" in obj && typeof obj.setExpanded === "function"; +} + +type CompactionQueuedMessage = { + text: string; + mode: "steer" | "followUp"; +}; + +/** + * Options for InteractiveMode initialization. + */ +export interface InteractiveModeOptions { + /** Providers that were migrated to auth.json (shows warning) */ + migratedProviders?: string[]; + /** Warning message if session model couldn't be restored */ + modelFallbackMessage?: string; + /** Initial message to send on startup (can include @file content) */ + initialMessage?: string; + /** Images to attach to the initial message */ + initialImages?: ImageContent[]; + /** Additional messages to send after the initial message */ + initialMessages?: string[]; + /** Force verbose startup (overrides quietStartup setting) */ + verbose?: boolean; +} + +export class InteractiveMode { + private session: AgentSession; + private ui: TUI; + private chatContainer: Container; + private pendingMessagesContainer: Container; + private statusContainer: Container; + private defaultEditor: CustomEditor; + private editor: EditorComponent; + private autocompleteProvider: CombinedAutocompleteProvider | undefined; + private fdPath: string | undefined; + private editorContainer: Container; + private footer: FooterComponent; + private 
footerDataProvider: FooterDataProvider; + private keybindings: KeybindingsManager; + private version: string; + private isInitialized = false; + private onInputCallback?: (text: string) => void; + private loadingAnimation: Loader | undefined = undefined; + private pendingWorkingMessage: string | undefined = undefined; + private readonly defaultWorkingMessage = "Working..."; + + private lastSigintTime = 0; + private lastEscapeTime = 0; + private changelogMarkdown: string | undefined = undefined; + + // Status line tracking (for mutating immediately-sequential status updates) + private lastStatusSpacer: Spacer | undefined = undefined; + private lastStatusText: Text | undefined = undefined; + + // Streaming message tracking + private streamingComponent: AssistantMessageComponent | undefined = undefined; + private streamingMessage: AssistantMessage | undefined = undefined; + + // Tool execution tracking: toolCallId -> component + private pendingTools = new Map(); + + // Tool output expansion state + private toolOutputExpanded = false; + + // Thinking block visibility state + private hideThinkingBlock = false; + + // Skill commands: command name -> skill file path + private skillCommands = new Map(); + + // Agent subscription unsubscribe function + private unsubscribe?: () => void; + + // Track if editor is in bash mode (text starts with !) 
+ private isBashMode = false; + + // Track current bash execution component + private bashComponent: BashExecutionComponent | undefined = undefined; + + // Track pending bash components (shown in pending area, moved to chat on submit) + private pendingBashComponents: BashExecutionComponent[] = []; + + // Auto-compaction state + private autoCompactionLoader: Loader | undefined = undefined; + private autoCompactionEscapeHandler?: () => void; + + // Auto-retry state + private retryLoader: Loader | undefined = undefined; + private retryEscapeHandler?: () => void; + + // Messages queued while compaction is running + private compactionQueuedMessages: CompactionQueuedMessage[] = []; + + // Shutdown state + private shutdownRequested = false; + + // Extension UI state + private extensionSelector: ExtensionSelectorComponent | undefined = undefined; + private extensionInput: ExtensionInputComponent | undefined = undefined; + private extensionEditor: ExtensionEditorComponent | undefined = undefined; + private extensionTerminalInputUnsubscribers = new Set<() => void>(); + + // Extension widgets (components rendered above/below the editor) + private extensionWidgetsAbove = new Map(); + private extensionWidgetsBelow = new Map(); + private widgetContainerAbove!: Container; + private widgetContainerBelow!: Container; + + // Custom footer from extension (undefined = use built-in footer) + private customFooter: (Component & { dispose?(): void }) | undefined = undefined; + + // Header container that holds the built-in or custom header + private headerContainer: Container; + + // Built-in header (logo + keybinding hints + changelog) + private builtInHeader: Component | undefined = undefined; + + // Custom header from extension (undefined = use built-in header) + private customHeader: (Component & { dispose?(): void }) | undefined = undefined; + + // Convenience accessors + private get agent() { + return this.session.agent; + } + private get sessionManager() { + return 
this.session.sessionManager; + } + private get settingsManager() { + return this.session.settingsManager; + } + + constructor( + session: AgentSession, + private options: InteractiveModeOptions = {}, + ) { + this.session = session; + this.version = VERSION; + this.ui = new TUI(new ProcessTerminal(), this.settingsManager.getShowHardwareCursor()); + this.ui.setClearOnShrink(this.settingsManager.getClearOnShrink()); + this.headerContainer = new Container(); + this.chatContainer = new Container(); + this.pendingMessagesContainer = new Container(); + this.statusContainer = new Container(); + this.widgetContainerAbove = new Container(); + this.widgetContainerBelow = new Container(); + this.keybindings = KeybindingsManager.create(); + const editorPaddingX = this.settingsManager.getEditorPaddingX(); + const autocompleteMaxVisible = this.settingsManager.getAutocompleteMaxVisible(); + this.defaultEditor = new CustomEditor(this.ui, getEditorTheme(), this.keybindings, { + paddingX: editorPaddingX, + autocompleteMaxVisible, + }); + this.editor = this.defaultEditor; + this.editorContainer = new Container(); + this.editorContainer.addChild(this.editor as Component); + this.footerDataProvider = new FooterDataProvider(); + this.footer = new FooterComponent(session, this.footerDataProvider); + this.footer.setAutoCompactEnabled(session.autoCompactionEnabled); + + // Load hide thinking block setting + this.hideThinkingBlock = this.settingsManager.getHideThinkingBlock(); + + // Register themes from resource loader and initialize + setRegisteredThemes(this.session.resourceLoader.getThemes().themes); + initTheme(this.settingsManager.getTheme(), true); + } + + private setupAutocomplete(fdPath: string | undefined): void { + // Define commands for autocomplete + const slashCommands: SlashCommand[] = BUILTIN_SLASH_COMMANDS.map((command) => ({ + name: command.name, + description: command.description, + })); + + const modelCommand = slashCommands.find((command) => command.name === "model"); + 
if (modelCommand) { + modelCommand.getArgumentCompletions = (prefix: string): AutocompleteItem[] | null => { + // Get available models (scoped or from registry) + const models = + this.session.scopedModels.length > 0 + ? this.session.scopedModels.map((s) => s.model) + : this.session.modelRegistry.getAvailable(); + + if (models.length === 0) return null; + + // Create items with provider/id format + const items = models.map((m) => ({ + id: m.id, + provider: m.provider, + label: `${m.provider}/${m.id}`, + })); + + // Fuzzy filter by model ID + provider (allows "opus anthropic" to match) + const filtered = fuzzyFilter(items, prefix, (item) => `${item.id} ${item.provider}`); + + if (filtered.length === 0) return null; + + return filtered.map((item) => ({ + value: item.label, + label: item.id, + description: item.provider, + })); + }; + } + + // Convert prompt templates to SlashCommand format for autocomplete + const templateCommands: SlashCommand[] = this.session.promptTemplates.map((cmd) => ({ + name: cmd.name, + description: cmd.description, + })); + + // Convert extension commands to SlashCommand format + const builtinCommandNames = new Set(slashCommands.map((c) => c.name)); + const extensionCommands: SlashCommand[] = ( + this.session.extensionRunner?.getRegisteredCommands(builtinCommandNames) ?? [] + ).map((cmd) => ({ + name: cmd.name, + description: cmd.description ?? 
"(extension command)", + getArgumentCompletions: cmd.getArgumentCompletions, + })); + + // Build skill commands from session.skills (if enabled) + this.skillCommands.clear(); + const skillCommandList: SlashCommand[] = []; + if (this.settingsManager.getEnableSkillCommands()) { + for (const skill of this.session.resourceLoader.getSkills().skills) { + const commandName = `skill:${skill.name}`; + this.skillCommands.set(commandName, skill.filePath); + skillCommandList.push({ name: commandName, description: skill.description }); + } + } + + // Setup autocomplete + this.autocompleteProvider = new CombinedAutocompleteProvider( + [...slashCommands, ...templateCommands, ...extensionCommands, ...skillCommandList], + process.cwd(), + fdPath, + ); + this.defaultEditor.setAutocompleteProvider(this.autocompleteProvider); + if (this.editor !== this.defaultEditor) { + this.editor.setAutocompleteProvider?.(this.autocompleteProvider); + } + } + + async init(): Promise { + if (this.isInitialized) return; + + // Load changelog (only show new entries, skip for resumed sessions) + this.changelogMarkdown = this.getChangelogForDisplay(); + + // Ensure fd and rg are available (downloads if missing, adds to PATH via getBinDir) + // Both are needed: fd for autocomplete, rg for grep tool and bash commands + const [fdPath] = await Promise.all([ensureTool("fd"), ensureTool("rg")]); + this.fdPath = fdPath; + + // Add header container as first child + this.ui.addChild(this.headerContainer); + + // Add header with keybindings from config (unless silenced) + if (this.options.verbose || !this.settingsManager.getQuietStartup()) { + const logo = theme.bold(theme.fg("accent", APP_NAME)) + theme.fg("dim", ` v${this.version}`); + + // Build startup instructions using keybinding hint helpers + const kb = this.keybindings; + const hint = (action: AppAction, desc: string) => appKeyHint(kb, action, desc); + + const instructions = [ + hint("interrupt", "to interrupt"), + hint("clear", "to clear"), + 
rawKeyHint(`${appKey(kb, "clear")} twice`, "to exit"), + hint("exit", "to exit (empty)"), + hint("suspend", "to suspend"), + keyHint("deleteToLineEnd", "to delete to end"), + hint("cycleThinkingLevel", "to cycle thinking level"), + rawKeyHint(`${appKey(kb, "cycleModelForward")}/${appKey(kb, "cycleModelBackward")}`, "to cycle models"), + hint("selectModel", "to select model"), + hint("expandTools", "to expand tools"), + hint("toggleThinking", "to expand thinking"), + hint("externalEditor", "for external editor"), + rawKeyHint("/", "for commands"), + rawKeyHint("!", "to run bash"), + rawKeyHint("!!", "to run bash (no context)"), + hint("followUp", "to queue follow-up"), + hint("dequeue", "to edit all queued messages"), + hint("pasteImage", "to paste image"), + rawKeyHint("drop files", "to attach"), + ].join("\n"); + this.builtInHeader = new Text(`${logo}\n${instructions}`, 1, 0); + + // Setup UI layout + this.headerContainer.addChild(new Spacer(1)); + this.headerContainer.addChild(this.builtInHeader); + this.headerContainer.addChild(new Spacer(1)); + + // Add changelog if provided + if (this.changelogMarkdown) { + this.headerContainer.addChild(new DynamicBorder()); + if (this.settingsManager.getCollapseChangelog()) { + const versionMatch = this.changelogMarkdown.match(/##\s+\[?(\d+\.\d+\.\d+)\]?/); + const latestVersion = versionMatch ? versionMatch[1] : this.version; + const condensedText = `Updated to v${latestVersion}. 
Use ${theme.bold("/changelog")} to view full changelog.`; + this.headerContainer.addChild(new Text(condensedText, 1, 0)); + } else { + this.headerContainer.addChild(new Text(theme.bold(theme.fg("accent", "What's New")), 1, 0)); + this.headerContainer.addChild(new Spacer(1)); + this.headerContainer.addChild( + new Markdown(this.changelogMarkdown.trim(), 1, 0, this.getMarkdownThemeWithSettings()), + ); + this.headerContainer.addChild(new Spacer(1)); + } + this.headerContainer.addChild(new DynamicBorder()); + } + } else { + // Minimal header when silenced + this.builtInHeader = new Text("", 0, 0); + this.headerContainer.addChild(this.builtInHeader); + if (this.changelogMarkdown) { + // Still show changelog notification even in silent mode + this.headerContainer.addChild(new Spacer(1)); + const versionMatch = this.changelogMarkdown.match(/##\s+\[?(\d+\.\d+\.\d+)\]?/); + const latestVersion = versionMatch ? versionMatch[1] : this.version; + const condensedText = `Updated to v${latestVersion}. 
Use ${theme.bold("/changelog")} to view full changelog.`; + this.headerContainer.addChild(new Text(condensedText, 1, 0)); + } + } + + this.ui.addChild(this.chatContainer); + this.ui.addChild(this.pendingMessagesContainer); + this.ui.addChild(this.statusContainer); + this.renderWidgets(); // Initialize with default spacer + this.ui.addChild(this.widgetContainerAbove); + this.ui.addChild(this.editorContainer); + this.ui.addChild(this.widgetContainerBelow); + this.ui.addChild(this.footer); + this.ui.setFocus(this.editor); + + this.setupKeyHandlers(); + this.setupEditorSubmitHandler(); + + // Initialize extensions first so resources are shown before messages + await this.initExtensions(); + + // Render initial messages AFTER showing loaded resources + this.renderInitialMessages(); + + // Start the UI + this.ui.start(); + this.isInitialized = true; + + // Set terminal title + this.updateTerminalTitle(); + + // Subscribe to agent events + this.subscribeToAgent(); + + // Set up theme file watcher + onThemeChange(() => { + this.ui.invalidate(); + this.updateEditorBorderColor(); + this.ui.requestRender(); + }); + + // Set up git branch watcher (uses provider instead of footer) + this.footerDataProvider.onBranchChange(() => { + this.ui.requestRender(); + }); + + // Initialize available provider count for footer display + await this.updateAvailableProviderCount(); + } + + /** + * Update terminal title with session name and cwd. + */ + private updateTerminalTitle(): void { + const cwdBasename = path.basename(process.cwd()); + const sessionName = this.sessionManager.getSessionName(); + if (sessionName) { + this.ui.terminal.setTitle(`π - ${sessionName} - ${cwdBasename}`); + } else { + this.ui.terminal.setTitle(`π - ${cwdBasename}`); + } + } + + /** + * Run the interactive mode. This is the main entry point. + * Initializes the UI, shows warnings, processes initial messages, and starts the interactive loop. 
+ */ + async run(): Promise { + await this.init(); + + // Start version check asynchronously + this.checkForNewVersion().then((newVersion) => { + if (newVersion) { + this.showNewVersionNotification(newVersion); + } + }); + + // Check tmux keyboard setup asynchronously + this.checkTmuxKeyboardSetup().then((warning) => { + if (warning) { + this.showWarning(warning); + } + }); + + // Show startup warnings + const { migratedProviders, modelFallbackMessage, initialMessage, initialImages, initialMessages } = this.options; + + if (migratedProviders && migratedProviders.length > 0) { + this.showWarning(`Migrated credentials to auth.json: ${migratedProviders.join(", ")}`); + } + + const modelsJsonError = this.session.modelRegistry.getError(); + if (modelsJsonError) { + this.showError(`models.json error: ${modelsJsonError}`); + } + + if (modelFallbackMessage) { + this.showWarning(modelFallbackMessage); + } + + // Process initial messages + if (initialMessage) { + try { + await this.session.prompt(initialMessage, { images: initialImages }); + } catch (error: unknown) { + const errorMessage = error instanceof Error ? error.message : "Unknown error occurred"; + this.showError(errorMessage); + } + } + + if (initialMessages) { + for (const message of initialMessages) { + try { + await this.session.prompt(message); + } catch (error: unknown) { + const errorMessage = error instanceof Error ? error.message : "Unknown error occurred"; + this.showError(errorMessage); + } + } + } + + // Main interactive loop + while (true) { + const userInput = await this.getUserInput(); + try { + await this.session.prompt(userInput); + } catch (error: unknown) { + const errorMessage = error instanceof Error ? error.message : "Unknown error occurred"; + this.showError(errorMessage); + } + } + } + + /** + * Check npm registry for a newer version. 
+ */ + private async checkForNewVersion(): Promise { + if (process.env.PI_SKIP_VERSION_CHECK || process.env.PI_OFFLINE) return undefined; + + try { + const response = await fetch("https://registry.npmjs.org/@gsd/pi-coding-agent/latest", { + signal: AbortSignal.timeout(10000), + }); + if (!response.ok) return undefined; + + const data = (await response.json()) as { version?: string }; + const latestVersion = data.version; + + if (latestVersion && latestVersion !== this.version) { + return latestVersion; + } + + return undefined; + } catch { + return undefined; + } + } + + private async checkTmuxKeyboardSetup(): Promise { + if (!process.env.TMUX) return undefined; + + const runTmuxShow = (option: string): Promise => { + return new Promise((resolve) => { + const proc = spawn("tmux", ["show", "-gv", option], { + stdio: ["ignore", "pipe", "ignore"], + }); + let stdout = ""; + const timer = setTimeout(() => { + proc.kill(); + resolve(undefined); + }, 2000); + + proc.stdout?.on("data", (data) => { + stdout += data.toString(); + }); + proc.on("error", () => { + clearTimeout(timer); + resolve(undefined); + }); + proc.on("close", (code) => { + clearTimeout(timer); + resolve(code === 0 ? stdout.trim() : undefined); + }); + }); + }; + + const [extendedKeys, extendedKeysFormat] = await Promise.all([ + runTmuxShow("extended-keys"), + runTmuxShow("extended-keys-format"), + ]); + + if (extendedKeys !== "on" && extendedKeys !== "always") { + return "tmux extended-keys is off. Modified Enter keys may not work. Add `set -g extended-keys on` to ~/.tmux.conf and restart tmux."; + } + + if (extendedKeysFormat === "xterm") { + return "tmux extended-keys-format is xterm. Pi works best with csi-u. Add `set -g extended-keys-format csi-u` to ~/.tmux.conf and restart tmux."; + } + + return undefined; + } + + /** + * Get changelog entries to display on startup. + * Only shows new entries since last seen version, skips for resumed sessions. 
+ */ + private getChangelogForDisplay(): string | undefined { + // Skip changelog for resumed/continued sessions (already have messages) + if (this.session.state.messages.length > 0) { + return undefined; + } + + const lastVersion = this.settingsManager.getLastChangelogVersion(); + const changelogPath = getChangelogPath(); + const entries = parseChangelog(changelogPath); + + if (!lastVersion) { + // Fresh install - just record the version, don't show changelog + this.settingsManager.setLastChangelogVersion(VERSION); + return undefined; + } else { + const newEntries = getNewEntries(entries, lastVersion); + if (newEntries.length > 0) { + this.settingsManager.setLastChangelogVersion(VERSION); + return newEntries.map((e) => e.content).join("\n\n"); + } + } + + return undefined; + } + + private getMarkdownThemeWithSettings(): MarkdownTheme { + return { + ...getMarkdownTheme(), + codeBlockIndent: this.settingsManager.getCodeBlockIndent(), + }; + } + + // ========================================================================= + // Extension System + // ========================================================================= + + private formatDisplayPath(p: string): string { + const home = os.homedir(); + let result = p; + + // Replace home directory with ~ + if (result.startsWith(home)) { + result = `~${result.slice(home.length)}`; + } + + return result; + } + + /** + * Get a short path relative to the package root for display. 
+ */ + private getShortPath(fullPath: string, source: string): string { + // For npm packages, show path relative to node_modules/pkg/ + const npmMatch = fullPath.match(/node_modules\/(@?[^/]+(?:\/[^/]+)?)\/(.*)/); + if (npmMatch && source.startsWith("npm:")) { + return npmMatch[2]; + } + + // For git packages, show path relative to repo root + const gitMatch = fullPath.match(/git\/[^/]+\/[^/]+\/(.*)/); + if (gitMatch && source.startsWith("git:")) { + return gitMatch[1]; + } + + // For local/auto, just use formatDisplayPath + return this.formatDisplayPath(fullPath); + } + + private getDisplaySourceInfo( + source: string, + scope: string, + ): { label: string; scopeLabel?: string; color: "accent" | "muted" } { + if (source === "local") { + if (scope === "user") { + return { label: "user", color: "muted" }; + } + if (scope === "project") { + return { label: "project", color: "muted" }; + } + if (scope === "temporary") { + return { label: "path", scopeLabel: "temp", color: "muted" }; + } + return { label: "path", color: "muted" }; + } + + if (source === "cli") { + return { label: "path", scopeLabel: scope === "temporary" ? "temp" : undefined, color: "muted" }; + } + + const scopeLabel = + scope === "user" ? "user" : scope === "project" ? "project" : scope === "temporary" ? 
"temp" : undefined; + return { label: source, scopeLabel, color: "accent" }; + } + + private getScopeGroup(source: string, scope: string): "user" | "project" | "path" { + if (source === "cli" || scope === "temporary") return "path"; + if (scope === "user") return "user"; + if (scope === "project") return "project"; + return "path"; + } + + private isPackageSource(source: string): boolean { + return source.startsWith("npm:") || source.startsWith("git:"); + } + + private buildScopeGroups( + paths: string[], + metadata: Map, + ): Array<{ scope: "user" | "project" | "path"; paths: string[]; packages: Map }> { + const groups: Record< + "user" | "project" | "path", + { scope: "user" | "project" | "path"; paths: string[]; packages: Map } + > = { + user: { scope: "user", paths: [], packages: new Map() }, + project: { scope: "project", paths: [], packages: new Map() }, + path: { scope: "path", paths: [], packages: new Map() }, + }; + + for (const p of paths) { + const meta = this.findMetadata(p, metadata); + const source = meta?.source ?? "local"; + const scope = meta?.scope ?? "project"; + const groupKey = this.getScopeGroup(source, scope); + const group = groups[groupKey]; + + if (this.isPackageSource(source)) { + const list = group.packages.get(source) ?? 
[]; + list.push(p); + group.packages.set(source, list); + } else { + group.paths.push(p); + } + } + + return [groups.project, groups.user, groups.path].filter( + (group) => group.paths.length > 0 || group.packages.size > 0, + ); + } + + private formatScopeGroups( + groups: Array<{ scope: "user" | "project" | "path"; paths: string[]; packages: Map }>, + options: { + formatPath: (p: string) => string; + formatPackagePath: (p: string, source: string) => string; + }, + ): string { + const lines: string[] = []; + + for (const group of groups) { + lines.push(` ${theme.fg("accent", group.scope)}`); + + const sortedPaths = [...group.paths].sort((a, b) => a.localeCompare(b)); + for (const p of sortedPaths) { + lines.push(theme.fg("dim", ` ${options.formatPath(p)}`)); + } + + const sortedPackages = Array.from(group.packages.entries()).sort(([a], [b]) => a.localeCompare(b)); + for (const [source, paths] of sortedPackages) { + lines.push(` ${theme.fg("mdLink", source)}`); + const sortedPackagePaths = [...paths].sort((a, b) => a.localeCompare(b)); + for (const p of sortedPackagePaths) { + lines.push(theme.fg("dim", ` ${options.formatPackagePath(p, source)}`)); + } + } + } + + return lines.join("\n"); + } + + /** + * Find metadata for a path, checking parent directories if exact match fails. + * Package manager stores metadata for directories, but we display file paths. + */ + private findMetadata( + p: string, + metadata: Map, + ): { source: string; scope: string; origin: string } | undefined { + // Try exact match first + const exact = metadata.get(p); + if (exact) return exact; + + // Try parent directories (package manager stores directory paths) + let current = p; + while (current.includes("/")) { + current = current.substring(0, current.lastIndexOf("/")); + const parent = metadata.get(current); + if (parent) return parent; + } + + return undefined; + } + + /** + * Format a path with its source/scope info from metadata. 
+ */ + private formatPathWithSource( + p: string, + metadata: Map, + ): string { + const meta = this.findMetadata(p, metadata); + if (meta) { + const shortPath = this.getShortPath(p, meta.source); + const { label, scopeLabel } = this.getDisplaySourceInfo(meta.source, meta.scope); + const labelText = scopeLabel ? `${label} (${scopeLabel})` : label; + return `${labelText} ${shortPath}`; + } + return this.formatDisplayPath(p); + } + + /** + * Format resource diagnostics with nice collision display using metadata. + */ + private formatDiagnostics( + diagnostics: readonly ResourceDiagnostic[], + metadata: Map, + ): string { + const lines: string[] = []; + + // Group collision diagnostics by name + const collisions = new Map(); + const otherDiagnostics: ResourceDiagnostic[] = []; + + for (const d of diagnostics) { + if (d.type === "collision" && d.collision) { + const list = collisions.get(d.collision.name) ?? []; + list.push(d); + collisions.set(d.collision.name, list); + } else { + otherDiagnostics.push(d); + } + } + + // Format collision diagnostics grouped by name + for (const [name, collisionList] of collisions) { + const first = collisionList[0]?.collision; + if (!first) continue; + lines.push(theme.fg("warning", ` "${name}" collision:`)); + // Show winner + lines.push( + theme.fg("dim", ` ${theme.fg("success", "✓")} ${this.formatPathWithSource(first.winnerPath, metadata)}`), + ); + // Show all losers + for (const d of collisionList) { + if (d.collision) { + lines.push( + theme.fg( + "dim", + ` ${theme.fg("warning", "✗")} ${this.formatPathWithSource(d.collision.loserPath, metadata)} (skipped)`, + ), + ); + } + } + } + + // Format other diagnostics (skill name collisions, parse errors, etc.) + for (const d of otherDiagnostics) { + if (d.path) { + // Use metadata-aware formatting for paths + const sourceInfo = this.formatPathWithSource(d.path, metadata); + lines.push(theme.fg(d.type === "error" ? 
"error" : "warning", ` ${sourceInfo}`)); + lines.push(theme.fg(d.type === "error" ? "error" : "warning", ` ${d.message}`)); + } else { + lines.push(theme.fg(d.type === "error" ? "error" : "warning", ` ${d.message}`)); + } + } + + return lines.join("\n"); + } + + private showLoadedResources(options?: { + extensionPaths?: string[]; + force?: boolean; + showDiagnosticsWhenQuiet?: boolean; + }): void { + const showListing = options?.force || this.options.verbose || !this.settingsManager.getQuietStartup(); + const showDiagnostics = showListing || options?.showDiagnosticsWhenQuiet === true; + if (!showListing && !showDiagnostics) { + return; + } + + const metadata = this.session.resourceLoader.getPathMetadata(); + const sectionHeader = (name: string, color: ThemeColor = "mdHeading") => theme.fg(color, `[${name}]`); + + const skillsResult = this.session.resourceLoader.getSkills(); + const promptsResult = this.session.resourceLoader.getPrompts(); + const themesResult = this.session.resourceLoader.getThemes(); + + if (showListing) { + const contextFiles = this.session.resourceLoader.getAgentsFiles().agentsFiles; + if (contextFiles.length > 0) { + this.chatContainer.addChild(new Spacer(1)); + const contextList = contextFiles + .map((f) => theme.fg("dim", ` ${this.formatDisplayPath(f.path)}`)) + .join("\n"); + this.chatContainer.addChild(new Text(`${sectionHeader("Context")}\n${contextList}`, 0, 0)); + this.chatContainer.addChild(new Spacer(1)); + } + + const skills = skillsResult.skills; + if (skills.length > 0) { + const skillPaths = skills.map((s) => s.filePath); + const groups = this.buildScopeGroups(skillPaths, metadata); + const skillList = this.formatScopeGroups(groups, { + formatPath: (p) => this.formatDisplayPath(p), + formatPackagePath: (p, source) => this.getShortPath(p, source), + }); + this.chatContainer.addChild(new Text(`${sectionHeader("Skills")}\n${skillList}`, 0, 0)); + this.chatContainer.addChild(new Spacer(1)); + } + + const templates = 
this.session.promptTemplates; + if (templates.length > 0) { + const templatePaths = templates.map((t) => t.filePath); + const groups = this.buildScopeGroups(templatePaths, metadata); + const templateByPath = new Map(templates.map((t) => [t.filePath, t])); + const templateList = this.formatScopeGroups(groups, { + formatPath: (p) => { + const template = templateByPath.get(p); + return template ? `/${template.name}` : this.formatDisplayPath(p); + }, + formatPackagePath: (p) => { + const template = templateByPath.get(p); + return template ? `/${template.name}` : this.formatDisplayPath(p); + }, + }); + this.chatContainer.addChild(new Text(`${sectionHeader("Prompts")}\n${templateList}`, 0, 0)); + this.chatContainer.addChild(new Spacer(1)); + } + + const extensionPaths = options?.extensionPaths ?? []; + if (extensionPaths.length > 0) { + const groups = this.buildScopeGroups(extensionPaths, metadata); + const extList = this.formatScopeGroups(groups, { + formatPath: (p) => this.formatDisplayPath(p), + formatPackagePath: (p, source) => this.getShortPath(p, source), + }); + this.chatContainer.addChild(new Text(`${sectionHeader("Extensions", "mdHeading")}\n${extList}`, 0, 0)); + this.chatContainer.addChild(new Spacer(1)); + } + + // Show loaded themes (excluding built-in) + const loadedThemes = themesResult.themes; + const customThemes = loadedThemes.filter((t) => t.sourcePath); + if (customThemes.length > 0) { + const themePaths = customThemes.map((t) => t.sourcePath!); + const groups = this.buildScopeGroups(themePaths, metadata); + const themeList = this.formatScopeGroups(groups, { + formatPath: (p) => this.formatDisplayPath(p), + formatPackagePath: (p, source) => this.getShortPath(p, source), + }); + this.chatContainer.addChild(new Text(`${sectionHeader("Themes")}\n${themeList}`, 0, 0)); + this.chatContainer.addChild(new Spacer(1)); + } + } + + if (showDiagnostics) { + const skillDiagnostics = skillsResult.diagnostics; + if (skillDiagnostics.length > 0) { + const 
warningLines = this.formatDiagnostics(skillDiagnostics, metadata); + this.chatContainer.addChild(new Text(`${theme.fg("warning", "[Skill conflicts]")}\n${warningLines}`, 0, 0)); + this.chatContainer.addChild(new Spacer(1)); + } + + const promptDiagnostics = promptsResult.diagnostics; + if (promptDiagnostics.length > 0) { + const warningLines = this.formatDiagnostics(promptDiagnostics, metadata); + this.chatContainer.addChild( + new Text(`${theme.fg("warning", "[Prompt conflicts]")}\n${warningLines}`, 0, 0), + ); + this.chatContainer.addChild(new Spacer(1)); + } + + const extensionDiagnostics: ResourceDiagnostic[] = []; + const extensionErrors = this.session.resourceLoader.getExtensions().errors; + if (extensionErrors.length > 0) { + for (const error of extensionErrors) { + extensionDiagnostics.push({ type: "error", message: error.error, path: error.path }); + } + } + + const commandDiagnostics = this.session.extensionRunner?.getCommandDiagnostics() ?? []; + extensionDiagnostics.push(...commandDiagnostics); + + const shortcutDiagnostics = this.session.extensionRunner?.getShortcutDiagnostics() ?? []; + extensionDiagnostics.push(...shortcutDiagnostics); + + if (extensionDiagnostics.length > 0) { + const warningLines = this.formatDiagnostics(extensionDiagnostics, metadata); + this.chatContainer.addChild( + new Text(`${theme.fg("warning", "[Extension issues]")}\n${warningLines}`, 0, 0), + ); + this.chatContainer.addChild(new Spacer(1)); + } + + const themeDiagnostics = themesResult.diagnostics; + if (themeDiagnostics.length > 0) { + const warningLines = this.formatDiagnostics(themeDiagnostics, metadata); + this.chatContainer.addChild(new Text(`${theme.fg("warning", "[Theme conflicts]")}\n${warningLines}`, 0, 0)); + this.chatContainer.addChild(new Spacer(1)); + } + } + } + + /** + * Initialize the extension system with TUI-based UI context. 
+ */ + private async initExtensions(): Promise { + const uiContext = this.createExtensionUIContext(); + await this.session.bindExtensions({ + uiContext, + commandContextActions: { + waitForIdle: () => this.session.agent.waitForIdle(), + newSession: async (options) => { + if (this.loadingAnimation) { + this.loadingAnimation.stop(); + this.loadingAnimation = undefined; + } + this.statusContainer.clear(); + + // Delegate to AgentSession (handles setup + agent state sync) + const success = await this.session.newSession(options); + if (!success) { + return { cancelled: true }; + } + + // Clear UI state + this.chatContainer.clear(); + this.pendingMessagesContainer.clear(); + this.compactionQueuedMessages = []; + this.streamingComponent = undefined; + this.streamingMessage = undefined; + this.pendingTools.clear(); + + // Render any messages added via setup, or show empty session + this.renderInitialMessages(); + this.ui.requestRender(); + + return { cancelled: false }; + }, + fork: async (entryId) => { + const result = await this.session.fork(entryId); + if (result.cancelled) { + return { cancelled: true }; + } + + this.chatContainer.clear(); + this.renderInitialMessages(); + this.editor.setText(result.selectedText); + this.showStatus("Forked to new session"); + + return { cancelled: false }; + }, + navigateTree: async (targetId, options) => { + const result = await this.session.navigateTree(targetId, { + summarize: options?.summarize, + customInstructions: options?.customInstructions, + replaceInstructions: options?.replaceInstructions, + label: options?.label, + }); + if (result.cancelled) { + return { cancelled: true }; + } + + this.chatContainer.clear(); + this.renderInitialMessages(); + if (result.editorText && !this.editor.getText().trim()) { + this.editor.setText(result.editorText); + } + this.showStatus("Navigated to selected point"); + + return { cancelled: false }; + }, + switchSession: async (sessionPath) => { + await this.handleResumeSession(sessionPath); + 
return { cancelled: false }; + }, + reload: async () => { + await this.handleReloadCommand(); + }, + }, + shutdownHandler: () => { + this.shutdownRequested = true; + if (!this.session.isStreaming) { + void this.shutdown(); + } + }, + onError: (error) => { + this.showExtensionError(error.extensionPath, error.error, error.stack); + }, + }); + + setRegisteredThemes(this.session.resourceLoader.getThemes().themes); + this.setupAutocomplete(this.fdPath); + + const extensionRunner = this.session.extensionRunner; + if (!extensionRunner) { + this.showLoadedResources({ extensionPaths: [], force: false }); + return; + } + + this.setupExtensionShortcuts(extensionRunner); + this.showLoadedResources({ extensionPaths: extensionRunner.getExtensionPaths(), force: false }); + } + + /** + * Get a registered tool definition by name (for custom rendering). + */ + private getRegisteredToolDefinition(toolName: string) { + const tools = this.session.extensionRunner?.getAllRegisteredTools() ?? []; + const registeredTool = tools.find((t) => t.definition.name === toolName); + return registeredTool?.definition; + } + + /** + * Set up keyboard shortcuts registered by extensions. 
+ */ + private setupExtensionShortcuts(extensionRunner: ExtensionRunner): void { + const shortcuts = extensionRunner.getShortcuts(this.keybindings.getEffectiveConfig()); + if (shortcuts.size === 0) return; + + // Create a context for shortcut handlers + const createContext = (): ExtensionContext => ({ + ui: this.createExtensionUIContext(), + hasUI: true, + cwd: process.cwd(), + sessionManager: this.sessionManager, + modelRegistry: this.session.modelRegistry, + model: this.session.model, + isIdle: () => !this.session.isStreaming, + abort: () => this.session.abort(), + hasPendingMessages: () => this.session.pendingMessageCount > 0, + shutdown: () => { + this.shutdownRequested = true; + }, + getContextUsage: () => this.session.getContextUsage(), + compact: (options) => { + void (async () => { + try { + const result = await this.executeCompaction(options?.customInstructions, false); + if (result) { + options?.onComplete?.(result); + } + } catch (error) { + const err = error instanceof Error ? error : new Error(String(error)); + options?.onError?.(err); + } + })(); + }, + getSystemPrompt: () => this.session.systemPrompt, + }); + + // Set up the extension shortcut handler on the default editor + this.defaultEditor.onExtensionShortcut = (data: string) => { + for (const [shortcutStr, shortcut] of shortcuts) { + // Cast to KeyId - extension shortcuts use the same format + if (matchesKey(data, shortcutStr as KeyId)) { + // Run handler async, don't block input + Promise.resolve(shortcut.handler(createContext())).catch((err) => { + this.showError(`Shortcut handler error: ${err instanceof Error ? err.message : String(err)}`); + }); + return true; + } + } + return false; + }; + } + + /** + * Set extension status text in the footer. + */ + private setExtensionStatus(key: string, text: string | undefined): void { + this.footerDataProvider.setExtensionStatus(key, text); + this.ui.requestRender(); + } + + /** + * Set an extension widget (string array or custom component). 
+ */ + private setExtensionWidget( + key: string, + content: string[] | ((tui: TUI, thm: Theme) => Component & { dispose?(): void }) | undefined, + options?: ExtensionWidgetOptions, + ): void { + const placement = options?.placement ?? "aboveEditor"; + const removeExisting = (map: Map) => { + const existing = map.get(key); + if (existing?.dispose) existing.dispose(); + map.delete(key); + }; + + removeExisting(this.extensionWidgetsAbove); + removeExisting(this.extensionWidgetsBelow); + + if (content === undefined) { + this.renderWidgets(); + return; + } + + let component: Component & { dispose?(): void }; + + if (Array.isArray(content)) { + // Wrap string array in a Container with Text components + const container = new Container(); + for (const line of content.slice(0, InteractiveMode.MAX_WIDGET_LINES)) { + container.addChild(new Text(line, 1, 0)); + } + if (content.length > InteractiveMode.MAX_WIDGET_LINES) { + container.addChild(new Text(theme.fg("muted", "... (widget truncated)"), 1, 0)); + } + component = container; + } else { + // Factory function - create component + component = content(this.ui, theme); + } + + const targetMap = placement === "belowEditor" ? 
this.extensionWidgetsBelow : this.extensionWidgetsAbove; + targetMap.set(key, component); + this.renderWidgets(); + } + + private clearExtensionWidgets(): void { + for (const widget of this.extensionWidgetsAbove.values()) { + widget.dispose?.(); + } + for (const widget of this.extensionWidgetsBelow.values()) { + widget.dispose?.(); + } + this.extensionWidgetsAbove.clear(); + this.extensionWidgetsBelow.clear(); + this.renderWidgets(); + } + + private resetExtensionUI(): void { + if (this.extensionSelector) { + this.hideExtensionSelector(); + } + if (this.extensionInput) { + this.hideExtensionInput(); + } + if (this.extensionEditor) { + this.hideExtensionEditor(); + } + this.ui.hideOverlay(); + this.clearExtensionTerminalInputListeners(); + this.setExtensionFooter(undefined); + this.setExtensionHeader(undefined); + this.clearExtensionWidgets(); + this.footerDataProvider.clearExtensionStatuses(); + this.footer.invalidate(); + this.setCustomEditorComponent(undefined); + this.defaultEditor.onExtensionShortcut = undefined; + this.updateTerminalTitle(); + if (this.loadingAnimation) { + this.loadingAnimation.setMessage( + `${this.defaultWorkingMessage} (${appKey(this.keybindings, "interrupt")} to interrupt)`, + ); + } + } + + // Maximum total widget lines to prevent viewport overflow + private static readonly MAX_WIDGET_LINES = 10; + + /** + * Render all extension widgets to the widget container. 
+ */ + private renderWidgets(): void { + if (!this.widgetContainerAbove || !this.widgetContainerBelow) return; + this.renderWidgetContainer(this.widgetContainerAbove, this.extensionWidgetsAbove, true, true); + this.renderWidgetContainer(this.widgetContainerBelow, this.extensionWidgetsBelow, false, false); + this.ui.requestRender(); + } + + private renderWidgetContainer( + container: Container, + widgets: Map, + spacerWhenEmpty: boolean, + leadingSpacer: boolean, + ): void { + container.clear(); + + if (widgets.size === 0) { + if (spacerWhenEmpty) { + container.addChild(new Spacer(1)); + } + return; + } + + if (leadingSpacer) { + container.addChild(new Spacer(1)); + } + for (const component of widgets.values()) { + container.addChild(component); + } + } + + /** + * Set a custom footer component, or restore the built-in footer. + */ + private setExtensionFooter( + factory: + | ((tui: TUI, thm: Theme, footerData: ReadonlyFooterDataProvider) => Component & { dispose?(): void }) + | undefined, + ): void { + // Dispose existing custom footer + if (this.customFooter?.dispose) { + this.customFooter.dispose(); + } + + // Remove current footer from UI + if (this.customFooter) { + this.ui.removeChild(this.customFooter); + } else { + this.ui.removeChild(this.footer); + } + + if (factory) { + // Create and add custom footer, passing the data provider + this.customFooter = factory(this.ui, theme, this.footerDataProvider); + this.ui.addChild(this.customFooter); + } else { + // Restore built-in footer + this.customFooter = undefined; + this.ui.addChild(this.footer); + } + + this.ui.requestRender(); + } + + /** + * Set a custom header component, or restore the built-in header. 
+ */ + private setExtensionHeader(factory: ((tui: TUI, thm: Theme) => Component & { dispose?(): void }) | undefined): void { + // Header may not be initialized yet if called during early initialization + if (!this.builtInHeader) { + return; + } + + // Dispose existing custom header + if (this.customHeader?.dispose) { + this.customHeader.dispose(); + } + + // Find the index of the current header in the header container + const currentHeader = this.customHeader || this.builtInHeader; + const index = this.headerContainer.children.indexOf(currentHeader); + + if (factory) { + // Create and add custom header + this.customHeader = factory(this.ui, theme); + if (index !== -1) { + this.headerContainer.children[index] = this.customHeader; + } else { + // If not found (e.g. builtInHeader was never added), add at the top + this.headerContainer.children.unshift(this.customHeader); + } + } else { + // Restore built-in header + this.customHeader = undefined; + if (index !== -1) { + this.headerContainer.children[index] = this.builtInHeader; + } + } + + this.ui.requestRender(); + } + + private addExtensionTerminalInputListener( + handler: (data: string) => { consume?: boolean; data?: string } | undefined, + ): () => void { + const unsubscribe = this.ui.addInputListener(handler); + this.extensionTerminalInputUnsubscribers.add(unsubscribe); + return () => { + unsubscribe(); + this.extensionTerminalInputUnsubscribers.delete(unsubscribe); + }; + } + + private clearExtensionTerminalInputListeners(): void { + for (const unsubscribe of this.extensionTerminalInputUnsubscribers) { + unsubscribe(); + } + this.extensionTerminalInputUnsubscribers.clear(); + } + + /** + * Create the ExtensionUIContext for extensions. 
+ */ + private createExtensionUIContext(): ExtensionUIContext { + return { + select: (title, options, opts) => this.showExtensionSelector(title, options, opts), + confirm: (title, message, opts) => this.showExtensionConfirm(title, message, opts), + input: (title, placeholder, opts) => this.showExtensionInput(title, placeholder, opts), + notify: (message, type) => this.showExtensionNotify(message, type), + onTerminalInput: (handler) => this.addExtensionTerminalInputListener(handler), + setStatus: (key, text) => this.setExtensionStatus(key, text), + setWorkingMessage: (message) => { + if (this.loadingAnimation) { + if (message) { + this.loadingAnimation.setMessage(message); + } else { + this.loadingAnimation.setMessage( + `${this.defaultWorkingMessage} (${appKey(this.keybindings, "interrupt")} to interrupt)`, + ); + } + } else { + // Queue message for when loadingAnimation is created (handles agent_start race) + this.pendingWorkingMessage = message; + } + }, + setWidget: (key, content, options) => this.setExtensionWidget(key, content, options), + setFooter: (factory) => this.setExtensionFooter(factory), + setHeader: (factory) => this.setExtensionHeader(factory), + setTitle: (title) => this.ui.terminal.setTitle(title), + custom: (factory, options) => this.showExtensionCustom(factory, options), + pasteToEditor: (text) => this.editor.handleInput(`\x1b[200~${text}\x1b[201~`), + setEditorText: (text) => this.editor.setText(text), + getEditorText: () => this.editor.getText(), + editor: (title, prefill) => this.showExtensionEditor(title, prefill), + setEditorComponent: (factory) => this.setCustomEditorComponent(factory), + get theme() { + return theme; + }, + getAllThemes: () => getAvailableThemesWithPaths(), + getTheme: (name) => getThemeByName(name), + setTheme: (themeOrName) => { + if (themeOrName instanceof Theme) { + setThemeInstance(themeOrName); + this.ui.requestRender(); + return { success: true }; + } + const result = setTheme(themeOrName, true); + if 
(result.success) { + if (this.settingsManager.getTheme() !== themeOrName) { + this.settingsManager.setTheme(themeOrName); + } + this.ui.requestRender(); + } + return result; + }, + getToolsExpanded: () => this.toolOutputExpanded, + setToolsExpanded: (expanded) => this.setToolsExpanded(expanded), + }; + } + + /** + * Show a selector for extensions. + */ + private showExtensionSelector( + title: string, + options: string[], + opts?: ExtensionUIDialogOptions, + ): Promise { + return new Promise((resolve) => { + if (opts?.signal?.aborted) { + resolve(undefined); + return; + } + + const onAbort = () => { + this.hideExtensionSelector(); + resolve(undefined); + }; + opts?.signal?.addEventListener("abort", onAbort, { once: true }); + + this.extensionSelector = new ExtensionSelectorComponent( + title, + options, + (option) => { + opts?.signal?.removeEventListener("abort", onAbort); + this.hideExtensionSelector(); + resolve(option); + }, + () => { + opts?.signal?.removeEventListener("abort", onAbort); + this.hideExtensionSelector(); + resolve(undefined); + }, + { tui: this.ui, timeout: opts?.timeout }, + ); + + this.editorContainer.clear(); + this.editorContainer.addChild(this.extensionSelector); + this.ui.setFocus(this.extensionSelector); + this.ui.requestRender(); + }); + } + + /** + * Hide the extension selector. + */ + private hideExtensionSelector(): void { + this.extensionSelector?.dispose(); + this.editorContainer.clear(); + this.editorContainer.addChild(this.editor); + this.extensionSelector = undefined; + this.ui.setFocus(this.editor); + this.ui.requestRender(); + } + + /** + * Show a confirmation dialog for extensions. + */ + private async showExtensionConfirm( + title: string, + message: string, + opts?: ExtensionUIDialogOptions, + ): Promise { + const result = await this.showExtensionSelector(`${title}\n${message}`, ["Yes", "No"], opts); + return result === "Yes"; + } + + /** + * Show a text input for extensions. 
+ */ + private showExtensionInput( + title: string, + placeholder?: string, + opts?: ExtensionUIDialogOptions, + ): Promise { + return new Promise((resolve) => { + if (opts?.signal?.aborted) { + resolve(undefined); + return; + } + + const onAbort = () => { + this.hideExtensionInput(); + resolve(undefined); + }; + opts?.signal?.addEventListener("abort", onAbort, { once: true }); + + this.extensionInput = new ExtensionInputComponent( + title, + placeholder, + (value) => { + opts?.signal?.removeEventListener("abort", onAbort); + this.hideExtensionInput(); + resolve(value); + }, + () => { + opts?.signal?.removeEventListener("abort", onAbort); + this.hideExtensionInput(); + resolve(undefined); + }, + { tui: this.ui, timeout: opts?.timeout }, + ); + + this.editorContainer.clear(); + this.editorContainer.addChild(this.extensionInput); + this.ui.setFocus(this.extensionInput); + this.ui.requestRender(); + }); + } + + /** + * Hide the extension input. + */ + private hideExtensionInput(): void { + this.extensionInput?.dispose(); + this.editorContainer.clear(); + this.editorContainer.addChild(this.editor); + this.extensionInput = undefined; + this.ui.setFocus(this.editor); + this.ui.requestRender(); + } + + /** + * Show a multi-line editor for extensions (with Ctrl+G support). + */ + private showExtensionEditor(title: string, prefill?: string): Promise { + return new Promise((resolve) => { + this.extensionEditor = new ExtensionEditorComponent( + this.ui, + this.keybindings, + title, + prefill, + (value) => { + this.hideExtensionEditor(); + resolve(value); + }, + () => { + this.hideExtensionEditor(); + resolve(undefined); + }, + ); + + this.editorContainer.clear(); + this.editorContainer.addChild(this.extensionEditor); + this.ui.setFocus(this.extensionEditor); + this.ui.requestRender(); + }); + } + + /** + * Hide the extension editor. 
+ */ + private hideExtensionEditor(): void { + this.editorContainer.clear(); + this.editorContainer.addChild(this.editor); + this.extensionEditor = undefined; + this.ui.setFocus(this.editor); + this.ui.requestRender(); + } + + /** + * Set a custom editor component from an extension. + * Pass undefined to restore the default editor. + */ + private setCustomEditorComponent( + factory: ((tui: TUI, theme: EditorTheme, keybindings: KeybindingsManager) => EditorComponent) | undefined, + ): void { + // Save text from current editor before switching + const currentText = this.editor.getText(); + + this.editorContainer.clear(); + + if (factory) { + // Create the custom editor with tui, theme, and keybindings + const newEditor = factory(this.ui, getEditorTheme(), this.keybindings); + + // Wire up callbacks from the default editor + newEditor.onSubmit = this.defaultEditor.onSubmit; + newEditor.onChange = this.defaultEditor.onChange; + + // Copy text from previous editor + newEditor.setText(currentText); + + // Copy appearance settings if supported + if (newEditor.borderColor !== undefined) { + newEditor.borderColor = this.defaultEditor.borderColor; + } + if (newEditor.setPaddingX !== undefined) { + newEditor.setPaddingX(this.defaultEditor.getPaddingX()); + } + + // Set autocomplete if supported + if (newEditor.setAutocompleteProvider && this.autocompleteProvider) { + newEditor.setAutocompleteProvider(this.autocompleteProvider); + } + + // If extending CustomEditor, copy app-level handlers + // Use duck typing since instanceof fails across jiti module boundaries + const customEditor = newEditor as unknown as Record; + if ("actionHandlers" in customEditor && customEditor.actionHandlers instanceof Map) { + if (!customEditor.onEscape) { + customEditor.onEscape = () => this.defaultEditor.onEscape?.(); + } + if (!customEditor.onCtrlD) { + customEditor.onCtrlD = () => this.defaultEditor.onCtrlD?.(); + } + if (!customEditor.onPasteImage) { + customEditor.onPasteImage = () => 
this.defaultEditor.onPasteImage?.(); + } + if (!customEditor.onExtensionShortcut) { + customEditor.onExtensionShortcut = (data: string) => this.defaultEditor.onExtensionShortcut?.(data); + } + // Copy action handlers (clear, suspend, model switching, etc.) + for (const [action, handler] of this.defaultEditor.actionHandlers) { + (customEditor.actionHandlers as Map void>).set(action, handler); + } + } + + this.editor = newEditor; + } else { + // Restore default editor with text from custom editor + this.defaultEditor.setText(currentText); + this.editor = this.defaultEditor; + } + + this.editorContainer.addChild(this.editor as Component); + this.ui.setFocus(this.editor as Component); + this.ui.requestRender(); + } + + /** + * Show a notification for extensions. + */ + private showExtensionNotify(message: string, type?: "info" | "warning" | "error"): void { + if (type === "error") { + this.showError(message); + } else if (type === "warning") { + this.showWarning(message); + } else { + this.showStatus(message); + } + } + + /** Show a custom component with keyboard focus. Overlay mode renders on top of existing content. */ + private async showExtensionCustom( + factory: ( + tui: TUI, + theme: Theme, + keybindings: KeybindingsManager, + done: (result: T) => void, + ) => (Component & { dispose?(): void }) | Promise, + options?: { + overlay?: boolean; + overlayOptions?: OverlayOptions | (() => OverlayOptions); + onHandle?: (handle: OverlayHandle) => void; + }, + ): Promise { + const savedText = this.editor.getText(); + const isOverlay = options?.overlay ?? 
false; + + const restoreEditor = () => { + this.editorContainer.clear(); + this.editorContainer.addChild(this.editor); + this.editor.setText(savedText); + this.ui.setFocus(this.editor); + this.ui.requestRender(); + }; + + return new Promise((resolve, reject) => { + let component: Component & { dispose?(): void }; + let closed = false; + + const close = (result: T) => { + if (closed) return; + closed = true; + if (isOverlay) this.ui.hideOverlay(); + else restoreEditor(); + // Note: both branches above already call requestRender + resolve(result); + try { + component?.dispose?.(); + } catch { + /* ignore dispose errors */ + } + }; + + Promise.resolve(factory(this.ui, theme, this.keybindings, close)) + .then((c) => { + if (closed) return; + component = c; + if (isOverlay) { + // Resolve overlay options - can be static or dynamic function + const resolveOptions = (): OverlayOptions | undefined => { + if (options?.overlayOptions) { + const opts = + typeof options.overlayOptions === "function" + ? options.overlayOptions() + : options.overlayOptions; + return opts; + } + // Fallback: use component's width property if available + const w = (component as { width?: number }).width; + return w ? { width: w } : undefined; + }; + const handle = this.ui.showOverlay(component, resolveOptions()); + // Expose handle to caller for visibility control + options?.onHandle?.(handle); + } else { + this.editorContainer.clear(); + this.editorContainer.addChild(component); + this.ui.setFocus(component); + this.ui.requestRender(); + } + }) + .catch((err) => { + if (closed) return; + if (!isOverlay) restoreEditor(); + reject(err); + }); + }); + } + + /** + * Show an extension error in the UI. 
+ */ + private showExtensionError(extensionPath: string, error: string, stack?: string): void { + const errorMsg = `Extension "${extensionPath}" error: ${error}`; + const errorText = new Text(theme.fg("error", errorMsg), 1, 0); + this.chatContainer.addChild(errorText); + if (stack) { + // Show stack trace in dim color, indented + const stackLines = stack + .split("\n") + .slice(1) // Skip first line (duplicates error message) + .map((line) => theme.fg("dim", ` ${line.trim()}`)) + .join("\n"); + if (stackLines) { + this.chatContainer.addChild(new Text(stackLines, 1, 0)); + } + } + this.ui.requestRender(); + } + + // ========================================================================= + // Key Handlers + // ========================================================================= + + private setupKeyHandlers(): void { + // Set up handlers on defaultEditor - they use this.editor for text access + // so they work correctly regardless of which editor is active + this.defaultEditor.onEscape = () => { + if (this.loadingAnimation) { + this.restoreQueuedMessagesToEditor({ abort: true }); + } else if (this.session.isBashRunning) { + this.session.abortBash(); + } else if (this.isBashMode) { + this.editor.setText(""); + this.isBashMode = false; + this.updateEditorBorderColor(); + } else if (!this.editor.getText().trim()) { + // Double-escape with empty editor triggers /tree, /fork, or nothing based on setting + const action = this.settingsManager.getDoubleEscapeAction(); + if (action !== "none") { + const now = Date.now(); + if (now - this.lastEscapeTime < 500) { + if (action === "tree") { + this.showTreeSelector(); + } else { + this.showUserMessageSelector(); + } + this.lastEscapeTime = 0; + } else { + this.lastEscapeTime = now; + } + } + } + }; + + // Register app action handlers + this.defaultEditor.onAction("clear", () => this.handleCtrlC()); + this.defaultEditor.onCtrlD = () => this.handleCtrlD(); + this.defaultEditor.onAction("suspend", () => this.handleCtrlZ()); + 
this.defaultEditor.onAction("cycleThinkingLevel", () => this.cycleThinkingLevel()); + this.defaultEditor.onAction("cycleModelForward", () => this.cycleModel("forward")); + this.defaultEditor.onAction("cycleModelBackward", () => this.cycleModel("backward")); + + // Global debug handler on TUI (works regardless of focus) + this.ui.onDebug = () => this.handleDebugCommand(); + this.defaultEditor.onAction("selectModel", () => this.showModelSelector()); + this.defaultEditor.onAction("expandTools", () => this.toggleToolOutputExpansion()); + this.defaultEditor.onAction("toggleThinking", () => this.toggleThinkingBlockVisibility()); + this.defaultEditor.onAction("externalEditor", () => this.openExternalEditor()); + this.defaultEditor.onAction("followUp", () => this.handleFollowUp()); + this.defaultEditor.onAction("dequeue", () => this.handleDequeue()); + this.defaultEditor.onAction("newSession", () => this.handleClearCommand()); + this.defaultEditor.onAction("tree", () => this.showTreeSelector()); + this.defaultEditor.onAction("fork", () => this.showUserMessageSelector()); + this.defaultEditor.onAction("resume", () => this.showSessionSelector()); + + this.defaultEditor.onChange = (text: string) => { + const wasBashMode = this.isBashMode; + this.isBashMode = text.trimStart().startsWith("!"); + if (wasBashMode !== this.isBashMode) { + this.updateEditorBorderColor(); + } + }; + + // Handle clipboard image paste (triggered on Ctrl+V) + this.defaultEditor.onPasteImage = () => { + this.handleClipboardImagePaste(); + }; + } + + private async handleClipboardImagePaste(): Promise { + try { + const image = await readClipboardImage(); + if (!image) { + return; + } + + // Write to temp file + const tmpDir = os.tmpdir(); + const ext = extensionForImageMimeType(image.mimeType) ?? 
"png"; + const fileName = `pi-clipboard-${crypto.randomUUID()}.${ext}`; + const filePath = path.join(tmpDir, fileName); + fs.writeFileSync(filePath, Buffer.from(image.bytes)); + + // Insert file path directly + this.editor.insertTextAtCursor?.(filePath); + this.ui.requestRender(); + } catch { + // Silently ignore clipboard errors (may not have permission, etc.) + } + } + + private setupEditorSubmitHandler(): void { + this.defaultEditor.onSubmit = async (text: string) => { + text = text.trim(); + if (!text) return; + + // Handle commands + if (text === "/settings") { + this.showSettingsSelector(); + this.editor.setText(""); + return; + } + if (text === "/scoped-models") { + this.editor.setText(""); + await this.showModelsSelector(); + return; + } + if (text === "/model" || text.startsWith("/model ")) { + const searchTerm = text.startsWith("/model ") ? text.slice(7).trim() : undefined; + this.editor.setText(""); + await this.handleModelCommand(searchTerm); + return; + } + if (text.startsWith("/export")) { + await this.handleExportCommand(text); + this.editor.setText(""); + return; + } + if (text === "/share") { + await this.handleShareCommand(); + this.editor.setText(""); + return; + } + if (text === "/copy") { + this.handleCopyCommand(); + this.editor.setText(""); + return; + } + if (text === "/name" || text.startsWith("/name ")) { + this.handleNameCommand(text); + this.editor.setText(""); + return; + } + if (text === "/session") { + this.handleSessionCommand(); + this.editor.setText(""); + return; + } + if (text === "/changelog") { + this.handleChangelogCommand(); + this.editor.setText(""); + return; + } + if (text === "/hotkeys") { + this.handleHotkeysCommand(); + this.editor.setText(""); + return; + } + if (text === "/fork") { + this.showUserMessageSelector(); + this.editor.setText(""); + return; + } + if (text === "/tree") { + this.showTreeSelector(); + this.editor.setText(""); + return; + } + if (text === "/login") { + this.showOAuthSelector("login"); + 
this.editor.setText(""); + return; + } + if (text === "/logout") { + this.showOAuthSelector("logout"); + this.editor.setText(""); + return; + } + if (text === "/new") { + this.editor.setText(""); + await this.handleClearCommand(); + return; + } + if (text === "/compact" || text.startsWith("/compact ")) { + const customInstructions = text.startsWith("/compact ") ? text.slice(9).trim() : undefined; + this.editor.setText(""); + await this.handleCompactCommand(customInstructions); + return; + } + if (text === "/reload") { + this.editor.setText(""); + await this.handleReloadCommand(); + return; + } + if (text === "/debug") { + this.handleDebugCommand(); + this.editor.setText(""); + return; + } + if (text === "/arminsayshi") { + this.handleArminSaysHi(); + this.editor.setText(""); + return; + } + if (text === "/resume") { + this.showSessionSelector(); + this.editor.setText(""); + return; + } + if (text === "/quit") { + this.editor.setText(""); + await this.shutdown(); + return; + } + + // Handle bash command (! for normal, !! for excluded from context) + if (text.startsWith("!")) { + const isExcluded = text.startsWith("!!"); + const command = isExcluded ? text.slice(2).trim() : text.slice(1).trim(); + if (command) { + if (this.session.isBashRunning) { + this.showWarning("A bash command is already running. 
Press Esc to cancel it first."); + this.editor.setText(text); + return; + } + this.editor.addToHistory?.(text); + await this.handleBashCommand(command, isExcluded); + this.isBashMode = false; + this.updateEditorBorderColor(); + return; + } + } + + // Queue input during compaction (extension commands execute immediately) + if (this.session.isCompacting) { + if (this.isExtensionCommand(text)) { + this.editor.addToHistory?.(text); + this.editor.setText(""); + await this.session.prompt(text); + } else { + this.queueCompactionMessage(text, "steer"); + } + return; + } + + // If streaming, use prompt() with steer behavior + // This handles extension commands (execute immediately), prompt template expansion, and queueing + if (this.session.isStreaming) { + this.editor.addToHistory?.(text); + this.editor.setText(""); + await this.session.prompt(text, { streamingBehavior: "steer" }); + this.updatePendingMessagesDisplay(); + this.ui.requestRender(); + return; + } + + // Normal message submission + // First, move any pending bash components to chat + this.flushPendingBashComponents(); + + if (this.onInputCallback) { + this.onInputCallback(text); + } + this.editor.addToHistory?.(text); + }; + } + + private subscribeToAgent(): void { + this.unsubscribe = this.session.subscribe(async (event) => { + await this.handleEvent(event); + }); + } + + private async handleEvent(event: AgentSessionEvent): Promise { + if (!this.isInitialized) { + await this.init(); + } + + this.footer.invalidate(); + + switch (event.type) { + case "agent_start": + // Restore main escape handler if retry handler is still active + // (retry success event fires later, but we need main handler now) + if (this.retryEscapeHandler) { + this.defaultEditor.onEscape = this.retryEscapeHandler; + this.retryEscapeHandler = undefined; + } + if (this.retryLoader) { + this.retryLoader.stop(); + this.retryLoader = undefined; + } + if (this.loadingAnimation) { + this.loadingAnimation.stop(); + } + 
this.statusContainer.clear(); + this.loadingAnimation = new Loader( + this.ui, + (spinner) => theme.fg("accent", spinner), + (text) => theme.fg("muted", text), + this.defaultWorkingMessage, + ); + this.statusContainer.addChild(this.loadingAnimation); + // Apply any pending working message queued before loader existed + if (this.pendingWorkingMessage !== undefined) { + if (this.pendingWorkingMessage) { + this.loadingAnimation.setMessage(this.pendingWorkingMessage); + } + this.pendingWorkingMessage = undefined; + } + this.ui.requestRender(); + break; + + case "message_start": + if (event.message.role === "custom") { + this.addMessageToChat(event.message); + this.ui.requestRender(); + } else if (event.message.role === "user") { + this.addMessageToChat(event.message); + this.updatePendingMessagesDisplay(); + this.ui.requestRender(); + } else if (event.message.role === "assistant") { + this.streamingComponent = new AssistantMessageComponent( + undefined, + this.hideThinkingBlock, + this.getMarkdownThemeWithSettings(), + ); + this.streamingMessage = event.message; + this.chatContainer.addChild(this.streamingComponent); + this.streamingComponent.updateContent(this.streamingMessage); + this.ui.requestRender(); + } + break; + + case "message_update": + if (this.streamingComponent && event.message.role === "assistant") { + this.streamingMessage = event.message; + this.streamingComponent.updateContent(this.streamingMessage); + + for (const content of this.streamingMessage.content) { + if (content.type === "toolCall") { + if (!this.pendingTools.has(content.id)) { + const component = new ToolExecutionComponent( + content.name, + content.arguments, + { + showImages: this.settingsManager.getShowImages(), + }, + this.getRegisteredToolDefinition(content.name), + this.ui, + ); + component.setExpanded(this.toolOutputExpanded); + this.chatContainer.addChild(component); + this.pendingTools.set(content.id, component); + } else { + const component = this.pendingTools.get(content.id); + 
if (component) { + component.updateArgs(content.arguments); + } + } + } + } + this.ui.requestRender(); + } + break; + + case "message_end": + if (event.message.role === "user") break; + if (this.streamingComponent && event.message.role === "assistant") { + this.streamingMessage = event.message; + let errorMessage: string | undefined; + if (this.streamingMessage.stopReason === "aborted") { + const retryAttempt = this.session.retryAttempt; + errorMessage = + retryAttempt > 0 + ? `Aborted after ${retryAttempt} retry attempt${retryAttempt > 1 ? "s" : ""}` + : "Operation aborted"; + this.streamingMessage.errorMessage = errorMessage; + } + this.streamingComponent.updateContent(this.streamingMessage); + + if (this.streamingMessage.stopReason === "aborted" || this.streamingMessage.stopReason === "error") { + if (!errorMessage) { + errorMessage = this.streamingMessage.errorMessage || "Error"; + } + for (const [, component] of this.pendingTools.entries()) { + component.updateResult({ + content: [{ type: "text", text: errorMessage }], + isError: true, + }); + } + this.pendingTools.clear(); + } else { + // Args are now complete - trigger diff computation for edit tools + for (const [, component] of this.pendingTools.entries()) { + component.setArgsComplete(); + } + } + this.streamingComponent = undefined; + this.streamingMessage = undefined; + this.footer.invalidate(); + } + this.ui.requestRender(); + break; + + case "tool_execution_start": { + if (!this.pendingTools.has(event.toolCallId)) { + const component = new ToolExecutionComponent( + event.toolName, + event.args, + { + showImages: this.settingsManager.getShowImages(), + }, + this.getRegisteredToolDefinition(event.toolName), + this.ui, + ); + component.setExpanded(this.toolOutputExpanded); + this.chatContainer.addChild(component); + this.pendingTools.set(event.toolCallId, component); + this.ui.requestRender(); + } + break; + } + + case "tool_execution_update": { + const component = 
this.pendingTools.get(event.toolCallId); + if (component) { + component.updateResult({ ...event.partialResult, isError: false }, true); + this.ui.requestRender(); + } + break; + } + + case "tool_execution_end": { + const component = this.pendingTools.get(event.toolCallId); + if (component) { + component.updateResult({ ...event.result, isError: event.isError }); + this.pendingTools.delete(event.toolCallId); + this.ui.requestRender(); + } + break; + } + + case "agent_end": + if (this.loadingAnimation) { + this.loadingAnimation.stop(); + this.loadingAnimation = undefined; + this.statusContainer.clear(); + } + if (this.streamingComponent) { + this.chatContainer.removeChild(this.streamingComponent); + this.streamingComponent = undefined; + this.streamingMessage = undefined; + } + this.pendingTools.clear(); + + await this.checkShutdownRequested(); + + this.ui.requestRender(); + break; + + case "auto_compaction_start": { + // Keep editor active; submissions are queued during compaction. + // Set up escape to abort auto-compaction + this.autoCompactionEscapeHandler = this.defaultEditor.onEscape; + this.defaultEditor.onEscape = () => { + this.session.abortCompaction(); + }; + // Show compacting indicator with reason + this.statusContainer.clear(); + const reasonText = event.reason === "overflow" ? "Context overflow detected, " : ""; + this.autoCompactionLoader = new Loader( + this.ui, + (spinner) => theme.fg("accent", spinner), + (text) => theme.fg("muted", text), + `${reasonText}Auto-compacting... 
(${appKey(this.keybindings, "interrupt")} to cancel)`, + ); + this.statusContainer.addChild(this.autoCompactionLoader); + this.ui.requestRender(); + break; + } + + case "auto_compaction_end": { + // Restore escape handler + if (this.autoCompactionEscapeHandler) { + this.defaultEditor.onEscape = this.autoCompactionEscapeHandler; + this.autoCompactionEscapeHandler = undefined; + } + // Stop loader + if (this.autoCompactionLoader) { + this.autoCompactionLoader.stop(); + this.autoCompactionLoader = undefined; + this.statusContainer.clear(); + } + // Handle result + if (event.aborted) { + this.showStatus("Auto-compaction cancelled"); + } else if (event.result) { + // Rebuild chat to show compacted state + this.chatContainer.clear(); + this.rebuildChatFromMessages(); + // Add compaction component at bottom so user sees it without scrolling + this.addMessageToChat({ + role: "compactionSummary", + tokensBefore: event.result.tokensBefore, + summary: event.result.summary, + timestamp: Date.now(), + }); + this.footer.invalidate(); + } else if (event.errorMessage) { + // Compaction failed (e.g., quota exceeded, API error) + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new Text(theme.fg("error", event.errorMessage), 1, 0)); + } + void this.flushCompactionQueue({ willRetry: event.willRetry }); + this.ui.requestRender(); + break; + } + + case "auto_retry_start": { + // Set up escape to abort retry + this.retryEscapeHandler = this.defaultEditor.onEscape; + this.defaultEditor.onEscape = () => { + this.session.abortRetry(); + }; + // Show retry indicator + this.statusContainer.clear(); + const delaySeconds = Math.round(event.delayMs / 1000); + this.retryLoader = new Loader( + this.ui, + (spinner) => theme.fg("warning", spinner), + (text) => theme.fg("muted", text), + `Retrying (${event.attempt}/${event.maxAttempts}) in ${delaySeconds}s... 
(${appKey(this.keybindings, "interrupt")} to cancel)`, + ); + this.statusContainer.addChild(this.retryLoader); + this.ui.requestRender(); + break; + } + + case "auto_retry_end": { + // Restore escape handler + if (this.retryEscapeHandler) { + this.defaultEditor.onEscape = this.retryEscapeHandler; + this.retryEscapeHandler = undefined; + } + // Stop loader + if (this.retryLoader) { + this.retryLoader.stop(); + this.retryLoader = undefined; + this.statusContainer.clear(); + } + // Show error only on final failure (success shows normal response) + if (!event.success) { + this.showError(`Retry failed after ${event.attempt} attempts: ${event.finalError || "Unknown error"}`); + } + this.ui.requestRender(); + break; + } + } + } + + /** Extract text content from a user message */ + private getUserMessageText(message: Message): string { + if (message.role !== "user") return ""; + const textBlocks = + typeof message.content === "string" + ? [{ type: "text", text: message.content }] + : message.content.filter((c: { type: string }) => c.type === "text"); + return textBlocks.map((c) => (c as { text: string }).text).join(""); + } + + /** + * Show a status message in the chat. + * + * If multiple status messages are emitted back-to-back (without anything else being added to the chat), + * we update the previous status line instead of appending new ones to avoid log spam. + */ + private showStatus(message: string): void { + const children = this.chatContainer.children; + const last = children.length > 0 ? children[children.length - 1] : undefined; + const secondLast = children.length > 1 ? 
children[children.length - 2] : undefined; + + if (last && secondLast && last === this.lastStatusText && secondLast === this.lastStatusSpacer) { + this.lastStatusText.setText(theme.fg("dim", message)); + this.ui.requestRender(); + return; + } + + const spacer = new Spacer(1); + const text = new Text(theme.fg("dim", message), 1, 0); + this.chatContainer.addChild(spacer); + this.chatContainer.addChild(text); + this.lastStatusSpacer = spacer; + this.lastStatusText = text; + this.ui.requestRender(); + } + + private addMessageToChat(message: AgentMessage, options?: { populateHistory?: boolean }): void { + switch (message.role) { + case "bashExecution": { + const component = new BashExecutionComponent(message.command, this.ui, message.excludeFromContext); + if (message.output) { + component.appendOutput(message.output); + } + component.setComplete( + message.exitCode, + message.cancelled, + message.truncated ? ({ truncated: true } as TruncationResult) : undefined, + message.fullOutputPath, + ); + this.chatContainer.addChild(component); + break; + } + case "custom": { + if (message.display) { + const renderer = this.session.extensionRunner?.getMessageRenderer(message.customType); + const component = new CustomMessageComponent(message, renderer, this.getMarkdownThemeWithSettings()); + component.setExpanded(this.toolOutputExpanded); + this.chatContainer.addChild(component); + } + break; + } + case "compactionSummary": { + this.chatContainer.addChild(new Spacer(1)); + const component = new CompactionSummaryMessageComponent(message, this.getMarkdownThemeWithSettings()); + component.setExpanded(this.toolOutputExpanded); + this.chatContainer.addChild(component); + break; + } + case "branchSummary": { + this.chatContainer.addChild(new Spacer(1)); + const component = new BranchSummaryMessageComponent(message, this.getMarkdownThemeWithSettings()); + component.setExpanded(this.toolOutputExpanded); + this.chatContainer.addChild(component); + break; + } + case "user": { + const 
textContent = this.getUserMessageText(message); + if (textContent) { + const skillBlock = parseSkillBlock(textContent); + if (skillBlock) { + // Render skill block (collapsible) + this.chatContainer.addChild(new Spacer(1)); + const component = new SkillInvocationMessageComponent( + skillBlock, + this.getMarkdownThemeWithSettings(), + ); + component.setExpanded(this.toolOutputExpanded); + this.chatContainer.addChild(component); + // Render user message separately if present + if (skillBlock.userMessage) { + const userComponent = new UserMessageComponent( + skillBlock.userMessage, + this.getMarkdownThemeWithSettings(), + ); + this.chatContainer.addChild(userComponent); + } + } else { + const userComponent = new UserMessageComponent(textContent, this.getMarkdownThemeWithSettings()); + this.chatContainer.addChild(userComponent); + } + if (options?.populateHistory) { + this.editor.addToHistory?.(textContent); + } + } + break; + } + case "assistant": { + const assistantComponent = new AssistantMessageComponent( + message, + this.hideThinkingBlock, + this.getMarkdownThemeWithSettings(), + ); + this.chatContainer.addChild(assistantComponent); + break; + } + case "toolResult": { + // Tool results are rendered inline with tool calls, handled separately + break; + } + default: { + const _exhaustive: never = message; + } + } + } + + /** + * Render session context to chat. Used for initial load and rebuild after compaction. 
+ * @param sessionContext Session context to render + * @param options.updateFooter Update footer state + * @param options.populateHistory Add user messages to editor history + */ + private renderSessionContext( + sessionContext: SessionContext, + options: { updateFooter?: boolean; populateHistory?: boolean } = {}, + ): void { + this.pendingTools.clear(); + + if (options.updateFooter) { + this.footer.invalidate(); + this.updateEditorBorderColor(); + } + + for (const message of sessionContext.messages) { + // Assistant messages need special handling for tool calls + if (message.role === "assistant") { + this.addMessageToChat(message); + // Render tool call components + for (const content of message.content) { + if (content.type === "toolCall") { + const component = new ToolExecutionComponent( + content.name, + content.arguments, + { showImages: this.settingsManager.getShowImages() }, + this.getRegisteredToolDefinition(content.name), + this.ui, + ); + component.setExpanded(this.toolOutputExpanded); + this.chatContainer.addChild(component); + + if (message.stopReason === "aborted" || message.stopReason === "error") { + let errorMessage: string; + if (message.stopReason === "aborted") { + const retryAttempt = this.session.retryAttempt; + errorMessage = + retryAttempt > 0 + ? `Aborted after ${retryAttempt} retry attempt${retryAttempt > 1 ? 
"s" : ""}` + : "Operation aborted"; + } else { + errorMessage = message.errorMessage || "Error"; + } + component.updateResult({ content: [{ type: "text", text: errorMessage }], isError: true }); + } else { + this.pendingTools.set(content.id, component); + } + } + } + } else if (message.role === "toolResult") { + // Match tool results to pending tool components + const component = this.pendingTools.get(message.toolCallId); + if (component) { + component.updateResult(message); + this.pendingTools.delete(message.toolCallId); + } + } else { + // All other messages use standard rendering + this.addMessageToChat(message, options); + } + } + + this.pendingTools.clear(); + this.ui.requestRender(); + } + + renderInitialMessages(): void { + // Get aligned messages and entries from session context + const context = this.sessionManager.buildSessionContext(); + this.renderSessionContext(context, { + updateFooter: true, + populateHistory: true, + }); + + // Show compaction info if session was compacted + const allEntries = this.sessionManager.getEntries(); + const compactionCount = allEntries.filter((e) => e.type === "compaction").length; + if (compactionCount > 0) { + const times = compactionCount === 1 ? 
"1 time" : `${compactionCount} times`; + this.showStatus(`Session compacted ${times}`); + } + } + + async getUserInput(): Promise { + return new Promise((resolve) => { + this.onInputCallback = (text: string) => { + this.onInputCallback = undefined; + resolve(text); + }; + }); + } + + private rebuildChatFromMessages(): void { + this.chatContainer.clear(); + const context = this.sessionManager.buildSessionContext(); + this.renderSessionContext(context); + } + + // ========================================================================= + // Key handlers + // ========================================================================= + + private handleCtrlC(): void { + const now = Date.now(); + if (now - this.lastSigintTime < 500) { + void this.shutdown(); + } else { + this.clearEditor(); + this.lastSigintTime = now; + } + } + + private handleCtrlD(): void { + // Only called when editor is empty (enforced by CustomEditor) + void this.shutdown(); + } + + /** + * Gracefully shutdown the agent. + * Emits shutdown event to extensions, then exits. + */ + private isShuttingDown = false; + + private async shutdown(): Promise { + if (this.isShuttingDown) return; + this.isShuttingDown = true; + + // Emit shutdown event to extensions + const extensionRunner = this.session.extensionRunner; + if (extensionRunner?.hasHandlers("session_shutdown")) { + await extensionRunner.emit({ + type: "session_shutdown", + }); + } + + // Wait for any pending renders to complete + // requestRender() uses process.nextTick(), so we wait one tick + await new Promise((resolve) => process.nextTick(resolve)); + + // Drain any in-flight Kitty key release events before stopping. + // This prevents escape sequences from leaking to the parent shell over slow SSH. + await this.ui.terminal.drainInput(1000); + + this.stop(); + process.exit(0); + } + + /** + * Check if shutdown was requested and perform shutdown if so. 
+ */ + private async checkShutdownRequested(): Promise { + if (!this.shutdownRequested) return; + await this.shutdown(); + } + + private handleCtrlZ(): void { + // Ignore SIGINT while suspended so Ctrl+C in the terminal does not + // kill the backgrounded process. The handler is removed on resume. + const ignoreSigint = () => {}; + process.on("SIGINT", ignoreSigint); + + // Set up handler to restore TUI when resumed + process.once("SIGCONT", () => { + process.removeListener("SIGINT", ignoreSigint); + this.ui.start(); + this.ui.requestRender(true); + }); + + // Stop the TUI (restore terminal to normal mode) + this.ui.stop(); + + // Send SIGTSTP to process group (pid=0 means all processes in group) + process.kill(0, "SIGTSTP"); + } + + private async handleFollowUp(): Promise { + const text = (this.editor.getExpandedText?.() ?? this.editor.getText()).trim(); + if (!text) return; + + // Queue input during compaction (extension commands execute immediately) + if (this.session.isCompacting) { + if (this.isExtensionCommand(text)) { + this.editor.addToHistory?.(text); + this.editor.setText(""); + await this.session.prompt(text); + } else { + this.queueCompactionMessage(text, "followUp"); + } + return; + } + + // Alt+Enter queues a follow-up message (waits until agent finishes) + // This handles extension commands (execute immediately), prompt template expansion, and queueing + if (this.session.isStreaming) { + this.editor.addToHistory?.(text); + this.editor.setText(""); + await this.session.prompt(text, { streamingBehavior: "followUp" }); + this.updatePendingMessagesDisplay(); + this.ui.requestRender(); + } + // If not streaming, Alt+Enter acts like regular Enter (trigger onSubmit) + else if (this.editor.onSubmit) { + this.editor.onSubmit(text); + } + } + + private handleDequeue(): void { + const restored = this.restoreQueuedMessagesToEditor(); + if (restored === 0) { + this.showStatus("No queued messages to restore"); + } else { + this.showStatus(`Restored ${restored} 
queued message${restored > 1 ? "s" : ""} to editor`); + } + } + + private updateEditorBorderColor(): void { + if (this.isBashMode) { + this.editor.borderColor = theme.getBashModeBorderColor(); + } else { + const level = this.session.thinkingLevel || "off"; + this.editor.borderColor = theme.getThinkingBorderColor(level); + } + this.ui.requestRender(); + } + + private cycleThinkingLevel(): void { + const newLevel = this.session.cycleThinkingLevel(); + if (newLevel === undefined) { + this.showStatus("Current model does not support thinking"); + } else { + this.footer.invalidate(); + this.updateEditorBorderColor(); + this.showStatus(`Thinking level: ${newLevel}`); + } + } + + private async cycleModel(direction: "forward" | "backward"): Promise { + try { + const result = await this.session.cycleModel(direction); + if (result === undefined) { + const msg = this.session.scopedModels.length > 0 ? "Only one model in scope" : "Only one model available"; + this.showStatus(msg); + } else { + this.footer.invalidate(); + this.updateEditorBorderColor(); + const thinkingStr = + result.model.reasoning && result.thinkingLevel !== "off" ? ` (thinking: ${result.thinkingLevel})` : ""; + this.showStatus(`Switched to ${result.model.name || result.model.id}${thinkingStr}`); + } + } catch (error) { + this.showError(error instanceof Error ? 
error.message : String(error)); + } + } + + private toggleToolOutputExpansion(): void { + this.setToolsExpanded(!this.toolOutputExpanded); + } + + private setToolsExpanded(expanded: boolean): void { + this.toolOutputExpanded = expanded; + for (const child of this.chatContainer.children) { + if (isExpandable(child)) { + child.setExpanded(expanded); + } + } + this.ui.requestRender(); + } + + private toggleThinkingBlockVisibility(): void { + this.hideThinkingBlock = !this.hideThinkingBlock; + this.settingsManager.setHideThinkingBlock(this.hideThinkingBlock); + + // Rebuild chat from session messages + this.chatContainer.clear(); + this.rebuildChatFromMessages(); + + // If streaming, re-add the streaming component with updated visibility and re-render + if (this.streamingComponent && this.streamingMessage) { + this.streamingComponent.setHideThinkingBlock(this.hideThinkingBlock); + this.streamingComponent.updateContent(this.streamingMessage); + this.chatContainer.addChild(this.streamingComponent); + } + + this.showStatus(`Thinking blocks: ${this.hideThinkingBlock ? "hidden" : "visible"}`); + } + + private openExternalEditor(): void { + // Determine editor (respect $VISUAL, then $EDITOR) + const editorCmd = process.env.VISUAL || process.env.EDITOR; + if (!editorCmd) { + this.showWarning("No editor configured. Set $VISUAL or $EDITOR environment variable."); + return; + } + + const currentText = this.editor.getExpandedText?.() ?? 
this.editor.getText(); + const tmpFile = path.join(os.tmpdir(), `pi-editor-${Date.now()}.pi.md`); + + try { + // Write current content to temp file + fs.writeFileSync(tmpFile, currentText, "utf-8"); + + // Stop TUI to release terminal + this.ui.stop(); + + // Split by space to support editor arguments (e.g., "code --wait") + const [editor, ...editorArgs] = editorCmd.split(" "); + + // Spawn editor synchronously with inherited stdio for interactive editing + const result = spawnSync(editor, [...editorArgs, tmpFile], { + stdio: "inherit", + shell: process.platform === "win32", + }); + + // On successful exit (status 0), replace editor content + if (result.status === 0) { + const newContent = fs.readFileSync(tmpFile, "utf-8").replace(/\n$/, ""); + this.editor.setText(newContent); + } + // On non-zero exit, keep original text (no action needed) + } finally { + // Clean up temp file + try { + fs.unlinkSync(tmpFile); + } catch { + // Ignore cleanup errors + } + + // Restart TUI + this.ui.start(); + // Force full re-render since external editor uses alternate screen + this.ui.requestRender(true); + } + } + + // ========================================================================= + // UI helpers + // ========================================================================= + + clearEditor(): void { + this.editor.setText(""); + this.ui.requestRender(); + } + + showError(errorMessage: string): void { + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new Text(theme.fg("error", `Error: ${errorMessage}`), 1, 0)); + this.ui.requestRender(); + } + + showWarning(warningMessage: string): void { + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new Text(theme.fg("warning", `Warning: ${warningMessage}`), 1, 0)); + this.ui.requestRender(); + } + + showNewVersionNotification(newVersion: string): void { + const action = theme.fg("accent", getUpdateInstruction("@gsd/pi-coding-agent")); + const updateInstruction = theme.fg("muted", 
`New version ${newVersion} is available. `) + action; + const changelogUrl = theme.fg( + "accent", + "https://github.com/badlogic/pi-mono/blob/main/packages/coding-agent/CHANGELOG.md", + ); + const changelogLine = theme.fg("muted", "Changelog: ") + changelogUrl; + + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new DynamicBorder((text) => theme.fg("warning", text))); + this.chatContainer.addChild( + new Text( + `${theme.bold(theme.fg("warning", "Update Available"))}\n${updateInstruction}\n${changelogLine}`, + 1, + 0, + ), + ); + this.chatContainer.addChild(new DynamicBorder((text) => theme.fg("warning", text))); + this.ui.requestRender(); + } + + /** + * Get all queued messages (read-only). + * Combines session queue and compaction queue. + */ + private getAllQueuedMessages(): { steering: string[]; followUp: string[] } { + return { + steering: [ + ...this.session.getSteeringMessages(), + ...this.compactionQueuedMessages.filter((msg) => msg.mode === "steer").map((msg) => msg.text), + ], + followUp: [ + ...this.session.getFollowUpMessages(), + ...this.compactionQueuedMessages.filter((msg) => msg.mode === "followUp").map((msg) => msg.text), + ], + }; + } + + /** + * Clear all queued messages and return their contents. + * Clears both session queue and compaction queue. 
+ */ + private clearAllQueues(): { steering: string[]; followUp: string[] } { + const { steering, followUp } = this.session.clearQueue(); + const compactionSteering = this.compactionQueuedMessages + .filter((msg) => msg.mode === "steer") + .map((msg) => msg.text); + const compactionFollowUp = this.compactionQueuedMessages + .filter((msg) => msg.mode === "followUp") + .map((msg) => msg.text); + this.compactionQueuedMessages = []; + return { + steering: [...steering, ...compactionSteering], + followUp: [...followUp, ...compactionFollowUp], + }; + } + + private updatePendingMessagesDisplay(): void { + this.pendingMessagesContainer.clear(); + const { steering: steeringMessages, followUp: followUpMessages } = this.getAllQueuedMessages(); + if (steeringMessages.length > 0 || followUpMessages.length > 0) { + this.pendingMessagesContainer.addChild(new Spacer(1)); + for (const message of steeringMessages) { + const text = theme.fg("dim", `Steering: ${message}`); + this.pendingMessagesContainer.addChild(new TruncatedText(text, 1, 0)); + } + for (const message of followUpMessages) { + const text = theme.fg("dim", `Follow-up: ${message}`); + this.pendingMessagesContainer.addChild(new TruncatedText(text, 1, 0)); + } + const dequeueHint = this.getAppKeyDisplay("dequeue"); + const hintText = theme.fg("dim", `↳ ${dequeueHint} to edit all queued messages`); + this.pendingMessagesContainer.addChild(new TruncatedText(hintText, 1, 0)); + } + } + + private restoreQueuedMessagesToEditor(options?: { abort?: boolean; currentText?: string }): number { + const { steering, followUp } = this.clearAllQueues(); + const allQueued = [...steering, ...followUp]; + if (allQueued.length === 0) { + this.updatePendingMessagesDisplay(); + if (options?.abort) { + this.agent.abort(); + } + return 0; + } + const queuedText = allQueued.join("\n\n"); + const currentText = options?.currentText ?? 
this.editor.getText(); + const combinedText = [queuedText, currentText].filter((t) => t.trim()).join("\n\n"); + this.editor.setText(combinedText); + this.updatePendingMessagesDisplay(); + if (options?.abort) { + this.agent.abort(); + } + return allQueued.length; + } + + private queueCompactionMessage(text: string, mode: "steer" | "followUp"): void { + this.compactionQueuedMessages.push({ text, mode }); + this.editor.addToHistory?.(text); + this.editor.setText(""); + this.updatePendingMessagesDisplay(); + this.showStatus("Queued message for after compaction"); + } + + private isExtensionCommand(text: string): boolean { + if (!text.startsWith("/")) return false; + + const extensionRunner = this.session.extensionRunner; + if (!extensionRunner) return false; + + const spaceIndex = text.indexOf(" "); + const commandName = spaceIndex === -1 ? text.slice(1) : text.slice(1, spaceIndex); + return !!extensionRunner.getCommand(commandName); + } + + private async flushCompactionQueue(options?: { willRetry?: boolean }): Promise { + if (this.compactionQueuedMessages.length === 0) { + return; + } + + const queuedMessages = [...this.compactionQueuedMessages]; + this.compactionQueuedMessages = []; + this.updatePendingMessagesDisplay(); + + const restoreQueue = (error: unknown) => { + this.session.clearQueue(); + this.compactionQueuedMessages = queuedMessages; + this.updatePendingMessagesDisplay(); + this.showError( + `Failed to send queued message${queuedMessages.length > 1 ? "s" : ""}: ${ + error instanceof Error ? 
error.message : String(error) + }`, + ); + }; + + try { + if (options?.willRetry) { + // When retry is pending, queue messages for the retry turn + for (const message of queuedMessages) { + if (this.isExtensionCommand(message.text)) { + await this.session.prompt(message.text); + } else if (message.mode === "followUp") { + await this.session.followUp(message.text); + } else { + await this.session.steer(message.text); + } + } + this.updatePendingMessagesDisplay(); + return; + } + + // Find first non-extension-command message to use as prompt + const firstPromptIndex = queuedMessages.findIndex((message) => !this.isExtensionCommand(message.text)); + if (firstPromptIndex === -1) { + // All extension commands - execute them all + for (const message of queuedMessages) { + await this.session.prompt(message.text); + } + return; + } + + // Execute any extension commands before the first prompt + const preCommands = queuedMessages.slice(0, firstPromptIndex); + const firstPrompt = queuedMessages[firstPromptIndex]; + const rest = queuedMessages.slice(firstPromptIndex + 1); + + for (const message of preCommands) { + await this.session.prompt(message.text); + } + + // Send first prompt (starts streaming) + const promptPromise = this.session.prompt(firstPrompt.text).catch((error) => { + restoreQueue(error); + }); + + // Queue remaining messages + for (const message of rest) { + if (this.isExtensionCommand(message.text)) { + await this.session.prompt(message.text); + } else if (message.mode === "followUp") { + await this.session.followUp(message.text); + } else { + await this.session.steer(message.text); + } + } + this.updatePendingMessagesDisplay(); + void promptPromise; + } catch (error) { + restoreQueue(error); + } + } + + /** Move pending bash components from pending area to chat */ + private flushPendingBashComponents(): void { + for (const component of this.pendingBashComponents) { + this.pendingMessagesContainer.removeChild(component); + 
this.chatContainer.addChild(component); + } + this.pendingBashComponents = []; + } + + // ========================================================================= + // Selectors + // ========================================================================= + + /** + * Shows a selector component in place of the editor. + * @param create Factory that receives a `done` callback and returns the component and focus target + */ + private showSelector(create: (done: () => void) => { component: Component; focus: Component }): void { + const done = () => { + this.editorContainer.clear(); + this.editorContainer.addChild(this.editor); + this.ui.setFocus(this.editor); + }; + const { component, focus } = create(done); + this.editorContainer.clear(); + this.editorContainer.addChild(component); + this.ui.setFocus(focus); + this.ui.requestRender(); + } + + private showSettingsSelector(): void { + this.showSelector((done) => { + const selector = new SettingsSelectorComponent( + { + autoCompact: this.session.autoCompactionEnabled, + showImages: this.settingsManager.getShowImages(), + autoResizeImages: this.settingsManager.getImageAutoResize(), + blockImages: this.settingsManager.getBlockImages(), + enableSkillCommands: this.settingsManager.getEnableSkillCommands(), + steeringMode: this.session.steeringMode, + followUpMode: this.session.followUpMode, + transport: this.settingsManager.getTransport(), + thinkingLevel: this.session.thinkingLevel, + availableThinkingLevels: this.session.getAvailableThinkingLevels(), + currentTheme: this.settingsManager.getTheme() || "dark", + availableThemes: getAvailableThemes(), + hideThinkingBlock: this.hideThinkingBlock, + collapseChangelog: this.settingsManager.getCollapseChangelog(), + doubleEscapeAction: this.settingsManager.getDoubleEscapeAction(), + treeFilterMode: this.settingsManager.getTreeFilterMode(), + showHardwareCursor: this.settingsManager.getShowHardwareCursor(), + editorPaddingX: this.settingsManager.getEditorPaddingX(), + 
autocompleteMaxVisible: this.settingsManager.getAutocompleteMaxVisible(), + quietStartup: this.settingsManager.getQuietStartup(), + clearOnShrink: this.settingsManager.getClearOnShrink(), + }, + { + onAutoCompactChange: (enabled) => { + this.session.setAutoCompactionEnabled(enabled); + this.footer.setAutoCompactEnabled(enabled); + }, + onShowImagesChange: (enabled) => { + this.settingsManager.setShowImages(enabled); + for (const child of this.chatContainer.children) { + if (child instanceof ToolExecutionComponent) { + child.setShowImages(enabled); + } + } + }, + onAutoResizeImagesChange: (enabled) => { + this.settingsManager.setImageAutoResize(enabled); + }, + onBlockImagesChange: (blocked) => { + this.settingsManager.setBlockImages(blocked); + }, + onEnableSkillCommandsChange: (enabled) => { + this.settingsManager.setEnableSkillCommands(enabled); + this.setupAutocomplete(this.fdPath); + }, + onSteeringModeChange: (mode) => { + this.session.setSteeringMode(mode); + }, + onFollowUpModeChange: (mode) => { + this.session.setFollowUpMode(mode); + }, + onTransportChange: (transport) => { + this.settingsManager.setTransport(transport); + this.session.agent.setTransport(transport); + }, + onThinkingLevelChange: (level) => { + this.session.setThinkingLevel(level); + this.footer.invalidate(); + this.updateEditorBorderColor(); + }, + onThemeChange: (themeName) => { + const result = setTheme(themeName, true); + this.settingsManager.setTheme(themeName); + this.ui.invalidate(); + if (!result.success) { + this.showError(`Failed to load theme "${themeName}": ${result.error}\nFell back to dark theme.`); + } + }, + onThemePreview: (themeName) => { + const result = setTheme(themeName, true); + if (result.success) { + this.ui.invalidate(); + this.ui.requestRender(); + } + }, + onHideThinkingBlockChange: (hidden) => { + this.hideThinkingBlock = hidden; + this.settingsManager.setHideThinkingBlock(hidden); + for (const child of this.chatContainer.children) { + if (child instanceof 
AssistantMessageComponent) { + child.setHideThinkingBlock(hidden); + } + } + this.chatContainer.clear(); + this.rebuildChatFromMessages(); + }, + onCollapseChangelogChange: (collapsed) => { + this.settingsManager.setCollapseChangelog(collapsed); + }, + onQuietStartupChange: (enabled) => { + this.settingsManager.setQuietStartup(enabled); + }, + onDoubleEscapeActionChange: (action) => { + this.settingsManager.setDoubleEscapeAction(action); + }, + onTreeFilterModeChange: (mode) => { + this.settingsManager.setTreeFilterMode(mode); + }, + onShowHardwareCursorChange: (enabled) => { + this.settingsManager.setShowHardwareCursor(enabled); + this.ui.setShowHardwareCursor(enabled); + }, + onEditorPaddingXChange: (padding) => { + this.settingsManager.setEditorPaddingX(padding); + this.defaultEditor.setPaddingX(padding); + if (this.editor !== this.defaultEditor && this.editor.setPaddingX !== undefined) { + this.editor.setPaddingX(padding); + } + }, + onAutocompleteMaxVisibleChange: (maxVisible) => { + this.settingsManager.setAutocompleteMaxVisible(maxVisible); + this.defaultEditor.setAutocompleteMaxVisible(maxVisible); + if (this.editor !== this.defaultEditor && this.editor.setAutocompleteMaxVisible !== undefined) { + this.editor.setAutocompleteMaxVisible(maxVisible); + } + }, + onClearOnShrinkChange: (enabled) => { + this.settingsManager.setClearOnShrink(enabled); + this.ui.setClearOnShrink(enabled); + }, + onCancel: () => { + done(); + this.ui.requestRender(); + }, + }, + ); + return { component: selector, focus: selector.getSettingsList() }; + }); + } + + private async handleModelCommand(searchTerm?: string): Promise { + if (!searchTerm) { + this.showModelSelector(); + return; + } + + const model = await this.findExactModelMatch(searchTerm); + if (model) { + try { + await this.session.setModel(model); + this.footer.invalidate(); + this.updateEditorBorderColor(); + this.showStatus(`Model: ${model.id}`); + this.checkDaxnutsEasterEgg(model); + } catch (error) { + 
this.showError(error instanceof Error ? error.message : String(error)); + } + return; + } + + this.showModelSelector(searchTerm); + } + + private async findExactModelMatch(searchTerm: string): Promise | undefined> { + const term = searchTerm.trim(); + if (!term) return undefined; + + let targetProvider: string | undefined; + let targetModelId = ""; + + if (term.includes("/")) { + const parts = term.split("/", 2); + targetProvider = parts[0]?.trim().toLowerCase(); + targetModelId = parts[1]?.trim().toLowerCase() ?? ""; + } else { + targetModelId = term.toLowerCase(); + } + + if (!targetModelId) return undefined; + + const models = await this.getModelCandidates(); + const exactMatches = models.filter((item) => { + const idMatch = item.id.toLowerCase() === targetModelId; + const providerMatch = !targetProvider || item.provider.toLowerCase() === targetProvider; + return idMatch && providerMatch; + }); + + return exactMatches.length === 1 ? exactMatches[0] : undefined; + } + + private async getModelCandidates(): Promise[]> { + if (this.session.scopedModels.length > 0) { + return this.session.scopedModels.map((scoped) => scoped.model); + } + + this.session.modelRegistry.refresh(); + try { + return await this.session.modelRegistry.getAvailable(); + } catch { + return []; + } + } + + /** Update the footer's available provider count from current model candidates */ + private async updateAvailableProviderCount(): Promise { + const models = await this.getModelCandidates(); + const uniqueProviders = new Set(models.map((m) => m.provider)); + this.footerDataProvider.setAvailableProviderCount(uniqueProviders.size); + } + + private showModelSelector(initialSearchInput?: string): void { + this.showSelector((done) => { + const selector = new ModelSelectorComponent( + this.ui, + this.session.model, + this.settingsManager, + this.session.modelRegistry, + this.session.scopedModels, + async (model) => { + try { + await this.session.setModel(model); + this.footer.invalidate(); + 
this.updateEditorBorderColor(); + done(); + this.showStatus(`Model: ${model.id}`); + this.checkDaxnutsEasterEgg(model); + } catch (error) { + done(); + this.showError(error instanceof Error ? error.message : String(error)); + } + }, + () => { + done(); + this.ui.requestRender(); + }, + initialSearchInput, + ); + return { component: selector, focus: selector }; + }); + } + + private async showModelsSelector(): Promise { + // Get all available models + this.session.modelRegistry.refresh(); + const allModels = this.session.modelRegistry.getAvailable(); + + if (allModels.length === 0) { + this.showStatus("No models available"); + return; + } + + // Check if session has scoped models (from previous session-only changes or CLI --models) + const sessionScopedModels = this.session.scopedModels; + const hasSessionScope = sessionScopedModels.length > 0; + + // Build enabled model IDs from session state or settings + const enabledModelIds = new Set(); + let hasFilter = false; + + if (hasSessionScope) { + // Use current session's scoped models + for (const sm of sessionScopedModels) { + enabledModelIds.add(`${sm.model.provider}/${sm.model.id}`); + } + hasFilter = true; + } else { + // Fall back to settings + const patterns = this.settingsManager.getEnabledModels(); + if (patterns !== undefined && patterns.length > 0) { + hasFilter = true; + const scopedModels = await resolveModelScope(patterns, this.session.modelRegistry); + for (const sm of scopedModels) { + enabledModelIds.add(`${sm.model.provider}/${sm.model.id}`); + } + } + } + + // Track current enabled state (session-only until persisted) + const currentEnabledIds = new Set(enabledModelIds); + let currentHasFilter = hasFilter; + + // Helper to update session's scoped models (session-only, no persist) + const updateSessionModels = async (enabledIds: Set) => { + if (enabledIds.size > 0 && enabledIds.size < allModels.length) { + const newScopedModels = await resolveModelScope(Array.from(enabledIds), 
this.session.modelRegistry); + this.session.setScopedModels( + newScopedModels.map((sm) => ({ + model: sm.model, + thinkingLevel: sm.thinkingLevel, + })), + ); + } else { + // All enabled or none enabled = no filter + this.session.setScopedModels([]); + } + await this.updateAvailableProviderCount(); + this.ui.requestRender(); + }; + + this.showSelector((done) => { + const selector = new ScopedModelsSelectorComponent( + { + allModels, + enabledModelIds: currentEnabledIds, + hasEnabledModelsFilter: currentHasFilter, + }, + { + onModelToggle: async (modelId, enabled) => { + if (enabled) { + currentEnabledIds.add(modelId); + } else { + currentEnabledIds.delete(modelId); + } + currentHasFilter = true; + await updateSessionModels(currentEnabledIds); + }, + onEnableAll: async (allModelIds) => { + currentEnabledIds.clear(); + for (const id of allModelIds) { + currentEnabledIds.add(id); + } + currentHasFilter = false; + await updateSessionModels(currentEnabledIds); + }, + onClearAll: async () => { + currentEnabledIds.clear(); + currentHasFilter = true; + await updateSessionModels(currentEnabledIds); + }, + onToggleProvider: async (_provider, modelIds, enabled) => { + for (const id of modelIds) { + if (enabled) { + currentEnabledIds.add(id); + } else { + currentEnabledIds.delete(id); + } + } + currentHasFilter = true; + await updateSessionModels(currentEnabledIds); + }, + onPersist: (enabledIds) => { + // Persist to settings + const newPatterns = + enabledIds.length === allModels.length + ? 
undefined // All enabled = clear filter + : enabledIds; + this.settingsManager.setEnabledModels(newPatterns); + this.showStatus("Model selection saved to settings"); + }, + onCancel: () => { + done(); + this.ui.requestRender(); + }, + }, + ); + return { component: selector, focus: selector }; + }); + } + + private showUserMessageSelector(): void { + const userMessages = this.session.getUserMessagesForForking(); + + if (userMessages.length === 0) { + this.showStatus("No messages to fork from"); + return; + } + + this.showSelector((done) => { + const selector = new UserMessageSelectorComponent( + userMessages.map((m) => ({ id: m.entryId, text: m.text })), + async (entryId) => { + const result = await this.session.fork(entryId); + if (result.cancelled) { + // Extension cancelled the fork + done(); + this.ui.requestRender(); + return; + } + + this.chatContainer.clear(); + this.renderInitialMessages(); + this.editor.setText(result.selectedText); + done(); + this.showStatus("Branched to new session"); + }, + () => { + done(); + this.ui.requestRender(); + }, + ); + return { component: selector, focus: selector.getMessageList() }; + }); + } + + private showTreeSelector(initialSelectedId?: string): void { + const tree = this.sessionManager.getTree(); + const realLeafId = this.sessionManager.getLeafId(); + const initialFilterMode = this.settingsManager.getTreeFilterMode(); + + if (tree.length === 0) { + this.showStatus("No entries in session"); + return; + } + + this.showSelector((done) => { + const selector = new TreeSelectorComponent( + tree, + realLeafId, + this.ui.terminal.rows, + async (entryId) => { + // Selecting the current leaf is a no-op (already there) + if (entryId === realLeafId) { + done(); + this.showStatus("Already at this point"); + return; + } + + // Ask about summarization + done(); // Close selector first + + // Loop until user makes a complete choice or cancels to tree + let wantsSummary = false; + let customInstructions: string | undefined; + + // Check 
if we should skip the prompt (user preference to always default to no summary) + if (!this.settingsManager.getBranchSummarySkipPrompt()) { + while (true) { + const summaryChoice = await this.showExtensionSelector("Summarize branch?", [ + "No summary", + "Summarize", + "Summarize with custom prompt", + ]); + + if (summaryChoice === undefined) { + // User pressed escape - re-show tree selector with same selection + this.showTreeSelector(entryId); + return; + } + + wantsSummary = summaryChoice !== "No summary"; + + if (summaryChoice === "Summarize with custom prompt") { + customInstructions = await this.showExtensionEditor("Custom summarization instructions"); + if (customInstructions === undefined) { + // User cancelled - loop back to summary selector + continue; + } + } + + // User made a complete choice + break; + } + } + + // Set up escape handler and loader if summarizing + let summaryLoader: Loader | undefined; + const originalOnEscape = this.defaultEditor.onEscape; + + if (wantsSummary) { + this.defaultEditor.onEscape = () => { + this.session.abortBranchSummary(); + }; + this.chatContainer.addChild(new Spacer(1)); + summaryLoader = new Loader( + this.ui, + (spinner) => theme.fg("accent", spinner), + (text) => theme.fg("muted", text), + `Summarizing branch... 
(${appKey(this.keybindings, "interrupt")} to cancel)`, + ); + this.statusContainer.addChild(summaryLoader); + this.ui.requestRender(); + } + + try { + const result = await this.session.navigateTree(entryId, { + summarize: wantsSummary, + customInstructions, + }); + + if (result.aborted) { + // Summarization aborted - re-show tree selector with same selection + this.showStatus("Branch summarization cancelled"); + this.showTreeSelector(entryId); + return; + } + if (result.cancelled) { + this.showStatus("Navigation cancelled"); + return; + } + + // Update UI + this.chatContainer.clear(); + this.renderInitialMessages(); + if (result.editorText && !this.editor.getText().trim()) { + this.editor.setText(result.editorText); + } + this.showStatus("Navigated to selected point"); + } catch (error) { + this.showError(error instanceof Error ? error.message : String(error)); + } finally { + if (summaryLoader) { + summaryLoader.stop(); + this.statusContainer.clear(); + } + this.defaultEditor.onEscape = originalOnEscape; + } + }, + () => { + done(); + this.ui.requestRender(); + }, + (entryId, label) => { + this.sessionManager.appendLabelChange(entryId, label); + this.ui.requestRender(); + }, + initialSelectedId, + initialFilterMode, + ); + return { component: selector, focus: selector }; + }); + } + + private showSessionSelector(): void { + this.showSelector((done) => { + const selector = new SessionSelectorComponent( + (onProgress) => + SessionManager.list(this.sessionManager.getCwd(), this.sessionManager.getSessionDir(), onProgress), + SessionManager.listAll, + async (sessionPath) => { + done(); + await this.handleResumeSession(sessionPath); + }, + () => { + done(); + this.ui.requestRender(); + }, + () => { + void this.shutdown(); + }, + () => this.ui.requestRender(), + { + renameSession: async (sessionFilePath: string, nextName: string | undefined) => { + const next = (nextName ?? 
"").trim(); + if (!next) return; + const mgr = SessionManager.open(sessionFilePath); + mgr.appendSessionInfo(next); + }, + showRenameHint: true, + keybindings: this.keybindings, + }, + + this.sessionManager.getSessionFile(), + ); + return { component: selector, focus: selector }; + }); + } + + private async handleResumeSession(sessionPath: string): Promise { + // Stop loading animation + if (this.loadingAnimation) { + this.loadingAnimation.stop(); + this.loadingAnimation = undefined; + } + this.statusContainer.clear(); + + // Clear UI state + this.pendingMessagesContainer.clear(); + this.compactionQueuedMessages = []; + this.streamingComponent = undefined; + this.streamingMessage = undefined; + this.pendingTools.clear(); + + // Switch session via AgentSession (emits extension session events) + await this.session.switchSession(sessionPath); + + // Clear and re-render the chat + this.chatContainer.clear(); + this.renderInitialMessages(); + this.showStatus("Resumed session"); + } + + private async showOAuthSelector(mode: "login" | "logout"): Promise { + if (mode === "logout") { + const providers = this.session.modelRegistry.authStorage.list(); + const loggedInProviders = providers.filter( + (p) => this.session.modelRegistry.authStorage.get(p)?.type === "oauth", + ); + if (loggedInProviders.length === 0) { + this.showStatus("No OAuth providers logged in. 
Use /login first."); + return; + } + } + + this.showSelector((done) => { + const selector = new OAuthSelectorComponent( + mode, + this.session.modelRegistry.authStorage, + async (providerId: string) => { + done(); + + if (mode === "login") { + await this.showLoginDialog(providerId); + } else { + // Logout flow + const providerInfo = this.session.modelRegistry.authStorage + .getOAuthProviders() + .find((p) => p.id === providerId); + const providerName = providerInfo?.name || providerId; + + try { + this.session.modelRegistry.authStorage.logout(providerId); + this.session.modelRegistry.refresh(); + await this.updateAvailableProviderCount(); + this.showStatus(`Logged out of ${providerName}`); + } catch (error: unknown) { + this.showError(`Logout failed: ${error instanceof Error ? error.message : String(error)}`); + } + } + }, + () => { + done(); + this.ui.requestRender(); + }, + ); + return { component: selector, focus: selector }; + }); + } + + private async showLoginDialog(providerId: string): Promise { + const providerInfo = this.session.modelRegistry.authStorage.getOAuthProviders().find((p) => p.id === providerId); + const providerName = providerInfo?.name || providerId; + + // Providers that use callback servers (can paste redirect URL) + const usesCallbackServer = providerInfo?.usesCallbackServer ?? 
false; + + // Create login dialog component + const dialog = new LoginDialogComponent(this.ui, providerId, (_success, _message) => { + // Completion handled below + }); + + // Show dialog in editor container + this.editorContainer.clear(); + this.editorContainer.addChild(dialog); + this.ui.setFocus(dialog); + this.ui.requestRender(); + + // Promise for manual code input (racing with callback server) + let manualCodeResolve: ((code: string) => void) | undefined; + let manualCodeReject: ((err: Error) => void) | undefined; + const manualCodePromise = new Promise((resolve, reject) => { + manualCodeResolve = resolve; + manualCodeReject = reject; + }); + + // Restore editor helper + const restoreEditor = () => { + this.editorContainer.clear(); + this.editorContainer.addChild(this.editor); + this.ui.setFocus(this.editor); + this.ui.requestRender(); + }; + + try { + await this.session.modelRegistry.authStorage.login(providerId as OAuthProviderId, { + onAuth: (info: { url: string; instructions?: string }) => { + dialog.showAuth(info.url, info.instructions); + + if (usesCallbackServer) { + // Show input for manual paste, racing with callback + dialog + .showManualInput("Paste redirect URL below, or complete login in browser:") + .then((value) => { + if (value && manualCodeResolve) { + manualCodeResolve(value); + manualCodeResolve = undefined; + } + }) + .catch(() => { + if (manualCodeReject) { + manualCodeReject(new Error("Login cancelled")); + manualCodeReject = undefined; + } + }); + } else if (providerId === "github-copilot") { + // GitHub Copilot polls after onAuth + dialog.showWaiting("Waiting for browser authentication..."); + } + // For Anthropic: onPrompt is called immediately after + }, + + onPrompt: async (prompt: { message: string; placeholder?: string }) => { + return dialog.showPrompt(prompt.message, prompt.placeholder); + }, + + onProgress: (message: string) => { + dialog.showProgress(message); + }, + + onManualCodeInput: () => manualCodePromise, + + signal: 
dialog.signal, + }); + + // Success + restoreEditor(); + this.session.modelRegistry.refresh(); + await this.updateAvailableProviderCount(); + this.showStatus(`Logged in to ${providerName}. Credentials saved to ${getAuthPath()}`); + } catch (error: unknown) { + restoreEditor(); + const errorMsg = error instanceof Error ? error.message : String(error); + if (errorMsg !== "Login cancelled") { + this.showError(`Failed to login to ${providerName}: ${errorMsg}`); + } + } + } + + // ========================================================================= + // Command handlers + // ========================================================================= + + private async handleReloadCommand(): Promise { + if (this.session.isStreaming) { + this.showWarning("Wait for the current response to finish before reloading."); + return; + } + if (this.session.isCompacting) { + this.showWarning("Wait for compaction to finish before reloading."); + return; + } + + this.resetExtensionUI(); + + const loader = new BorderedLoader(this.ui, theme, "Reloading extensions, skills, prompts, themes...", { + cancellable: false, + }); + const previousEditor = this.editor; + this.editorContainer.clear(); + this.editorContainer.addChild(loader); + this.ui.setFocus(loader); + this.ui.requestRender(); + + const dismissLoader = (editor: Component) => { + loader.dispose(); + this.editorContainer.clear(); + this.editorContainer.addChild(editor); + this.ui.setFocus(editor); + this.ui.requestRender(); + }; + + try { + await this.session.reload(); + setRegisteredThemes(this.session.resourceLoader.getThemes().themes); + this.hideThinkingBlock = this.settingsManager.getHideThinkingBlock(); + const themeName = this.settingsManager.getTheme(); + const themeResult = themeName ? 
setTheme(themeName, true) : { success: true }; + if (!themeResult.success) { + this.showError(`Failed to load theme "${themeName}": ${themeResult.error}\nFell back to dark theme.`); + } + const editorPaddingX = this.settingsManager.getEditorPaddingX(); + const autocompleteMaxVisible = this.settingsManager.getAutocompleteMaxVisible(); + this.defaultEditor.setPaddingX(editorPaddingX); + this.defaultEditor.setAutocompleteMaxVisible(autocompleteMaxVisible); + if (this.editor !== this.defaultEditor) { + this.editor.setPaddingX?.(editorPaddingX); + this.editor.setAutocompleteMaxVisible?.(autocompleteMaxVisible); + } + this.ui.setShowHardwareCursor(this.settingsManager.getShowHardwareCursor()); + this.ui.setClearOnShrink(this.settingsManager.getClearOnShrink()); + this.setupAutocomplete(this.fdPath); + const runner = this.session.extensionRunner; + if (runner) { + this.setupExtensionShortcuts(runner); + } + this.rebuildChatFromMessages(); + dismissLoader(this.editor as Component); + this.showLoadedResources({ + extensionPaths: runner?.getExtensionPaths() ?? [], + force: false, + showDiagnosticsWhenQuiet: true, + }); + const modelsJsonError = this.session.modelRegistry.getError(); + if (modelsJsonError) { + this.showError(`models.json error: ${modelsJsonError}`); + } + this.showStatus("Reloaded extensions, skills, prompts, themes"); + } catch (error) { + dismissLoader(previousEditor as Component); + this.showError(`Reload failed: ${error instanceof Error ? error.message : String(error)}`); + } + } + + private async handleExportCommand(text: string): Promise { + const parts = text.split(/\s+/); + const outputPath = parts.length > 1 ? parts[1] : undefined; + + try { + const filePath = await this.session.exportToHtml(outputPath); + this.showStatus(`Session exported to: ${filePath}`); + } catch (error: unknown) { + this.showError(`Failed to export session: ${error instanceof Error ? 
error.message : "Unknown error"}`); + } + } + + private async handleShareCommand(): Promise { + // Check if gh is available and logged in + try { + const authResult = spawnSync("gh", ["auth", "status"], { encoding: "utf-8" }); + if (authResult.status !== 0) { + this.showError("GitHub CLI is not logged in. Run 'gh auth login' first."); + return; + } + } catch { + this.showError("GitHub CLI (gh) is not installed. Install it from https://cli.github.com/"); + return; + } + + // Export to a temp file + const tmpFile = path.join(os.tmpdir(), "session.html"); + try { + await this.session.exportToHtml(tmpFile); + } catch (error: unknown) { + this.showError(`Failed to export session: ${error instanceof Error ? error.message : "Unknown error"}`); + return; + } + + // Show cancellable loader, replacing the editor + const loader = new BorderedLoader(this.ui, theme, "Creating gist..."); + this.editorContainer.clear(); + this.editorContainer.addChild(loader); + this.ui.setFocus(loader); + this.ui.requestRender(); + + const restoreEditor = () => { + loader.dispose(); + this.editorContainer.clear(); + this.editorContainer.addChild(this.editor); + this.ui.setFocus(this.editor); + try { + fs.unlinkSync(tmpFile); + } catch { + // Ignore cleanup errors + } + }; + + // Create a secret gist asynchronously + let proc: ReturnType | null = null; + + loader.onAbort = () => { + proc?.kill(); + restoreEditor(); + this.showStatus("Share cancelled"); + }; + + try { + const result = await new Promise<{ stdout: string; stderr: string; code: number | null }>((resolve) => { + proc = spawn("gh", ["gist", "create", "--public=false", tmpFile]); + let stdout = ""; + let stderr = ""; + proc.stdout?.on("data", (data) => { + stdout += data.toString(); + }); + proc.stderr?.on("data", (data) => { + stderr += data.toString(); + }); + proc.on("close", (code) => resolve({ stdout, stderr, code })); + }); + + if (loader.signal.aborted) return; + + restoreEditor(); + + if (result.code !== 0) { + const errorMsg = 
result.stderr?.trim() || "Unknown error"; + this.showError(`Failed to create gist: ${errorMsg}`); + return; + } + + // Extract gist ID from the URL returned by gh + // gh returns something like: https://gist.github.com/username/GIST_ID + const gistUrl = result.stdout?.trim(); + const gistId = gistUrl?.split("/").pop(); + if (!gistId) { + this.showError("Failed to parse gist ID from gh output"); + return; + } + + // Create the preview URL + const previewUrl = getShareViewerUrl(gistId); + this.showStatus(`Share URL: ${previewUrl}\nGist: ${gistUrl}`); + } catch (error: unknown) { + if (!loader.signal.aborted) { + restoreEditor(); + this.showError(`Failed to create gist: ${error instanceof Error ? error.message : "Unknown error"}`); + } + } + } + + private handleCopyCommand(): void { + const text = this.session.getLastAssistantText(); + if (!text) { + this.showError("No agent messages to copy yet."); + return; + } + + try { + copyToClipboard(text); + this.showStatus("Copied last agent message to clipboard"); + } catch (error) { + this.showError(error instanceof Error ? 
error.message : String(error)); + } + } + + private handleNameCommand(text: string): void { + const name = text.replace(/^\/name\s*/, "").trim(); + if (!name) { + const currentName = this.sessionManager.getSessionName(); + if (currentName) { + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new Text(theme.fg("dim", `Session name: ${currentName}`), 1, 0)); + } else { + this.showWarning("Usage: /name "); + } + this.ui.requestRender(); + return; + } + + this.sessionManager.appendSessionInfo(name); + this.updateTerminalTitle(); + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new Text(theme.fg("dim", `Session name set: ${name}`), 1, 0)); + this.ui.requestRender(); + } + + private handleSessionCommand(): void { + const stats = this.session.getSessionStats(); + const sessionName = this.sessionManager.getSessionName(); + + let info = `${theme.bold("Session Info")}\n\n`; + if (sessionName) { + info += `${theme.fg("dim", "Name:")} ${sessionName}\n`; + } + info += `${theme.fg("dim", "File:")} ${stats.sessionFile ?? 
"In-memory"}\n`; + info += `${theme.fg("dim", "ID:")} ${stats.sessionId}\n\n`; + info += `${theme.bold("Messages")}\n`; + info += `${theme.fg("dim", "User:")} ${stats.userMessages}\n`; + info += `${theme.fg("dim", "Assistant:")} ${stats.assistantMessages}\n`; + info += `${theme.fg("dim", "Tool Calls:")} ${stats.toolCalls}\n`; + info += `${theme.fg("dim", "Tool Results:")} ${stats.toolResults}\n`; + info += `${theme.fg("dim", "Total:")} ${stats.totalMessages}\n\n`; + info += `${theme.bold("Tokens")}\n`; + info += `${theme.fg("dim", "Input:")} ${stats.tokens.input.toLocaleString()}\n`; + info += `${theme.fg("dim", "Output:")} ${stats.tokens.output.toLocaleString()}\n`; + if (stats.tokens.cacheRead > 0) { + info += `${theme.fg("dim", "Cache Read:")} ${stats.tokens.cacheRead.toLocaleString()}\n`; + } + if (stats.tokens.cacheWrite > 0) { + info += `${theme.fg("dim", "Cache Write:")} ${stats.tokens.cacheWrite.toLocaleString()}\n`; + } + info += `${theme.fg("dim", "Total:")} ${stats.tokens.total.toLocaleString()}\n`; + + if (stats.cost > 0) { + info += `\n${theme.bold("Cost")}\n`; + info += `${theme.fg("dim", "Total:")} ${stats.cost.toFixed(4)}`; + } + + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new Text(info, 1, 0)); + this.ui.requestRender(); + } + + private handleChangelogCommand(): void { + const changelogPath = getChangelogPath(); + const allEntries = parseChangelog(changelogPath); + + const changelogMarkdown = + allEntries.length > 0 + ? 
allEntries + .reverse() + .map((e) => e.content) + .join("\n\n") + : "No changelog entries found."; + + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new DynamicBorder()); + this.chatContainer.addChild(new Text(theme.bold(theme.fg("accent", "What's New")), 1, 0)); + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new Markdown(changelogMarkdown, 1, 1, this.getMarkdownThemeWithSettings())); + this.chatContainer.addChild(new DynamicBorder()); + this.ui.requestRender(); + } + + /** + * Capitalize keybinding for display (e.g., "ctrl+c" -> "Ctrl+C"). + */ + private capitalizeKey(key: string): string { + return key + .split("/") + .map((k) => + k + .split("+") + .map((part) => part.charAt(0).toUpperCase() + part.slice(1)) + .join("+"), + ) + .join("/"); + } + + /** + * Get capitalized display string for an app keybinding action. + */ + private getAppKeyDisplay(action: AppAction): string { + return this.capitalizeKey(appKey(this.keybindings, action)); + } + + /** + * Get capitalized display string for an editor keybinding action. 
+ */ + private getEditorKeyDisplay(action: EditorAction): string { + return this.capitalizeKey(editorKey(action)); + } + + private handleHotkeysCommand(): void { + // Navigation keybindings + const cursorWordLeft = this.getEditorKeyDisplay("cursorWordLeft"); + const cursorWordRight = this.getEditorKeyDisplay("cursorWordRight"); + const cursorLineStart = this.getEditorKeyDisplay("cursorLineStart"); + const cursorLineEnd = this.getEditorKeyDisplay("cursorLineEnd"); + const jumpForward = this.getEditorKeyDisplay("jumpForward"); + const jumpBackward = this.getEditorKeyDisplay("jumpBackward"); + const pageUp = this.getEditorKeyDisplay("pageUp"); + const pageDown = this.getEditorKeyDisplay("pageDown"); + + // Editing keybindings + const submit = this.getEditorKeyDisplay("submit"); + const newLine = this.getEditorKeyDisplay("newLine"); + const deleteWordBackward = this.getEditorKeyDisplay("deleteWordBackward"); + const deleteWordForward = this.getEditorKeyDisplay("deleteWordForward"); + const deleteToLineStart = this.getEditorKeyDisplay("deleteToLineStart"); + const deleteToLineEnd = this.getEditorKeyDisplay("deleteToLineEnd"); + const yank = this.getEditorKeyDisplay("yank"); + const yankPop = this.getEditorKeyDisplay("yankPop"); + const undo = this.getEditorKeyDisplay("undo"); + const tab = this.getEditorKeyDisplay("tab"); + + // App keybindings + const interrupt = this.getAppKeyDisplay("interrupt"); + const clear = this.getAppKeyDisplay("clear"); + const exit = this.getAppKeyDisplay("exit"); + const suspend = this.getAppKeyDisplay("suspend"); + const cycleThinkingLevel = this.getAppKeyDisplay("cycleThinkingLevel"); + const cycleModelForward = this.getAppKeyDisplay("cycleModelForward"); + const selectModel = this.getAppKeyDisplay("selectModel"); + const expandTools = this.getAppKeyDisplay("expandTools"); + const toggleThinking = this.getAppKeyDisplay("toggleThinking"); + const externalEditor = this.getAppKeyDisplay("externalEditor"); + const followUp = 
this.getAppKeyDisplay("followUp"); + const dequeue = this.getAppKeyDisplay("dequeue"); + + let hotkeys = ` +**Navigation** +| Key | Action | +|-----|--------| +| \`Arrow keys\` | Move cursor / browse history (Up when empty) | +| \`${cursorWordLeft}\` / \`${cursorWordRight}\` | Move by word | +| \`${cursorLineStart}\` | Start of line | +| \`${cursorLineEnd}\` | End of line | +| \`${jumpForward}\` | Jump forward to character | +| \`${jumpBackward}\` | Jump backward to character | +| \`${pageUp}\` / \`${pageDown}\` | Scroll by page | + +**Editing** +| Key | Action | +|-----|--------| +| \`${submit}\` | Send message | +| \`${newLine}\` | New line${process.platform === "win32" ? " (Ctrl+Enter on Windows Terminal)" : ""} | +| \`${deleteWordBackward}\` | Delete word backwards | +| \`${deleteWordForward}\` | Delete word forwards | +| \`${deleteToLineStart}\` | Delete to start of line | +| \`${deleteToLineEnd}\` | Delete to end of line | +| \`${yank}\` | Paste the most-recently-deleted text | +| \`${yankPop}\` | Cycle through the deleted text after pasting | +| \`${undo}\` | Undo | + +**Other** +| Key | Action | +|-----|--------| +| \`${tab}\` | Path completion / accept autocomplete | +| \`${interrupt}\` | Cancel autocomplete / abort streaming | +| \`${clear}\` | Clear editor (first) / exit (second) | +| \`${exit}\` | Exit (when editor is empty) | +| \`${suspend}\` | Suspend to background | +| \`${cycleThinkingLevel}\` | Cycle thinking level | +| \`${cycleModelForward}\` | Cycle models | +| \`${selectModel}\` | Open model selector | +| \`${expandTools}\` | Toggle tool output expansion | +| \`${toggleThinking}\` | Toggle thinking block visibility | +| \`${externalEditor}\` | Edit message in external editor | +| \`${followUp}\` | Queue follow-up message | +| \`${dequeue}\` | Restore queued messages | +| \`Ctrl+V\` | Paste image from clipboard | +| \`/\` | Slash commands | +| \`!\` | Run bash command | +| \`!!\` | Run bash command (excluded from context) | +`; + + // Add 
extension-registered shortcuts + const extensionRunner = this.session.extensionRunner; + if (extensionRunner) { + const shortcuts = extensionRunner.getShortcuts(this.keybindings.getEffectiveConfig()); + if (shortcuts.size > 0) { + hotkeys += ` +**Extensions** +| Key | Action | +|-----|--------| +`; + for (const [key, shortcut] of shortcuts) { + const description = shortcut.description ?? shortcut.extensionPath; + const keyDisplay = key.replace(/\b\w/g, (c) => c.toUpperCase()); + hotkeys += `| \`${keyDisplay}\` | ${description} |\n`; + } + } + } + + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new DynamicBorder()); + this.chatContainer.addChild(new Text(theme.bold(theme.fg("accent", "Keyboard Shortcuts")), 1, 0)); + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new Markdown(hotkeys.trim(), 1, 1, this.getMarkdownThemeWithSettings())); + this.chatContainer.addChild(new DynamicBorder()); + this.ui.requestRender(); + } + + private async handleClearCommand(): Promise { + // Stop loading animation + if (this.loadingAnimation) { + this.loadingAnimation.stop(); + this.loadingAnimation = undefined; + } + this.statusContainer.clear(); + + // New session via session (emits extension session events) + await this.session.newSession(); + + // Clear UI state + this.headerContainer.clear(); + this.chatContainer.clear(); + this.pendingMessagesContainer.clear(); + this.compactionQueuedMessages = []; + this.streamingComponent = undefined; + this.streamingMessage = undefined; + this.pendingTools.clear(); + + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new Text(`${theme.fg("accent", "✓ New session started")}`, 1, 1)); + this.ui.requestRender(); + } + + private handleDebugCommand(): void { + const width = this.ui.terminal.columns; + const height = this.ui.terminal.rows; + const allLines = this.ui.render(width); + + const debugLogPath = getDebugLogPath(); + const debugData = [ + `Debug output at ${new 
Date().toISOString()}`, + `Terminal: ${width}x${height}`, + `Total lines: ${allLines.length}`, + "", + "=== All rendered lines with visible widths ===", + ...allLines.map((line, idx) => { + const vw = visibleWidth(line); + const escaped = JSON.stringify(line); + return `[${idx}] (w=${vw}) ${escaped}`; + }), + "", + "=== Agent messages (JSONL) ===", + ...this.session.messages.map((msg) => JSON.stringify(msg)), + "", + ].join("\n"); + + fs.mkdirSync(path.dirname(debugLogPath), { recursive: true }); + fs.writeFileSync(debugLogPath, debugData); + + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild( + new Text(`${theme.fg("accent", "✓ Debug log written")}\n${theme.fg("muted", debugLogPath)}`, 1, 1), + ); + this.ui.requestRender(); + } + + private handleArminSaysHi(): void { + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new ArminComponent(this.ui)); + this.ui.requestRender(); + } + + private handleDaxnuts(): void { + this.chatContainer.addChild(new Spacer(1)); + this.chatContainer.addChild(new DaxnutsComponent(this.ui)); + this.ui.requestRender(); + } + + private checkDaxnutsEasterEgg(model: { provider: string; id: string }): void { + if (model.provider === "opencode" && model.id.toLowerCase().includes("kimi-k2.5")) { + this.handleDaxnuts(); + } + } + + private async handleBashCommand(command: string, excludeFromContext = false): Promise { + const extensionRunner = this.session.extensionRunner; + + // Emit user_bash event to let extensions intercept + const eventResult = extensionRunner + ? 
await extensionRunner.emitUserBash({ + type: "user_bash", + command, + excludeFromContext, + cwd: process.cwd(), + }) + : undefined; + + // If extension returned a full result, use it directly + if (eventResult?.result) { + const result = eventResult.result; + + // Create UI component for display + this.bashComponent = new BashExecutionComponent(command, this.ui, excludeFromContext); + if (this.session.isStreaming) { + this.pendingMessagesContainer.addChild(this.bashComponent); + this.pendingBashComponents.push(this.bashComponent); + } else { + this.chatContainer.addChild(this.bashComponent); + } + + // Show output and complete + if (result.output) { + this.bashComponent.appendOutput(result.output); + } + this.bashComponent.setComplete( + result.exitCode, + result.cancelled, + result.truncated ? ({ truncated: true, content: result.output } as TruncationResult) : undefined, + result.fullOutputPath, + ); + + // Record the result in session + this.session.recordBashResult(command, result, { excludeFromContext }); + this.bashComponent = undefined; + this.ui.requestRender(); + return; + } + + // Normal execution path (possibly with custom operations) + const isDeferred = this.session.isStreaming; + this.bashComponent = new BashExecutionComponent(command, this.ui, excludeFromContext); + + if (isDeferred) { + // Show in pending area when agent is streaming + this.pendingMessagesContainer.addChild(this.bashComponent); + this.pendingBashComponents.push(this.bashComponent); + } else { + // Show in chat immediately when agent is idle + this.chatContainer.addChild(this.bashComponent); + } + this.ui.requestRender(); + + try { + const result = await this.session.executeBash( + command, + (chunk) => { + if (this.bashComponent) { + this.bashComponent.appendOutput(chunk); + this.ui.requestRender(); + } + }, + { excludeFromContext, operations: eventResult?.operations }, + ); + + if (this.bashComponent) { + this.bashComponent.setComplete( + result.exitCode, + result.cancelled, + 
result.truncated ? ({ truncated: true, content: result.output } as TruncationResult) : undefined, + result.fullOutputPath, + ); + } + } catch (error) { + if (this.bashComponent) { + this.bashComponent.setComplete(undefined, false); + } + this.showError(`Bash command failed: ${error instanceof Error ? error.message : "Unknown error"}`); + } + + this.bashComponent = undefined; + this.ui.requestRender(); + } + + private async handleCompactCommand(customInstructions?: string): Promise { + const entries = this.sessionManager.getEntries(); + const messageCount = entries.filter((e) => e.type === "message").length; + + if (messageCount < 2) { + this.showWarning("Nothing to compact (no messages yet)"); + return; + } + + await this.executeCompaction(customInstructions, false); + } + + private async executeCompaction(customInstructions?: string, isAuto = false): Promise { + // Stop loading animation + if (this.loadingAnimation) { + this.loadingAnimation.stop(); + this.loadingAnimation = undefined; + } + this.statusContainer.clear(); + + // Set up escape handler during compaction + const originalOnEscape = this.defaultEditor.onEscape; + this.defaultEditor.onEscape = () => { + this.session.abortCompaction(); + }; + + // Show compacting status + this.chatContainer.addChild(new Spacer(1)); + const cancelHint = `(${appKey(this.keybindings, "interrupt")} to cancel)`; + const label = isAuto ? `Auto-compacting context... ${cancelHint}` : `Compacting context... 
${cancelHint}`; + const compactingLoader = new Loader( + this.ui, + (spinner) => theme.fg("accent", spinner), + (text) => theme.fg("muted", text), + label, + ); + this.statusContainer.addChild(compactingLoader); + this.ui.requestRender(); + + let result: CompactionResult | undefined; + + try { + result = await this.session.compact(customInstructions); + + // Rebuild UI + this.rebuildChatFromMessages(); + + // Add compaction component at bottom so user sees it without scrolling + const msg = createCompactionSummaryMessage(result.summary, result.tokensBefore, new Date().toISOString()); + this.addMessageToChat(msg); + + this.footer.invalidate(); + } catch (error) { + const message = error instanceof Error ? error.message : String(error); + if (message === "Compaction cancelled" || (error instanceof Error && error.name === "AbortError")) { + this.showError("Compaction cancelled"); + } else { + this.showError(`Compaction failed: ${message}`); + } + } finally { + compactingLoader.stop(); + this.statusContainer.clear(); + this.defaultEditor.onEscape = originalOnEscape; + } + void this.flushCompactionQueue({ willRetry: false }); + return result; + } + + stop(): void { + if (this.loadingAnimation) { + this.loadingAnimation.stop(); + this.loadingAnimation = undefined; + } + this.clearExtensionTerminalInputListeners(); + this.footer.dispose(); + this.footerDataProvider.dispose(); + if (this.unsubscribe) { + this.unsubscribe(); + } + if (this.isInitialized) { + this.ui.stop(); + this.isInitialized = false; + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/theme/dark.json b/packages/pi-coding-agent/src/modes/interactive/theme/dark.json new file mode 100644 index 000000000..0ca2af510 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/theme/dark.json @@ -0,0 +1,85 @@ +{ + "$schema": "https://raw.githubusercontent.com/badlogic/pi-mono/main/packages/coding-agent/src/modes/interactive/theme/theme-schema.json", + "name": "dark", + "vars": { + 
"cyan": "#00d7ff", + "blue": "#5f87ff", + "green": "#b5bd68", + "red": "#cc6666", + "yellow": "#ffff00", + "gray": "#808080", + "dimGray": "#666666", + "darkGray": "#505050", + "accent": "#8abeb7", + "selectedBg": "#3a3a4a", + "userMsgBg": "#343541", + "toolPendingBg": "#282832", + "toolSuccessBg": "#283228", + "toolErrorBg": "#3c2828", + "customMsgBg": "#2d2838" + }, + "colors": { + "accent": "accent", + "border": "blue", + "borderAccent": "cyan", + "borderMuted": "darkGray", + "success": "green", + "error": "red", + "warning": "yellow", + "muted": "gray", + "dim": "dimGray", + "text": "", + "thinkingText": "gray", + + "selectedBg": "selectedBg", + "userMessageBg": "userMsgBg", + "userMessageText": "", + "customMessageBg": "customMsgBg", + "customMessageText": "", + "customMessageLabel": "#9575cd", + "toolPendingBg": "toolPendingBg", + "toolSuccessBg": "toolSuccessBg", + "toolErrorBg": "toolErrorBg", + "toolTitle": "", + "toolOutput": "gray", + + "mdHeading": "#f0c674", + "mdLink": "#81a2be", + "mdLinkUrl": "dimGray", + "mdCode": "accent", + "mdCodeBlock": "green", + "mdCodeBlockBorder": "gray", + "mdQuote": "gray", + "mdQuoteBorder": "gray", + "mdHr": "gray", + "mdListBullet": "accent", + + "toolDiffAdded": "green", + "toolDiffRemoved": "red", + "toolDiffContext": "gray", + + "syntaxComment": "#6A9955", + "syntaxKeyword": "#569CD6", + "syntaxFunction": "#DCDCAA", + "syntaxVariable": "#9CDCFE", + "syntaxString": "#CE9178", + "syntaxNumber": "#B5CEA8", + "syntaxType": "#4EC9B0", + "syntaxOperator": "#D4D4D4", + "syntaxPunctuation": "#D4D4D4", + + "thinkingOff": "darkGray", + "thinkingMinimal": "#6e6e6e", + "thinkingLow": "#5f87af", + "thinkingMedium": "#81a2be", + "thinkingHigh": "#b294bb", + "thinkingXhigh": "#d183e8", + + "bashMode": "green" + }, + "export": { + "pageBg": "#18181e", + "cardBg": "#1e1e24", + "infoBg": "#3c3728" + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/theme/light.json 
b/packages/pi-coding-agent/src/modes/interactive/theme/light.json new file mode 100644 index 000000000..58ab93e98 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/theme/light.json @@ -0,0 +1,84 @@ +{ + "$schema": "https://raw.githubusercontent.com/badlogic/pi-mono/main/packages/coding-agent/src/modes/interactive/theme/theme-schema.json", + "name": "light", + "vars": { + "teal": "#5a8080", + "blue": "#547da7", + "green": "#588458", + "red": "#aa5555", + "yellow": "#9a7326", + "mediumGray": "#6c6c6c", + "dimGray": "#767676", + "lightGray": "#b0b0b0", + "selectedBg": "#d0d0e0", + "userMsgBg": "#e8e8e8", + "toolPendingBg": "#e8e8f0", + "toolSuccessBg": "#e8f0e8", + "toolErrorBg": "#f0e8e8", + "customMsgBg": "#ede7f6" + }, + "colors": { + "accent": "teal", + "border": "blue", + "borderAccent": "teal", + "borderMuted": "lightGray", + "success": "green", + "error": "red", + "warning": "yellow", + "muted": "mediumGray", + "dim": "dimGray", + "text": "", + "thinkingText": "mediumGray", + + "selectedBg": "selectedBg", + "userMessageBg": "userMsgBg", + "userMessageText": "", + "customMessageBg": "customMsgBg", + "customMessageText": "", + "customMessageLabel": "#7e57c2", + "toolPendingBg": "toolPendingBg", + "toolSuccessBg": "toolSuccessBg", + "toolErrorBg": "toolErrorBg", + "toolTitle": "", + "toolOutput": "mediumGray", + + "mdHeading": "yellow", + "mdLink": "blue", + "mdLinkUrl": "dimGray", + "mdCode": "teal", + "mdCodeBlock": "green", + "mdCodeBlockBorder": "mediumGray", + "mdQuote": "mediumGray", + "mdQuoteBorder": "mediumGray", + "mdHr": "mediumGray", + "mdListBullet": "green", + + "toolDiffAdded": "green", + "toolDiffRemoved": "red", + "toolDiffContext": "mediumGray", + + "syntaxComment": "#008000", + "syntaxKeyword": "#0000FF", + "syntaxFunction": "#795E26", + "syntaxVariable": "#001080", + "syntaxString": "#A31515", + "syntaxNumber": "#098658", + "syntaxType": "#267F99", + "syntaxOperator": "#000000", + "syntaxPunctuation": "#000000", + + 
"thinkingOff": "lightGray", + "thinkingMinimal": "#767676", + "thinkingLow": "blue", + "thinkingMedium": "teal", + "thinkingHigh": "#875f87", + "thinkingXhigh": "#8b008b", + + "bashMode": "green" + }, + "export": { + "pageBg": "#f8f8f8", + "cardBg": "#ffffff", + "infoBg": "#fffae6" + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/theme/theme-schema.json b/packages/pi-coding-agent/src/modes/interactive/theme/theme-schema.json new file mode 100644 index 000000000..7bc495da0 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/theme/theme-schema.json @@ -0,0 +1,335 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "title": "Pi Coding Agent Theme", + "description": "Theme schema for Pi coding agent", + "type": "object", + "required": ["name", "colors"], + "properties": { + "$schema": { + "type": "string", + "description": "JSON schema reference" + }, + "name": { + "type": "string", + "description": "Theme name" + }, + "vars": { + "type": "object", + "description": "Reusable color variables", + "additionalProperties": { + "oneOf": [ + { + "type": "string", + "description": "Hex color (#RRGGBB), variable reference, or empty string for terminal default" + }, + { + "type": "integer", + "minimum": 0, + "maximum": 255, + "description": "256-color palette index (0-255)" + } + ] + } + }, + "colors": { + "type": "object", + "description": "Theme color definitions (all required)", + "required": [ + "accent", + "border", + "borderAccent", + "borderMuted", + "success", + "error", + "warning", + "muted", + "dim", + "text", + "thinkingText", + "selectedBg", + "userMessageBg", + "userMessageText", + "customMessageBg", + "customMessageText", + "customMessageLabel", + "toolPendingBg", + "toolSuccessBg", + "toolErrorBg", + "toolTitle", + "toolOutput", + "mdHeading", + "mdLink", + "mdLinkUrl", + "mdCode", + "mdCodeBlock", + "mdCodeBlockBorder", + "mdQuote", + "mdQuoteBorder", + "mdHr", + "mdListBullet", + "toolDiffAdded", + "toolDiffRemoved", 
+ "toolDiffContext", + "syntaxComment", + "syntaxKeyword", + "syntaxFunction", + "syntaxVariable", + "syntaxString", + "syntaxNumber", + "syntaxType", + "syntaxOperator", + "syntaxPunctuation", + "thinkingOff", + "thinkingMinimal", + "thinkingLow", + "thinkingMedium", + "thinkingHigh", + "thinkingXhigh", + "bashMode" + ], + "properties": { + "accent": { + "$ref": "#/$defs/colorValue", + "description": "Primary accent color (logo, selected items, cursor)" + }, + "border": { + "$ref": "#/$defs/colorValue", + "description": "Normal borders" + }, + "borderAccent": { + "$ref": "#/$defs/colorValue", + "description": "Highlighted borders" + }, + "borderMuted": { + "$ref": "#/$defs/colorValue", + "description": "Subtle borders" + }, + "success": { + "$ref": "#/$defs/colorValue", + "description": "Success states" + }, + "error": { + "$ref": "#/$defs/colorValue", + "description": "Error states" + }, + "warning": { + "$ref": "#/$defs/colorValue", + "description": "Warning states" + }, + "muted": { + "$ref": "#/$defs/colorValue", + "description": "Secondary/dimmed text" + }, + "dim": { + "$ref": "#/$defs/colorValue", + "description": "Very dimmed text (more subtle than muted)" + }, + "text": { + "$ref": "#/$defs/colorValue", + "description": "Default text color (usually empty string)" + }, + "thinkingText": { + "$ref": "#/$defs/colorValue", + "description": "Thinking block text color" + }, + "selectedBg": { + "$ref": "#/$defs/colorValue", + "description": "Selected item background" + }, + "userMessageBg": { + "$ref": "#/$defs/colorValue", + "description": "User message background" + }, + "userMessageText": { + "$ref": "#/$defs/colorValue", + "description": "User message text color" + }, + "customMessageBg": { + "$ref": "#/$defs/colorValue", + "description": "Custom message background (hook-injected messages)" + }, + "customMessageText": { + "$ref": "#/$defs/colorValue", + "description": "Custom message text color" + }, + "customMessageLabel": { + "$ref": "#/$defs/colorValue", 
+ "description": "Custom message type label color" + }, + "toolPendingBg": { + "$ref": "#/$defs/colorValue", + "description": "Tool execution box (pending state)" + }, + "toolSuccessBg": { + "$ref": "#/$defs/colorValue", + "description": "Tool execution box (success state)" + }, + "toolErrorBg": { + "$ref": "#/$defs/colorValue", + "description": "Tool execution box (error state)" + }, + "toolTitle": { + "$ref": "#/$defs/colorValue", + "description": "Tool execution box title color" + }, + "toolOutput": { + "$ref": "#/$defs/colorValue", + "description": "Tool execution box output text color" + }, + "mdHeading": { + "$ref": "#/$defs/colorValue", + "description": "Markdown heading text" + }, + "mdLink": { + "$ref": "#/$defs/colorValue", + "description": "Markdown link text" + }, + "mdLinkUrl": { + "$ref": "#/$defs/colorValue", + "description": "Markdown link URL" + }, + "mdCode": { + "$ref": "#/$defs/colorValue", + "description": "Markdown inline code" + }, + "mdCodeBlock": { + "$ref": "#/$defs/colorValue", + "description": "Markdown code block content" + }, + "mdCodeBlockBorder": { + "$ref": "#/$defs/colorValue", + "description": "Markdown code block fences" + }, + "mdQuote": { + "$ref": "#/$defs/colorValue", + "description": "Markdown blockquote text" + }, + "mdQuoteBorder": { + "$ref": "#/$defs/colorValue", + "description": "Markdown blockquote border" + }, + "mdHr": { + "$ref": "#/$defs/colorValue", + "description": "Markdown horizontal rule" + }, + "mdListBullet": { + "$ref": "#/$defs/colorValue", + "description": "Markdown list bullets/numbers" + }, + "toolDiffAdded": { + "$ref": "#/$defs/colorValue", + "description": "Added lines in tool diffs" + }, + "toolDiffRemoved": { + "$ref": "#/$defs/colorValue", + "description": "Removed lines in tool diffs" + }, + "toolDiffContext": { + "$ref": "#/$defs/colorValue", + "description": "Context lines in tool diffs" + }, + "syntaxComment": { + "$ref": "#/$defs/colorValue", + "description": "Syntax highlighting: comments" + 
}, + "syntaxKeyword": { + "$ref": "#/$defs/colorValue", + "description": "Syntax highlighting: keywords" + }, + "syntaxFunction": { + "$ref": "#/$defs/colorValue", + "description": "Syntax highlighting: function names" + }, + "syntaxVariable": { + "$ref": "#/$defs/colorValue", + "description": "Syntax highlighting: variable names" + }, + "syntaxString": { + "$ref": "#/$defs/colorValue", + "description": "Syntax highlighting: string literals" + }, + "syntaxNumber": { + "$ref": "#/$defs/colorValue", + "description": "Syntax highlighting: number literals" + }, + "syntaxType": { + "$ref": "#/$defs/colorValue", + "description": "Syntax highlighting: type names" + }, + "syntaxOperator": { + "$ref": "#/$defs/colorValue", + "description": "Syntax highlighting: operators" + }, + "syntaxPunctuation": { + "$ref": "#/$defs/colorValue", + "description": "Syntax highlighting: punctuation" + }, + "thinkingOff": { + "$ref": "#/$defs/colorValue", + "description": "Thinking level border: off" + }, + "thinkingMinimal": { + "$ref": "#/$defs/colorValue", + "description": "Thinking level border: minimal" + }, + "thinkingLow": { + "$ref": "#/$defs/colorValue", + "description": "Thinking level border: low" + }, + "thinkingMedium": { + "$ref": "#/$defs/colorValue", + "description": "Thinking level border: medium" + }, + "thinkingHigh": { + "$ref": "#/$defs/colorValue", + "description": "Thinking level border: high" + }, + "thinkingXhigh": { + "$ref": "#/$defs/colorValue", + "description": "Thinking level border: xhigh (OpenAI codex-max only)" + }, + "bashMode": { + "$ref": "#/$defs/colorValue", + "description": "Editor border color in bash mode" + } + }, + "additionalProperties": false + }, + "export": { + "type": "object", + "description": "Optional colors for HTML export (defaults derived from userMessageBg if not specified)", + "properties": { + "pageBg": { + "$ref": "#/$defs/colorValue", + "description": "Page background color" + }, + "cardBg": { + "$ref": "#/$defs/colorValue", + 
"description": "Card/container background color" + }, + "infoBg": { + "$ref": "#/$defs/colorValue", + "description": "Info sections background (system prompt, notices)" + } + }, + "additionalProperties": false + } + }, + "additionalProperties": false, + "$defs": { + "colorValue": { + "oneOf": [ + { + "type": "string", + "description": "Hex color (#RRGGBB), variable reference, or empty string for terminal default" + }, + { + "type": "integer", + "minimum": 0, + "maximum": 255, + "description": "256-color palette index (0-255)" + } + ] + } + } +} diff --git a/packages/pi-coding-agent/src/modes/interactive/theme/theme.ts b/packages/pi-coding-agent/src/modes/interactive/theme/theme.ts new file mode 100644 index 000000000..df66ee623 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/interactive/theme/theme.ts @@ -0,0 +1,1105 @@ +import * as fs from "node:fs"; +import * as path from "node:path"; +import type { EditorTheme, MarkdownTheme, SelectListTheme } from "@gsd/pi-tui"; +import { type Static, Type } from "@sinclair/typebox"; +import { TypeCompiler } from "@sinclair/typebox/compiler"; +import chalk from "chalk"; +import { highlight, supportsLanguage } from "cli-highlight"; +import { getCustomThemesDir, getThemesDir } from "../../../config.js"; + +// ============================================================================ +// Types & Schema +// ============================================================================ + +const ColorValueSchema = Type.Union([ + Type.String(), // hex "#ff0000", var ref "primary", or empty "" + Type.Integer({ minimum: 0, maximum: 255 }), // 256-color index +]); + +type ColorValue = Static; + +const ThemeJsonSchema = Type.Object({ + $schema: Type.Optional(Type.String()), + name: Type.String(), + vars: Type.Optional(Type.Record(Type.String(), ColorValueSchema)), + colors: Type.Object({ + // Core UI (10 colors) + accent: ColorValueSchema, + border: ColorValueSchema, + borderAccent: ColorValueSchema, + borderMuted: 
ColorValueSchema, + success: ColorValueSchema, + error: ColorValueSchema, + warning: ColorValueSchema, + muted: ColorValueSchema, + dim: ColorValueSchema, + text: ColorValueSchema, + thinkingText: ColorValueSchema, + // Backgrounds & Content Text (11 colors) + selectedBg: ColorValueSchema, + userMessageBg: ColorValueSchema, + userMessageText: ColorValueSchema, + customMessageBg: ColorValueSchema, + customMessageText: ColorValueSchema, + customMessageLabel: ColorValueSchema, + toolPendingBg: ColorValueSchema, + toolSuccessBg: ColorValueSchema, + toolErrorBg: ColorValueSchema, + toolTitle: ColorValueSchema, + toolOutput: ColorValueSchema, + // Markdown (10 colors) + mdHeading: ColorValueSchema, + mdLink: ColorValueSchema, + mdLinkUrl: ColorValueSchema, + mdCode: ColorValueSchema, + mdCodeBlock: ColorValueSchema, + mdCodeBlockBorder: ColorValueSchema, + mdQuote: ColorValueSchema, + mdQuoteBorder: ColorValueSchema, + mdHr: ColorValueSchema, + mdListBullet: ColorValueSchema, + // Tool Diffs (3 colors) + toolDiffAdded: ColorValueSchema, + toolDiffRemoved: ColorValueSchema, + toolDiffContext: ColorValueSchema, + // Syntax Highlighting (9 colors) + syntaxComment: ColorValueSchema, + syntaxKeyword: ColorValueSchema, + syntaxFunction: ColorValueSchema, + syntaxVariable: ColorValueSchema, + syntaxString: ColorValueSchema, + syntaxNumber: ColorValueSchema, + syntaxType: ColorValueSchema, + syntaxOperator: ColorValueSchema, + syntaxPunctuation: ColorValueSchema, + // Thinking Level Borders (6 colors) + thinkingOff: ColorValueSchema, + thinkingMinimal: ColorValueSchema, + thinkingLow: ColorValueSchema, + thinkingMedium: ColorValueSchema, + thinkingHigh: ColorValueSchema, + thinkingXhigh: ColorValueSchema, + // Bash Mode (1 color) + bashMode: ColorValueSchema, + }), + export: Type.Optional( + Type.Object({ + pageBg: Type.Optional(ColorValueSchema), + cardBg: Type.Optional(ColorValueSchema), + infoBg: Type.Optional(ColorValueSchema), + }), + ), +}); + +type ThemeJson = Static; + 
+const validateThemeJson = TypeCompiler.Compile(ThemeJsonSchema); + +export type ThemeColor = + | "accent" + | "border" + | "borderAccent" + | "borderMuted" + | "success" + | "error" + | "warning" + | "muted" + | "dim" + | "text" + | "thinkingText" + | "userMessageText" + | "customMessageText" + | "customMessageLabel" + | "toolTitle" + | "toolOutput" + | "mdHeading" + | "mdLink" + | "mdLinkUrl" + | "mdCode" + | "mdCodeBlock" + | "mdCodeBlockBorder" + | "mdQuote" + | "mdQuoteBorder" + | "mdHr" + | "mdListBullet" + | "toolDiffAdded" + | "toolDiffRemoved" + | "toolDiffContext" + | "syntaxComment" + | "syntaxKeyword" + | "syntaxFunction" + | "syntaxVariable" + | "syntaxString" + | "syntaxNumber" + | "syntaxType" + | "syntaxOperator" + | "syntaxPunctuation" + | "thinkingOff" + | "thinkingMinimal" + | "thinkingLow" + | "thinkingMedium" + | "thinkingHigh" + | "thinkingXhigh" + | "bashMode"; + +export type ThemeBg = + | "selectedBg" + | "userMessageBg" + | "customMessageBg" + | "toolPendingBg" + | "toolSuccessBg" + | "toolErrorBg"; + +type ColorMode = "truecolor" | "256color"; + +// ============================================================================ +// Color Utilities +// ============================================================================ + +function detectColorMode(): ColorMode { + const colorterm = process.env.COLORTERM; + if (colorterm === "truecolor" || colorterm === "24bit") { + return "truecolor"; + } + // Windows Terminal supports truecolor + if (process.env.WT_SESSION) { + return "truecolor"; + } + const term = process.env.TERM || ""; + // Fall back to 256color for truly limited terminals + if (term === "dumb" || term === "" || term === "linux") { + return "256color"; + } + // Terminal.app also doesn't support truecolor + if (process.env.TERM_PROGRAM === "Apple_Terminal") { + return "256color"; + } + // GNU screen doesn't support truecolor unless explicitly opted in via COLORTERM=truecolor. 
+ // TERM under screen is typically "screen", "screen-256color", or "screen.xterm-256color". + if (term === "screen" || term.startsWith("screen-") || term.startsWith("screen.")) { + return "256color"; + } + // Assume truecolor for everything else - virtually all modern terminals support it + return "truecolor"; +} + +function hexToRgb(hex: string): { r: number; g: number; b: number } { + const cleaned = hex.replace("#", ""); + if (cleaned.length !== 6) { + throw new Error(`Invalid hex color: ${hex}`); + } + const r = parseInt(cleaned.substring(0, 2), 16); + const g = parseInt(cleaned.substring(2, 4), 16); + const b = parseInt(cleaned.substring(4, 6), 16); + if (Number.isNaN(r) || Number.isNaN(g) || Number.isNaN(b)) { + throw new Error(`Invalid hex color: ${hex}`); + } + return { r, g, b }; +} + +// The 6x6x6 color cube channel values (indices 0-5) +const CUBE_VALUES = [0, 95, 135, 175, 215, 255]; + +// Grayscale ramp values (indices 232-255, 24 grays from 8 to 238) +const GRAY_VALUES = Array.from({ length: 24 }, (_, i) => 8 + i * 10); + +function findClosestCubeIndex(value: number): number { + let minDist = Infinity; + let minIdx = 0; + for (let i = 0; i < CUBE_VALUES.length; i++) { + const dist = Math.abs(value - CUBE_VALUES[i]); + if (dist < minDist) { + minDist = dist; + minIdx = i; + } + } + return minIdx; +} + +function findClosestGrayIndex(gray: number): number { + let minDist = Infinity; + let minIdx = 0; + for (let i = 0; i < GRAY_VALUES.length; i++) { + const dist = Math.abs(gray - GRAY_VALUES[i]); + if (dist < minDist) { + minDist = dist; + minIdx = i; + } + } + return minIdx; +} + +function colorDistance(r1: number, g1: number, b1: number, r2: number, g2: number, b2: number): number { + // Weighted Euclidean distance (human eye is more sensitive to green) + const dr = r1 - r2; + const dg = g1 - g2; + const db = b1 - b2; + return dr * dr * 0.299 + dg * dg * 0.587 + db * db * 0.114; +} + +function rgbTo256(r: number, g: number, b: number): number { + // 
Find closest color in the 6x6x6 cube + const rIdx = findClosestCubeIndex(r); + const gIdx = findClosestCubeIndex(g); + const bIdx = findClosestCubeIndex(b); + const cubeR = CUBE_VALUES[rIdx]; + const cubeG = CUBE_VALUES[gIdx]; + const cubeB = CUBE_VALUES[bIdx]; + const cubeIndex = 16 + 36 * rIdx + 6 * gIdx + bIdx; + const cubeDist = colorDistance(r, g, b, cubeR, cubeG, cubeB); + + // Find closest grayscale + const gray = Math.round(0.299 * r + 0.587 * g + 0.114 * b); + const grayIdx = findClosestGrayIndex(gray); + const grayValue = GRAY_VALUES[grayIdx]; + const grayIndex = 232 + grayIdx; + const grayDist = colorDistance(r, g, b, grayValue, grayValue, grayValue); + + // Check if color has noticeable saturation (hue matters) + // If max-min spread is significant, prefer cube to preserve tint + const maxC = Math.max(r, g, b); + const minC = Math.min(r, g, b); + const spread = maxC - minC; + + // Only consider grayscale if color is nearly neutral (spread < 10) + // AND grayscale is actually closer + if (spread < 10 && grayDist < cubeDist) { + return grayIndex; + } + + return cubeIndex; +} + +function hexTo256(hex: string): number { + const { r, g, b } = hexToRgb(hex); + return rgbTo256(r, g, b); +} + +function fgAnsi(color: string | number, mode: ColorMode): string { + if (color === "") return "\x1b[39m"; + if (typeof color === "number") return `\x1b[38;5;${color}m`; + if (color.startsWith("#")) { + if (mode === "truecolor") { + const { r, g, b } = hexToRgb(color); + return `\x1b[38;2;${r};${g};${b}m`; + } else { + const index = hexTo256(color); + return `\x1b[38;5;${index}m`; + } + } + throw new Error(`Invalid color value: ${color}`); +} + +function bgAnsi(color: string | number, mode: ColorMode): string { + if (color === "") return "\x1b[49m"; + if (typeof color === "number") return `\x1b[48;5;${color}m`; + if (color.startsWith("#")) { + if (mode === "truecolor") { + const { r, g, b } = hexToRgb(color); + return `\x1b[48;2;${r};${g};${b}m`; + } else { + const index = 
hexTo256(color); + return `\x1b[48;5;${index}m`; + } + } + throw new Error(`Invalid color value: ${color}`); +} + +function resolveVarRefs( + value: ColorValue, + vars: Record, + visited = new Set(), +): string | number { + if (typeof value === "number" || value === "" || value.startsWith("#")) { + return value; + } + if (visited.has(value)) { + throw new Error(`Circular variable reference detected: ${value}`); + } + if (!(value in vars)) { + throw new Error(`Variable reference not found: ${value}`); + } + visited.add(value); + return resolveVarRefs(vars[value], vars, visited); +} + +function resolveThemeColors>( + colors: T, + vars: Record = {}, +): Record { + const resolved: Record = {}; + for (const [key, value] of Object.entries(colors)) { + resolved[key] = resolveVarRefs(value, vars); + } + return resolved as Record; +} + +// ============================================================================ +// Theme Class +// ============================================================================ + +export class Theme { + readonly name?: string; + readonly sourcePath?: string; + private fgColors: Map; + private bgColors: Map; + private mode: ColorMode; + + constructor( + fgColors: Record, + bgColors: Record, + mode: ColorMode, + options: { name?: string; sourcePath?: string } = {}, + ) { + this.name = options.name; + this.sourcePath = options.sourcePath; + this.mode = mode; + this.fgColors = new Map(); + for (const [key, value] of Object.entries(fgColors) as [ThemeColor, string | number][]) { + this.fgColors.set(key, fgAnsi(value, mode)); + } + this.bgColors = new Map(); + for (const [key, value] of Object.entries(bgColors) as [ThemeBg, string | number][]) { + this.bgColors.set(key, bgAnsi(value, mode)); + } + } + + fg(color: ThemeColor, text: string): string { + const ansi = this.fgColors.get(color); + if (!ansi) throw new Error(`Unknown theme color: ${color}`); + return `${ansi}${text}\x1b[39m`; // Reset only foreground color + } + + bg(color: ThemeBg, text: 
string): string { + const ansi = this.bgColors.get(color); + if (!ansi) throw new Error(`Unknown theme background color: ${color}`); + return `${ansi}${text}\x1b[49m`; // Reset only background color + } + + bold(text: string): string { + return chalk.bold(text); + } + + italic(text: string): string { + return chalk.italic(text); + } + + underline(text: string): string { + return chalk.underline(text); + } + + inverse(text: string): string { + return chalk.inverse(text); + } + + strikethrough(text: string): string { + return chalk.strikethrough(text); + } + + getFgAnsi(color: ThemeColor): string { + const ansi = this.fgColors.get(color); + if (!ansi) throw new Error(`Unknown theme color: ${color}`); + return ansi; + } + + getBgAnsi(color: ThemeBg): string { + const ansi = this.bgColors.get(color); + if (!ansi) throw new Error(`Unknown theme background color: ${color}`); + return ansi; + } + + getColorMode(): ColorMode { + return this.mode; + } + + getThinkingBorderColor(level: "off" | "minimal" | "low" | "medium" | "high" | "xhigh"): (str: string) => string { + // Map thinking levels to dedicated theme colors + switch (level) { + case "off": + return (str: string) => this.fg("thinkingOff", str); + case "minimal": + return (str: string) => this.fg("thinkingMinimal", str); + case "low": + return (str: string) => this.fg("thinkingLow", str); + case "medium": + return (str: string) => this.fg("thinkingMedium", str); + case "high": + return (str: string) => this.fg("thinkingHigh", str); + case "xhigh": + return (str: string) => this.fg("thinkingXhigh", str); + default: + return (str: string) => this.fg("thinkingOff", str); + } + } + + getBashModeBorderColor(): (str: string) => string { + return (str: string) => this.fg("bashMode", str); + } +} + +// ============================================================================ +// Theme Loading +// ============================================================================ + +let BUILTIN_THEMES: Record | undefined; + 
+function getBuiltinThemes(): Record { + if (!BUILTIN_THEMES) { + const themesDir = getThemesDir(); + const darkPath = path.join(themesDir, "dark.json"); + const lightPath = path.join(themesDir, "light.json"); + BUILTIN_THEMES = { + dark: JSON.parse(fs.readFileSync(darkPath, "utf-8")) as ThemeJson, + light: JSON.parse(fs.readFileSync(lightPath, "utf-8")) as ThemeJson, + }; + } + return BUILTIN_THEMES; +} + +export function getAvailableThemes(): string[] { + const themes = new Set(Object.keys(getBuiltinThemes())); + const customThemesDir = getCustomThemesDir(); + if (fs.existsSync(customThemesDir)) { + const files = fs.readdirSync(customThemesDir); + for (const file of files) { + if (file.endsWith(".json")) { + themes.add(file.slice(0, -5)); + } + } + } + for (const name of registeredThemes.keys()) { + themes.add(name); + } + return Array.from(themes).sort(); +} + +export interface ThemeInfo { + name: string; + path: string | undefined; +} + +export function getAvailableThemesWithPaths(): ThemeInfo[] { + const themesDir = getThemesDir(); + const customThemesDir = getCustomThemesDir(); + const result: ThemeInfo[] = []; + + // Built-in themes + for (const name of Object.keys(getBuiltinThemes())) { + result.push({ name, path: path.join(themesDir, `${name}.json`) }); + } + + // Custom themes + if (fs.existsSync(customThemesDir)) { + for (const file of fs.readdirSync(customThemesDir)) { + if (file.endsWith(".json")) { + const name = file.slice(0, -5); + if (!result.some((t) => t.name === name)) { + result.push({ name, path: path.join(customThemesDir, file) }); + } + } + } + } + + for (const [name, theme] of registeredThemes.entries()) { + if (!result.some((t) => t.name === name)) { + result.push({ name, path: theme.sourcePath }); + } + } + + return result.sort((a, b) => a.name.localeCompare(b.name)); +} + +function parseThemeJson(label: string, json: unknown): ThemeJson { + if (!validateThemeJson.Check(json)) { + const errors = Array.from(validateThemeJson.Errors(json)); 
+ const missingColors: string[] = []; + const otherErrors: string[] = []; + + for (const e of errors) { + // Check for missing required color properties + const match = e.path.match(/^\/colors\/(\w+)$/); + if (match && e.message.includes("Required")) { + missingColors.push(match[1]); + } else { + otherErrors.push(` - ${e.path}: ${e.message}`); + } + } + + let errorMessage = `Invalid theme "${label}":\n`; + if (missingColors.length > 0) { + errorMessage += "\nMissing required color tokens:\n"; + errorMessage += missingColors.map((c) => ` - ${c}`).join("\n"); + errorMessage += '\n\nPlease add these colors to your theme\'s "colors" object.'; + errorMessage += "\nSee the built-in themes (dark.json, light.json) for reference values."; + } + if (otherErrors.length > 0) { + errorMessage += `\n\nOther errors:\n${otherErrors.join("\n")}`; + } + + throw new Error(errorMessage); + } + + return json as ThemeJson; +} + +function parseThemeJsonContent(label: string, content: string): ThemeJson { + let json: unknown; + try { + json = JSON.parse(content); + } catch (error) { + throw new Error(`Failed to parse theme ${label}: ${error}`); + } + return parseThemeJson(label, json); +} + +function loadThemeJson(name: string): ThemeJson { + const builtinThemes = getBuiltinThemes(); + if (name in builtinThemes) { + return builtinThemes[name]; + } + const registeredTheme = registeredThemes.get(name); + if (registeredTheme?.sourcePath) { + const content = fs.readFileSync(registeredTheme.sourcePath, "utf-8"); + return parseThemeJsonContent(registeredTheme.sourcePath, content); + } + if (registeredTheme) { + throw new Error(`Theme "${name}" does not have a source path for export`); + } + const customThemesDir = getCustomThemesDir(); + const themePath = path.join(customThemesDir, `${name}.json`); + if (!fs.existsSync(themePath)) { + throw new Error(`Theme not found: ${name}`); + } + const content = fs.readFileSync(themePath, "utf-8"); + return parseThemeJsonContent(name, content); +} + 
/**
 * Build a Theme instance from validated JSON: resolve $variables, then split
 * the flat colors map into foreground vs. background tokens.
 * NOTE(review): Record/Set type arguments in this region were stripped by the
 * mangled paste and are reconstructed — confirm against upstream.
 */
function createTheme(themeJson: ThemeJson, mode?: ColorMode, sourcePath?: string): Theme {
	const colorMode = mode ?? detectColorMode();
	const resolvedColors = resolveThemeColors(themeJson.colors, themeJson.vars);
	const fgColors: Record<ThemeColor, string | number> = {} as Record<ThemeColor, string | number>;
	const bgColors: Record<ThemeBg, string | number> = {} as Record<ThemeBg, string | number>;
	// Tokens that style backgrounds; everything else is a foreground token.
	const bgColorKeys: Set<string> = new Set([
		"selectedBg",
		"userMessageBg",
		"customMessageBg",
		"toolPendingBg",
		"toolSuccessBg",
		"toolErrorBg",
	]);
	for (const [key, value] of Object.entries(resolvedColors)) {
		if (bgColorKeys.has(key)) {
			bgColors[key as ThemeBg] = value;
		} else {
			fgColors[key as ThemeColor] = value;
		}
	}
	return new Theme(fgColors, bgColors, colorMode, {
		name: themeJson.name,
		sourcePath,
	});
}

/** Load a theme directly from a JSON file path. */
export function loadThemeFromPath(themePath: string, mode?: ColorMode): Theme {
	const content = fs.readFileSync(themePath, "utf-8");
	const themeJson = parseThemeJsonContent(themePath, content);
	return createTheme(themeJson, mode, themePath);
}

/** Load a theme by name; registered Theme instances win over JSON lookup. */
function loadTheme(name: string, mode?: ColorMode): Theme {
	const registeredTheme = registeredThemes.get(name);
	if (registeredTheme) {
		return registeredTheme;
	}
	const themeJson = loadThemeJson(name);
	return createTheme(themeJson, mode);
}

/** Non-throwing variant of loadTheme: undefined when the theme is invalid/missing. */
export function getThemeByName(name: string): Theme | undefined {
	try {
		return loadTheme(name);
	} catch {
		return undefined;
	}
}

/**
 * Guess terminal background from the COLORFGBG env var (bg index < 8 = dark).
 * Falls back to "dark" when the variable is absent or unparseable.
 */
function detectTerminalBackground(): "dark" | "light" {
	const colorfgbg = process.env.COLORFGBG || "";
	if (colorfgbg) {
		const parts = colorfgbg.split(";");
		if (parts.length >= 2) {
			const bg = parseInt(parts[1], 10);
			if (!Number.isNaN(bg)) {
				const result = bg < 8 ? "dark" : "light";
				return result;
			}
		}
	}
	return "dark";
}

function getDefaultTheme(): string {
	return detectTerminalBackground();
}

// ============================================================================
// Global Theme Instance
// ============================================================================

// Use globalThis to share theme across module loaders (tsx + jiti in dev mode)
const THEME_KEY = Symbol.for("@gsd/pi-coding-agent:theme");

// Export theme as a getter that reads from globalThis
// This ensures all module instances (tsx, jiti) see the same theme
export const theme: Theme = new Proxy({} as Theme, {
	get(_target, prop) {
		const t = (globalThis as Record<symbol, unknown>)[THEME_KEY];
		if (!t) throw new Error("Theme not initialized. Call initTheme() first.");
		return (t as unknown as Record<string | symbol, unknown>)[prop];
	},
});

function setGlobalTheme(t: Theme): void {
	(globalThis as Record<symbol, unknown>)[THEME_KEY] = t;
}

let currentThemeName: string | undefined;
let themeWatcher: fs.FSWatcher | undefined;
let onThemeChangeCallback: (() => void) | undefined;
const registeredThemes = new Map<string, Theme>();

/** Replace the set of programmatically registered themes (keyed by name). */
export function setRegisteredThemes(themes: Theme[]): void {
	registeredThemes.clear();
	for (const theme of themes) {
		if (theme.name) {
			registeredThemes.set(theme.name, theme);
		}
	}
}

/**
 * Initialize the global theme. Invalid themes fall back to "dark" silently
 * (no watcher is started for the fallback).
 */
export function initTheme(themeName?: string, enableWatcher: boolean = false): void {
	const name = themeName ?? getDefaultTheme();
	currentThemeName = name;
	try {
		setGlobalTheme(loadTheme(name));
		if (enableWatcher) {
			startThemeWatcher();
		}
	} catch (_error) {
		// Theme is invalid - fall back to dark theme silently
		currentThemeName = "dark";
		setGlobalTheme(loadTheme("dark"));
		// Don't start watcher for fallback theme
	}
}

/**
 * Switch the global theme by name. On failure, falls back to "dark" and
 * reports the error instead of throwing.
 */
export function setTheme(name: string, enableWatcher: boolean = false): { success: boolean; error?: string } {
	currentThemeName = name;
	try {
		setGlobalTheme(loadTheme(name));
		if (enableWatcher) {
			startThemeWatcher();
		}
		if (onThemeChangeCallback) {
			onThemeChangeCallback();
		}
		return { success: true };
	} catch (error) {
		// Theme is invalid - fall back to dark theme
		currentThemeName = "dark";
		setGlobalTheme(loadTheme("dark"));
		// Don't start watcher for fallback theme
		return {
			success: false,
			error: error instanceof Error ? error.message : String(error),
		};
	}
}

/** Install a pre-built Theme instance (clears the name; stops any file watcher). */
export function setThemeInstance(themeInstance: Theme): void {
	setGlobalTheme(themeInstance);
	currentThemeName = "";
	stopThemeWatcher(); // Can't watch a direct instance
	if (onThemeChangeCallback) {
		onThemeChangeCallback();
	}
}

/** Register the single callback fired whenever the active theme changes. */
export function onThemeChange(callback: () => void): void {
	onThemeChangeCallback = callback;
}

/**
 * Watch the current custom theme's JSON file and hot-reload on change.
 * Built-in themes are never watched. Changes are debounced by 100ms.
 */
function startThemeWatcher(): void {
	// Stop existing watcher if any
	if (themeWatcher) {
		themeWatcher.close();
		themeWatcher = undefined;
	}

	// Only watch if it's a custom theme (not built-in)
	if (!currentThemeName || currentThemeName === "dark" || currentThemeName === "light") {
		return;
	}

	const customThemesDir = getCustomThemesDir();
	const themeFile = path.join(customThemesDir, `${currentThemeName}.json`);

	// Only watch if the file exists
	if (!fs.existsSync(themeFile)) {
		return;
	}

	try {
		themeWatcher = fs.watch(themeFile, (eventType) => {
			if (eventType === "change") {
				// Debounce rapid changes
				setTimeout(() => {
					try {
						// Reload the theme
						setGlobalTheme(loadTheme(currentThemeName!));
						// Notify callback (to invalidate UI)
						if (onThemeChangeCallback) {
							onThemeChangeCallback();
						}
					} catch (_error) {
						// Ignore errors (file might be in invalid state while being edited)
					}
				}, 100);
			} else if (eventType === "rename") {
				// File was deleted or renamed - fall back to default theme
				setTimeout(() => {
					if (!fs.existsSync(themeFile)) {
						currentThemeName = "dark";
						setGlobalTheme(loadTheme("dark"));
						if (themeWatcher) {
							themeWatcher.close();
							themeWatcher = undefined;
						}
						if (onThemeChangeCallback) {
							onThemeChangeCallback();
						}
					}
				}, 100);
			}
		});
	} catch (_error) {
		// Ignore errors starting watcher
	}
}

export function stopThemeWatcher(): void {
	if (themeWatcher) {
		themeWatcher.close();
		themeWatcher = undefined;
	}
}

// ============================================================================
// HTML Export Helpers
// ============================================================================

/**
 * Convert a 256-color index to hex string.
 * Indices 0-15: basic colors (approximate)
 * Indices 16-231: 6x6x6 color cube
 * Indices 232-255: grayscale ramp
 */
function ansi256ToHex(index: number): string {
	// Basic colors (0-15) - approximate common terminal values
	const basicColors = [
		"#000000",
		"#800000",
		"#008000",
		"#808000",
		"#000080",
		"#800080",
		"#008080",
		"#c0c0c0",
		"#808080",
		"#ff0000",
		"#00ff00",
		"#ffff00",
		"#0000ff",
		"#ff00ff",
		"#00ffff",
		"#ffffff",
	];
	if (index < 16) {
		return basicColors[index];
	}

	// Color cube (16-231): 6x6x6 = 216 colors
	if (index < 232) {
		const cubeIndex = index - 16;
		const r = Math.floor(cubeIndex / 36);
		const g = Math.floor((cubeIndex % 36) / 6);
		const b = cubeIndex % 6;
		// Cube component 0 maps to 0; components 1-5 map to 95,135,175,215,255
		const toHex = (n: number) => (n === 0 ? 0 : 55 + n * 40).toString(16).padStart(2, "0");
		return `#${toHex(r)}${toHex(g)}${toHex(b)}`;
	}

	// Grayscale (232-255): 24 shades
	const gray = 8 + (index - 232) * 10;
	const grayHex = gray.toString(16).padStart(2, "0");
	return `#${grayHex}${grayHex}${grayHex}`;
}

/**
 * Get resolved theme colors as CSS-compatible hex strings.
 * Used by HTML export to generate CSS custom properties.
 */
export function getResolvedThemeColors(themeName?: string): Record<string, string> {
	const name = themeName ?? currentThemeName ?? getDefaultTheme();
	const isLight = name === "light";
	const themeJson = loadThemeJson(name);
	const resolved = resolveThemeColors(themeJson.colors, themeJson.vars);

	// Default text color for empty values (terminal uses default fg color)
	const defaultText = isLight ? "#000000" : "#e5e5e7";

	const cssColors: Record<string, string> = {};
	for (const [key, value] of Object.entries(resolved)) {
		if (typeof value === "number") {
			cssColors[key] = ansi256ToHex(value);
		} else if (value === "") {
			// Empty means default terminal color - use sensible fallback for HTML
			cssColors[key] = defaultText;
		} else {
			cssColors[key] = value;
		}
	}
	return cssColors;
}

/**
 * Check if a theme is a "light" theme (for CSS that needs light/dark variants).
 */
export function isLightTheme(themeName?: string): boolean {
	// Currently just check the name - could be extended to analyze colors
	return themeName === "light";
}

/**
 * Get explicit export colors from theme JSON, if specified.
 * Returns undefined for each color that isn't explicitly set.
 */
export function getThemeExportColors(themeName?: string): {
	pageBg?: string;
	cardBg?: string;
	infoBg?: string;
} {
	const name = themeName ?? currentThemeName ?? getDefaultTheme();
	try {
		const themeJson = loadThemeJson(name);
		const exportSection = themeJson.export;
		if (!exportSection) return {};

		const vars = themeJson.vars ?? {};
		const resolve = (value: string | number | undefined): string | undefined => {
			if (value === undefined) return undefined;
			if (typeof value === "number") return ansi256ToHex(value);
			if (value.startsWith("$")) {
				const resolved = vars[value];
				if (resolved === undefined) return undefined;
				if (typeof resolved === "number") return ansi256ToHex(resolved);
				return resolved;
			}
			return value;
		};

		return {
			pageBg: resolve(exportSection.pageBg),
			cardBg: resolve(exportSection.cardBg),
			infoBg: resolve(exportSection.infoBg),
		};
	} catch {
		return {};
	}
}

// ============================================================================
// TUI Helpers
// ============================================================================

// NOTE(review): function type argument reconstructed (stripped by the paste).
type CliHighlightTheme = Record<string, (s: string) => string>;

// Cache keyed by Theme identity so the highlight theme is rebuilt only on change.
let cachedHighlightThemeFor: Theme | undefined;
let cachedCliHighlightTheme: CliHighlightTheme | undefined;

/** Map cli-highlight token classes onto this theme's syntax colors. */
function buildCliHighlightTheme(t: Theme): CliHighlightTheme {
	return {
		keyword: (s: string) => t.fg("syntaxKeyword", s),
		built_in: (s: string) => t.fg("syntaxType", s),
		literal: (s: string) => t.fg("syntaxNumber", s),
		number: (s: string) => t.fg("syntaxNumber", s),
		string: (s: string) => t.fg("syntaxString", s),
		comment: (s: string) => t.fg("syntaxComment", s),
		function: (s: string) => t.fg("syntaxFunction", s),
		title: (s: string) => t.fg("syntaxFunction", s),
		class: (s: string) => t.fg("syntaxType", s),
		type: (s: string) => t.fg("syntaxType", s),
		attr: (s: string) => t.fg("syntaxVariable", s),
		variable: (s: string) => t.fg("syntaxVariable", s),
		params: (s: string) => t.fg("syntaxVariable", s),
		operator: (s: string) => t.fg("syntaxOperator", s),
		punctuation: (s: string) => t.fg("syntaxPunctuation", s),
	};
}

function getCliHighlightTheme(t: Theme): CliHighlightTheme {
	if (cachedHighlightThemeFor !== t || !cachedCliHighlightTheme) {
		cachedHighlightThemeFor = t;
		cachedCliHighlightTheme = buildCliHighlightTheme(t);
	}
	return cachedCliHighlightTheme;
}

/**
 * Highlight code with syntax coloring based on file extension or language.
 * Returns array of highlighted lines.
 */
export function highlightCode(code: string, lang?: string): string[] {
	// Validate language before highlighting to avoid stderr spam from cli-highlight
	const validLang = lang && supportsLanguage(lang) ? lang : undefined;
	const opts = {
		language: validLang,
		ignoreIllegals: true,
		theme: getCliHighlightTheme(theme),
	};
	try {
		return highlight(code, opts).split("\n");
	} catch {
		return code.split("\n");
	}
}

/**
 * Get language identifier from file path extension.
 */
export function getLanguageFromPath(filePath: string): string | undefined {
	const ext = filePath.split(".").pop()?.toLowerCase();
	if (!ext) return undefined;

	const extToLang: Record<string, string> = {
		ts: "typescript",
		tsx: "typescript",
		js: "javascript",
		jsx: "javascript",
		mjs: "javascript",
		cjs: "javascript",
		py: "python",
		rb: "ruby",
		rs: "rust",
		go: "go",
		java: "java",
		kt: "kotlin",
		swift: "swift",
		c: "c",
		h: "c",
		cpp: "cpp",
		cc: "cpp",
		cxx: "cpp",
		hpp: "cpp",
		cs: "csharp",
		php: "php",
		sh: "bash",
		bash: "bash",
		zsh: "bash",
		fish: "fish",
		ps1: "powershell",
		sql: "sql",
		html: "html",
		htm: "html",
		css: "css",
		scss: "scss",
		sass: "sass",
		less: "less",
		json: "json",
		yaml: "yaml",
		yml: "yaml",
		toml: "toml",
		xml: "xml",
		md: "markdown",
		markdown: "markdown",
		dockerfile: "dockerfile",
		makefile: "makefile",
		cmake: "cmake",
		lua: "lua",
		perl: "perl",
		r: "r",
		scala: "scala",
		clj: "clojure",
		ex: "elixir",
		exs: "elixir",
		erl: "erlang",
		hs: "haskell",
		ml: "ocaml",
		vim: "vim",
		graphql: "graphql",
		proto: "protobuf",
		tf: "hcl",
		hcl: "hcl",
	};

	return extToLang[ext];
}

/** Markdown rendering theme bound to the global theme instance. */
export function getMarkdownTheme(): MarkdownTheme {
	return {
		heading: (text: string) => theme.fg("mdHeading", text),
		link: (text: string) => theme.fg("mdLink", text),
		linkUrl: (text: string) => theme.fg("mdLinkUrl", text),
		code: (text: string) => theme.fg("mdCode", text),
		codeBlock: (text: string) => theme.fg("mdCodeBlock", text),
		codeBlockBorder: (text: string) => theme.fg("mdCodeBlockBorder", text),
		quote: (text: string) => theme.fg("mdQuote", text),
		quoteBorder: (text: string) => theme.fg("mdQuoteBorder", text),
		hr: (text: string) => theme.fg("mdHr", text),
		listBullet: (text: string) => theme.fg("mdListBullet", text),
		bold: (text: string) => theme.bold(text),
		italic: (text: string) => theme.italic(text),
		underline: (text: string) => theme.underline(text),
		strikethrough: (text: string) => chalk.strikethrough(text),
		highlightCode: (code: string, lang?: string): string[] => {
			// Validate language before highlighting to avoid stderr spam from cli-highlight
			const validLang = lang && supportsLanguage(lang) ? lang : undefined;
			const opts = {
				language: validLang,
				ignoreIllegals: true,
				theme: getCliHighlightTheme(theme),
			};
			try {
				return highlight(code, opts).split("\n");
			} catch {
				return code.split("\n").map((line) => theme.fg("mdCodeBlock", line));
			}
		},
	};
}

export function getSelectListTheme(): SelectListTheme {
	return {
		selectedPrefix: (text: string) => theme.fg("accent", text),
		selectedText: (text: string) => theme.fg("accent", text),
		description: (text: string) => theme.fg("muted", text),
		scrollInfo: (text: string) => theme.fg("muted", text),
		noMatch: (text: string) => theme.fg("muted", text),
	};
}

export function getEditorTheme(): EditorTheme {
	return {
		borderColor: (text: string) => theme.fg("borderMuted", text),
		selectList: getSelectListTheme(),
	};
}

export function getSettingsListTheme(): import("@gsd/pi-tui").SettingsListTheme {
	return {
		label: (text: string, selected: boolean) => (selected ? theme.fg("accent", text) : text),
		value: (text: string, selected: boolean) => (selected ? theme.fg("accent", text) : theme.fg("muted", text)),
		description: (text: string) => theme.fg("dim", text),
		cursor: theme.fg("accent", "→ "),
		hint: (text: string) => theme.fg("dim", text),
	};
}

// ---------------------------------------------------------------------------
// Patch metadata (preserved from the vendoring diff):
// diff --git a/packages/pi-coding-agent/src/modes/print-mode.ts
//          b/packages/pi-coding-agent/src/modes/print-mode.ts
// new file mode 100644, index 000000000..479bae1fd, @@ -0,0 +1,124 @@
// ---------------------------------------------------------------------------

/**
 * Print mode (single-shot): Send prompts, output result, exit.
 *
 * Used for:
 * - `pi -p "prompt"` - text output
 * - `pi --mode json "prompt"` - JSON event stream
 */

import type { AssistantMessage, ImageContent } from "@gsd/pi-ai";
import type { AgentSession } from "../core/agent-session.js";

/**
 * Options for print mode.
 */
export interface PrintModeOptions {
	/** Output mode: "text" for final response only, "json" for all events */
	mode: "text" | "json";
	/** Array of additional prompts to send after initialMessage */
	messages?: string[];
	/** First message to send (may contain @file content) */
	initialMessage?: string;
	/** Images to attach to the initial message */
	initialImages?: ImageContent[];
}

/**
 * Run in print (single-shot) mode.
 * Sends prompts to the agent and outputs the result.
+ */ +export async function runPrintMode(session: AgentSession, options: PrintModeOptions): Promise { + const { mode, messages = [], initialMessage, initialImages } = options; + if (mode === "json") { + const header = session.sessionManager.getHeader(); + if (header) { + console.log(JSON.stringify(header)); + } + } + // Set up extensions for print mode (no UI) + await session.bindExtensions({ + commandContextActions: { + waitForIdle: () => session.agent.waitForIdle(), + newSession: async (options) => { + const success = await session.newSession({ parentSession: options?.parentSession }); + if (success && options?.setup) { + await options.setup(session.sessionManager); + } + return { cancelled: !success }; + }, + fork: async (entryId) => { + const result = await session.fork(entryId); + return { cancelled: result.cancelled }; + }, + navigateTree: async (targetId, options) => { + const result = await session.navigateTree(targetId, { + summarize: options?.summarize, + customInstructions: options?.customInstructions, + replaceInstructions: options?.replaceInstructions, + label: options?.label, + }); + return { cancelled: result.cancelled }; + }, + switchSession: async (sessionPath) => { + const success = await session.switchSession(sessionPath); + return { cancelled: !success }; + }, + reload: async () => { + await session.reload(); + }, + }, + onError: (err) => { + console.error(`Extension error (${err.extensionPath}): ${err.error}`); + }, + }); + + // Always subscribe to enable session persistence via _handleAgentEvent + session.subscribe((event) => { + // In JSON mode, output all events + if (mode === "json") { + console.log(JSON.stringify(event)); + } + }); + + // Send initial message with attachments + if (initialMessage) { + await session.prompt(initialMessage, { images: initialImages }); + } + + // Send remaining messages + for (const message of messages) { + await session.prompt(message); + } + + // In text mode, output final response + if (mode === "text") { + 
const state = session.state; + const lastMessage = state.messages[state.messages.length - 1]; + + if (lastMessage?.role === "assistant") { + const assistantMsg = lastMessage as AssistantMessage; + + // Check for error/aborted + if (assistantMsg.stopReason === "error" || assistantMsg.stopReason === "aborted") { + console.error(assistantMsg.errorMessage || `Request ${assistantMsg.stopReason}`); + process.exit(1); + } + + // Output text content + for (const content of assistantMsg.content) { + if (content.type === "text") { + console.log(content.text); + } + } + } + } + + // Ensure stdout is fully flushed before returning + // This prevents race conditions where the process exits before all output is written + await new Promise((resolve, reject) => { + process.stdout.write("", (err) => { + if (err) reject(err); + else resolve(); + }); + }); +} diff --git a/packages/pi-coding-agent/src/modes/rpc/jsonl.ts b/packages/pi-coding-agent/src/modes/rpc/jsonl.ts new file mode 100644 index 000000000..8962c7340 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/rpc/jsonl.ts @@ -0,0 +1,58 @@ +import type { Readable } from "node:stream"; +import { StringDecoder } from "node:string_decoder"; + +/** + * Serialize a single strict JSONL record. + * + * Framing is LF-only. Payload strings may contain other Unicode separators such as + * U+2028 and U+2029. Clients must split records on `\n` only. + */ +export function serializeJsonLine(value: unknown): string { + return `${JSON.stringify(value)}\n`; +} + +/** + * Attach an LF-only JSONL reader to a stream. + * + * This intentionally does not use Node readline. Readline splits on additional + * Unicode separators that are valid inside JSON strings and therefore does not + * implement strict JSONL framing. 
+ */ +export function attachJsonlLineReader(stream: Readable, onLine: (line: string) => void): () => void { + const decoder = new StringDecoder("utf8"); + let buffer = ""; + + const emitLine = (line: string) => { + onLine(line.endsWith("\r") ? line.slice(0, -1) : line); + }; + + const onData = (chunk: string | Buffer) => { + buffer += typeof chunk === "string" ? chunk : decoder.write(chunk); + + while (true) { + const newlineIndex = buffer.indexOf("\n"); + if (newlineIndex === -1) { + return; + } + + emitLine(buffer.slice(0, newlineIndex)); + buffer = buffer.slice(newlineIndex + 1); + } + }; + + const onEnd = () => { + buffer += decoder.end(); + if (buffer.length > 0) { + emitLine(buffer); + buffer = ""; + } + }; + + stream.on("data", onData); + stream.on("end", onEnd); + + return () => { + stream.off("data", onData); + stream.off("end", onEnd); + }; +} diff --git a/packages/pi-coding-agent/src/modes/rpc/rpc-client.ts b/packages/pi-coding-agent/src/modes/rpc/rpc-client.ts new file mode 100644 index 000000000..a93dbb412 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/rpc/rpc-client.ts @@ -0,0 +1,505 @@ +/** + * RPC Client for programmatic access to the coding agent. + * + * Spawns the agent in RPC mode and provides a typed API for all operations. 
+ */ + +import { type ChildProcess, spawn } from "node:child_process"; +import type { AgentEvent, AgentMessage, ThinkingLevel } from "@gsd/pi-agent-core"; +import type { ImageContent } from "@gsd/pi-ai"; +import type { SessionStats } from "../../core/agent-session.js"; +import type { BashResult } from "../../core/bash-executor.js"; +import type { CompactionResult } from "../../core/compaction/index.js"; +import { attachJsonlLineReader, serializeJsonLine } from "./jsonl.js"; +import type { RpcCommand, RpcResponse, RpcSessionState, RpcSlashCommand } from "./rpc-types.js"; + +// ============================================================================ +// Types +// ============================================================================ + +/** Distributive Omit that works with union types */ +type DistributiveOmit = T extends unknown ? Omit : never; + +/** RpcCommand without the id field (for internal send) */ +type RpcCommandBody = DistributiveOmit; + +export interface RpcClientOptions { + /** Path to the CLI entry point (default: searches for dist/cli.js) */ + cliPath?: string; + /** Working directory for the agent */ + cwd?: string; + /** Environment variables */ + env?: Record; + /** Provider to use */ + provider?: string; + /** Model ID to use */ + model?: string; + /** Additional CLI arguments */ + args?: string[]; +} + +export interface ModelInfo { + provider: string; + id: string; + contextWindow: number; + reasoning: boolean; +} + +export type RpcEventListener = (event: AgentEvent) => void; + +// ============================================================================ +// RPC Client +// ============================================================================ + +export class RpcClient { + private process: ChildProcess | null = null; + private stopReadingStdout: (() => void) | null = null; + private eventListeners: RpcEventListener[] = []; + private pendingRequests: Map void; reject: (error: Error) => void }> = + new Map(); + private requestId = 
0; + private stderr = ""; + + constructor(private options: RpcClientOptions = {}) {} + + /** + * Start the RPC agent process. + */ + async start(): Promise { + if (this.process) { + throw new Error("Client already started"); + } + + const cliPath = this.options.cliPath ?? "dist/cli.js"; + const args = ["--mode", "rpc"]; + + if (this.options.provider) { + args.push("--provider", this.options.provider); + } + if (this.options.model) { + args.push("--model", this.options.model); + } + if (this.options.args) { + args.push(...this.options.args); + } + + this.process = spawn("node", [cliPath, ...args], { + cwd: this.options.cwd, + env: { ...process.env, ...this.options.env }, + stdio: ["pipe", "pipe", "pipe"], + }); + + // Collect stderr for debugging + this.process.stderr?.on("data", (data) => { + this.stderr += data.toString(); + }); + + // Set up strict JSONL reader for stdout. + this.stopReadingStdout = attachJsonlLineReader(this.process.stdout!, (line) => { + this.handleLine(line); + }); + + // Wait a moment for process to initialize + await new Promise((resolve) => setTimeout(resolve, 100)); + + if (this.process.exitCode !== null) { + throw new Error(`Agent process exited immediately with code ${this.process.exitCode}. Stderr: ${this.stderr}`); + } + } + + /** + * Stop the RPC agent process. + */ + async stop(): Promise { + if (!this.process) return; + + this.stopReadingStdout?.(); + this.stopReadingStdout = null; + this.process.kill("SIGTERM"); + + // Wait for process to exit + await new Promise((resolve) => { + const timeout = setTimeout(() => { + this.process?.kill("SIGKILL"); + resolve(); + }, 1000); + + this.process?.on("exit", () => { + clearTimeout(timeout); + resolve(); + }); + }); + + this.process = null; + this.pendingRequests.clear(); + } + + /** + * Subscribe to agent events. 
+ */ + onEvent(listener: RpcEventListener): () => void { + this.eventListeners.push(listener); + return () => { + const index = this.eventListeners.indexOf(listener); + if (index !== -1) { + this.eventListeners.splice(index, 1); + } + }; + } + + /** + * Get collected stderr output (useful for debugging). + */ + getStderr(): string { + return this.stderr; + } + + // ========================================================================= + // Command Methods + // ========================================================================= + + /** + * Send a prompt to the agent. + * Returns immediately after sending; use onEvent() to receive streaming events. + * Use waitForIdle() to wait for completion. + */ + async prompt(message: string, images?: ImageContent[]): Promise { + await this.send({ type: "prompt", message, images }); + } + + /** + * Queue a steering message to interrupt the agent mid-run. + */ + async steer(message: string, images?: ImageContent[]): Promise { + await this.send({ type: "steer", message, images }); + } + + /** + * Queue a follow-up message to be processed after the agent finishes. + */ + async followUp(message: string, images?: ImageContent[]): Promise { + await this.send({ type: "follow_up", message, images }); + } + + /** + * Abort current operation. + */ + async abort(): Promise { + await this.send({ type: "abort" }); + } + + /** + * Start a new session, optionally with parent tracking. + * @param parentSession - Optional parent session path for lineage tracking + * @returns Object with `cancelled: true` if an extension cancelled the new session + */ + async newSession(parentSession?: string): Promise<{ cancelled: boolean }> { + const response = await this.send({ type: "new_session", parentSession }); + return this.getData(response); + } + + /** + * Get current session state. 
+ */ + async getState(): Promise { + const response = await this.send({ type: "get_state" }); + return this.getData(response); + } + + /** + * Set model by provider and ID. + */ + async setModel(provider: string, modelId: string): Promise<{ provider: string; id: string }> { + const response = await this.send({ type: "set_model", provider, modelId }); + return this.getData(response); + } + + /** + * Cycle to next model. + */ + async cycleModel(): Promise<{ + model: { provider: string; id: string }; + thinkingLevel: ThinkingLevel; + isScoped: boolean; + } | null> { + const response = await this.send({ type: "cycle_model" }); + return this.getData(response); + } + + /** + * Get list of available models. + */ + async getAvailableModels(): Promise { + const response = await this.send({ type: "get_available_models" }); + return this.getData<{ models: ModelInfo[] }>(response).models; + } + + /** + * Set thinking level. + */ + async setThinkingLevel(level: ThinkingLevel): Promise { + await this.send({ type: "set_thinking_level", level }); + } + + /** + * Cycle thinking level. + */ + async cycleThinkingLevel(): Promise<{ level: ThinkingLevel } | null> { + const response = await this.send({ type: "cycle_thinking_level" }); + return this.getData(response); + } + + /** + * Set steering mode. + */ + async setSteeringMode(mode: "all" | "one-at-a-time"): Promise { + await this.send({ type: "set_steering_mode", mode }); + } + + /** + * Set follow-up mode. + */ + async setFollowUpMode(mode: "all" | "one-at-a-time"): Promise { + await this.send({ type: "set_follow_up_mode", mode }); + } + + /** + * Compact session context. + */ + async compact(customInstructions?: string): Promise { + const response = await this.send({ type: "compact", customInstructions }); + return this.getData(response); + } + + /** + * Set auto-compaction enabled/disabled. 
+ */ + async setAutoCompaction(enabled: boolean): Promise { + await this.send({ type: "set_auto_compaction", enabled }); + } + + /** + * Set auto-retry enabled/disabled. + */ + async setAutoRetry(enabled: boolean): Promise { + await this.send({ type: "set_auto_retry", enabled }); + } + + /** + * Abort in-progress retry. + */ + async abortRetry(): Promise { + await this.send({ type: "abort_retry" }); + } + + /** + * Execute a bash command. + */ + async bash(command: string): Promise { + const response = await this.send({ type: "bash", command }); + return this.getData(response); + } + + /** + * Abort running bash command. + */ + async abortBash(): Promise { + await this.send({ type: "abort_bash" }); + } + + /** + * Get session statistics. + */ + async getSessionStats(): Promise { + const response = await this.send({ type: "get_session_stats" }); + return this.getData(response); + } + + /** + * Export session to HTML. + */ + async exportHtml(outputPath?: string): Promise<{ path: string }> { + const response = await this.send({ type: "export_html", outputPath }); + return this.getData(response); + } + + /** + * Switch to a different session file. + * @returns Object with `cancelled: true` if an extension cancelled the switch + */ + async switchSession(sessionPath: string): Promise<{ cancelled: boolean }> { + const response = await this.send({ type: "switch_session", sessionPath }); + return this.getData(response); + } + + /** + * Fork from a specific message. + * @returns Object with `text` (the message text) and `cancelled` (if extension cancelled) + */ + async fork(entryId: string): Promise<{ text: string; cancelled: boolean }> { + const response = await this.send({ type: "fork", entryId }); + return this.getData(response); + } + + /** + * Get messages available for forking. 
+ */ + async getForkMessages(): Promise> { + const response = await this.send({ type: "get_fork_messages" }); + return this.getData<{ messages: Array<{ entryId: string; text: string }> }>(response).messages; + } + + /** + * Get text of last assistant message. + */ + async getLastAssistantText(): Promise { + const response = await this.send({ type: "get_last_assistant_text" }); + return this.getData<{ text: string | null }>(response).text; + } + + /** + * Set the session display name. + */ + async setSessionName(name: string): Promise { + await this.send({ type: "set_session_name", name }); + } + + /** + * Get all messages in the session. + */ + async getMessages(): Promise { + const response = await this.send({ type: "get_messages" }); + return this.getData<{ messages: AgentMessage[] }>(response).messages; + } + + /** + * Get available commands (extension commands, prompt templates, skills). + */ + async getCommands(): Promise { + const response = await this.send({ type: "get_commands" }); + return this.getData<{ commands: RpcSlashCommand[] }>(response).commands; + } + + // ========================================================================= + // Helpers + // ========================================================================= + + /** + * Wait for agent to become idle (no streaming). + * Resolves when agent_end event is received. + */ + waitForIdle(timeout = 60000): Promise { + return new Promise((resolve, reject) => { + const timer = setTimeout(() => { + unsubscribe(); + reject(new Error(`Timeout waiting for agent to become idle. Stderr: ${this.stderr}`)); + }, timeout); + + const unsubscribe = this.onEvent((event) => { + if (event.type === "agent_end") { + clearTimeout(timer); + unsubscribe(); + resolve(); + } + }); + }); + } + + /** + * Collect events until agent becomes idle. 
+ */ + collectEvents(timeout = 60000): Promise { + return new Promise((resolve, reject) => { + const events: AgentEvent[] = []; + const timer = setTimeout(() => { + unsubscribe(); + reject(new Error(`Timeout collecting events. Stderr: ${this.stderr}`)); + }, timeout); + + const unsubscribe = this.onEvent((event) => { + events.push(event); + if (event.type === "agent_end") { + clearTimeout(timer); + unsubscribe(); + resolve(events); + } + }); + }); + } + + /** + * Send prompt and wait for completion, returning all events. + */ + async promptAndWait(message: string, images?: ImageContent[], timeout = 60000): Promise { + const eventsPromise = this.collectEvents(timeout); + await this.prompt(message, images); + return eventsPromise; + } + + // ========================================================================= + // Internal + // ========================================================================= + + private handleLine(line: string): void { + try { + const data = JSON.parse(line); + + // Check if it's a response to a pending request + if (data.type === "response" && data.id && this.pendingRequests.has(data.id)) { + const pending = this.pendingRequests.get(data.id)!; + this.pendingRequests.delete(data.id); + pending.resolve(data as RpcResponse); + return; + } + + // Otherwise it's an event + for (const listener of this.eventListeners) { + listener(data as AgentEvent); + } + } catch { + // Ignore non-JSON lines + } + } + + private async send(command: RpcCommandBody): Promise { + if (!this.process?.stdin) { + throw new Error("Client not started"); + } + + const id = `req_${++this.requestId}`; + const fullCommand = { ...command, id } as RpcCommand; + + return new Promise((resolve, reject) => { + this.pendingRequests.set(id, { resolve, reject }); + + const timeout = setTimeout(() => { + this.pendingRequests.delete(id); + reject(new Error(`Timeout waiting for response to ${command.type}. 
Stderr: ${this.stderr}`)); + }, 30000); + + this.pendingRequests.set(id, { + resolve: (response) => { + clearTimeout(timeout); + resolve(response); + }, + reject: (error) => { + clearTimeout(timeout); + reject(error); + }, + }); + + this.process!.stdin!.write(serializeJsonLine(fullCommand)); + }); + } + + private getData(response: RpcResponse): T { + if (!response.success) { + const errorResponse = response as Extract; + throw new Error(errorResponse.error); + } + // Type assertion: we trust response.data matches T based on the command sent. + // This is safe because each public method specifies the correct T for its command. + const successResponse = response as Extract; + return successResponse.data as T; + } +} diff --git a/packages/pi-coding-agent/src/modes/rpc/rpc-mode.ts b/packages/pi-coding-agent/src/modes/rpc/rpc-mode.ts new file mode 100644 index 000000000..b38f43a12 --- /dev/null +++ b/packages/pi-coding-agent/src/modes/rpc/rpc-mode.ts @@ -0,0 +1,638 @@ +/** + * RPC mode: Headless operation with JSON stdin/stdout protocol. + * + * Used for embedding the agent in other applications. + * Receives commands as JSON on stdin, outputs events and responses as JSON on stdout. 
+ * + * Protocol: + * - Commands: JSON objects with `type` field, optional `id` for correlation + * - Responses: JSON objects with `type: "response"`, `command`, `success`, and optional `data`/`error` + * - Events: AgentSessionEvent objects streamed as they occur + * - Extension UI: Extension UI requests are emitted, client responds with extension_ui_response + */ + +import * as crypto from "node:crypto"; +import type { AgentSession } from "../../core/agent-session.js"; +import type { + ExtensionUIContext, + ExtensionUIDialogOptions, + ExtensionWidgetOptions, +} from "../../core/extensions/index.js"; +import { type Theme, theme } from "../interactive/theme/theme.js"; +import { attachJsonlLineReader, serializeJsonLine } from "./jsonl.js"; +import type { + RpcCommand, + RpcExtensionUIRequest, + RpcExtensionUIResponse, + RpcResponse, + RpcSessionState, + RpcSlashCommand, +} from "./rpc-types.js"; + +// Re-export types for consumers +export type { + RpcCommand, + RpcExtensionUIRequest, + RpcExtensionUIResponse, + RpcResponse, + RpcSessionState, +} from "./rpc-types.js"; + +/** + * Run in RPC mode. + * Listens for JSON commands on stdin, outputs events and responses on stdout. 
+ */ +export async function runRpcMode(session: AgentSession): Promise { + const output = (obj: RpcResponse | RpcExtensionUIRequest | object) => { + process.stdout.write(serializeJsonLine(obj)); + }; + + const success = ( + id: string | undefined, + command: T, + data?: object | null, + ): RpcResponse => { + if (data === undefined) { + return { id, type: "response", command, success: true } as RpcResponse; + } + return { id, type: "response", command, success: true, data } as RpcResponse; + }; + + const error = (id: string | undefined, command: string, message: string): RpcResponse => { + return { id, type: "response", command, success: false, error: message }; + }; + + // Pending extension UI requests waiting for response + const pendingExtensionRequests = new Map< + string, + { resolve: (value: any) => void; reject: (error: Error) => void } + >(); + + // Shutdown request flag + let shutdownRequested = false; + + /** Helper for dialog methods with signal/timeout support */ + function createDialogPromise( + opts: ExtensionUIDialogOptions | undefined, + defaultValue: T, + request: Record, + parseResponse: (response: RpcExtensionUIResponse) => T, + ): Promise { + if (opts?.signal?.aborted) return Promise.resolve(defaultValue); + + const id = crypto.randomUUID(); + return new Promise((resolve, reject) => { + let timeoutId: ReturnType | undefined; + + const cleanup = () => { + if (timeoutId) clearTimeout(timeoutId); + opts?.signal?.removeEventListener("abort", onAbort); + pendingExtensionRequests.delete(id); + }; + + const onAbort = () => { + cleanup(); + resolve(defaultValue); + }; + opts?.signal?.addEventListener("abort", onAbort, { once: true }); + + if (opts?.timeout) { + timeoutId = setTimeout(() => { + cleanup(); + resolve(defaultValue); + }, opts.timeout); + } + + pendingExtensionRequests.set(id, { + resolve: (response: RpcExtensionUIResponse) => { + cleanup(); + resolve(parseResponse(response)); + }, + reject, + }); + output({ type: "extension_ui_request", id, 
...request } as RpcExtensionUIRequest); + }); + } + + /** + * Create an extension UI context that uses the RPC protocol. + */ + const createExtensionUIContext = (): ExtensionUIContext => ({ + select: (title, options, opts) => + createDialogPromise(opts, undefined, { method: "select", title, options, timeout: opts?.timeout }, (r) => + "cancelled" in r && r.cancelled ? undefined : "value" in r ? r.value : undefined, + ), + + confirm: (title, message, opts) => + createDialogPromise(opts, false, { method: "confirm", title, message, timeout: opts?.timeout }, (r) => + "cancelled" in r && r.cancelled ? false : "confirmed" in r ? r.confirmed : false, + ), + + input: (title, placeholder, opts) => + createDialogPromise(opts, undefined, { method: "input", title, placeholder, timeout: opts?.timeout }, (r) => + "cancelled" in r && r.cancelled ? undefined : "value" in r ? r.value : undefined, + ), + + notify(message: string, type?: "info" | "warning" | "error"): void { + // Fire and forget - no response needed + output({ + type: "extension_ui_request", + id: crypto.randomUUID(), + method: "notify", + message, + notifyType: type, + } as RpcExtensionUIRequest); + }, + + onTerminalInput(): () => void { + // Raw terminal input not supported in RPC mode + return () => {}; + }, + + setStatus(key: string, text: string | undefined): void { + // Fire and forget - no response needed + output({ + type: "extension_ui_request", + id: crypto.randomUUID(), + method: "setStatus", + statusKey: key, + statusText: text, + } as RpcExtensionUIRequest); + }, + + setWorkingMessage(_message?: string): void { + // Working message not supported in RPC mode - requires TUI loader access + }, + + setWidget(key: string, content: unknown, options?: ExtensionWidgetOptions): void { + // Only support string arrays in RPC mode - factory functions are ignored + if (content === undefined || Array.isArray(content)) { + output({ + type: "extension_ui_request", + id: crypto.randomUUID(), + method: "setWidget", + 
widgetKey: key, + widgetLines: content as string[] | undefined, + widgetPlacement: options?.placement, + } as RpcExtensionUIRequest); + } + // Component factories are not supported in RPC mode - would need TUI access + }, + + setFooter(_factory: unknown): void { + // Custom footer not supported in RPC mode - requires TUI access + }, + + setHeader(_factory: unknown): void { + // Custom header not supported in RPC mode - requires TUI access + }, + + setTitle(title: string): void { + // Fire and forget - host can implement terminal title control + output({ + type: "extension_ui_request", + id: crypto.randomUUID(), + method: "setTitle", + title, + } as RpcExtensionUIRequest); + }, + + async custom() { + // Custom UI not supported in RPC mode + return undefined as never; + }, + + pasteToEditor(text: string): void { + // Paste handling not supported in RPC mode - falls back to setEditorText + this.setEditorText(text); + }, + + setEditorText(text: string): void { + // Fire and forget - host can implement editor control + output({ + type: "extension_ui_request", + id: crypto.randomUUID(), + method: "set_editor_text", + text, + } as RpcExtensionUIRequest); + }, + + getEditorText(): string { + // Synchronous method can't wait for RPC response + // Host should track editor state locally if needed + return ""; + }, + + async editor(title: string, prefill?: string): Promise { + const id = crypto.randomUUID(); + return new Promise((resolve, reject) => { + pendingExtensionRequests.set(id, { + resolve: (response: RpcExtensionUIResponse) => { + if ("cancelled" in response && response.cancelled) { + resolve(undefined); + } else if ("value" in response) { + resolve(response.value); + } else { + resolve(undefined); + } + }, + reject, + }); + output({ type: "extension_ui_request", id, method: "editor", title, prefill } as RpcExtensionUIRequest); + }); + }, + + setEditorComponent(): void { + // Custom editor components not supported in RPC mode + }, + + get theme() { + return theme; + 
}, + + getAllThemes() { + return []; + }, + + getTheme(_name: string) { + return undefined; + }, + + setTheme(_theme: string | Theme) { + // Theme switching not supported in RPC mode + return { success: false, error: "Theme switching not supported in RPC mode" }; + }, + + getToolsExpanded() { + // Tool expansion not supported in RPC mode - no TUI + return false; + }, + + setToolsExpanded(_expanded: boolean) { + // Tool expansion not supported in RPC mode - no TUI + }, + }); + + // Set up extensions with RPC-based UI context + await session.bindExtensions({ + uiContext: createExtensionUIContext(), + commandContextActions: { + waitForIdle: () => session.agent.waitForIdle(), + newSession: async (options) => { + // Delegate to AgentSession (handles setup + agent state sync) + const success = await session.newSession(options); + return { cancelled: !success }; + }, + fork: async (entryId) => { + const result = await session.fork(entryId); + return { cancelled: result.cancelled }; + }, + navigateTree: async (targetId, options) => { + const result = await session.navigateTree(targetId, { + summarize: options?.summarize, + customInstructions: options?.customInstructions, + replaceInstructions: options?.replaceInstructions, + label: options?.label, + }); + return { cancelled: result.cancelled }; + }, + switchSession: async (sessionPath) => { + const success = await session.switchSession(sessionPath); + return { cancelled: !success }; + }, + reload: async () => { + await session.reload(); + }, + }, + shutdownHandler: () => { + shutdownRequested = true; + }, + onError: (err) => { + output({ type: "extension_error", extensionPath: err.extensionPath, event: err.event, error: err.error }); + }, + }); + + // Output all agent events as JSON + session.subscribe((event) => { + output(event); + }); + + // Handle a single command + const handleCommand = async (command: RpcCommand): Promise => { + const id = command.id; + + switch (command.type) { + // 
================================================================= + // Prompting + // ================================================================= + + case "prompt": { + // Don't await - events will stream + // Extension commands are executed immediately, file prompt templates are expanded + // If streaming and streamingBehavior specified, queues via steer/followUp + session + .prompt(command.message, { + images: command.images, + streamingBehavior: command.streamingBehavior, + source: "rpc", + }) + .catch((e) => output(error(id, "prompt", e.message))); + return success(id, "prompt"); + } + + case "steer": { + await session.steer(command.message, command.images); + return success(id, "steer"); + } + + case "follow_up": { + await session.followUp(command.message, command.images); + return success(id, "follow_up"); + } + + case "abort": { + await session.abort(); + return success(id, "abort"); + } + + case "new_session": { + const options = command.parentSession ? { parentSession: command.parentSession } : undefined; + const cancelled = !(await session.newSession(options)); + return success(id, "new_session", { cancelled }); + } + + // ================================================================= + // State + // ================================================================= + + case "get_state": { + const state: RpcSessionState = { + model: session.model, + thinkingLevel: session.thinkingLevel, + isStreaming: session.isStreaming, + isCompacting: session.isCompacting, + steeringMode: session.steeringMode, + followUpMode: session.followUpMode, + sessionFile: session.sessionFile, + sessionId: session.sessionId, + sessionName: session.sessionName, + autoCompactionEnabled: session.autoCompactionEnabled, + messageCount: session.messages.length, + pendingMessageCount: session.pendingMessageCount, + }; + return success(id, "get_state", state); + } + + // ================================================================= + // Model + // 
================================================================= + + case "set_model": { + const models = await session.modelRegistry.getAvailable(); + const model = models.find((m) => m.provider === command.provider && m.id === command.modelId); + if (!model) { + return error(id, "set_model", `Model not found: ${command.provider}/${command.modelId}`); + } + await session.setModel(model); + return success(id, "set_model", model); + } + + case "cycle_model": { + const result = await session.cycleModel(); + if (!result) { + return success(id, "cycle_model", null); + } + return success(id, "cycle_model", result); + } + + case "get_available_models": { + const models = await session.modelRegistry.getAvailable(); + return success(id, "get_available_models", { models }); + } + + // ================================================================= + // Thinking + // ================================================================= + + case "set_thinking_level": { + session.setThinkingLevel(command.level); + return success(id, "set_thinking_level"); + } + + case "cycle_thinking_level": { + const level = session.cycleThinkingLevel(); + if (!level) { + return success(id, "cycle_thinking_level", null); + } + return success(id, "cycle_thinking_level", { level }); + } + + // ================================================================= + // Queue Modes + // ================================================================= + + case "set_steering_mode": { + session.setSteeringMode(command.mode); + return success(id, "set_steering_mode"); + } + + case "set_follow_up_mode": { + session.setFollowUpMode(command.mode); + return success(id, "set_follow_up_mode"); + } + + // ================================================================= + // Compaction + // ================================================================= + + case "compact": { + const result = await session.compact(command.customInstructions); + return success(id, "compact", result); + } + + case 
"set_auto_compaction": { + session.setAutoCompactionEnabled(command.enabled); + return success(id, "set_auto_compaction"); + } + + // ================================================================= + // Retry + // ================================================================= + + case "set_auto_retry": { + session.setAutoRetryEnabled(command.enabled); + return success(id, "set_auto_retry"); + } + + case "abort_retry": { + session.abortRetry(); + return success(id, "abort_retry"); + } + + // ================================================================= + // Bash + // ================================================================= + + case "bash": { + const result = await session.executeBash(command.command); + return success(id, "bash", result); + } + + case "abort_bash": { + session.abortBash(); + return success(id, "abort_bash"); + } + + // ================================================================= + // Session + // ================================================================= + + case "get_session_stats": { + const stats = session.getSessionStats(); + return success(id, "get_session_stats", stats); + } + + case "export_html": { + const path = await session.exportToHtml(command.outputPath); + return success(id, "export_html", { path }); + } + + case "switch_session": { + const cancelled = !(await session.switchSession(command.sessionPath)); + return success(id, "switch_session", { cancelled }); + } + + case "fork": { + const result = await session.fork(command.entryId); + return success(id, "fork", { text: result.selectedText, cancelled: result.cancelled }); + } + + case "get_fork_messages": { + const messages = session.getUserMessagesForForking(); + return success(id, "get_fork_messages", { messages }); + } + + case "get_last_assistant_text": { + const text = session.getLastAssistantText(); + return success(id, "get_last_assistant_text", { text }); + } + + case "set_session_name": { + const name = command.name.trim(); + if (!name) { + return 
error(id, "set_session_name", "Session name cannot be empty"); + } + session.setSessionName(name); + return success(id, "set_session_name"); + } + + // ================================================================= + // Messages + // ================================================================= + + case "get_messages": { + return success(id, "get_messages", { messages: session.messages }); + } + + // ================================================================= + // Commands (available for invocation via prompt) + // ================================================================= + + case "get_commands": { + const commands: RpcSlashCommand[] = []; + + // Extension commands + for (const { command, extensionPath } of session.extensionRunner?.getRegisteredCommandsWithPaths() ?? []) { + commands.push({ + name: command.name, + description: command.description, + source: "extension", + path: extensionPath, + }); + } + + // Prompt templates (source is always "user" | "project" | "path" in coding-agent) + for (const template of session.promptTemplates) { + commands.push({ + name: template.name, + description: template.description, + source: "prompt", + location: template.source as RpcSlashCommand["location"], + path: template.filePath, + }); + } + + // Skills (source is always "user" | "project" | "path" in coding-agent) + for (const skill of session.resourceLoader.getSkills().skills) { + commands.push({ + name: `skill:${skill.name}`, + description: skill.description, + source: "skill", + location: skill.source as RpcSlashCommand["location"], + path: skill.filePath, + }); + } + + return success(id, "get_commands", { commands }); + } + + default: { + const unknownCommand = command as { type: string }; + return error(undefined, unknownCommand.type, `Unknown command: ${unknownCommand.type}`); + } + } + }; + + /** + * Check if shutdown was requested and perform shutdown if so. + * Called after handling each command when waiting for the next command. 
+ */ + let detachInput = () => {}; + + async function checkShutdownRequested(): Promise { + if (!shutdownRequested) return; + + const currentRunner = session.extensionRunner; + if (currentRunner?.hasHandlers("session_shutdown")) { + await currentRunner.emit({ type: "session_shutdown" }); + } + + detachInput(); + process.stdin.pause(); + process.exit(0); + } + + const handleInputLine = async (line: string) => { + try { + const parsed = JSON.parse(line); + + // Handle extension UI responses + if (parsed.type === "extension_ui_response") { + const response = parsed as RpcExtensionUIResponse; + const pending = pendingExtensionRequests.get(response.id); + if (pending) { + pendingExtensionRequests.delete(response.id); + pending.resolve(response); + } + return; + } + + // Handle regular commands + const command = parsed as RpcCommand; + const response = await handleCommand(command); + output(response); + + // Check for deferred shutdown request (idle between commands) + await checkShutdownRequested(); + } catch (e: any) { + output(error(undefined, "parse", `Failed to parse command: ${e.message}`)); + } + }; + + detachInput = attachJsonlLineReader(process.stdin, (line) => { + void handleInputLine(line); + }); + + // Keep process alive forever + return new Promise(() => {}); +} diff --git a/packages/pi-coding-agent/src/modes/rpc/rpc-types.ts b/packages/pi-coding-agent/src/modes/rpc/rpc-types.ts new file mode 100644 index 000000000..e2d15716c --- /dev/null +++ b/packages/pi-coding-agent/src/modes/rpc/rpc-types.ts @@ -0,0 +1,263 @@ +/** + * RPC protocol types for headless operation. + * + * Commands are sent as JSON lines on stdin. + * Responses and events are emitted as JSON lines on stdout. 
+ */ + +import type { AgentMessage, ThinkingLevel } from "@gsd/pi-agent-core"; +import type { ImageContent, Model } from "@gsd/pi-ai"; +import type { SessionStats } from "../../core/agent-session.js"; +import type { BashResult } from "../../core/bash-executor.js"; +import type { CompactionResult } from "../../core/compaction/index.js"; + +// ============================================================================ +// RPC Commands (stdin) +// ============================================================================ + +export type RpcCommand = + // Prompting + | { id?: string; type: "prompt"; message: string; images?: ImageContent[]; streamingBehavior?: "steer" | "followUp" } + | { id?: string; type: "steer"; message: string; images?: ImageContent[] } + | { id?: string; type: "follow_up"; message: string; images?: ImageContent[] } + | { id?: string; type: "abort" } + | { id?: string; type: "new_session"; parentSession?: string } + + // State + | { id?: string; type: "get_state" } + + // Model + | { id?: string; type: "set_model"; provider: string; modelId: string } + | { id?: string; type: "cycle_model" } + | { id?: string; type: "get_available_models" } + + // Thinking + | { id?: string; type: "set_thinking_level"; level: ThinkingLevel } + | { id?: string; type: "cycle_thinking_level" } + + // Queue modes + | { id?: string; type: "set_steering_mode"; mode: "all" | "one-at-a-time" } + | { id?: string; type: "set_follow_up_mode"; mode: "all" | "one-at-a-time" } + + // Compaction + | { id?: string; type: "compact"; customInstructions?: string } + | { id?: string; type: "set_auto_compaction"; enabled: boolean } + + // Retry + | { id?: string; type: "set_auto_retry"; enabled: boolean } + | { id?: string; type: "abort_retry" } + + // Bash + | { id?: string; type: "bash"; command: string } + | { id?: string; type: "abort_bash" } + + // Session + | { id?: string; type: "get_session_stats" } + | { id?: string; type: "export_html"; outputPath?: string } + | { id?: 
string; type: "switch_session"; sessionPath: string } + | { id?: string; type: "fork"; entryId: string } + | { id?: string; type: "get_fork_messages" } + | { id?: string; type: "get_last_assistant_text" } + | { id?: string; type: "set_session_name"; name: string } + + // Messages + | { id?: string; type: "get_messages" } + + // Commands (available for invocation via prompt) + | { id?: string; type: "get_commands" }; + +// ============================================================================ +// RPC Slash Command (for get_commands response) +// ============================================================================ + +/** A command available for invocation via prompt */ +export interface RpcSlashCommand { + /** Command name (without leading slash) */ + name: string; + /** Human-readable description */ + description?: string; + /** What kind of command this is */ + source: "extension" | "prompt" | "skill"; + /** Where the command was loaded from (undefined for extensions) */ + location?: "user" | "project" | "path"; + /** File path to the command source */ + path?: string; +} + +// ============================================================================ +// RPC State +// ============================================================================ + +export interface RpcSessionState { + model?: Model; + thinkingLevel: ThinkingLevel; + isStreaming: boolean; + isCompacting: boolean; + steeringMode: "all" | "one-at-a-time"; + followUpMode: "all" | "one-at-a-time"; + sessionFile?: string; + sessionId: string; + sessionName?: string; + autoCompactionEnabled: boolean; + messageCount: number; + pendingMessageCount: number; +} + +// ============================================================================ +// RPC Responses (stdout) +// ============================================================================ + +// Success responses with data +export type RpcResponse = + // Prompting (async - events follow) + | { id?: string; type: "response"; command: 
"prompt"; success: true } + | { id?: string; type: "response"; command: "steer"; success: true } + | { id?: string; type: "response"; command: "follow_up"; success: true } + | { id?: string; type: "response"; command: "abort"; success: true } + | { id?: string; type: "response"; command: "new_session"; success: true; data: { cancelled: boolean } } + + // State + | { id?: string; type: "response"; command: "get_state"; success: true; data: RpcSessionState } + + // Model + | { + id?: string; + type: "response"; + command: "set_model"; + success: true; + data: Model; + } + | { + id?: string; + type: "response"; + command: "cycle_model"; + success: true; + data: { model: Model; thinkingLevel: ThinkingLevel; isScoped: boolean } | null; + } + | { + id?: string; + type: "response"; + command: "get_available_models"; + success: true; + data: { models: Model[] }; + } + + // Thinking + | { id?: string; type: "response"; command: "set_thinking_level"; success: true } + | { + id?: string; + type: "response"; + command: "cycle_thinking_level"; + success: true; + data: { level: ThinkingLevel } | null; + } + + // Queue modes + | { id?: string; type: "response"; command: "set_steering_mode"; success: true } + | { id?: string; type: "response"; command: "set_follow_up_mode"; success: true } + + // Compaction + | { id?: string; type: "response"; command: "compact"; success: true; data: CompactionResult } + | { id?: string; type: "response"; command: "set_auto_compaction"; success: true } + + // Retry + | { id?: string; type: "response"; command: "set_auto_retry"; success: true } + | { id?: string; type: "response"; command: "abort_retry"; success: true } + + // Bash + | { id?: string; type: "response"; command: "bash"; success: true; data: BashResult } + | { id?: string; type: "response"; command: "abort_bash"; success: true } + + // Session + | { id?: string; type: "response"; command: "get_session_stats"; success: true; data: SessionStats } + | { id?: string; type: "response"; 
command: "export_html"; success: true; data: { path: string } } + | { id?: string; type: "response"; command: "switch_session"; success: true; data: { cancelled: boolean } } + | { id?: string; type: "response"; command: "fork"; success: true; data: { text: string; cancelled: boolean } } + | { + id?: string; + type: "response"; + command: "get_fork_messages"; + success: true; + data: { messages: Array<{ entryId: string; text: string }> }; + } + | { + id?: string; + type: "response"; + command: "get_last_assistant_text"; + success: true; + data: { text: string | null }; + } + | { id?: string; type: "response"; command: "set_session_name"; success: true } + + // Messages + | { id?: string; type: "response"; command: "get_messages"; success: true; data: { messages: AgentMessage[] } } + + // Commands + | { + id?: string; + type: "response"; + command: "get_commands"; + success: true; + data: { commands: RpcSlashCommand[] }; + } + + // Error response (any command can fail) + | { id?: string; type: "response"; command: string; success: false; error: string }; + +// ============================================================================ +// Extension UI Events (stdout) +// ============================================================================ + +/** Emitted when an extension needs user input */ +export type RpcExtensionUIRequest = + | { type: "extension_ui_request"; id: string; method: "select"; title: string; options: string[]; timeout?: number } + | { type: "extension_ui_request"; id: string; method: "confirm"; title: string; message: string; timeout?: number } + | { + type: "extension_ui_request"; + id: string; + method: "input"; + title: string; + placeholder?: string; + timeout?: number; + } + | { type: "extension_ui_request"; id: string; method: "editor"; title: string; prefill?: string } + | { + type: "extension_ui_request"; + id: string; + method: "notify"; + message: string; + notifyType?: "info" | "warning" | "error"; + } + | { + type: 
"extension_ui_request"; + id: string; + method: "setStatus"; + statusKey: string; + statusText: string | undefined; + } + | { + type: "extension_ui_request"; + id: string; + method: "setWidget"; + widgetKey: string; + widgetLines: string[] | undefined; + widgetPlacement?: "aboveEditor" | "belowEditor"; + } + | { type: "extension_ui_request"; id: string; method: "setTitle"; title: string } + | { type: "extension_ui_request"; id: string; method: "set_editor_text"; text: string }; + +// ============================================================================ +// Extension UI Commands (stdin) +// ============================================================================ + +/** Response to an extension UI request */ +export type RpcExtensionUIResponse = + | { type: "extension_ui_response"; id: string; value: string } + | { type: "extension_ui_response"; id: string; confirmed: boolean } + | { type: "extension_ui_response"; id: string; cancelled: true }; + +// ============================================================================ +// Helper type for extracting command types +// ============================================================================ + +export type RpcCommandType = RpcCommand["type"]; diff --git a/packages/pi-coding-agent/src/utils/changelog.ts b/packages/pi-coding-agent/src/utils/changelog.ts new file mode 100644 index 000000000..79b17d0e0 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/changelog.ts @@ -0,0 +1,99 @@ +import { existsSync, readFileSync } from "fs"; + +export interface ChangelogEntry { + major: number; + minor: number; + patch: number; + content: string; +} + +/** + * Parse changelog entries from CHANGELOG.md + * Scans for ## lines and collects content until next ## or EOF + */ +export function parseChangelog(changelogPath: string): ChangelogEntry[] { + if (!existsSync(changelogPath)) { + return []; + } + + try { + const content = readFileSync(changelogPath, "utf-8"); + const lines = content.split("\n"); + const 
entries: ChangelogEntry[] = []; + + let currentLines: string[] = []; + let currentVersion: { major: number; minor: number; patch: number } | null = null; + + for (const line of lines) { + // Check if this is a version header (## [x.y.z] ...) + if (line.startsWith("## ")) { + // Save previous entry if exists + if (currentVersion && currentLines.length > 0) { + entries.push({ + ...currentVersion, + content: currentLines.join("\n").trim(), + }); + } + + // Try to parse version from this line + const versionMatch = line.match(/##\s+\[?(\d+)\.(\d+)\.(\d+)\]?/); + if (versionMatch) { + currentVersion = { + major: Number.parseInt(versionMatch[1], 10), + minor: Number.parseInt(versionMatch[2], 10), + patch: Number.parseInt(versionMatch[3], 10), + }; + currentLines = [line]; + } else { + // Reset if we can't parse version + currentVersion = null; + currentLines = []; + } + } else if (currentVersion) { + // Collect lines for current version + currentLines.push(line); + } + } + + // Save last entry + if (currentVersion && currentLines.length > 0) { + entries.push({ + ...currentVersion, + content: currentLines.join("\n").trim(), + }); + } + + return entries; + } catch (error) { + console.error(`Warning: Could not parse changelog: ${error}`); + return []; + } +} + +/** + * Compare versions. 
Returns: -1 if v1 < v2, 0 if v1 === v2, 1 if v1 > v2 + */ +export function compareVersions(v1: ChangelogEntry, v2: ChangelogEntry): number { + if (v1.major !== v2.major) return v1.major - v2.major; + if (v1.minor !== v2.minor) return v1.minor - v2.minor; + return v1.patch - v2.patch; +} + +/** + * Get entries newer than lastVersion + */ +export function getNewEntries(entries: ChangelogEntry[], lastVersion: string): ChangelogEntry[] { + // Parse lastVersion + const parts = lastVersion.split(".").map(Number); + const last: ChangelogEntry = { + major: parts[0] || 0, + minor: parts[1] || 0, + patch: parts[2] || 0, + content: "", + }; + + return entries.filter((entry) => compareVersions(entry, last) > 0); +} + +// Re-export getChangelogPath from paths.ts for convenience +export { getChangelogPath } from "../config.js"; diff --git a/packages/pi-coding-agent/src/utils/clipboard-image.ts b/packages/pi-coding-agent/src/utils/clipboard-image.ts new file mode 100644 index 000000000..58cd5913f --- /dev/null +++ b/packages/pi-coding-agent/src/utils/clipboard-image.ts @@ -0,0 +1,207 @@ +import { spawnSync } from "child_process"; + +import { clipboard } from "./clipboard-native.js"; +import { loadPhoton } from "./photon.js"; + +export type ClipboardImage = { + bytes: Uint8Array; + mimeType: string; +}; + +const SUPPORTED_IMAGE_MIME_TYPES = ["image/png", "image/jpeg", "image/webp", "image/gif"] as const; + +const DEFAULT_LIST_TIMEOUT_MS = 1000; +const DEFAULT_READ_TIMEOUT_MS = 3000; +const DEFAULT_MAX_BUFFER_BYTES = 50 * 1024 * 1024; + +export function isWaylandSession(env: NodeJS.ProcessEnv = process.env): boolean { + return Boolean(env.WAYLAND_DISPLAY) || env.XDG_SESSION_TYPE === "wayland"; +} + +function baseMimeType(mimeType: string): string { + return mimeType.split(";")[0]?.trim().toLowerCase() ?? 
mimeType.toLowerCase(); +} + +export function extensionForImageMimeType(mimeType: string): string | null { + switch (baseMimeType(mimeType)) { + case "image/png": + return "png"; + case "image/jpeg": + return "jpg"; + case "image/webp": + return "webp"; + case "image/gif": + return "gif"; + default: + return null; + } +} + +function selectPreferredImageMimeType(mimeTypes: string[]): string | null { + const normalized = mimeTypes + .map((t) => t.trim()) + .filter(Boolean) + .map((t) => ({ raw: t, base: baseMimeType(t) })); + + for (const preferred of SUPPORTED_IMAGE_MIME_TYPES) { + const match = normalized.find((t) => t.base === preferred); + if (match) { + return match.raw; + } + } + + const anyImage = normalized.find((t) => t.base.startsWith("image/")); + return anyImage?.raw ?? null; +} + +function isSupportedImageMimeType(mimeType: string): boolean { + const base = baseMimeType(mimeType); + return SUPPORTED_IMAGE_MIME_TYPES.some((t) => t === base); +} + +/** + * Convert unsupported image formats to PNG using Photon. + * Returns null if conversion is unavailable or fails. + */ +async function convertToPng(bytes: Uint8Array): Promise { + const photon = await loadPhoton(); + if (!photon) { + return null; + } + + try { + const image = photon.PhotonImage.new_from_byteslice(bytes); + try { + return image.get_bytes(); + } finally { + image.free(); + } + } catch { + return null; + } +} + +function runCommand( + command: string, + args: string[], + options?: { timeoutMs?: number; maxBufferBytes?: number }, +): { stdout: Buffer; ok: boolean } { + const timeoutMs = options?.timeoutMs ?? DEFAULT_READ_TIMEOUT_MS; + const maxBufferBytes = options?.maxBufferBytes ?? 
DEFAULT_MAX_BUFFER_BYTES; + + const result = spawnSync(command, args, { + timeout: timeoutMs, + maxBuffer: maxBufferBytes, + }); + + if (result.error) { + return { ok: false, stdout: Buffer.alloc(0) }; + } + + if (result.status !== 0) { + return { ok: false, stdout: Buffer.alloc(0) }; + } + + const stdout = Buffer.isBuffer(result.stdout) + ? result.stdout + : Buffer.from(result.stdout ?? "", typeof result.stdout === "string" ? "utf-8" : undefined); + + return { ok: true, stdout }; +} + +function readClipboardImageViaWlPaste(): ClipboardImage | null { + const list = runCommand("wl-paste", ["--list-types"], { timeoutMs: DEFAULT_LIST_TIMEOUT_MS }); + if (!list.ok) { + return null; + } + + const types = list.stdout + .toString("utf-8") + .split(/\r?\n/) + .map((t) => t.trim()) + .filter(Boolean); + + const selectedType = selectPreferredImageMimeType(types); + if (!selectedType) { + return null; + } + + const data = runCommand("wl-paste", ["--type", selectedType, "--no-newline"]); + if (!data.ok || data.stdout.length === 0) { + return null; + } + + return { bytes: data.stdout, mimeType: baseMimeType(selectedType) }; +} + +function readClipboardImageViaXclip(): ClipboardImage | null { + const targets = runCommand("xclip", ["-selection", "clipboard", "-t", "TARGETS", "-o"], { + timeoutMs: DEFAULT_LIST_TIMEOUT_MS, + }); + + let candidateTypes: string[] = []; + if (targets.ok) { + candidateTypes = targets.stdout + .toString("utf-8") + .split(/\r?\n/) + .map((t) => t.trim()) + .filter(Boolean); + } + + const preferred = candidateTypes.length > 0 ? selectPreferredImageMimeType(candidateTypes) : null; + const tryTypes = preferred ? 
[preferred, ...SUPPORTED_IMAGE_MIME_TYPES] : [...SUPPORTED_IMAGE_MIME_TYPES]; + + for (const mimeType of tryTypes) { + const data = runCommand("xclip", ["-selection", "clipboard", "-t", mimeType, "-o"]); + if (data.ok && data.stdout.length > 0) { + return { bytes: data.stdout, mimeType: baseMimeType(mimeType) }; + } + } + + return null; +} + +export async function readClipboardImage(options?: { + env?: NodeJS.ProcessEnv; + platform?: NodeJS.Platform; +}): Promise { + const env = options?.env ?? process.env; + const platform = options?.platform ?? process.platform; + + if (env.TERMUX_VERSION) { + return null; + } + + let image: ClipboardImage | null = null; + + if (platform === "linux" && isWaylandSession(env)) { + image = readClipboardImageViaWlPaste() ?? readClipboardImageViaXclip(); + } else { + if (!clipboard || !clipboard.hasImage()) { + return null; + } + + const imageData = await clipboard.getImageBinary(); + if (!imageData || imageData.length === 0) { + return null; + } + + const bytes = imageData instanceof Uint8Array ? 
imageData : Uint8Array.from(imageData); + image = { bytes, mimeType: "image/png" }; + } + + if (!image) { + return null; + } + + // Convert unsupported formats (e.g., BMP from WSLg) to PNG + if (!isSupportedImageMimeType(image.mimeType)) { + const pngBytes = await convertToPng(image.bytes); + if (!pngBytes) { + return null; + } + return { bytes: pngBytes, mimeType: "image/png" }; + } + + return image; +} diff --git a/packages/pi-coding-agent/src/utils/clipboard-native.ts b/packages/pi-coding-agent/src/utils/clipboard-native.ts new file mode 100644 index 000000000..e176d47c3 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/clipboard-native.ts @@ -0,0 +1,21 @@ +import { createRequire } from "module"; + +export type ClipboardModule = { + hasImage: () => boolean; + getImageBinary: () => Promise>; +}; + +const require = createRequire(import.meta.url); +let clipboard: ClipboardModule | null = null; + +const hasDisplay = process.platform !== "linux" || Boolean(process.env.DISPLAY || process.env.WAYLAND_DISPLAY); + +if (!process.env.TERMUX_VERSION && hasDisplay) { + try { + clipboard = require("@mariozechner/clipboard") as ClipboardModule; + } catch { + clipboard = null; + } +} + +export { clipboard }; diff --git a/packages/pi-coding-agent/src/utils/clipboard.ts b/packages/pi-coding-agent/src/utils/clipboard.ts new file mode 100644 index 000000000..233b0eda0 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/clipboard.ts @@ -0,0 +1,62 @@ +import { execSync, spawn } from "child_process"; +import { platform } from "os"; +import { isWaylandSession } from "./clipboard-image.js"; + +export function copyToClipboard(text: string): void { + // Always emit OSC 52 - works over SSH/mosh, harmless locally + const encoded = Buffer.from(text).toString("base64"); + process.stdout.write(`\x1b]52;c;${encoded}\x07`); + + // Also try native tools (best effort for local sessions) + const p = platform(); + const options = { input: text, timeout: 5000 }; + + try { + if (p === 
"darwin") { + execSync("pbcopy", options); + } else if (p === "win32") { + execSync("clip", options); + } else { + // Linux. Try Termux, Wayland, or X11 clipboard tools. + if (process.env.TERMUX_VERSION) { + try { + execSync("termux-clipboard-set", options); + return; + } catch { + // Fall back to Wayland or X11 tools. + } + } + + const isWayland = isWaylandSession(); + if (isWayland) { + try { + // Verify wl-copy exists (spawn errors are async and won't be caught) + execSync("which wl-copy", { stdio: "ignore" }); + // wl-copy with execSync hangs due to fork behavior; use spawn instead + const proc = spawn("wl-copy", [], { stdio: ["pipe", "ignore", "ignore"] }); + proc.stdin.on("error", () => { + // Ignore EPIPE errors if wl-copy exits early + }); + proc.stdin.write(text); + proc.stdin.end(); + proc.unref(); + } catch { + // Fall back to xclip/xsel (works on XWayland) + try { + execSync("xclip -selection clipboard", options); + } catch { + execSync("xsel --clipboard --input", options); + } + } + } else { + try { + execSync("xclip -selection clipboard", options); + } catch { + execSync("xsel --clipboard --input", options); + } + } + } + } catch { + // Ignore - OSC 52 already emitted as fallback + } +} diff --git a/packages/pi-coding-agent/src/utils/frontmatter.ts b/packages/pi-coding-agent/src/utils/frontmatter.ts new file mode 100644 index 000000000..847e2e539 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/frontmatter.ts @@ -0,0 +1,39 @@ +import { parse } from "yaml"; + +type ParsedFrontmatter> = { + frontmatter: T; + body: string; +}; + +const normalizeNewlines = (value: string): string => value.replace(/\r\n/g, "\n").replace(/\r/g, "\n"); + +const extractFrontmatter = (content: string): { yamlString: string | null; body: string } => { + const normalized = normalizeNewlines(content); + + if (!normalized.startsWith("---")) { + return { yamlString: null, body: normalized }; + } + + const endIndex = normalized.indexOf("\n---", 3); + if (endIndex === -1) { + 
return { yamlString: null, body: normalized }; + } + + return { + yamlString: normalized.slice(4, endIndex), + body: normalized.slice(endIndex + 4).trim(), + }; +}; + +export const parseFrontmatter = = Record>( + content: string, +): ParsedFrontmatter => { + const { yamlString, body } = extractFrontmatter(content); + if (!yamlString) { + return { frontmatter: {} as T, body }; + } + const parsed = parse(yamlString); + return { frontmatter: (parsed ?? {}) as T, body }; +}; + +export const stripFrontmatter = (content: string): string => parseFrontmatter(content).body; diff --git a/packages/pi-coding-agent/src/utils/git.ts b/packages/pi-coding-agent/src/utils/git.ts new file mode 100644 index 000000000..9652e4018 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/git.ts @@ -0,0 +1,192 @@ +import hostedGitInfo from "hosted-git-info"; + +/** + * Parsed git URL information. + */ +export type GitSource = { + /** Always "git" for git sources */ + type: "git"; + /** Clone URL (always valid for git clone, without ref suffix) */ + repo: string; + /** Git host domain (e.g., "github.com") */ + host: string; + /** Repository path (e.g., "user/repo") */ + path: string; + /** Git ref (branch, tag, commit) if specified */ + ref?: string; + /** True if ref was specified (package won't be auto-updated) */ + pinned: boolean; +}; + +function splitRef(url: string): { repo: string; ref?: string } { + const scpLikeMatch = url.match(/^git@([^:]+):(.+)$/); + if (scpLikeMatch) { + const pathWithMaybeRef = scpLikeMatch[2] ?? ""; + const refSeparator = pathWithMaybeRef.indexOf("@"); + if (refSeparator < 0) return { repo: url }; + const repoPath = pathWithMaybeRef.slice(0, refSeparator); + const ref = pathWithMaybeRef.slice(refSeparator + 1); + if (!repoPath || !ref) return { repo: url }; + return { + repo: `git@${scpLikeMatch[1] ?? 
""}:${repoPath}`, + ref, + }; + } + + if (url.includes("://")) { + try { + const parsed = new URL(url); + const pathWithMaybeRef = parsed.pathname.replace(/^\/+/, ""); + const refSeparator = pathWithMaybeRef.indexOf("@"); + if (refSeparator < 0) return { repo: url }; + const repoPath = pathWithMaybeRef.slice(0, refSeparator); + const ref = pathWithMaybeRef.slice(refSeparator + 1); + if (!repoPath || !ref) return { repo: url }; + parsed.pathname = `/${repoPath}`; + return { + repo: parsed.toString().replace(/\/$/, ""), + ref, + }; + } catch { + return { repo: url }; + } + } + + const slashIndex = url.indexOf("/"); + if (slashIndex < 0) { + return { repo: url }; + } + const host = url.slice(0, slashIndex); + const pathWithMaybeRef = url.slice(slashIndex + 1); + const refSeparator = pathWithMaybeRef.indexOf("@"); + if (refSeparator < 0) { + return { repo: url }; + } + const repoPath = pathWithMaybeRef.slice(0, refSeparator); + const ref = pathWithMaybeRef.slice(refSeparator + 1); + if (!repoPath || !ref) { + return { repo: url }; + } + return { + repo: `${host}/${repoPath}`, + ref, + }; +} + +function parseGenericGitUrl(url: string): GitSource | null { + const { repo: repoWithoutRef, ref } = splitRef(url); + let repo = repoWithoutRef; + let host = ""; + let path = ""; + + const scpLikeMatch = repoWithoutRef.match(/^git@([^:]+):(.+)$/); + if (scpLikeMatch) { + host = scpLikeMatch[1] ?? ""; + path = scpLikeMatch[2] ?? 
""; + } else if ( + repoWithoutRef.startsWith("https://") || + repoWithoutRef.startsWith("http://") || + repoWithoutRef.startsWith("ssh://") || + repoWithoutRef.startsWith("git://") + ) { + try { + const parsed = new URL(repoWithoutRef); + host = parsed.hostname; + path = parsed.pathname.replace(/^\/+/, ""); + } catch { + return null; + } + } else { + const slashIndex = repoWithoutRef.indexOf("/"); + if (slashIndex < 0) { + return null; + } + host = repoWithoutRef.slice(0, slashIndex); + path = repoWithoutRef.slice(slashIndex + 1); + if (!host.includes(".") && host !== "localhost") { + return null; + } + repo = `https://${repoWithoutRef}`; + } + + const normalizedPath = path.replace(/\.git$/, "").replace(/^\/+/, ""); + if (!host || !normalizedPath || normalizedPath.split("/").length < 2) { + return null; + } + + return { + type: "git", + repo, + host, + path: normalizedPath, + ref, + pinned: Boolean(ref), + }; +} + +/** + * Parse git source into a GitSource. + * + * Rules: + * - With git: prefix, accept all historical shorthand forms. + * - Without git: prefix, only accept explicit protocol URLs. + */ +export function parseGitUrl(source: string): GitSource | null { + const trimmed = source.trim(); + const hasGitPrefix = trimmed.startsWith("git:"); + const url = hasGitPrefix ? trimmed.slice(4).trim() : trimmed; + + if (!hasGitPrefix && !/^(https?|ssh|git):\/\//i.test(url)) { + return null; + } + + const split = splitRef(url); + + const hostedCandidates = [split.ref ? 
`${split.repo}#${split.ref}` : undefined, url].filter( + (value): value is string => Boolean(value), + ); + for (const candidate of hostedCandidates) { + const info = hostedGitInfo.fromUrl(candidate); + if (info) { + if (split.ref && info.project?.includes("@")) { + continue; + } + const useHttpsPrefix = + !split.repo.startsWith("http://") && + !split.repo.startsWith("https://") && + !split.repo.startsWith("ssh://") && + !split.repo.startsWith("git://") && + !split.repo.startsWith("git@"); + return { + type: "git", + repo: useHttpsPrefix ? `https://${split.repo}` : split.repo, + host: info.domain || "", + path: `${info.user}/${info.project}`.replace(/\.git$/, ""), + ref: info.committish || split.ref || undefined, + pinned: Boolean(info.committish || split.ref), + }; + } + } + + const httpsCandidates = [split.ref ? `https://${split.repo}#${split.ref}` : undefined, `https://${url}`].filter( + (value): value is string => Boolean(value), + ); + for (const candidate of httpsCandidates) { + const info = hostedGitInfo.fromUrl(candidate); + if (info) { + if (split.ref && info.project?.includes("@")) { + continue; + } + return { + type: "git", + repo: `https://${split.repo}`, + host: info.domain || "", + path: `${info.user}/${info.project}`.replace(/\.git$/, ""), + ref: info.committish || split.ref || undefined, + pinned: Boolean(info.committish || split.ref), + }; + } + } + + return parseGenericGitUrl(url); +} diff --git a/packages/pi-coding-agent/src/utils/image-convert.ts b/packages/pi-coding-agent/src/utils/image-convert.ts new file mode 100644 index 000000000..74198c630 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/image-convert.ts @@ -0,0 +1,38 @@ +import { loadPhoton } from "./photon.js"; + +/** + * Convert image to PNG format for terminal display. + * Kitty graphics protocol requires PNG format (f=100). 
+ */ +export async function convertToPng( + base64Data: string, + mimeType: string, +): Promise<{ data: string; mimeType: string } | null> { + // Already PNG, no conversion needed + if (mimeType === "image/png") { + return { data: base64Data, mimeType }; + } + + const photon = await loadPhoton(); + if (!photon) { + // Photon not available, can't convert + return null; + } + + try { + const bytes = new Uint8Array(Buffer.from(base64Data, "base64")); + const image = photon.PhotonImage.new_from_byteslice(bytes); + try { + const pngBuffer = image.get_bytes(); + return { + data: Buffer.from(pngBuffer).toString("base64"), + mimeType: "image/png", + }; + } finally { + image.free(); + } + } catch { + // Conversion failed + return null; + } +} diff --git a/packages/pi-coding-agent/src/utils/image-resize.ts b/packages/pi-coding-agent/src/utils/image-resize.ts new file mode 100644 index 000000000..87047f2f9 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/image-resize.ts @@ -0,0 +1,231 @@ +import type { ImageContent } from "@gsd/pi-ai"; +import { loadPhoton } from "./photon.js"; + +export interface ImageResizeOptions { + maxWidth?: number; // Default: 2000 + maxHeight?: number; // Default: 2000 + maxBytes?: number; // Default: 4.5MB (below Anthropic's 5MB limit) + jpegQuality?: number; // Default: 80 +} + +export interface ResizedImage { + data: string; // base64 + mimeType: string; + originalWidth: number; + originalHeight: number; + width: number; + height: number; + wasResized: boolean; +} + +// 4.5MB - provides headroom below Anthropic's 5MB limit +const DEFAULT_MAX_BYTES = 4.5 * 1024 * 1024; + +const DEFAULT_OPTIONS: Required = { + maxWidth: 2000, + maxHeight: 2000, + maxBytes: DEFAULT_MAX_BYTES, + jpegQuality: 80, +}; + +/** Helper to pick the smaller of two buffers */ +function pickSmaller( + a: { buffer: Uint8Array; mimeType: string }, + b: { buffer: Uint8Array; mimeType: string }, +): { buffer: Uint8Array; mimeType: string } { + return a.buffer.length <= 
b.buffer.length ? a : b; +} + +/** + * Resize an image to fit within the specified max dimensions and file size. + * Returns the original image if it already fits within the limits. + * + * Uses Photon (Rust/WASM) for image processing. If Photon is not available, + * returns the original image unchanged. + * + * Strategy for staying under maxBytes: + * 1. First resize to maxWidth/maxHeight + * 2. Try both PNG and JPEG formats, pick the smaller one + * 3. If still too large, try JPEG with decreasing quality + * 4. If still too large, progressively reduce dimensions + */ +export async function resizeImage(img: ImageContent, options?: ImageResizeOptions): Promise { + const opts = { ...DEFAULT_OPTIONS, ...options }; + const inputBuffer = Buffer.from(img.data, "base64"); + + const photon = await loadPhoton(); + if (!photon) { + // Photon not available, return original image + return { + data: img.data, + mimeType: img.mimeType, + originalWidth: 0, + originalHeight: 0, + width: 0, + height: 0, + wasResized: false, + }; + } + + let image: ReturnType | undefined; + try { + image = photon.PhotonImage.new_from_byteslice(new Uint8Array(inputBuffer)); + + const originalWidth = image.get_width(); + const originalHeight = image.get_height(); + const format = img.mimeType?.split("/")[1] ?? "png"; + + // Check if already within all limits (dimensions AND size) + const originalSize = inputBuffer.length; + if (originalWidth <= opts.maxWidth && originalHeight <= opts.maxHeight && originalSize <= opts.maxBytes) { + return { + data: img.data, + mimeType: img.mimeType ?? 
`image/${format}`, + originalWidth, + originalHeight, + width: originalWidth, + height: originalHeight, + wasResized: false, + }; + } + + // Calculate initial dimensions respecting max limits + let targetWidth = originalWidth; + let targetHeight = originalHeight; + + if (targetWidth > opts.maxWidth) { + targetHeight = Math.round((targetHeight * opts.maxWidth) / targetWidth); + targetWidth = opts.maxWidth; + } + if (targetHeight > opts.maxHeight) { + targetWidth = Math.round((targetWidth * opts.maxHeight) / targetHeight); + targetHeight = opts.maxHeight; + } + + // Helper to resize and encode in both formats, returning the smaller one + function tryBothFormats( + width: number, + height: number, + jpegQuality: number, + ): { buffer: Uint8Array; mimeType: string } { + const resized = photon!.resize(image!, width, height, photon!.SamplingFilter.Lanczos3); + + try { + const pngBuffer = resized.get_bytes(); + const jpegBuffer = resized.get_bytes_jpeg(jpegQuality); + + return pickSmaller( + { buffer: pngBuffer, mimeType: "image/png" }, + { buffer: jpegBuffer, mimeType: "image/jpeg" }, + ); + } finally { + resized.free(); + } + } + + // Try to produce an image under maxBytes + const qualitySteps = [85, 70, 55, 40]; + const scaleSteps = [1.0, 0.75, 0.5, 0.35, 0.25]; + + let best: { buffer: Uint8Array; mimeType: string }; + let finalWidth = targetWidth; + let finalHeight = targetHeight; + + // First attempt: resize to target dimensions, try both formats + best = tryBothFormats(targetWidth, targetHeight, opts.jpegQuality); + + if (best.buffer.length <= opts.maxBytes) { + return { + data: Buffer.from(best.buffer).toString("base64"), + mimeType: best.mimeType, + originalWidth, + originalHeight, + width: finalWidth, + height: finalHeight, + wasResized: true, + }; + } + + // Still too large - try JPEG with decreasing quality + for (const quality of qualitySteps) { + best = tryBothFormats(targetWidth, targetHeight, quality); + + if (best.buffer.length <= opts.maxBytes) { + return 
{ + data: Buffer.from(best.buffer).toString("base64"), + mimeType: best.mimeType, + originalWidth, + originalHeight, + width: finalWidth, + height: finalHeight, + wasResized: true, + }; + } + } + + // Still too large - reduce dimensions progressively + for (const scale of scaleSteps) { + finalWidth = Math.round(targetWidth * scale); + finalHeight = Math.round(targetHeight * scale); + + if (finalWidth < 100 || finalHeight < 100) { + break; + } + + for (const quality of qualitySteps) { + best = tryBothFormats(finalWidth, finalHeight, quality); + + if (best.buffer.length <= opts.maxBytes) { + return { + data: Buffer.from(best.buffer).toString("base64"), + mimeType: best.mimeType, + originalWidth, + originalHeight, + width: finalWidth, + height: finalHeight, + wasResized: true, + }; + } + } + } + + // Last resort: return smallest version we produced + return { + data: Buffer.from(best.buffer).toString("base64"), + mimeType: best.mimeType, + originalWidth, + originalHeight, + width: finalWidth, + height: finalHeight, + wasResized: true, + }; + } catch { + // Failed to load image + return { + data: img.data, + mimeType: img.mimeType, + originalWidth: 0, + originalHeight: 0, + width: 0, + height: 0, + wasResized: false, + }; + } finally { + if (image) { + image.free(); + } + } +} + +/** + * Format a dimension note for resized images. + * This helps the model understand the coordinate mapping. + */ +export function formatDimensionNote(result: ResizedImage): string | undefined { + if (!result.wasResized) { + return undefined; + } + + const scale = result.originalWidth / result.width; + return `[Image: original ${result.originalWidth}x${result.originalHeight}, displayed at ${result.width}x${result.height}. 
Multiply coordinates by ${scale.toFixed(2)} to map to original image.]`; +} diff --git a/packages/pi-coding-agent/src/utils/mime.ts b/packages/pi-coding-agent/src/utils/mime.ts new file mode 100644 index 000000000..f9ded46e2 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/mime.ts @@ -0,0 +1,30 @@ +import { open } from "node:fs/promises"; +import { fileTypeFromBuffer } from "file-type"; + +const IMAGE_MIME_TYPES = new Set(["image/jpeg", "image/png", "image/gif", "image/webp"]); + +const FILE_TYPE_SNIFF_BYTES = 4100; + +export async function detectSupportedImageMimeTypeFromFile(filePath: string): Promise { + const fileHandle = await open(filePath, "r"); + try { + const buffer = Buffer.alloc(FILE_TYPE_SNIFF_BYTES); + const { bytesRead } = await fileHandle.read(buffer, 0, FILE_TYPE_SNIFF_BYTES, 0); + if (bytesRead === 0) { + return null; + } + + const fileType = await fileTypeFromBuffer(buffer.subarray(0, bytesRead)); + if (!fileType) { + return null; + } + + if (!IMAGE_MIME_TYPES.has(fileType.mime)) { + return null; + } + + return fileType.mime; + } finally { + await fileHandle.close(); + } +} diff --git a/packages/pi-coding-agent/src/utils/photon.ts b/packages/pi-coding-agent/src/utils/photon.ts new file mode 100644 index 000000000..6c320705e --- /dev/null +++ b/packages/pi-coding-agent/src/utils/photon.ts @@ -0,0 +1,139 @@ +/** + * Photon image processing wrapper. + * + * This module provides a unified interface to @silvia-odwyer/photon-node that works in: + * 1. Node.js (development, npm run build) + * 2. Bun compiled binaries (standalone distribution) + * + * The challenge: photon-node's CJS entry uses fs.readFileSync(__dirname + '/photon_rs_bg.wasm') + * which bakes the build machine's absolute path into Bun compiled binaries. + * + * Solution: + * 1. Patch fs.readFileSync to redirect missing photon_rs_bg.wasm reads + * 2. 
Copy photon_rs_bg.wasm next to the executable in build:binary + */ + +import type { PathOrFileDescriptor } from "fs"; +import { createRequire } from "module"; +import * as path from "path"; +import { fileURLToPath } from "url"; + +const require = createRequire(import.meta.url); +const fs = require("fs") as typeof import("fs"); + +// Re-export types from the main package +export type { PhotonImage as PhotonImageType } from "@silvia-odwyer/photon-node"; + +type ReadFileSync = typeof fs.readFileSync; + +const WASM_FILENAME = "photon_rs_bg.wasm"; + +// Lazy-loaded photon module +let photonModule: typeof import("@silvia-odwyer/photon-node") | null = null; +let loadPromise: Promise | null = null; + +function pathOrNull(file: PathOrFileDescriptor): string | null { + if (typeof file === "string") { + return file; + } + if (file instanceof URL) { + return fileURLToPath(file); + } + return null; +} + +function getFallbackWasmPaths(): string[] { + const execDir = path.dirname(process.execPath); + return [ + path.join(execDir, WASM_FILENAME), + path.join(execDir, "photon", WASM_FILENAME), + path.join(process.cwd(), WASM_FILENAME), + ]; +} + +function patchPhotonWasmRead(): () => void { + const originalReadFileSync: ReadFileSync = fs.readFileSync.bind(fs); + const fallbackPaths = getFallbackWasmPaths(); + const mutableFs = fs as { readFileSync: ReadFileSync }; + + const patchedReadFileSync: ReadFileSync = ((...args: Parameters) => { + const [file, options] = args; + const resolvedPath = pathOrNull(file); + + if (resolvedPath?.endsWith(WASM_FILENAME)) { + try { + return originalReadFileSync(...args); + } catch (error) { + const err = error as NodeJS.ErrnoException; + if (err?.code && err.code !== "ENOENT") { + throw error; + } + + for (const fallbackPath of fallbackPaths) { + if (!fs.existsSync(fallbackPath)) { + continue; + } + if (options === undefined) { + return originalReadFileSync(fallbackPath); + } + return originalReadFileSync(fallbackPath, options); + } + + throw error; 
+ } + } + + return originalReadFileSync(...args); + }) as ReadFileSync; + + try { + mutableFs.readFileSync = patchedReadFileSync; + } catch { + Object.defineProperty(fs, "readFileSync", { + value: patchedReadFileSync, + writable: true, + configurable: true, + }); + } + + return () => { + try { + mutableFs.readFileSync = originalReadFileSync; + } catch { + Object.defineProperty(fs, "readFileSync", { + value: originalReadFileSync, + writable: true, + configurable: true, + }); + } + }; +} + +/** + * Load the photon module asynchronously. + * Returns cached module on subsequent calls. + */ +export async function loadPhoton(): Promise { + if (photonModule) { + return photonModule; + } + + if (loadPromise) { + return loadPromise; + } + + loadPromise = (async () => { + const restoreReadFileSync = patchPhotonWasmRead(); + try { + photonModule = await import("@silvia-odwyer/photon-node"); + return photonModule; + } catch { + photonModule = null; + return photonModule; + } finally { + restoreReadFileSync(); + } + })(); + + return loadPromise; +} diff --git a/packages/pi-coding-agent/src/utils/shell.ts b/packages/pi-coding-agent/src/utils/shell.ts new file mode 100644 index 000000000..8f43b0b54 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/shell.ts @@ -0,0 +1,202 @@ +import { existsSync } from "node:fs"; +import { delimiter } from "node:path"; +import { spawn, spawnSync } from "child_process"; +import { getBinDir, getSettingsPath } from "../config.js"; +import { SettingsManager } from "../core/settings-manager.js"; + +let cachedShellConfig: { shell: string; args: string[] } | null = null; + +/** + * Find bash executable on PATH (cross-platform) + */ +function findBashOnPath(): string | null { + if (process.platform === "win32") { + // Windows: Use 'where' and verify file exists (where can return non-existent paths) + try { + const result = spawnSync("where", ["bash.exe"], { encoding: "utf-8", timeout: 5000 }); + if (result.status === 0 && result.stdout) { + const 
firstMatch = result.stdout.trim().split(/\r?\n/)[0]; + if (firstMatch && existsSync(firstMatch)) { + return firstMatch; + } + } + } catch { + // Ignore errors + } + return null; + } + + // Unix: Use 'which' and trust its output (handles Termux and special filesystems) + try { + const result = spawnSync("which", ["bash"], { encoding: "utf-8", timeout: 5000 }); + if (result.status === 0 && result.stdout) { + const firstMatch = result.stdout.trim().split(/\r?\n/)[0]; + if (firstMatch) { + return firstMatch; + } + } + } catch { + // Ignore errors + } + return null; +} + +/** + * Get shell configuration based on platform. + * Resolution order: + * 1. User-specified shellPath in settings.json + * 2. On Windows: Git Bash in known locations, then bash on PATH + * 3. On Unix: /bin/bash, then bash on PATH, then fallback to sh + */ +export function getShellConfig(): { shell: string; args: string[] } { + if (cachedShellConfig) { + return cachedShellConfig; + } + + const settings = SettingsManager.create(); + const customShellPath = settings.getShellPath(); + + // 1. Check user-specified shell path + if (customShellPath) { + if (existsSync(customShellPath)) { + cachedShellConfig = { shell: customShellPath, args: ["-c"] }; + return cachedShellConfig; + } + throw new Error( + `Custom shell path not found: ${customShellPath}\nPlease update shellPath in ${getSettingsPath()}`, + ); + } + + if (process.platform === "win32") { + // 2. Try Git Bash in known locations + const paths: string[] = []; + const programFiles = process.env.ProgramFiles; + if (programFiles) { + paths.push(`${programFiles}\\Git\\bin\\bash.exe`); + } + const programFilesX86 = process.env["ProgramFiles(x86)"]; + if (programFilesX86) { + paths.push(`${programFilesX86}\\Git\\bin\\bash.exe`); + } + + for (const path of paths) { + if (existsSync(path)) { + cachedShellConfig = { shell: path, args: ["-c"] }; + return cachedShellConfig; + } + } + + // 3. Fallback: search bash.exe on PATH (Cygwin, MSYS2, WSL, etc.) 
+ const bashOnPath = findBashOnPath(); + if (bashOnPath) { + cachedShellConfig = { shell: bashOnPath, args: ["-c"] }; + return cachedShellConfig; + } + + throw new Error( + `No bash shell found. Options:\n` + + ` 1. Install Git for Windows: https://git-scm.com/download/win\n` + + ` 2. Add your bash to PATH (Cygwin, MSYS2, etc.)\n` + + ` 3. Set shellPath in ${getSettingsPath()}\n\n` + + `Searched Git Bash in:\n${paths.map((p) => ` ${p}`).join("\n")}`, + ); + } + + // Unix: try /bin/bash, then bash on PATH, then fallback to sh + if (existsSync("/bin/bash")) { + cachedShellConfig = { shell: "/bin/bash", args: ["-c"] }; + return cachedShellConfig; + } + + const bashOnPath = findBashOnPath(); + if (bashOnPath) { + cachedShellConfig = { shell: bashOnPath, args: ["-c"] }; + return cachedShellConfig; + } + + cachedShellConfig = { shell: "sh", args: ["-c"] }; + return cachedShellConfig; +} + +export function getShellEnv(): NodeJS.ProcessEnv { + const binDir = getBinDir(); + const pathKey = Object.keys(process.env).find((key) => key.toLowerCase() === "path") ?? "PATH"; + const currentPath = process.env[pathKey] ?? ""; + const pathEntries = currentPath.split(delimiter).filter(Boolean); + const hasBinDir = pathEntries.includes(binDir); + const updatedPath = hasBinDir ? currentPath : [binDir, currentPath].filter(Boolean).join(delimiter); + + return { + ...process.env, + [pathKey]: updatedPath, + }; +} + +/** + * Sanitize binary output for display/storage. 
+ * Removes characters that crash string-width or cause display issues: + * - Control characters (except tab, newline, carriage return) + * - Lone surrogates + * - Unicode Format characters (crash string-width due to a bug) + * - Characters with undefined code points + */ +export function sanitizeBinaryOutput(str: string): string { + // Use Array.from to properly iterate over code points (not code units) + // This handles surrogate pairs correctly and catches edge cases where + // codePointAt() might return undefined + return Array.from(str) + .filter((char) => { + // Filter out characters that cause string-width to crash + // This includes: + // - Unicode format characters + // - Lone surrogates (already filtered by Array.from) + // - Control chars except \t \n \r + // - Characters with undefined code points + + const code = char.codePointAt(0); + + // Skip if code point is undefined (edge case with invalid strings) + if (code === undefined) return false; + + // Allow tab, newline, carriage return + if (code === 0x09 || code === 0x0a || code === 0x0d) return true; + + // Filter out control characters (0x00-0x1F, except 0x09, 0x0a, 0x0x0d) + if (code <= 0x1f) return false; + + // Filter out Unicode format characters + if (code >= 0xfff9 && code <= 0xfffb) return false; + + return true; + }) + .join(""); +} + +/** + * Kill a process and all its children (cross-platform) + */ +export function killProcessTree(pid: number): void { + if (process.platform === "win32") { + // Use taskkill on Windows to kill process tree + try { + spawn("taskkill", ["/F", "/T", "/PID", String(pid)], { + stdio: "ignore", + detached: true, + }); + } catch { + // Ignore errors if taskkill fails + } + } else { + // Use SIGKILL on Unix/Linux/Mac + try { + process.kill(-pid, "SIGKILL"); + } catch { + // Fallback to killing just the child if process group kill fails + try { + process.kill(pid, "SIGKILL"); + } catch { + // Process already dead + } + } + } +} diff --git 
a/packages/pi-coding-agent/src/utils/sleep.ts b/packages/pi-coding-agent/src/utils/sleep.ts new file mode 100644 index 000000000..948f93c47 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/sleep.ts @@ -0,0 +1,18 @@ +/** + * Sleep helper that respects abort signal. + */ +export function sleep(ms: number, signal?: AbortSignal): Promise { + return new Promise((resolve, reject) => { + if (signal?.aborted) { + reject(new Error("Aborted")); + return; + } + + const timeout = setTimeout(resolve, ms); + + signal?.addEventListener("abort", () => { + clearTimeout(timeout); + reject(new Error("Aborted")); + }); + }); +} diff --git a/packages/pi-coding-agent/src/utils/tools-manager.ts b/packages/pi-coding-agent/src/utils/tools-manager.ts new file mode 100644 index 000000000..f51e042b4 --- /dev/null +++ b/packages/pi-coding-agent/src/utils/tools-manager.ts @@ -0,0 +1,286 @@ +import chalk from "chalk"; +import { spawnSync } from "child_process"; +import extractZip from "extract-zip"; +import { chmodSync, createWriteStream, existsSync, mkdirSync, readdirSync, renameSync, rmSync } from "fs"; +import { arch, platform } from "os"; +import { join } from "path"; +import { Readable } from "stream"; +import { finished } from "stream/promises"; +import { APP_NAME, getBinDir } from "../config.js"; + +const TOOLS_DIR = getBinDir(); +const NETWORK_TIMEOUT_MS = 10000; + +function isOfflineModeEnabled(): boolean { + const value = process.env.PI_OFFLINE; + if (!value) return false; + return value === "1" || value.toLowerCase() === "true" || value.toLowerCase() === "yes"; +} + +interface ToolConfig { + name: string; + repo: string; // GitHub repo (e.g., "sharkdp/fd") + binaryName: string; // Name of the binary inside the archive + tagPrefix: string; // Prefix for tags (e.g., "v" for v1.0.0, "" for 1.0.0) + getAssetName: (version: string, plat: string, architecture: string) => string | null; +} + +const TOOLS: Record = { + fd: { + name: "fd", + repo: "sharkdp/fd", + binaryName: "fd", + 
tagPrefix: "v", + getAssetName: (version, plat, architecture) => { + if (plat === "darwin") { + const archStr = architecture === "arm64" ? "aarch64" : "x86_64"; + return `fd-v${version}-${archStr}-apple-darwin.tar.gz`; + } else if (plat === "linux") { + const archStr = architecture === "arm64" ? "aarch64" : "x86_64"; + return `fd-v${version}-${archStr}-unknown-linux-gnu.tar.gz`; + } else if (plat === "win32") { + const archStr = architecture === "arm64" ? "aarch64" : "x86_64"; + return `fd-v${version}-${archStr}-pc-windows-msvc.zip`; + } + return null; + }, + }, + rg: { + name: "ripgrep", + repo: "BurntSushi/ripgrep", + binaryName: "rg", + tagPrefix: "", + getAssetName: (version, plat, architecture) => { + if (plat === "darwin") { + const archStr = architecture === "arm64" ? "aarch64" : "x86_64"; + return `ripgrep-${version}-${archStr}-apple-darwin.tar.gz`; + } else if (plat === "linux") { + if (architecture === "arm64") { + return `ripgrep-${version}-aarch64-unknown-linux-gnu.tar.gz`; + } + return `ripgrep-${version}-x86_64-unknown-linux-musl.tar.gz`; + } else if (plat === "win32") { + const archStr = architecture === "arm64" ? "aarch64" : "x86_64"; + return `ripgrep-${version}-${archStr}-pc-windows-msvc.zip`; + } + return null; + }, + }, +}; + +// Check if a command exists in PATH by trying to run it +function commandExists(cmd: string): boolean { + try { + const result = spawnSync(cmd, ["--version"], { stdio: "pipe" }); + // Check for ENOENT error (command not found) + return result.error === undefined || result.error === null; + } catch { + return false; + } +} + +// Get the path to a tool (system-wide or in our tools dir) +export function getToolPath(tool: "fd" | "rg"): string | null { + const config = TOOLS[tool]; + if (!config) return null; + + // Check our tools directory first + const localPath = join(TOOLS_DIR, config.binaryName + (platform() === "win32" ? 
".exe" : "")); + if (existsSync(localPath)) { + return localPath; + } + + // Check system PATH - if found, just return the command name (it's in PATH) + if (commandExists(config.binaryName)) { + return config.binaryName; + } + + return null; +} + +// Fetch latest release version from GitHub +async function getLatestVersion(repo: string): Promise { + const response = await fetch(`https://api.github.com/repos/${repo}/releases/latest`, { + headers: { "User-Agent": `${APP_NAME}-coding-agent` }, + signal: AbortSignal.timeout(NETWORK_TIMEOUT_MS), + }); + + if (!response.ok) { + throw new Error(`GitHub API error: ${response.status}`); + } + + const data = (await response.json()) as { tag_name: string }; + return data.tag_name.replace(/^v/, ""); +} + +// Download a file from URL +async function downloadFile(url: string, dest: string): Promise { + const response = await fetch(url, { + signal: AbortSignal.timeout(NETWORK_TIMEOUT_MS), + }); + + if (!response.ok) { + throw new Error(`Failed to download: ${response.status}`); + } + + if (!response.body) { + throw new Error("No response body"); + } + + const fileStream = createWriteStream(dest); + await finished(Readable.fromWeb(response.body as any).pipe(fileStream)); +} + +function findBinaryRecursively(rootDir: string, binaryFileName: string): string | null { + const stack: string[] = [rootDir]; + + while (stack.length > 0) { + const currentDir = stack.pop(); + if (!currentDir) continue; + + const entries = readdirSync(currentDir, { withFileTypes: true }); + for (const entry of entries) { + const fullPath = join(currentDir, entry.name); + if (entry.isFile() && entry.name === binaryFileName) { + return fullPath; + } + if (entry.isDirectory()) { + stack.push(fullPath); + } + } + } + + return null; +} + +// Download and install a tool +async function downloadTool(tool: "fd" | "rg"): Promise { + const config = TOOLS[tool]; + if (!config) throw new Error(`Unknown tool: ${tool}`); + + const plat = platform(); + const architecture = 
arch(); + + // Get latest version + const version = await getLatestVersion(config.repo); + + // Get asset name for this platform + const assetName = config.getAssetName(version, plat, architecture); + if (!assetName) { + throw new Error(`Unsupported platform: ${plat}/${architecture}`); + } + + // Create tools directory + mkdirSync(TOOLS_DIR, { recursive: true }); + + const downloadUrl = `https://github.com/${config.repo}/releases/download/${config.tagPrefix}${version}/${assetName}`; + const archivePath = join(TOOLS_DIR, assetName); + const binaryExt = plat === "win32" ? ".exe" : ""; + const binaryPath = join(TOOLS_DIR, config.binaryName + binaryExt); + + // Download + await downloadFile(downloadUrl, archivePath); + + // Extract into a unique temp directory. fd and rg downloads can run concurrently + // during startup, so sharing a fixed directory causes races. + const extractDir = join( + TOOLS_DIR, + `extract_tmp_${config.binaryName}_${process.pid}_${Date.now()}_${Math.random().toString(36).slice(2, 10)}`, + ); + mkdirSync(extractDir, { recursive: true }); + + try { + if (assetName.endsWith(".tar.gz")) { + const extractResult = spawnSync("tar", ["xzf", archivePath, "-C", extractDir], { stdio: "pipe" }); + if (extractResult.error || extractResult.status !== 0) { + const errMsg = extractResult.error?.message ?? extractResult.stderr?.toString().trim() ?? "unknown error"; + throw new Error(`Failed to extract ${assetName}: ${errMsg}`); + } + } else if (assetName.endsWith(".zip")) { + await extractZip(archivePath, { dir: extractDir }); + } else { + throw new Error(`Unsupported archive format: ${assetName}`); + } + + // Find the binary in extracted files. Some archives contain files directly + // at root, others nest under a versioned subdirectory. 
+ const binaryFileName = config.binaryName + binaryExt; + const extractedDir = join(extractDir, assetName.replace(/\.(tar\.gz|zip)$/, "")); + const extractedBinaryCandidates = [join(extractedDir, binaryFileName), join(extractDir, binaryFileName)]; + let extractedBinary = extractedBinaryCandidates.find((candidate) => existsSync(candidate)); + + if (!extractedBinary) { + extractedBinary = findBinaryRecursively(extractDir, binaryFileName) ?? undefined; + } + + if (extractedBinary) { + renameSync(extractedBinary, binaryPath); + } else { + throw new Error(`Binary not found in archive: expected ${binaryFileName} under ${extractDir}`); + } + + // Make executable (Unix only) + if (plat !== "win32") { + chmodSync(binaryPath, 0o755); + } + } finally { + // Cleanup + rmSync(archivePath, { force: true }); + rmSync(extractDir, { recursive: true, force: true }); + } + + return binaryPath; +} + +// Termux package names for tools +const TERMUX_PACKAGES: Record = { + fd: "fd", + rg: "ripgrep", +}; + +// Ensure a tool is available, downloading if necessary +// Returns the path to the tool, or null if unavailable +export async function ensureTool(tool: "fd" | "rg", silent: boolean = false): Promise { + const existingPath = getToolPath(tool); + if (existingPath) { + return existingPath; + } + + const config = TOOLS[tool]; + if (!config) return undefined; + + if (isOfflineModeEnabled()) { + if (!silent) { + console.log(chalk.yellow(`${config.name} not found. Offline mode enabled, skipping download.`)); + } + return undefined; + } + + // On Android/Termux, Linux binaries don't work due to Bionic libc incompatibility. + // Users must install via pkg. + if (platform() === "android") { + const pkgName = TERMUX_PACKAGES[tool] ?? tool; + if (!silent) { + console.log(chalk.yellow(`${config.name} not found. Install with: pkg install ${pkgName}`)); + } + return undefined; + } + + // Tool not found - download it + if (!silent) { + console.log(chalk.dim(`${config.name} not found. 
Downloading...`)); + } + + try { + const path = await downloadTool(tool); + if (!silent) { + console.log(chalk.dim(`${config.name} installed to ${path}`)); + } + return path; + } catch (e) { + if (!silent) { + console.log(chalk.yellow(`Failed to download ${config.name}: ${e instanceof Error ? e.message : e}`)); + } + return undefined; + } +} diff --git a/packages/pi-coding-agent/tsconfig.json b/packages/pi-coding-agent/tsconfig.json new file mode 100644 index 000000000..6f6331d49 --- /dev/null +++ b/packages/pi-coding-agent/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + "target": "ES2024", + "module": "Node16", + "lib": ["ES2024"], + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "inlineSources": true, + "inlineSourceMap": false, + "moduleResolution": "Node16", + "resolveJsonModule": true, + "allowImportingTsExtensions": false, + "experimentalDecorators": true, + "emitDecoratorMetadata": true, + "useDefineForClassFields": false, + "types": ["node"], + "outDir": "./dist", + "rootDir": "./src" + }, + "include": ["src/**/*.ts"], + "exclude": ["node_modules", "dist", "**/*.d.ts", "src/**/*.d.ts"] +} diff --git a/packages/pi-tui/package.json b/packages/pi-tui/package.json new file mode 100644 index 000000000..c5611eff7 --- /dev/null +++ b/packages/pi-tui/package.json @@ -0,0 +1,21 @@ +{ + "name": "@gsd/pi-tui", + "version": "0.57.1", + "description": "Terminal User Interface library (vendored from pi-mono)", + "type": "module", + "main": "dist/index.js", + "types": "./dist/index.d.ts", + "scripts": { + "build": "tsc -p tsconfig.json" + }, + "dependencies": { + "@types/mime-types": "^2.1.4", + "chalk": "^5.5.0", + "get-east-asian-width": "^1.3.0", + "marked": "^15.0.12", + "mime-types": "^3.0.1" + }, + "optionalDependencies": { + "koffi": "^2.9.0" + } +} diff --git a/packages/pi-tui/src/autocomplete.ts 
b/packages/pi-tui/src/autocomplete.ts new file mode 100644 index 000000000..7a7f55b53 --- /dev/null +++ b/packages/pi-tui/src/autocomplete.ts @@ -0,0 +1,736 @@ +import { spawnSync } from "child_process"; +import { readdirSync, statSync } from "fs"; +import { homedir } from "os"; +import { basename, dirname, join } from "path"; +import { fuzzyFilter } from "./fuzzy.js"; + +const PATH_DELIMITERS = new Set([" ", "\t", '"', "'", "="]); + +function findLastDelimiter(text: string): number { + for (let i = text.length - 1; i >= 0; i -= 1) { + if (PATH_DELIMITERS.has(text[i] ?? "")) { + return i; + } + } + return -1; +} + +function findUnclosedQuoteStart(text: string): number | null { + let inQuotes = false; + let quoteStart = -1; + + for (let i = 0; i < text.length; i += 1) { + if (text[i] === '"') { + inQuotes = !inQuotes; + if (inQuotes) { + quoteStart = i; + } + } + } + + return inQuotes ? quoteStart : null; +} + +function isTokenStart(text: string, index: number): boolean { + return index === 0 || PATH_DELIMITERS.has(text[index - 1] ?? 
""); +} + +function extractQuotedPrefix(text: string): string | null { + const quoteStart = findUnclosedQuoteStart(text); + if (quoteStart === null) { + return null; + } + + if (quoteStart > 0 && text[quoteStart - 1] === "@") { + if (!isTokenStart(text, quoteStart - 1)) { + return null; + } + return text.slice(quoteStart - 1); + } + + if (!isTokenStart(text, quoteStart)) { + return null; + } + + return text.slice(quoteStart); +} + +function parsePathPrefix(prefix: string): { rawPrefix: string; isAtPrefix: boolean; isQuotedPrefix: boolean } { + if (prefix.startsWith('@"')) { + return { rawPrefix: prefix.slice(2), isAtPrefix: true, isQuotedPrefix: true }; + } + if (prefix.startsWith('"')) { + return { rawPrefix: prefix.slice(1), isAtPrefix: false, isQuotedPrefix: true }; + } + if (prefix.startsWith("@")) { + return { rawPrefix: prefix.slice(1), isAtPrefix: true, isQuotedPrefix: false }; + } + return { rawPrefix: prefix, isAtPrefix: false, isQuotedPrefix: false }; +} + +function buildCompletionValue( + path: string, + options: { isDirectory: boolean; isAtPrefix: boolean; isQuotedPrefix: boolean }, +): string { + const needsQuotes = options.isQuotedPrefix || path.includes(" "); + const prefix = options.isAtPrefix ? 
"@" : ""; + + if (!needsQuotes) { + return `${prefix}${path}`; + } + + const openQuote = `${prefix}"`; + const closeQuote = '"'; + return `${openQuote}${path}${closeQuote}`; +} + +// Use fd to walk directory tree (fast, respects .gitignore) +function walkDirectoryWithFd( + baseDir: string, + fdPath: string, + query: string, + maxResults: number, +): Array<{ path: string; isDirectory: boolean }> { + const args = [ + "--base-directory", + baseDir, + "--max-results", + String(maxResults), + "--type", + "f", + "--type", + "d", + "--full-path", + "--hidden", + "--exclude", + ".git", + "--exclude", + ".git/*", + "--exclude", + ".git/**", + ]; + + // Add query as pattern if provided + if (query) { + args.push(query); + } + + const result = spawnSync(fdPath, args, { + encoding: "utf-8", + stdio: ["pipe", "pipe", "pipe"], + maxBuffer: 10 * 1024 * 1024, + }); + + if (result.status !== 0 || !result.stdout) { + return []; + } + + const lines = result.stdout.trim().split("\n").filter(Boolean); + const results: Array<{ path: string; isDirectory: boolean }> = []; + + for (const line of lines) { + const normalizedPath = line.endsWith("/") ? 
line.slice(0, -1) : line; + if (normalizedPath === ".git" || normalizedPath.startsWith(".git/") || normalizedPath.includes("/.git/")) { + continue; + } + + // fd outputs directories with trailing / + const isDirectory = line.endsWith("/"); + results.push({ + path: line, + isDirectory, + }); + } + + return results; +} + +export interface AutocompleteItem { + value: string; + label: string; + description?: string; +} + +export interface SlashCommand { + name: string; + description?: string; + // Function to get argument completions for this command + // Returns null if no argument completion is available + getArgumentCompletions?(argumentPrefix: string): AutocompleteItem[] | null; +} + +export interface AutocompleteProvider { + // Get autocomplete suggestions for current text/cursor position + // Returns null if no suggestions available + getSuggestions( + lines: string[], + cursorLine: number, + cursorCol: number, + ): { + items: AutocompleteItem[]; + prefix: string; // What we're matching against (e.g., "/" or "src/") + } | null; + + // Apply the selected item + // Returns the new text and cursor position + applyCompletion( + lines: string[], + cursorLine: number, + cursorCol: number, + item: AutocompleteItem, + prefix: string, + ): { + lines: string[]; + cursorLine: number; + cursorCol: number; + }; +} + +// Combined provider that handles both slash commands and file paths +export class CombinedAutocompleteProvider implements AutocompleteProvider { + private commands: (SlashCommand | AutocompleteItem)[]; + private basePath: string; + private fdPath: string | null; + + constructor( + commands: (SlashCommand | AutocompleteItem)[] = [], + basePath: string = process.cwd(), + fdPath: string | null = null, + ) { + this.commands = commands; + this.basePath = basePath; + this.fdPath = fdPath; + } + + getSuggestions( + lines: string[], + cursorLine: number, + cursorCol: number, + ): { items: AutocompleteItem[]; prefix: string } | null { + const currentLine = 
lines[cursorLine] || ""; + const textBeforeCursor = currentLine.slice(0, cursorCol); + + // Check for @ file reference (fuzzy search) - must be after a delimiter or at start + const atPrefix = this.extractAtPrefix(textBeforeCursor); + if (atPrefix) { + const { rawPrefix, isQuotedPrefix } = parsePathPrefix(atPrefix); + const suggestions = this.getFuzzyFileSuggestions(rawPrefix, { isQuotedPrefix: isQuotedPrefix }); + if (suggestions.length === 0) return null; + + return { + items: suggestions, + prefix: atPrefix, + }; + } + + // Check for slash commands + if (textBeforeCursor.startsWith("/")) { + const spaceIndex = textBeforeCursor.indexOf(" "); + + if (spaceIndex === -1) { + // No space yet - complete command names with fuzzy matching + const prefix = textBeforeCursor.slice(1); // Remove the "/" + const commandItems = this.commands.map((cmd) => ({ + name: "name" in cmd ? cmd.name : cmd.value, + label: "name" in cmd ? cmd.name : cmd.label, + description: cmd.description, + })); + + const filtered = fuzzyFilter(commandItems, prefix, (item) => item.name).map((item) => ({ + value: item.name, + label: item.label, + ...(item.description && { description: item.description }), + })); + + if (filtered.length === 0) return null; + + return { + items: filtered, + prefix: textBeforeCursor, + }; + } else { + // Space found - complete command arguments + const commandName = textBeforeCursor.slice(1, spaceIndex); // Command without "/" + const argumentText = textBeforeCursor.slice(spaceIndex + 1); // Text after space + + const command = this.commands.find((cmd) => { + const name = "name" in cmd ? 
cmd.name : cmd.value; + return name === commandName; + }); + if (!command || !("getArgumentCompletions" in command) || !command.getArgumentCompletions) { + return null; // No argument completion for this command + } + + const argumentSuggestions = command.getArgumentCompletions(argumentText); + if (!argumentSuggestions || argumentSuggestions.length === 0) { + return null; + } + + return { + items: argumentSuggestions, + prefix: argumentText, + }; + } + } + + // Check for file paths - triggered by Tab or if we detect a path pattern + const pathMatch = this.extractPathPrefix(textBeforeCursor, false); + + if (pathMatch !== null) { + const suggestions = this.getFileSuggestions(pathMatch); + if (suggestions.length === 0) return null; + + // Check if we have an exact match that is a directory + // In that case, we might want to return suggestions for the directory content instead + // But only if the prefix ends with / + if (suggestions.length === 1 && suggestions[0]?.value === pathMatch && !pathMatch.endsWith("/")) { + // Exact match found (e.g. user typed "src" and "src/" is the only match) + // We still return it so user can select it and add / + return { + items: suggestions, + prefix: pathMatch, + }; + } + + return { + items: suggestions, + prefix: pathMatch, + }; + } + + return null; + } + + applyCompletion( + lines: string[], + cursorLine: number, + cursorCol: number, + item: AutocompleteItem, + prefix: string, + ): { lines: string[]; cursorLine: number; cursorCol: number } { + const currentLine = lines[cursorLine] || ""; + const beforePrefix = currentLine.slice(0, cursorCol - prefix.length); + const afterCursor = currentLine.slice(cursorCol); + const isQuotedPrefix = prefix.startsWith('"') || prefix.startsWith('@"'); + const hasLeadingQuoteAfterCursor = afterCursor.startsWith('"'); + const hasTrailingQuoteInItem = item.value.endsWith('"'); + const adjustedAfterCursor = + isQuotedPrefix && hasTrailingQuoteInItem && hasLeadingQuoteAfterCursor ? 
afterCursor.slice(1) : afterCursor; + + // Check if we're completing a slash command (prefix starts with "/" but NOT a file path) + // Slash commands are at the start of the line and don't contain path separators after the first / + const isSlashCommand = prefix.startsWith("/") && beforePrefix.trim() === "" && !prefix.slice(1).includes("/"); + if (isSlashCommand) { + // This is a command name completion + const newLine = `${beforePrefix}/${item.value} ${adjustedAfterCursor}`; + const newLines = [...lines]; + newLines[cursorLine] = newLine; + + return { + lines: newLines, + cursorLine, + cursorCol: beforePrefix.length + item.value.length + 2, // +2 for "/" and space + }; + } + + // Check if we're completing a file attachment (prefix starts with "@") + if (prefix.startsWith("@")) { + // This is a file attachment completion + // Don't add space after directories so user can continue autocompleting + const isDirectory = item.label.endsWith("/"); + const suffix = isDirectory ? "" : " "; + const newLine = `${beforePrefix + item.value}${suffix}${adjustedAfterCursor}`; + const newLines = [...lines]; + newLines[cursorLine] = newLine; + + const hasTrailingQuote = item.value.endsWith('"'); + const cursorOffset = isDirectory && hasTrailingQuote ? item.value.length - 1 : item.value.length; + + return { + lines: newLines, + cursorLine, + cursorCol: beforePrefix.length + cursorOffset + suffix.length, + }; + } + + // Check if we're in a slash command context (beforePrefix contains "/command ") + const textBeforeCursor = currentLine.slice(0, cursorCol); + if (textBeforeCursor.includes("/") && textBeforeCursor.includes(" ")) { + // This is likely a command argument completion + const newLine = beforePrefix + item.value + adjustedAfterCursor; + const newLines = [...lines]; + newLines[cursorLine] = newLine; + + const isDirectory = item.label.endsWith("/"); + const hasTrailingQuote = item.value.endsWith('"'); + const cursorOffset = isDirectory && hasTrailingQuote ? 
item.value.length - 1 : item.value.length; + + return { + lines: newLines, + cursorLine, + cursorCol: beforePrefix.length + cursorOffset, + }; + } + + // For file paths, complete the path + const newLine = beforePrefix + item.value + adjustedAfterCursor; + const newLines = [...lines]; + newLines[cursorLine] = newLine; + + const isDirectory = item.label.endsWith("/"); + const hasTrailingQuote = item.value.endsWith('"'); + const cursorOffset = isDirectory && hasTrailingQuote ? item.value.length - 1 : item.value.length; + + return { + lines: newLines, + cursorLine, + cursorCol: beforePrefix.length + cursorOffset, + }; + } + + // Extract @ prefix for fuzzy file suggestions + private extractAtPrefix(text: string): string | null { + const quotedPrefix = extractQuotedPrefix(text); + if (quotedPrefix?.startsWith('@"')) { + return quotedPrefix; + } + + const lastDelimiterIndex = findLastDelimiter(text); + const tokenStart = lastDelimiterIndex === -1 ? 0 : lastDelimiterIndex + 1; + + if (text[tokenStart] === "@") { + return text.slice(tokenStart); + } + + return null; + } + + // Extract a path-like prefix from the text before cursor + private extractPathPrefix(text: string, forceExtract: boolean = false): string | null { + const quotedPrefix = extractQuotedPrefix(text); + if (quotedPrefix) { + return quotedPrefix; + } + + const lastDelimiterIndex = findLastDelimiter(text); + const pathPrefix = lastDelimiterIndex === -1 ? text : text.slice(lastDelimiterIndex + 1); + + // For forced extraction (Tab key), always return something + if (forceExtract) { + return pathPrefix; + } + + // For natural triggers, return if it looks like a path, ends with /, starts with ~/, . 
+ // Only return empty string if the text looks like it's starting a path context + if (pathPrefix.includes("/") || pathPrefix.startsWith(".") || pathPrefix.startsWith("~/")) { + return pathPrefix; + } + + // Return empty string only after a space (not for completely empty text) + // Empty text should not trigger file suggestions - that's for forced Tab completion + if (pathPrefix === "" && text.endsWith(" ")) { + return pathPrefix; + } + + return null; + } + + // Expand home directory (~/) to actual home path + private expandHomePath(path: string): string { + if (path.startsWith("~/")) { + const expandedPath = join(homedir(), path.slice(2)); + // Preserve trailing slash if original path had one + return path.endsWith("/") && !expandedPath.endsWith("/") ? `${expandedPath}/` : expandedPath; + } else if (path === "~") { + return homedir(); + } + return path; + } + + private resolveScopedFuzzyQuery(rawQuery: string): { baseDir: string; query: string; displayBase: string } | null { + const slashIndex = rawQuery.lastIndexOf("/"); + if (slashIndex === -1) { + return null; + } + + const displayBase = rawQuery.slice(0, slashIndex + 1); + const query = rawQuery.slice(slashIndex + 1); + + let baseDir: string; + if (displayBase.startsWith("~/")) { + baseDir = this.expandHomePath(displayBase); + } else if (displayBase.startsWith("/")) { + baseDir = displayBase; + } else { + baseDir = join(this.basePath, displayBase); + } + + try { + if (!statSync(baseDir).isDirectory()) { + return null; + } + } catch { + return null; + } + + return { baseDir, query, displayBase }; + } + + private scopedPathForDisplay(displayBase: string, relativePath: string): string { + if (displayBase === "/") { + return `/${relativePath}`; + } + return `${displayBase}${relativePath}`; + } + + // Get file/directory suggestions for a given path prefix + private getFileSuggestions(prefix: string): AutocompleteItem[] { + try { + let searchDir: string; + let searchPrefix: string; + const { rawPrefix, isAtPrefix, 
isQuotedPrefix } = parsePathPrefix(prefix); + let expandedPrefix = rawPrefix; + + // Handle home directory expansion + if (expandedPrefix.startsWith("~")) { + expandedPrefix = this.expandHomePath(expandedPrefix); + } + + const isRootPrefix = + rawPrefix === "" || + rawPrefix === "./" || + rawPrefix === "../" || + rawPrefix === "~" || + rawPrefix === "~/" || + rawPrefix === "/" || + (isAtPrefix && rawPrefix === ""); + + if (isRootPrefix) { + // Complete from specified position + if (rawPrefix.startsWith("~") || expandedPrefix.startsWith("/")) { + searchDir = expandedPrefix; + } else { + searchDir = join(this.basePath, expandedPrefix); + } + searchPrefix = ""; + } else if (rawPrefix.endsWith("/")) { + // If prefix ends with /, show contents of that directory + if (rawPrefix.startsWith("~") || expandedPrefix.startsWith("/")) { + searchDir = expandedPrefix; + } else { + searchDir = join(this.basePath, expandedPrefix); + } + searchPrefix = ""; + } else { + // Split into directory and file prefix + const dir = dirname(expandedPrefix); + const file = basename(expandedPrefix); + if (rawPrefix.startsWith("~") || expandedPrefix.startsWith("/")) { + searchDir = dir; + } else { + searchDir = join(this.basePath, dir); + } + searchPrefix = file; + } + + const entries = readdirSync(searchDir, { withFileTypes: true }); + const suggestions: AutocompleteItem[] = []; + + for (const entry of entries) { + if (!entry.name.toLowerCase().startsWith(searchPrefix.toLowerCase())) { + continue; + } + + // Check if entry is a directory (or a symlink pointing to a directory) + let isDirectory = entry.isDirectory(); + if (!isDirectory && entry.isSymbolicLink()) { + try { + const fullPath = join(searchDir, entry.name); + isDirectory = statSync(fullPath).isDirectory(); + } catch { + // Broken symlink or permission error - treat as file + } + } + + let relativePath: string; + const name = entry.name; + const displayPrefix = rawPrefix; + + if (displayPrefix.endsWith("/")) { + // If prefix ends with 
/, append entry to the prefix + relativePath = displayPrefix + name; + } else if (displayPrefix.includes("/")) { + // Preserve ~/ format for home directory paths + if (displayPrefix.startsWith("~/")) { + const homeRelativeDir = displayPrefix.slice(2); // Remove ~/ + const dir = dirname(homeRelativeDir); + relativePath = `~/${dir === "." ? name : join(dir, name)}`; + } else if (displayPrefix.startsWith("/")) { + // Absolute path - construct properly + const dir = dirname(displayPrefix); + if (dir === "/") { + relativePath = `/${name}`; + } else { + relativePath = `${dir}/${name}`; + } + } else { + relativePath = join(dirname(displayPrefix), name); + } + } else { + // For standalone entries, preserve ~/ if original prefix was ~/ + if (displayPrefix.startsWith("~")) { + relativePath = `~/${name}`; + } else { + relativePath = name; + } + } + + const pathValue = isDirectory ? `${relativePath}/` : relativePath; + const value = buildCompletionValue(pathValue, { + isDirectory, + isAtPrefix, + isQuotedPrefix, + }); + + suggestions.push({ + value, + label: name + (isDirectory ? 
"/" : ""), + }); + } + + // Sort directories first, then alphabetically + suggestions.sort((a, b) => { + const aIsDir = a.value.endsWith("/"); + const bIsDir = b.value.endsWith("/"); + if (aIsDir && !bIsDir) return -1; + if (!aIsDir && bIsDir) return 1; + return a.label.localeCompare(b.label); + }); + + return suggestions; + } catch (_e) { + // Directory doesn't exist or not accessible + return []; + } + } + + // Score an entry against the query (higher = better match) + // isDirectory adds bonus to prioritize folders + private scoreEntry(filePath: string, query: string, isDirectory: boolean): number { + const fileName = basename(filePath); + const lowerFileName = fileName.toLowerCase(); + const lowerQuery = query.toLowerCase(); + + let score = 0; + + // Exact filename match (highest) + if (lowerFileName === lowerQuery) score = 100; + // Filename starts with query + else if (lowerFileName.startsWith(lowerQuery)) score = 80; + // Substring match in filename + else if (lowerFileName.includes(lowerQuery)) score = 50; + // Substring match in full path + else if (filePath.toLowerCase().includes(lowerQuery)) score = 30; + + // Directories get a bonus to appear first + if (isDirectory && score > 0) score += 10; + + return score; + } + + // Fuzzy file search using fd (fast, respects .gitignore) + private getFuzzyFileSuggestions(query: string, options: { isQuotedPrefix: boolean }): AutocompleteItem[] { + if (!this.fdPath) { + // fd not available, return empty results + return []; + } + + try { + const scopedQuery = this.resolveScopedFuzzyQuery(query); + const fdBaseDir = scopedQuery?.baseDir ?? this.basePath; + const fdQuery = scopedQuery?.query ?? query; + const entries = walkDirectoryWithFd(fdBaseDir, this.fdPath, fdQuery, 100); + + // Score entries + const scoredEntries = entries + .map((entry) => ({ + ...entry, + score: fdQuery ? 
this.scoreEntry(entry.path, fdQuery, entry.isDirectory) : 1, + })) + .filter((entry) => entry.score > 0); + + // Sort by score (descending) and take top 20 + scoredEntries.sort((a, b) => b.score - a.score); + const topEntries = scoredEntries.slice(0, 20); + + // Build suggestions + const suggestions: AutocompleteItem[] = []; + for (const { path: entryPath, isDirectory } of topEntries) { + // fd already includes trailing / for directories + const pathWithoutSlash = isDirectory ? entryPath.slice(0, -1) : entryPath; + const displayPath = scopedQuery + ? this.scopedPathForDisplay(scopedQuery.displayBase, pathWithoutSlash) + : pathWithoutSlash; + const entryName = basename(pathWithoutSlash); + const completionPath = isDirectory ? `${displayPath}/` : displayPath; + const value = buildCompletionValue(completionPath, { + isDirectory, + isAtPrefix: true, + isQuotedPrefix: options.isQuotedPrefix, + }); + + suggestions.push({ + value, + label: entryName + (isDirectory ? "/" : ""), + description: displayPath, + }); + } + + return suggestions; + } catch { + return []; + } + } + + // Force file completion (called on Tab key) - always returns suggestions + getForceFileSuggestions( + lines: string[], + cursorLine: number, + cursorCol: number, + ): { items: AutocompleteItem[]; prefix: string } | null { + const currentLine = lines[cursorLine] || ""; + const textBeforeCursor = currentLine.slice(0, cursorCol); + + // Don't trigger if we're typing a slash command at the start of the line + if (textBeforeCursor.trim().startsWith("/") && !textBeforeCursor.trim().includes(" ")) { + return null; + } + + // Force extract path prefix - this will always return something + const pathMatch = this.extractPathPrefix(textBeforeCursor, true); + if (pathMatch !== null) { + const suggestions = this.getFileSuggestions(pathMatch); + if (suggestions.length === 0) return null; + + return { + items: suggestions, + prefix: pathMatch, + }; + } + + return null; + } + + // Check if we should trigger file 
completion (called on Tab key) + shouldTriggerFileCompletion(lines: string[], cursorLine: number, cursorCol: number): boolean { + const currentLine = lines[cursorLine] || ""; + const textBeforeCursor = currentLine.slice(0, cursorCol); + + // Don't trigger if we're typing a slash command at the start of the line + if (textBeforeCursor.trim().startsWith("/") && !textBeforeCursor.trim().includes(" ")) { + return false; + } + + return true; + } +} diff --git a/packages/pi-tui/src/components/box.ts b/packages/pi-tui/src/components/box.ts new file mode 100644 index 000000000..c99b8600b --- /dev/null +++ b/packages/pi-tui/src/components/box.ts @@ -0,0 +1,137 @@ +import type { Component } from "../tui.js"; +import { applyBackgroundToLine, visibleWidth } from "../utils.js"; + +type RenderCache = { + childLines: string[]; + width: number; + bgSample: string | undefined; + lines: string[]; +}; + +/** + * Box component - a container that applies padding and background to all children + */ +export class Box implements Component { + children: Component[] = []; + private paddingX: number; + private paddingY: number; + private bgFn?: (text: string) => string; + + // Cache for rendered output + private cache?: RenderCache; + + constructor(paddingX = 1, paddingY = 1, bgFn?: (text: string) => string) { + this.paddingX = paddingX; + this.paddingY = paddingY; + this.bgFn = bgFn; + } + + addChild(component: Component): void { + this.children.push(component); + this.invalidateCache(); + } + + removeChild(component: Component): void { + const index = this.children.indexOf(component); + if (index !== -1) { + this.children.splice(index, 1); + this.invalidateCache(); + } + } + + clear(): void { + this.children = []; + this.invalidateCache(); + } + + setBgFn(bgFn?: (text: string) => string): void { + this.bgFn = bgFn; + // Don't invalidate here - we'll detect bgFn changes by sampling output + } + + private invalidateCache(): void { + this.cache = undefined; + } + + private matchCache(width: 
number, childLines: string[], bgSample: string | undefined): boolean { + const cache = this.cache; + return ( + !!cache && + cache.width === width && + cache.bgSample === bgSample && + cache.childLines.length === childLines.length && + cache.childLines.every((line, i) => line === childLines[i]) + ); + } + + invalidate(): void { + this.invalidateCache(); + for (const child of this.children) { + child.invalidate?.(); + } + } + + render(width: number): string[] { + if (this.children.length === 0) { + return []; + } + + const contentWidth = Math.max(1, width - this.paddingX * 2); + const leftPad = " ".repeat(this.paddingX); + + // Render all children + const childLines: string[] = []; + for (const child of this.children) { + const lines = child.render(contentWidth); + for (const line of lines) { + childLines.push(leftPad + line); + } + } + + if (childLines.length === 0) { + return []; + } + + // Check if bgFn output changed by sampling + const bgSample = this.bgFn ? this.bgFn("test") : undefined; + + // Check cache validity + if (this.matchCache(width, childLines, bgSample)) { + return this.cache!.lines; + } + + // Apply background and padding + const result: string[] = []; + + // Top padding + for (let i = 0; i < this.paddingY; i++) { + result.push(this.applyBg("", width)); + } + + // Content + for (const line of childLines) { + result.push(this.applyBg(line, width)); + } + + // Bottom padding + for (let i = 0; i < this.paddingY; i++) { + result.push(this.applyBg("", width)); + } + + // Update cache + this.cache = { childLines, width, bgSample, lines: result }; + + return result; + } + + private applyBg(line: string, width: number): string { + const visLen = visibleWidth(line); + const padNeeded = Math.max(0, width - visLen); + const padded = line + " ".repeat(padNeeded); + + if (this.bgFn) { + return applyBackgroundToLine(padded, width, this.bgFn); + } + return padded; + } +} diff --git a/packages/pi-tui/src/components/cancellable-loader.ts 
b/packages/pi-tui/src/components/cancellable-loader.ts new file mode 100644 index 000000000..506b763de --- /dev/null +++ b/packages/pi-tui/src/components/cancellable-loader.ts @@ -0,0 +1,40 @@ +import { getEditorKeybindings } from "../keybindings.js"; +import { Loader } from "./loader.js"; + +/** + * Loader that can be cancelled with Escape. + * Extends Loader with an AbortSignal for cancelling async operations. + * + * @example + * const loader = new CancellableLoader(tui, cyan, dim, "Working..."); + * loader.onAbort = () => done(null); + * doWork(loader.signal).then(done); + */ +export class CancellableLoader extends Loader { + private abortController = new AbortController(); + + /** Called when user presses Escape */ + onAbort?: () => void; + + /** AbortSignal that is aborted when user presses Escape */ + get signal(): AbortSignal { + return this.abortController.signal; + } + + /** Whether the loader was aborted */ + get aborted(): boolean { + return this.abortController.signal.aborted; + } + + handleInput(data: string): void { + const kb = getEditorKeybindings(); + if (kb.matches(data, "selectCancel")) { + this.abortController.abort(); + this.onAbort?.(); + } + } + + dispose(): void { + this.stop(); + } +} diff --git a/packages/pi-tui/src/components/editor.ts b/packages/pi-tui/src/components/editor.ts new file mode 100644 index 000000000..f4a7353cc --- /dev/null +++ b/packages/pi-tui/src/components/editor.ts @@ -0,0 +1,2035 @@ +import type { AutocompleteProvider, CombinedAutocompleteProvider } from "../autocomplete.js"; +import { getEditorKeybindings } from "../keybindings.js"; +import { decodeKittyPrintable, matchesKey } from "../keys.js"; +import { KillRing } from "../kill-ring.js"; +import { type Component, CURSOR_MARKER, type Focusable, type TUI } from "../tui.js"; +import { UndoStack } from "../undo-stack.js"; +import { getSegmenter, isPunctuationChar, isWhitespaceChar, visibleWidth } from "../utils.js"; +import { SelectList, type SelectListTheme } from 
"./select-list.js"; + +const segmenter = getSegmenter(); + +/** + * Represents a chunk of text for word-wrap layout. + * Tracks both the text content and its position in the original line. + */ +export interface TextChunk { + text: string; + startIndex: number; + endIndex: number; +} + +/** + * Split a line into word-wrapped chunks. + * Wraps at word boundaries when possible, falling back to character-level + * wrapping for words longer than the available width. + * + * @param line - The text line to wrap + * @param maxWidth - Maximum visible width per chunk + * @returns Array of chunks with text and position information + */ +export function wordWrapLine(line: string, maxWidth: number): TextChunk[] { + if (!line || maxWidth <= 0) { + return [{ text: "", startIndex: 0, endIndex: 0 }]; + } + + const lineWidth = visibleWidth(line); + if (lineWidth <= maxWidth) { + return [{ text: line, startIndex: 0, endIndex: line.length }]; + } + + const chunks: TextChunk[] = []; + const segments = [...segmenter.segment(line)]; + + let currentWidth = 0; + let chunkStart = 0; + + // Wrap opportunity: the position after the last whitespace before a non-whitespace + // grapheme, i.e. where a line break is allowed. + let wrapOppIndex = -1; + let wrapOppWidth = 0; + + for (let i = 0; i < segments.length; i++) { + const seg = segments[i]!; + const grapheme = seg.segment; + const gWidth = visibleWidth(grapheme); + const charIndex = seg.index; + const isWs = isWhitespaceChar(grapheme); + + // Overflow check before advancing. + if (currentWidth + gWidth > maxWidth) { + if (wrapOppIndex >= 0) { + // Backtrack to last wrap opportunity. + chunks.push({ text: line.slice(chunkStart, wrapOppIndex), startIndex: chunkStart, endIndex: wrapOppIndex }); + chunkStart = wrapOppIndex; + currentWidth -= wrapOppWidth; + } else if (chunkStart < charIndex) { + // No wrap opportunity: force-break at current position. 
+ chunks.push({ text: line.slice(chunkStart, charIndex), startIndex: chunkStart, endIndex: charIndex }); + chunkStart = charIndex; + currentWidth = 0; + } + wrapOppIndex = -1; + } + + // Advance. + currentWidth += gWidth; + + // Record wrap opportunity: whitespace followed by non-whitespace. + // Multiple spaces join (no break between them); the break point is + // after the last space before the next word. + const next = segments[i + 1]; + if (isWs && next && !isWhitespaceChar(next.segment)) { + wrapOppIndex = next.index; + wrapOppWidth = currentWidth; + } + } + + // Push final chunk. + chunks.push({ text: line.slice(chunkStart), startIndex: chunkStart, endIndex: line.length }); + + return chunks; +} + +// Kitty CSI-u sequences for printable keys, including optional shifted/base codepoints. +interface EditorState { + lines: string[]; + cursorLine: number; + cursorCol: number; +} + +interface LayoutLine { + text: string; + hasCursor: boolean; + cursorPos?: number; +} + +export interface EditorTheme { + borderColor: (str: string) => string; + selectList: SelectListTheme; +} + +export interface EditorOptions { + paddingX?: number; + autocompleteMaxVisible?: number; +} + +export class Editor implements Component, Focusable { + private state: EditorState = { + lines: [""], + cursorLine: 0, + cursorCol: 0, + }; + + /** Focusable interface - set by TUI when focus changes */ + focused: boolean = false; + + protected tui: TUI; + private theme: EditorTheme; + private paddingX: number = 0; + + // Store last render width for cursor navigation + private lastWidth: number = 80; + + // Vertical scrolling support + private scrollOffset: number = 0; + + // Border color (can be changed dynamically) + public borderColor: (str: string) => string; + + // Autocomplete support + private autocompleteProvider?: AutocompleteProvider; + private autocompleteList?: SelectList; + private autocompleteState: "regular" | "force" | null = null; + private autocompletePrefix: string = ""; + private 
autocompleteMaxVisible: number = 5; + + // Paste tracking for large pastes + private pastes: Map = new Map(); + private pasteCounter: number = 0; + + // Bracketed paste mode buffering + private pasteBuffer: string = ""; + private isInPaste: boolean = false; + + // Prompt history for up/down navigation + private history: string[] = []; + private historyIndex: number = -1; // -1 = not browsing, 0 = most recent, 1 = older, etc. + + // Kill ring for Emacs-style kill/yank operations + private killRing = new KillRing(); + private lastAction: "kill" | "yank" | "type-word" | null = null; + + // Character jump mode + private jumpMode: "forward" | "backward" | null = null; + + // Preferred visual column for vertical cursor movement (sticky column) + private preferredVisualCol: number | null = null; + + // Undo support + private undoStack = new UndoStack(); + + public onSubmit?: (text: string) => void; + public onChange?: (text: string) => void; + public disableSubmit: boolean = false; + + constructor(tui: TUI, theme: EditorTheme, options: EditorOptions = {}) { + this.tui = tui; + this.theme = theme; + this.borderColor = theme.borderColor; + const paddingX = options.paddingX ?? 0; + this.paddingX = Number.isFinite(paddingX) ? Math.max(0, Math.floor(paddingX)) : 0; + const maxVisible = options.autocompleteMaxVisible ?? 5; + this.autocompleteMaxVisible = Number.isFinite(maxVisible) ? Math.max(3, Math.min(20, Math.floor(maxVisible))) : 5; + } + + getPaddingX(): number { + return this.paddingX; + } + + setPaddingX(padding: number): void { + const newPadding = Number.isFinite(padding) ? Math.max(0, Math.floor(padding)) : 0; + if (this.paddingX !== newPadding) { + this.paddingX = newPadding; + this.tui.requestRender(); + } + } + + getAutocompleteMaxVisible(): number { + return this.autocompleteMaxVisible; + } + + setAutocompleteMaxVisible(maxVisible: number): void { + const newMaxVisible = Number.isFinite(maxVisible) ? 
Math.max(3, Math.min(20, Math.floor(maxVisible))) : 5; + if (this.autocompleteMaxVisible !== newMaxVisible) { + this.autocompleteMaxVisible = newMaxVisible; + this.tui.requestRender(); + } + } + + setAutocompleteProvider(provider: AutocompleteProvider): void { + this.autocompleteProvider = provider; + } + + /** + * Add a prompt to history for up/down arrow navigation. + * Called after successful submission. + */ + addToHistory(text: string): void { + const trimmed = text.trim(); + if (!trimmed) return; + // Don't add consecutive duplicates + if (this.history.length > 0 && this.history[0] === trimmed) return; + this.history.unshift(trimmed); + // Limit history size + if (this.history.length > 100) { + this.history.pop(); + } + } + + private isEditorEmpty(): boolean { + return this.state.lines.length === 1 && this.state.lines[0] === ""; + } + + private isOnFirstVisualLine(): boolean { + const visualLines = this.buildVisualLineMap(this.lastWidth); + const currentVisualLine = this.findCurrentVisualLine(visualLines); + return currentVisualLine === 0; + } + + private isOnLastVisualLine(): boolean { + const visualLines = this.buildVisualLineMap(this.lastWidth); + const currentVisualLine = this.findCurrentVisualLine(visualLines); + return currentVisualLine === visualLines.length - 1; + } + + private navigateHistory(direction: 1 | -1): void { + this.lastAction = null; + if (this.history.length === 0) return; + + const newIndex = this.historyIndex - direction; // Up(-1) increases index, Down(1) decreases + if (newIndex < -1 || newIndex >= this.history.length) return; + + // Capture state when first entering history browsing mode + if (this.historyIndex === -1 && newIndex >= 0) { + this.pushUndoSnapshot(); + } + + this.historyIndex = newIndex; + + if (this.historyIndex === -1) { + // Returned to "current" state - clear editor + this.setTextInternal(""); + } else { + this.setTextInternal(this.history[this.historyIndex] || ""); + } + } + + /** Internal setText that doesn't 
reset history state - used by navigateHistory */ + private setTextInternal(text: string): void { + const lines = text.replace(/\r\n/g, "\n").replace(/\r/g, "\n").split("\n"); + this.state.lines = lines.length === 0 ? [""] : lines; + this.state.cursorLine = this.state.lines.length - 1; + this.setCursorCol(this.state.lines[this.state.cursorLine]?.length || 0); + // Reset scroll - render() will adjust to show cursor + this.scrollOffset = 0; + + if (this.onChange) { + this.onChange(this.getText()); + } + } + + invalidate(): void { + // No cached state to invalidate currently + } + + render(width: number): string[] { + const maxPadding = Math.max(0, Math.floor((width - 1) / 2)); + const paddingX = Math.min(this.paddingX, maxPadding); + const contentWidth = Math.max(1, width - paddingX * 2); + + // Layout width: with padding the cursor can overflow into it, + // without padding we reserve 1 column for the cursor. + const layoutWidth = Math.max(1, contentWidth - (paddingX ? 0 : 1)); + + // Store for cursor navigation (must match wrapping width) + this.lastWidth = layoutWidth; + + const horizontal = this.borderColor("─"); + + // Layout the text + const layoutLines = this.layoutText(layoutWidth); + + // Calculate max visible lines: 30% of terminal height, minimum 5 lines + const terminalRows = this.tui.terminal.rows; + const maxVisibleLines = Math.max(5, Math.floor(terminalRows * 0.3)); + + // Find the cursor line index in layoutLines + let cursorLineIndex = layoutLines.findIndex((line) => line.hasCursor); + if (cursorLineIndex === -1) cursorLineIndex = 0; + + // Adjust scroll offset to keep cursor visible + if (cursorLineIndex < this.scrollOffset) { + this.scrollOffset = cursorLineIndex; + } else if (cursorLineIndex >= this.scrollOffset + maxVisibleLines) { + this.scrollOffset = cursorLineIndex - maxVisibleLines + 1; + } + + // Clamp scroll offset to valid range + const maxScrollOffset = Math.max(0, layoutLines.length - maxVisibleLines); + this.scrollOffset = Math.max(0, 
Math.min(this.scrollOffset, maxScrollOffset)); + + // Get visible lines slice + const visibleLines = layoutLines.slice(this.scrollOffset, this.scrollOffset + maxVisibleLines); + + const result: string[] = []; + const leftPadding = " ".repeat(paddingX); + const rightPadding = leftPadding; + + // Render top border (with scroll indicator if scrolled down) + if (this.scrollOffset > 0) { + const indicator = `─── ↑ ${this.scrollOffset} more `; + const remaining = width - visibleWidth(indicator); + result.push(this.borderColor(indicator + "─".repeat(Math.max(0, remaining)))); + } else { + result.push(horizontal.repeat(width)); + } + + // Render each visible layout line + // Emit hardware cursor marker only when focused and not showing autocomplete + const emitCursorMarker = this.focused && !this.autocompleteState; + + for (const layoutLine of visibleLines) { + let displayText = layoutLine.text; + let lineVisibleWidth = visibleWidth(layoutLine.text); + let cursorInPadding = false; + + // Add cursor if this line has it + if (layoutLine.hasCursor && layoutLine.cursorPos !== undefined) { + const before = displayText.slice(0, layoutLine.cursorPos); + const after = displayText.slice(layoutLine.cursorPos); + + // Hardware cursor marker (zero-width, emitted before fake cursor for IME positioning) + const marker = emitCursorMarker ? 
CURSOR_MARKER : ""; + + if (after.length > 0) { + // Cursor is on a character (grapheme) - replace it with highlighted version + // Get the first grapheme from 'after' + const afterGraphemes = [...segmenter.segment(after)]; + const firstGrapheme = afterGraphemes[0]?.segment || ""; + const restAfter = after.slice(firstGrapheme.length); + const cursor = `\x1b[7m${firstGrapheme}\x1b[0m`; + displayText = before + marker + cursor + restAfter; + // lineVisibleWidth stays the same - we're replacing, not adding + } else { + // Cursor is at the end - add highlighted space + const cursor = "\x1b[7m \x1b[0m"; + displayText = before + marker + cursor; + lineVisibleWidth = lineVisibleWidth + 1; + // If cursor overflows content width into the padding, flag it + if (lineVisibleWidth > contentWidth && paddingX > 0) { + cursorInPadding = true; + } + } + } + + // Calculate padding based on actual visible width + const padding = " ".repeat(Math.max(0, contentWidth - lineVisibleWidth)); + const lineRightPadding = cursorInPadding ? 
rightPadding.slice(1) : rightPadding; + + // Render the line (no side borders, just horizontal lines above and below) + result.push(`${leftPadding}${displayText}${padding}${lineRightPadding}`); + } + + // Render bottom border (with scroll indicator if more content below) + const linesBelow = layoutLines.length - (this.scrollOffset + visibleLines.length); + if (linesBelow > 0) { + const indicator = `─── ↓ ${linesBelow} more `; + const remaining = width - visibleWidth(indicator); + result.push(this.borderColor(indicator + "─".repeat(Math.max(0, remaining)))); + } else { + result.push(horizontal.repeat(width)); + } + + // Add autocomplete list if active + if (this.autocompleteState && this.autocompleteList) { + const autocompleteResult = this.autocompleteList.render(contentWidth); + for (const line of autocompleteResult) { + const lineWidth = visibleWidth(line); + const linePadding = " ".repeat(Math.max(0, contentWidth - lineWidth)); + result.push(`${leftPadding}${line}${linePadding}${rightPadding}`); + } + } + + return result; + } + + handleInput(data: string): void { + const kb = getEditorKeybindings(); + + // Handle character jump mode (awaiting next character to jump to) + if (this.jumpMode !== null) { + // Cancel if the hotkey is pressed again + if (kb.matches(data, "jumpForward") || kb.matches(data, "jumpBackward")) { + this.jumpMode = null; + return; + } + + if (data.charCodeAt(0) >= 32) { + // Printable character - perform the jump + const direction = this.jumpMode; + this.jumpMode = null; + this.jumpToChar(data, direction); + return; + } + + // Control character - cancel and fall through to normal handling + this.jumpMode = null; + } + + // Handle bracketed paste mode + if (data.includes("\x1b[200~")) { + this.isInPaste = true; + this.pasteBuffer = ""; + data = data.replace("\x1b[200~", ""); + } + + if (this.isInPaste) { + this.pasteBuffer += data; + const endIndex = this.pasteBuffer.indexOf("\x1b[201~"); + if (endIndex !== -1) { + const pasteContent = 
this.pasteBuffer.substring(0, endIndex); + if (pasteContent.length > 0) { + this.handlePaste(pasteContent); + } + this.isInPaste = false; + const remaining = this.pasteBuffer.substring(endIndex + 6); + this.pasteBuffer = ""; + if (remaining.length > 0) { + this.handleInput(remaining); + } + return; + } + return; + } + + // Ctrl+C - let parent handle (exit/clear) + if (kb.matches(data, "copy")) { + return; + } + + // Undo + if (kb.matches(data, "undo")) { + this.undo(); + return; + } + + // Handle autocomplete mode + if (this.autocompleteState && this.autocompleteList) { + if (kb.matches(data, "selectCancel")) { + this.cancelAutocomplete(); + return; + } + + if (kb.matches(data, "selectUp") || kb.matches(data, "selectDown")) { + this.autocompleteList.handleInput(data); + return; + } + + if (kb.matches(data, "tab")) { + const selected = this.autocompleteList.getSelectedItem(); + if (selected && this.autocompleteProvider) { + const shouldChainSlashArgumentAutocomplete = this.shouldChainSlashArgumentAutocompleteOnTabSelection(); + + this.pushUndoSnapshot(); + this.lastAction = null; + const result = this.autocompleteProvider.applyCompletion( + this.state.lines, + this.state.cursorLine, + this.state.cursorCol, + selected, + this.autocompletePrefix, + ); + this.state.lines = result.lines; + this.state.cursorLine = result.cursorLine; + this.setCursorCol(result.cursorCol); + this.cancelAutocomplete(); + if (this.onChange) this.onChange(this.getText()); + + if (shouldChainSlashArgumentAutocomplete && this.isBareCompletedSlashCommandAtCursor()) { + this.tryTriggerAutocomplete(); + } + } + return; + } + + if (kb.matches(data, "selectConfirm")) { + const selected = this.autocompleteList.getSelectedItem(); + if (selected && this.autocompleteProvider) { + this.pushUndoSnapshot(); + this.lastAction = null; + const result = this.autocompleteProvider.applyCompletion( + this.state.lines, + this.state.cursorLine, + this.state.cursorCol, + selected, + this.autocompletePrefix, + ); + 
this.state.lines = result.lines; + this.state.cursorLine = result.cursorLine; + this.setCursorCol(result.cursorCol); + + if (this.autocompletePrefix.startsWith("/")) { + this.cancelAutocomplete(); + // Fall through to submit + } else { + this.cancelAutocomplete(); + if (this.onChange) this.onChange(this.getText()); + return; + } + } + } + } + + // Tab - trigger completion + if (kb.matches(data, "tab") && !this.autocompleteState) { + this.handleTabCompletion(); + return; + } + + // Deletion actions + if (kb.matches(data, "deleteToLineEnd")) { + this.deleteToEndOfLine(); + return; + } + if (kb.matches(data, "deleteToLineStart")) { + this.deleteToStartOfLine(); + return; + } + if (kb.matches(data, "deleteWordBackward")) { + this.deleteWordBackwards(); + return; + } + if (kb.matches(data, "deleteWordForward")) { + this.deleteWordForward(); + return; + } + if (kb.matches(data, "deleteCharBackward") || matchesKey(data, "shift+backspace")) { + this.handleBackspace(); + return; + } + if (kb.matches(data, "deleteCharForward") || matchesKey(data, "shift+delete")) { + this.handleForwardDelete(); + return; + } + + // Kill ring actions + if (kb.matches(data, "yank")) { + this.yank(); + return; + } + if (kb.matches(data, "yankPop")) { + this.yankPop(); + return; + } + + // Cursor movement actions + if (kb.matches(data, "cursorLineStart")) { + this.moveToLineStart(); + return; + } + if (kb.matches(data, "cursorLineEnd")) { + this.moveToLineEnd(); + return; + } + if (kb.matches(data, "cursorWordLeft")) { + this.moveWordBackwards(); + return; + } + if (kb.matches(data, "cursorWordRight")) { + this.moveWordForwards(); + return; + } + + // New line + if ( + kb.matches(data, "newLine") || + (data.charCodeAt(0) === 10 && data.length > 1) || + data === "\x1b\r" || + data === "\x1b[13;2~" || + (data.length > 1 && data.includes("\x1b") && data.includes("\r")) || + (data === "\n" && data.length === 1) + ) { + if (this.shouldSubmitOnBackslashEnter(data, kb)) { + this.handleBackspace(); + 
this.submitValue(); + return; + } + this.addNewLine(); + return; + } + + // Submit (Enter) + if (kb.matches(data, "submit")) { + if (this.disableSubmit) return; + + // Workaround for terminals without Shift+Enter support: + // If char before cursor is \, delete it and insert newline instead of submitting. + const currentLine = this.state.lines[this.state.cursorLine] || ""; + if (this.state.cursorCol > 0 && currentLine[this.state.cursorCol - 1] === "\\") { + this.handleBackspace(); + this.addNewLine(); + return; + } + + this.submitValue(); + return; + } + + // Arrow key navigation (with history support) + if (kb.matches(data, "cursorUp")) { + if (this.isEditorEmpty()) { + this.navigateHistory(-1); + } else if (this.historyIndex > -1 && this.isOnFirstVisualLine()) { + this.navigateHistory(-1); + } else if (this.isOnFirstVisualLine()) { + // Already at top - jump to start of line + this.moveToLineStart(); + } else { + this.moveCursor(-1, 0); + } + return; + } + if (kb.matches(data, "cursorDown")) { + if (this.historyIndex > -1 && this.isOnLastVisualLine()) { + this.navigateHistory(1); + } else if (this.isOnLastVisualLine()) { + // Already at bottom - jump to end of line + this.moveToLineEnd(); + } else { + this.moveCursor(1, 0); + } + return; + } + if (kb.matches(data, "cursorRight")) { + this.moveCursor(0, 1); + return; + } + if (kb.matches(data, "cursorLeft")) { + this.moveCursor(0, -1); + return; + } + + // Page up/down - scroll by page and move cursor + if (kb.matches(data, "pageUp")) { + this.pageScroll(-1); + return; + } + if (kb.matches(data, "pageDown")) { + this.pageScroll(1); + return; + } + + // Character jump mode triggers + if (kb.matches(data, "jumpForward")) { + this.jumpMode = "forward"; + return; + } + if (kb.matches(data, "jumpBackward")) { + this.jumpMode = "backward"; + return; + } + + // Shift+Space - insert regular space + if (matchesKey(data, "shift+space")) { + this.insertCharacter(" "); + return; + } + + const kittyPrintable = 
decodeKittyPrintable(data); + if (kittyPrintable !== undefined) { + this.insertCharacter(kittyPrintable); + return; + } + + // Regular characters + if (data.charCodeAt(0) >= 32) { + this.insertCharacter(data); + } + } + + private layoutText(contentWidth: number): LayoutLine[] { + const layoutLines: LayoutLine[] = []; + + if (this.state.lines.length === 0 || (this.state.lines.length === 1 && this.state.lines[0] === "")) { + // Empty editor + layoutLines.push({ + text: "", + hasCursor: true, + cursorPos: 0, + }); + return layoutLines; + } + + // Process each logical line + for (let i = 0; i < this.state.lines.length; i++) { + const line = this.state.lines[i] || ""; + const isCurrentLine = i === this.state.cursorLine; + const lineVisibleWidth = visibleWidth(line); + + if (lineVisibleWidth <= contentWidth) { + // Line fits in one layout line + if (isCurrentLine) { + layoutLines.push({ + text: line, + hasCursor: true, + cursorPos: this.state.cursorCol, + }); + } else { + layoutLines.push({ + text: line, + hasCursor: false, + }); + } + } else { + // Line needs wrapping - use word-aware wrapping + const chunks = wordWrapLine(line, contentWidth); + + for (let chunkIndex = 0; chunkIndex < chunks.length; chunkIndex++) { + const chunk = chunks[chunkIndex]; + if (!chunk) continue; + + const cursorPos = this.state.cursorCol; + const isLastChunk = chunkIndex === chunks.length - 1; + + // Determine if cursor is in this chunk + // For word-wrapped chunks, we need to handle the case where + // cursor might be in trimmed whitespace at end of chunk + let hasCursorInChunk = false; + let adjustedCursorPos = 0; + + if (isCurrentLine) { + if (isLastChunk) { + // Last chunk: cursor belongs here if >= startIndex + hasCursorInChunk = cursorPos >= chunk.startIndex; + adjustedCursorPos = cursorPos - chunk.startIndex; + } else { + // Non-last chunk: cursor belongs here if in range [startIndex, endIndex) + // But we need to handle the visual position in the trimmed text + hasCursorInChunk = 
cursorPos >= chunk.startIndex && cursorPos < chunk.endIndex; + if (hasCursorInChunk) { + adjustedCursorPos = cursorPos - chunk.startIndex; + // Clamp to text length (in case cursor was in trimmed whitespace) + if (adjustedCursorPos > chunk.text.length) { + adjustedCursorPos = chunk.text.length; + } + } + } + } + + if (hasCursorInChunk) { + layoutLines.push({ + text: chunk.text, + hasCursor: true, + cursorPos: adjustedCursorPos, + }); + } else { + layoutLines.push({ + text: chunk.text, + hasCursor: false, + }); + } + } + } + } + + return layoutLines; + } + + getText(): string { + return this.state.lines.join("\n"); + } + + /** + * Get text with paste markers expanded to their actual content. + * Use this when you need the full content (e.g., for external editor). + */ + getExpandedText(): string { + let result = this.state.lines.join("\n"); + for (const [pasteId, pasteContent] of this.pastes) { + const markerRegex = new RegExp(`\\[paste #${pasteId}( (\\+\\d+ lines|\\d+ chars))?\\]`, "g"); + result = result.replace(markerRegex, pasteContent); + } + return result; + } + + getLines(): string[] { + return [...this.state.lines]; + } + + getCursor(): { line: number; col: number } { + return { line: this.state.cursorLine, col: this.state.cursorCol }; + } + + setText(text: string): void { + this.lastAction = null; + this.historyIndex = -1; // Exit history browsing mode + // Push undo snapshot if content differs (makes programmatic changes undoable) + if (this.getText() !== text) { + this.pushUndoSnapshot(); + } + this.setTextInternal(text); + } + + /** + * Insert text at the current cursor position. + * Used for programmatic insertion (e.g., clipboard image markers). + * This is atomic for undo - single undo restores entire pre-insert state. 
+ */ + insertTextAtCursor(text: string): void { + if (!text) return; + this.pushUndoSnapshot(); + this.lastAction = null; + this.historyIndex = -1; + this.insertTextAtCursorInternal(text); + } + + /** + * Internal text insertion at cursor. Handles single and multi-line text. + * Does not push undo snapshots or trigger autocomplete - caller is responsible. + * Normalizes line endings and calls onChange once at the end. + */ + private insertTextAtCursorInternal(text: string): void { + if (!text) return; + + // Normalize line endings + const normalized = text.replace(/\r\n/g, "\n").replace(/\r/g, "\n"); + const insertedLines = normalized.split("\n"); + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const beforeCursor = currentLine.slice(0, this.state.cursorCol); + const afterCursor = currentLine.slice(this.state.cursorCol); + + if (insertedLines.length === 1) { + // Single line - insert at cursor position + this.state.lines[this.state.cursorLine] = beforeCursor + normalized + afterCursor; + this.setCursorCol(this.state.cursorCol + normalized.length); + } else { + // Multi-line insertion + this.state.lines = [ + // All lines before current line + ...this.state.lines.slice(0, this.state.cursorLine), + + // The first inserted line merged with text before cursor + beforeCursor + insertedLines[0], + + // All middle inserted lines + ...insertedLines.slice(1, -1), + + // The last inserted line with text after cursor + insertedLines[insertedLines.length - 1] + afterCursor, + + // All lines after current line + ...this.state.lines.slice(this.state.cursorLine + 1), + ]; + + this.state.cursorLine += insertedLines.length - 1; + this.setCursorCol((insertedLines[insertedLines.length - 1] || "").length); + } + + if (this.onChange) { + this.onChange(this.getText()); + } + } + + // All the editor methods from before... 
+ private insertCharacter(char: string, skipUndoCoalescing?: boolean): void { + this.historyIndex = -1; // Exit history browsing mode + + // Undo coalescing (fish-style): + // - Consecutive word chars coalesce into one undo unit + // - Space captures state before itself (so undo removes space+following word together) + // - Each space is separately undoable + // Skip coalescing when called from atomic operations (e.g., handlePaste) + if (!skipUndoCoalescing) { + if (isWhitespaceChar(char) || this.lastAction !== "type-word") { + this.pushUndoSnapshot(); + } + this.lastAction = "type-word"; + } + + const line = this.state.lines[this.state.cursorLine] || ""; + + const before = line.slice(0, this.state.cursorCol); + const after = line.slice(this.state.cursorCol); + + this.state.lines[this.state.cursorLine] = before + char + after; + this.setCursorCol(this.state.cursorCol + char.length); + + if (this.onChange) { + this.onChange(this.getText()); + } + + // Check if we should trigger or update autocomplete + if (!this.autocompleteState) { + // Auto-trigger for "/" at the start of a line (slash commands) + if (char === "/" && this.isAtStartOfMessage()) { + this.tryTriggerAutocomplete(); + } + // Auto-trigger for "@" file reference (fuzzy search) + else if (char === "@") { + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const textBeforeCursor = currentLine.slice(0, this.state.cursorCol); + // Only trigger if @ is after whitespace or at start of line + const charBeforeAt = textBeforeCursor[textBeforeCursor.length - 2]; + if (textBeforeCursor.length === 1 || charBeforeAt === " " || charBeforeAt === "\t") { + this.tryTriggerAutocomplete(); + } + } + // Also auto-trigger when typing letters in a slash command context + else if (/[a-zA-Z0-9.\-_]/.test(char)) { + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const textBeforeCursor = currentLine.slice(0, this.state.cursorCol); + // Check if we're in a slash command (with or without 
space for arguments) + if (this.isInSlashCommandContext(textBeforeCursor)) { + this.tryTriggerAutocomplete(); + } + // Check if we're in an @ file reference context + else if (textBeforeCursor.match(/(?:^|[\s])@[^\s]*$/)) { + this.tryTriggerAutocomplete(); + } + } + } else { + this.updateAutocomplete(); + } + } + + private handlePaste(pastedText: string): void { + this.historyIndex = -1; // Exit history browsing mode + this.lastAction = null; + + this.pushUndoSnapshot(); + + // Clean the pasted text + const cleanText = pastedText.replace(/\r\n/g, "\n").replace(/\r/g, "\n"); + + // Convert tabs to spaces (4 spaces per tab) + const tabExpandedText = cleanText.replace(/\t/g, " "); + + // Filter out non-printable characters except newlines + let filteredText = tabExpandedText + .split("") + .filter((char) => char === "\n" || char.charCodeAt(0) >= 32) + .join(""); + + // If pasting a file path (starts with /, ~, or .) and the character before + // the cursor is a word character, prepend a space for better readability + if (/^[/~.]/.test(filteredText)) { + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const charBeforeCursor = this.state.cursorCol > 0 ? currentLine[this.state.cursorCol - 1] : ""; + if (charBeforeCursor && /\w/.test(charBeforeCursor)) { + filteredText = ` ${filteredText}`; + } + } + + // Split into lines to check for large paste + const pastedLines = filteredText.split("\n"); + + // Check if this is a large paste (> 10 lines or > 1000 characters) + const totalChars = filteredText.length; + if (pastedLines.length > 10 || totalChars > 1000) { + // Store the paste and insert a marker + this.pasteCounter++; + const pasteId = this.pasteCounter; + this.pastes.set(pasteId, filteredText); + + // Insert marker like "[paste #1 +123 lines]" or "[paste #1 1234 chars]" + const marker = + pastedLines.length > 10 + ? 
`[paste #${pasteId} +${pastedLines.length} lines]` + : `[paste #${pasteId} ${totalChars} chars]`; + this.insertTextAtCursorInternal(marker); + return; + } + + if (pastedLines.length === 1) { + // Single line - insert atomically (do not trigger autocomplete during paste) + this.insertTextAtCursorInternal(filteredText); + return; + } + + // Multi-line paste - use direct state manipulation + this.insertTextAtCursorInternal(filteredText); + } + + private addNewLine(): void { + this.historyIndex = -1; // Exit history browsing mode + this.lastAction = null; + + this.pushUndoSnapshot(); + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + + const before = currentLine.slice(0, this.state.cursorCol); + const after = currentLine.slice(this.state.cursorCol); + + // Split current line + this.state.lines[this.state.cursorLine] = before; + this.state.lines.splice(this.state.cursorLine + 1, 0, after); + + // Move cursor to start of new line + this.state.cursorLine++; + this.setCursorCol(0); + + if (this.onChange) { + this.onChange(this.getText()); + } + } + + private shouldSubmitOnBackslashEnter(data: string, kb: ReturnType): boolean { + if (this.disableSubmit) return false; + if (!matchesKey(data, "enter")) return false; + const submitKeys = kb.getKeys("submit"); + const hasShiftEnter = submitKeys.includes("shift+enter") || submitKeys.includes("shift+return"); + if (!hasShiftEnter) return false; + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + return this.state.cursorCol > 0 && currentLine[this.state.cursorCol - 1] === "\\"; + } + + private submitValue(): void { + let result = this.state.lines.join("\n").trim(); + for (const [pasteId, pasteContent] of this.pastes) { + const markerRegex = new RegExp(`\\[paste #${pasteId}( (\\+\\d+ lines|\\d+ chars))?\\]`, "g"); + result = result.replace(markerRegex, pasteContent); + } + + this.state = { lines: [""], cursorLine: 0, cursorCol: 0 }; + this.pastes.clear(); + this.pasteCounter = 0; + 
this.historyIndex = -1; + this.scrollOffset = 0; + this.undoStack.clear(); + this.lastAction = null; + + if (this.onChange) this.onChange(""); + if (this.onSubmit) this.onSubmit(result); + } + + private handleBackspace(): void { + this.historyIndex = -1; // Exit history browsing mode + this.lastAction = null; + + if (this.state.cursorCol > 0) { + this.pushUndoSnapshot(); + + // Delete grapheme before cursor (handles emojis, combining characters, etc.) + const line = this.state.lines[this.state.cursorLine] || ""; + const beforeCursor = line.slice(0, this.state.cursorCol); + + // Find the last grapheme in the text before cursor + const graphemes = [...segmenter.segment(beforeCursor)]; + const lastGrapheme = graphemes[graphemes.length - 1]; + const graphemeLength = lastGrapheme ? lastGrapheme.segment.length : 1; + + const before = line.slice(0, this.state.cursorCol - graphemeLength); + const after = line.slice(this.state.cursorCol); + + this.state.lines[this.state.cursorLine] = before + after; + this.setCursorCol(this.state.cursorCol - graphemeLength); + } else if (this.state.cursorLine > 0) { + this.pushUndoSnapshot(); + + // Merge with previous line + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const previousLine = this.state.lines[this.state.cursorLine - 1] || ""; + + this.state.lines[this.state.cursorLine - 1] = previousLine + currentLine; + this.state.lines.splice(this.state.cursorLine, 1); + + this.state.cursorLine--; + this.setCursorCol(previousLine.length); + } + + if (this.onChange) { + this.onChange(this.getText()); + } + + // Update or re-trigger autocomplete after backspace + if (this.autocompleteState) { + this.updateAutocomplete(); + } else { + // If autocomplete was cancelled (no matches), re-trigger if we're in a completable context + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const textBeforeCursor = currentLine.slice(0, this.state.cursorCol); + // Slash command context + if 
(this.isInSlashCommandContext(textBeforeCursor)) { + this.tryTriggerAutocomplete(); + } + // @ file reference context + else if (textBeforeCursor.match(/(?:^|[\s])@[^\s]*$/)) { + this.tryTriggerAutocomplete(); + } + } + } + + /** + * Set cursor column and clear preferredVisualCol. + * Use this for all non-vertical cursor movements to reset sticky column behavior. + */ + private setCursorCol(col: number): void { + this.state.cursorCol = col; + this.preferredVisualCol = null; + } + + /** + * Move cursor to a target visual line, applying sticky column logic. + * Shared by moveCursor() and pageScroll(). + */ + private moveToVisualLine( + visualLines: Array<{ logicalLine: number; startCol: number; length: number }>, + currentVisualLine: number, + targetVisualLine: number, + ): void { + const currentVL = visualLines[currentVisualLine]; + const targetVL = visualLines[targetVisualLine]; + + if (currentVL && targetVL) { + const currentVisualCol = this.state.cursorCol - currentVL.startCol; + + // For non-last segments, clamp to length-1 to stay within the segment + const isLastSourceSegment = + currentVisualLine === visualLines.length - 1 || + visualLines[currentVisualLine + 1]?.logicalLine !== currentVL.logicalLine; + const sourceMaxVisualCol = isLastSourceSegment ? currentVL.length : Math.max(0, currentVL.length - 1); + + const isLastTargetSegment = + targetVisualLine === visualLines.length - 1 || + visualLines[targetVisualLine + 1]?.logicalLine !== targetVL.logicalLine; + const targetMaxVisualCol = isLastTargetSegment ? 
targetVL.length : Math.max(0, targetVL.length - 1); + + const moveToVisualCol = this.computeVerticalMoveColumn( + currentVisualCol, + sourceMaxVisualCol, + targetMaxVisualCol, + ); + + // Set cursor position + this.state.cursorLine = targetVL.logicalLine; + const targetCol = targetVL.startCol + moveToVisualCol; + const logicalLine = this.state.lines[targetVL.logicalLine] || ""; + this.state.cursorCol = Math.min(targetCol, logicalLine.length); + } + } + + /** + * Compute the target visual column for vertical cursor movement. + * Implements the sticky column decision table: + * + * | P | S | T | U | Scenario | Set Preferred | Move To | + * |---|---|---|---| ---------------------------------------------------- |---------------|-------------| + * | 0 | * | 0 | - | Start nav, target fits | null | current | + * | 0 | * | 1 | - | Start nav, target shorter | current | target end | + * | 1 | 0 | 0 | 0 | Clamped, target fits preferred | null | preferred | + * | 1 | 0 | 0 | 1 | Clamped, target longer but still can't fit preferred | keep | target end | + * | 1 | 0 | 1 | - | Clamped, target even shorter | keep | target end | + * | 1 | 1 | 0 | - | Rewrapped, target fits current | null | current | + * | 1 | 1 | 1 | - | Rewrapped, target shorter than current | current | target end | + * + * Where: + * - P = preferred col is set + * - S = cursor in middle of source line (not clamped to end) + * - T = target line shorter than current visual col + * - U = target line shorter than preferred col + */ + private computeVerticalMoveColumn( + currentVisualCol: number, + sourceMaxVisualCol: number, + targetMaxVisualCol: number, + ): number { + const hasPreferred = this.preferredVisualCol !== null; // P + const cursorInMiddle = currentVisualCol < sourceMaxVisualCol; // S + const targetTooShort = targetMaxVisualCol < currentVisualCol; // T + + if (!hasPreferred || cursorInMiddle) { + if (targetTooShort) { + // Cases 2 and 7 + this.preferredVisualCol = currentVisualCol; + return 
targetMaxVisualCol; + } + + // Cases 1 and 6 + this.preferredVisualCol = null; + return currentVisualCol; + } + + const targetCantFitPreferred = targetMaxVisualCol < this.preferredVisualCol!; // U + if (targetTooShort || targetCantFitPreferred) { + // Cases 4 and 5 + return targetMaxVisualCol; + } + + // Case 3 + const result = this.preferredVisualCol!; + this.preferredVisualCol = null; + return result; + } + + private moveToLineStart(): void { + this.lastAction = null; + this.setCursorCol(0); + } + + private moveToLineEnd(): void { + this.lastAction = null; + const currentLine = this.state.lines[this.state.cursorLine] || ""; + this.setCursorCol(currentLine.length); + } + + private deleteToStartOfLine(): void { + this.historyIndex = -1; // Exit history browsing mode + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + + if (this.state.cursorCol > 0) { + this.pushUndoSnapshot(); + + // Calculate text to be deleted and save to kill ring (backward deletion = prepend) + const deletedText = currentLine.slice(0, this.state.cursorCol); + this.killRing.push(deletedText, { prepend: true, accumulate: this.lastAction === "kill" }); + this.lastAction = "kill"; + + // Delete from start of line up to cursor + this.state.lines[this.state.cursorLine] = currentLine.slice(this.state.cursorCol); + this.setCursorCol(0); + } else if (this.state.cursorLine > 0) { + this.pushUndoSnapshot(); + + // At start of line - merge with previous line, treating newline as deleted text + this.killRing.push("\n", { prepend: true, accumulate: this.lastAction === "kill" }); + this.lastAction = "kill"; + + const previousLine = this.state.lines[this.state.cursorLine - 1] || ""; + this.state.lines[this.state.cursorLine - 1] = previousLine + currentLine; + this.state.lines.splice(this.state.cursorLine, 1); + this.state.cursorLine--; + this.setCursorCol(previousLine.length); + } + + if (this.onChange) { + this.onChange(this.getText()); + } + } + + private deleteToEndOfLine(): void { + 
this.historyIndex = -1; // Exit history browsing mode + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + + if (this.state.cursorCol < currentLine.length) { + this.pushUndoSnapshot(); + + // Calculate text to be deleted and save to kill ring (forward deletion = append) + const deletedText = currentLine.slice(this.state.cursorCol); + this.killRing.push(deletedText, { prepend: false, accumulate: this.lastAction === "kill" }); + this.lastAction = "kill"; + + // Delete from cursor to end of line + this.state.lines[this.state.cursorLine] = currentLine.slice(0, this.state.cursorCol); + } else if (this.state.cursorLine < this.state.lines.length - 1) { + this.pushUndoSnapshot(); + + // At end of line - merge with next line, treating newline as deleted text + this.killRing.push("\n", { prepend: false, accumulate: this.lastAction === "kill" }); + this.lastAction = "kill"; + + const nextLine = this.state.lines[this.state.cursorLine + 1] || ""; + this.state.lines[this.state.cursorLine] = currentLine + nextLine; + this.state.lines.splice(this.state.cursorLine + 1, 1); + } + + if (this.onChange) { + this.onChange(this.getText()); + } + } + + private deleteWordBackwards(): void { + this.historyIndex = -1; // Exit history browsing mode + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + + // If at start of line, behave like backspace at column 0 (merge with previous line) + if (this.state.cursorCol === 0) { + if (this.state.cursorLine > 0) { + this.pushUndoSnapshot(); + + // Treat newline as deleted text (backward deletion = prepend) + this.killRing.push("\n", { prepend: true, accumulate: this.lastAction === "kill" }); + this.lastAction = "kill"; + + const previousLine = this.state.lines[this.state.cursorLine - 1] || ""; + this.state.lines[this.state.cursorLine - 1] = previousLine + currentLine; + this.state.lines.splice(this.state.cursorLine, 1); + this.state.cursorLine--; + this.setCursorCol(previousLine.length); + } + } else { + 
this.pushUndoSnapshot(); + + // Save lastAction before cursor movement (moveWordBackwards resets it) + const wasKill = this.lastAction === "kill"; + + const oldCursorCol = this.state.cursorCol; + this.moveWordBackwards(); + const deleteFrom = this.state.cursorCol; + this.setCursorCol(oldCursorCol); + + const deletedText = currentLine.slice(deleteFrom, this.state.cursorCol); + this.killRing.push(deletedText, { prepend: true, accumulate: wasKill }); + this.lastAction = "kill"; + + this.state.lines[this.state.cursorLine] = + currentLine.slice(0, deleteFrom) + currentLine.slice(this.state.cursorCol); + this.setCursorCol(deleteFrom); + } + + if (this.onChange) { + this.onChange(this.getText()); + } + } + + private deleteWordForward(): void { + this.historyIndex = -1; // Exit history browsing mode + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + + // If at end of line, merge with next line (delete the newline) + if (this.state.cursorCol >= currentLine.length) { + if (this.state.cursorLine < this.state.lines.length - 1) { + this.pushUndoSnapshot(); + + // Treat newline as deleted text (forward deletion = append) + this.killRing.push("\n", { prepend: false, accumulate: this.lastAction === "kill" }); + this.lastAction = "kill"; + + const nextLine = this.state.lines[this.state.cursorLine + 1] || ""; + this.state.lines[this.state.cursorLine] = currentLine + nextLine; + this.state.lines.splice(this.state.cursorLine + 1, 1); + } + } else { + this.pushUndoSnapshot(); + + // Save lastAction before cursor movement (moveWordForwards resets it) + const wasKill = this.lastAction === "kill"; + + const oldCursorCol = this.state.cursorCol; + this.moveWordForwards(); + const deleteTo = this.state.cursorCol; + this.setCursorCol(oldCursorCol); + + const deletedText = currentLine.slice(this.state.cursorCol, deleteTo); + this.killRing.push(deletedText, { prepend: false, accumulate: wasKill }); + this.lastAction = "kill"; + + this.state.lines[this.state.cursorLine] = + 
currentLine.slice(0, this.state.cursorCol) + currentLine.slice(deleteTo); + } + + if (this.onChange) { + this.onChange(this.getText()); + } + } + + private handleForwardDelete(): void { + this.historyIndex = -1; // Exit history browsing mode + this.lastAction = null; + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + + if (this.state.cursorCol < currentLine.length) { + this.pushUndoSnapshot(); + + // Delete grapheme at cursor position (handles emojis, combining characters, etc.) + const afterCursor = currentLine.slice(this.state.cursorCol); + + // Find the first grapheme at cursor + const graphemes = [...segmenter.segment(afterCursor)]; + const firstGrapheme = graphemes[0]; + const graphemeLength = firstGrapheme ? firstGrapheme.segment.length : 1; + + const before = currentLine.slice(0, this.state.cursorCol); + const after = currentLine.slice(this.state.cursorCol + graphemeLength); + this.state.lines[this.state.cursorLine] = before + after; + } else if (this.state.cursorLine < this.state.lines.length - 1) { + this.pushUndoSnapshot(); + + // At end of line - merge with next line + const nextLine = this.state.lines[this.state.cursorLine + 1] || ""; + this.state.lines[this.state.cursorLine] = currentLine + nextLine; + this.state.lines.splice(this.state.cursorLine + 1, 1); + } + + if (this.onChange) { + this.onChange(this.getText()); + } + + // Update or re-trigger autocomplete after forward delete + if (this.autocompleteState) { + this.updateAutocomplete(); + } else { + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const textBeforeCursor = currentLine.slice(0, this.state.cursorCol); + // Slash command context + if (this.isInSlashCommandContext(textBeforeCursor)) { + this.tryTriggerAutocomplete(); + } + // @ file reference context + else if (textBeforeCursor.match(/(?:^|[\s])@[^\s]*$/)) { + this.tryTriggerAutocomplete(); + } + } + } + + /** + * Build a mapping from visual lines to logical positions. 
+ * Returns an array where each element represents a visual line with: + * - logicalLine: index into this.state.lines + * - startCol: starting column in the logical line + * - length: length of this visual line segment + */ + private buildVisualLineMap(width: number): Array<{ logicalLine: number; startCol: number; length: number }> { + const visualLines: Array<{ logicalLine: number; startCol: number; length: number }> = []; + + for (let i = 0; i < this.state.lines.length; i++) { + const line = this.state.lines[i] || ""; + const lineVisWidth = visibleWidth(line); + if (line.length === 0) { + // Empty line still takes one visual line + visualLines.push({ logicalLine: i, startCol: 0, length: 0 }); + } else if (lineVisWidth <= width) { + visualLines.push({ logicalLine: i, startCol: 0, length: line.length }); + } else { + // Line needs wrapping - use word-aware wrapping + const chunks = wordWrapLine(line, width); + for (const chunk of chunks) { + visualLines.push({ + logicalLine: i, + startCol: chunk.startIndex, + length: chunk.endIndex - chunk.startIndex, + }); + } + } + } + + return visualLines; + } + + /** + * Find the visual line index for the current cursor position. 
+ */ + private findCurrentVisualLine( + visualLines: Array<{ logicalLine: number; startCol: number; length: number }>, + ): number { + for (let i = 0; i < visualLines.length; i++) { + const vl = visualLines[i]; + if (!vl) continue; + if (vl.logicalLine === this.state.cursorLine) { + const colInSegment = this.state.cursorCol - vl.startCol; + // Cursor is in this segment if it's within range + // For the last segment of a logical line, cursor can be at length (end position) + const isLastSegmentOfLine = + i === visualLines.length - 1 || visualLines[i + 1]?.logicalLine !== vl.logicalLine; + if (colInSegment >= 0 && (colInSegment < vl.length || (isLastSegmentOfLine && colInSegment <= vl.length))) { + return i; + } + } + } + // Fallback: return last visual line + return visualLines.length - 1; + } + + private moveCursor(deltaLine: number, deltaCol: number): void { + this.lastAction = null; + const visualLines = this.buildVisualLineMap(this.lastWidth); + const currentVisualLine = this.findCurrentVisualLine(visualLines); + + if (deltaLine !== 0) { + const targetVisualLine = currentVisualLine + deltaLine; + + if (targetVisualLine >= 0 && targetVisualLine < visualLines.length) { + this.moveToVisualLine(visualLines, currentVisualLine, targetVisualLine); + } + } + + if (deltaCol !== 0) { + const currentLine = this.state.lines[this.state.cursorLine] || ""; + + if (deltaCol > 0) { + // Moving right - move by one grapheme (handles emojis, combining characters, etc.) + if (this.state.cursorCol < currentLine.length) { + const afterCursor = currentLine.slice(this.state.cursorCol); + const graphemes = [...segmenter.segment(afterCursor)]; + const firstGrapheme = graphemes[0]; + this.setCursorCol(this.state.cursorCol + (firstGrapheme ? 
firstGrapheme.segment.length : 1)); + } else if (this.state.cursorLine < this.state.lines.length - 1) { + // Wrap to start of next logical line + this.state.cursorLine++; + this.setCursorCol(0); + } else { + // At end of last line - can't move, but set preferredVisualCol for up/down navigation + const currentVL = visualLines[currentVisualLine]; + if (currentVL) { + this.preferredVisualCol = this.state.cursorCol - currentVL.startCol; + } + } + } else { + // Moving left - move by one grapheme (handles emojis, combining characters, etc.) + if (this.state.cursorCol > 0) { + const beforeCursor = currentLine.slice(0, this.state.cursorCol); + const graphemes = [...segmenter.segment(beforeCursor)]; + const lastGrapheme = graphemes[graphemes.length - 1]; + this.setCursorCol(this.state.cursorCol - (lastGrapheme ? lastGrapheme.segment.length : 1)); + } else if (this.state.cursorLine > 0) { + // Wrap to end of previous logical line + this.state.cursorLine--; + const prevLine = this.state.lines[this.state.cursorLine] || ""; + this.setCursorCol(prevLine.length); + } + } + } + } + + /** + * Scroll by a page (direction: -1 for up, 1 for down). + * Moves cursor by the page size while keeping it in bounds. 
+ */ + private pageScroll(direction: -1 | 1): void { + this.lastAction = null; + const terminalRows = this.tui.terminal.rows; + const pageSize = Math.max(5, Math.floor(terminalRows * 0.3)); + + const visualLines = this.buildVisualLineMap(this.lastWidth); + const currentVisualLine = this.findCurrentVisualLine(visualLines); + const targetVisualLine = Math.max(0, Math.min(visualLines.length - 1, currentVisualLine + direction * pageSize)); + + this.moveToVisualLine(visualLines, currentVisualLine, targetVisualLine); + } + + private moveWordBackwards(): void { + this.lastAction = null; + const currentLine = this.state.lines[this.state.cursorLine] || ""; + + // If at start of line, move to end of previous line + if (this.state.cursorCol === 0) { + if (this.state.cursorLine > 0) { + this.state.cursorLine--; + const prevLine = this.state.lines[this.state.cursorLine] || ""; + this.setCursorCol(prevLine.length); + } + return; + } + + const textBeforeCursor = currentLine.slice(0, this.state.cursorCol); + const graphemes = [...segmenter.segment(textBeforeCursor)]; + let newCol = this.state.cursorCol; + + // Skip trailing whitespace + while (graphemes.length > 0 && isWhitespaceChar(graphemes[graphemes.length - 1]?.segment || "")) { + newCol -= graphemes.pop()?.segment.length || 0; + } + + if (graphemes.length > 0) { + const lastGrapheme = graphemes[graphemes.length - 1]?.segment || ""; + if (isPunctuationChar(lastGrapheme)) { + // Skip punctuation run + while (graphemes.length > 0 && isPunctuationChar(graphemes[graphemes.length - 1]?.segment || "")) { + newCol -= graphemes.pop()?.segment.length || 0; + } + } else { + // Skip word run + while ( + graphemes.length > 0 && + !isWhitespaceChar(graphemes[graphemes.length - 1]?.segment || "") && + !isPunctuationChar(graphemes[graphemes.length - 1]?.segment || "") + ) { + newCol -= graphemes.pop()?.segment.length || 0; + } + } + } + + this.setCursorCol(newCol); + } + + /** + * Yank (paste) the most recent kill ring entry at cursor 
position. + */ + private yank(): void { + if (this.killRing.length === 0) return; + + this.pushUndoSnapshot(); + + const text = this.killRing.peek()!; + this.insertYankedText(text); + + this.lastAction = "yank"; + } + + /** + * Cycle through kill ring (only works immediately after yank or yank-pop). + * Replaces the last yanked text with the previous entry in the ring. + */ + private yankPop(): void { + // Only works if we just yanked and have more than one entry + if (this.lastAction !== "yank" || this.killRing.length <= 1) return; + + this.pushUndoSnapshot(); + + // Delete the previously yanked text (still at end of ring before rotation) + this.deleteYankedText(); + + // Rotate the ring: move end to front + this.killRing.rotate(); + + // Insert the new most recent entry (now at end after rotation) + const text = this.killRing.peek()!; + this.insertYankedText(text); + + this.lastAction = "yank"; + } + + /** + * Insert text at cursor position (used by yank operations). + */ + private insertYankedText(text: string): void { + this.historyIndex = -1; // Exit history browsing mode + const lines = text.split("\n"); + + if (lines.length === 1) { + // Single line - insert at cursor + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const before = currentLine.slice(0, this.state.cursorCol); + const after = currentLine.slice(this.state.cursorCol); + this.state.lines[this.state.cursorLine] = before + text + after; + this.setCursorCol(this.state.cursorCol + text.length); + } else { + // Multi-line insert + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const before = currentLine.slice(0, this.state.cursorCol); + const after = currentLine.slice(this.state.cursorCol); + + // First line merges with text before cursor + this.state.lines[this.state.cursorLine] = before + (lines[0] || ""); + + // Insert middle lines + for (let i = 1; i < lines.length - 1; i++) { + this.state.lines.splice(this.state.cursorLine + i, 0, lines[i] || ""); + } + 
+ // Last line merges with text after cursor + const lastLineIndex = this.state.cursorLine + lines.length - 1; + this.state.lines.splice(lastLineIndex, 0, (lines[lines.length - 1] || "") + after); + + // Update cursor position + this.state.cursorLine = lastLineIndex; + this.setCursorCol((lines[lines.length - 1] || "").length); + } + + if (this.onChange) { + this.onChange(this.getText()); + } + } + + /** + * Delete the previously yanked text (used by yank-pop). + * The yanked text is derived from killRing[end] since it hasn't been rotated yet. + */ + private deleteYankedText(): void { + const yankedText = this.killRing.peek(); + if (!yankedText) return; + + const yankLines = yankedText.split("\n"); + + if (yankLines.length === 1) { + // Single line - delete backward from cursor + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const deleteLen = yankedText.length; + const before = currentLine.slice(0, this.state.cursorCol - deleteLen); + const after = currentLine.slice(this.state.cursorCol); + this.state.lines[this.state.cursorLine] = before + after; + this.setCursorCol(this.state.cursorCol - deleteLen); + } else { + // Multi-line delete - cursor is at end of last yanked line + const startLine = this.state.cursorLine - (yankLines.length - 1); + const startCol = (this.state.lines[startLine] || "").length - (yankLines[0] || "").length; + + // Get text after cursor on current line + const afterCursor = (this.state.lines[this.state.cursorLine] || "").slice(this.state.cursorCol); + + // Get text before yank start position + const beforeYank = (this.state.lines[startLine] || "").slice(0, startCol); + + // Remove all lines from startLine to cursorLine and replace with merged line + this.state.lines.splice(startLine, yankLines.length, beforeYank + afterCursor); + + // Update cursor + this.state.cursorLine = startLine; + this.setCursorCol(startCol); + } + + if (this.onChange) { + this.onChange(this.getText()); + } + } + + private pushUndoSnapshot(): void 
{ + this.undoStack.push(this.state); + } + + private undo(): void { + this.historyIndex = -1; // Exit history browsing mode + const snapshot = this.undoStack.pop(); + if (!snapshot) return; + Object.assign(this.state, snapshot); + this.lastAction = null; + this.preferredVisualCol = null; + if (this.onChange) { + this.onChange(this.getText()); + } + } + + /** + * Jump to the first occurrence of a character in the specified direction. + * Multi-line search. Case-sensitive. Skips the current cursor position. + */ + private jumpToChar(char: string, direction: "forward" | "backward"): void { + this.lastAction = null; + const isForward = direction === "forward"; + const lines = this.state.lines; + + const end = isForward ? lines.length : -1; + const step = isForward ? 1 : -1; + + for (let lineIdx = this.state.cursorLine; lineIdx !== end; lineIdx += step) { + const line = lines[lineIdx] || ""; + const isCurrentLine = lineIdx === this.state.cursorLine; + + // Current line: start after/before cursor; other lines: search full line + const searchFrom = isCurrentLine + ? isForward + ? this.state.cursorCol + 1 + : this.state.cursorCol - 1 + : undefined; + + const idx = isForward ? 
line.indexOf(char, searchFrom) : line.lastIndexOf(char, searchFrom); + + if (idx !== -1) { + this.state.cursorLine = lineIdx; + this.setCursorCol(idx); + return; + } + } + // No match found - cursor stays in place + } + + private moveWordForwards(): void { + this.lastAction = null; + const currentLine = this.state.lines[this.state.cursorLine] || ""; + + // If at end of line, move to start of next line + if (this.state.cursorCol >= currentLine.length) { + if (this.state.cursorLine < this.state.lines.length - 1) { + this.state.cursorLine++; + this.setCursorCol(0); + } + return; + } + + const textAfterCursor = currentLine.slice(this.state.cursorCol); + const segments = segmenter.segment(textAfterCursor); + const iterator = segments[Symbol.iterator](); + let next = iterator.next(); + let newCol = this.state.cursorCol; + + // Skip leading whitespace + while (!next.done && isWhitespaceChar(next.value.segment)) { + newCol += next.value.segment.length; + next = iterator.next(); + } + + if (!next.done) { + const firstGrapheme = next.value.segment; + if (isPunctuationChar(firstGrapheme)) { + // Skip punctuation run + while (!next.done && isPunctuationChar(next.value.segment)) { + newCol += next.value.segment.length; + next = iterator.next(); + } + } else { + // Skip word run + while (!next.done && !isWhitespaceChar(next.value.segment) && !isPunctuationChar(next.value.segment)) { + newCol += next.value.segment.length; + next = iterator.next(); + } + } + } + + this.setCursorCol(newCol); + } + + // Slash menu only allowed on the first line of the editor + private isSlashMenuAllowed(): boolean { + return this.state.cursorLine === 0; + } + + // Helper method to check if cursor is at start of message (for slash command detection) + private isAtStartOfMessage(): boolean { + if (!this.isSlashMenuAllowed()) return false; + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const beforeCursor = currentLine.slice(0, this.state.cursorCol); + return beforeCursor.trim() 
=== "" || beforeCursor.trim() === "/"; + } + + private isInSlashCommandContext(textBeforeCursor: string): boolean { + return this.isSlashMenuAllowed() && textBeforeCursor.trimStart().startsWith("/"); + } + + private shouldChainSlashArgumentAutocompleteOnTabSelection(): boolean { + if (this.autocompleteState !== "regular") { + return false; + } + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const textBeforeCursor = currentLine.slice(0, this.state.cursorCol); + return this.isInSlashCommandContext(textBeforeCursor) && !textBeforeCursor.trimStart().includes(" "); + } + + private isBareCompletedSlashCommandAtCursor(): boolean { + const currentLine = this.state.lines[this.state.cursorLine] || ""; + if (this.state.cursorCol !== currentLine.length) { + return false; + } + + const textBeforeCursor = currentLine.slice(0, this.state.cursorCol).trimStart(); + return /^\/\S+ $/.test(textBeforeCursor); + } + + // Autocomplete methods + /** + * Find the best autocomplete item index for the given prefix. + * Returns -1 if no match is found. + * + * Match priority: + * 1. Exact match (prefix === item.value) -> always selected + * 2. Prefix match -> first item whose value starts with prefix + * 3. No match -> -1 (keep default highlight) + * + * Matching is case-sensitive and checks item.value only. 
+ */ + private getBestAutocompleteMatchIndex(items: Array<{ value: string; label: string }>, prefix: string): number { + if (!prefix) return -1; + + let firstPrefixIndex = -1; + + for (let i = 0; i < items.length; i++) { + const value = items[i]!.value; + if (value === prefix) { + return i; // Exact match always wins + } + if (firstPrefixIndex === -1 && value.startsWith(prefix)) { + firstPrefixIndex = i; + } + } + + return firstPrefixIndex; + } + + private tryTriggerAutocomplete(explicitTab: boolean = false): void { + if (!this.autocompleteProvider) return; + + // Check if we should trigger file completion on Tab + if (explicitTab) { + const provider = this.autocompleteProvider as CombinedAutocompleteProvider; + const shouldTrigger = + !provider.shouldTriggerFileCompletion || + provider.shouldTriggerFileCompletion(this.state.lines, this.state.cursorLine, this.state.cursorCol); + if (!shouldTrigger) { + return; + } + } + + const suggestions = this.autocompleteProvider.getSuggestions( + this.state.lines, + this.state.cursorLine, + this.state.cursorCol, + ); + + if (suggestions && suggestions.items.length > 0) { + this.autocompletePrefix = suggestions.prefix; + this.autocompleteList = new SelectList(suggestions.items, this.autocompleteMaxVisible, this.theme.selectList); + + // If typed prefix exactly matches one of the suggestions, select that item + const bestMatchIndex = this.getBestAutocompleteMatchIndex(suggestions.items, suggestions.prefix); + if (bestMatchIndex >= 0) { + this.autocompleteList.setSelectedIndex(bestMatchIndex); + } + + this.autocompleteState = "regular"; + } else { + this.cancelAutocomplete(); + } + } + + private handleTabCompletion(): void { + if (!this.autocompleteProvider) return; + + const currentLine = this.state.lines[this.state.cursorLine] || ""; + const beforeCursor = currentLine.slice(0, this.state.cursorCol); + + // Check if we're in a slash command context + if (this.isInSlashCommandContext(beforeCursor) && 
!beforeCursor.trimStart().includes(" ")) { + this.handleSlashCommandCompletion(); + } else { + this.forceFileAutocomplete(true); + } + } + + private handleSlashCommandCompletion(): void { + this.tryTriggerAutocomplete(true); + } + + /** + * Force file-path autocomplete at the cursor, bypassing the usual trigger context. + * When explicitTab is set and exactly one suggestion is available, it is applied immediately. + */ + private forceFileAutocomplete(explicitTab: boolean = false): void { + if (!this.autocompleteProvider) return; + + // Check if provider supports force file suggestions via runtime check + const provider = this.autocompleteProvider as { + getForceFileSuggestions?: CombinedAutocompleteProvider["getForceFileSuggestions"]; + }; + if (typeof provider.getForceFileSuggestions !== "function") { + this.tryTriggerAutocomplete(true); + return; + } + + const suggestions = provider.getForceFileSuggestions( + this.state.lines, + this.state.cursorLine, + this.state.cursorCol, + ); + + if (suggestions && suggestions.items.length > 0) { + // If there's exactly one suggestion, apply it immediately + if (explicitTab && suggestions.items.length === 1) { + const item = suggestions.items[0]!; + this.pushUndoSnapshot(); + this.lastAction = null; + const result = this.autocompleteProvider.applyCompletion( + this.state.lines, + this.state.cursorLine, + this.state.cursorCol, + item, + suggestions.prefix, + ); + this.state.lines = result.lines; + this.state.cursorLine = result.cursorLine; + this.setCursorCol(result.cursorCol); + if (this.onChange) this.onChange(this.getText()); + return; + } + + this.autocompletePrefix = suggestions.prefix; + this.autocompleteList = new SelectList(suggestions.items, this.autocompleteMaxVisible, this.theme.selectList); + + // If typed prefix exactly matches one of the suggestions, select that item + const bestMatchIndex = this.getBestAutocompleteMatchIndex(suggestions.items, suggestions.prefix); + 
if (bestMatchIndex >= 0) { + this.autocompleteList.setSelectedIndex(bestMatchIndex); + } + + this.autocompleteState = "force"; + } else { + this.cancelAutocomplete(); + } + } + + private cancelAutocomplete(): void { + this.autocompleteState = null; + this.autocompleteList = undefined; + this.autocompletePrefix = ""; + } + + public isShowingAutocomplete(): boolean { + return this.autocompleteState !== null; + } + + private updateAutocomplete(): void { + if (!this.autocompleteState || !this.autocompleteProvider) return; + + if (this.autocompleteState === "force") { + this.forceFileAutocomplete(); + return; + } + + const suggestions = this.autocompleteProvider.getSuggestions( + this.state.lines, + this.state.cursorLine, + this.state.cursorCol, + ); + if (suggestions && suggestions.items.length > 0) { + this.autocompletePrefix = suggestions.prefix; + // Always create new SelectList to ensure update + this.autocompleteList = new SelectList(suggestions.items, this.autocompleteMaxVisible, this.theme.selectList); + + // If typed prefix exactly matches one of the suggestions, select that item + const bestMatchIndex = this.getBestAutocompleteMatchIndex(suggestions.items, suggestions.prefix); + if (bestMatchIndex >= 0) { + this.autocompleteList.setSelectedIndex(bestMatchIndex); + } + } else { + this.cancelAutocomplete(); + } + } +} diff --git a/packages/pi-tui/src/components/image.ts b/packages/pi-tui/src/components/image.ts new file mode 100644 index 000000000..ca76cddde --- /dev/null +++ b/packages/pi-tui/src/components/image.ts @@ -0,0 +1,104 @@ +import { + getCapabilities, + getImageDimensions, + type ImageDimensions, + imageFallback, + renderImage, +} from "../terminal-image.js"; +import type { Component } from "../tui.js"; + +export interface ImageTheme { + fallbackColor: (str: string) => string; +} + +export interface ImageOptions { + maxWidthCells?: number; + maxHeightCells?: number; + filename?: string; + /** Kitty image ID. 
If provided, reuses this ID (for animations/updates). */ + imageId?: number; +} + +export class Image implements Component { + private base64Data: string; + private mimeType: string; + private dimensions: ImageDimensions; + private theme: ImageTheme; + private options: ImageOptions; + private imageId?: number; + + private cachedLines?: string[]; + private cachedWidth?: number; + + constructor( + base64Data: string, + mimeType: string, + theme: ImageTheme, + options: ImageOptions = {}, + dimensions?: ImageDimensions, + ) { + this.base64Data = base64Data; + this.mimeType = mimeType; + this.theme = theme; + this.options = options; + this.dimensions = dimensions || getImageDimensions(base64Data, mimeType) || { widthPx: 800, heightPx: 600 }; + this.imageId = options.imageId; + } + + /** Get the Kitty image ID used by this image (if any). */ + getImageId(): number | undefined { + return this.imageId; + } + + invalidate(): void { + this.cachedLines = undefined; + this.cachedWidth = undefined; + } + + render(width: number): string[] { + if (this.cachedLines && this.cachedWidth === width) { + return this.cachedLines; + } + + const maxWidth = Math.min(width - 2, this.options.maxWidthCells ?? 60); + + const caps = getCapabilities(); + let lines: string[]; + + if (caps.images) { + const result = renderImage(this.base64Data, this.dimensions, { + maxWidthCells: maxWidth, + imageId: this.imageId, + }); + + if (result) { + // Store the image ID for later cleanup + if (result.imageId) { + this.imageId = result.imageId; + } + + // Return `rows` lines so TUI accounts for image height + // First (rows-1) lines are empty (TUI clears them) + // Last line: move cursor back up, then output image sequence + lines = []; + for (let i = 0; i < result.rows - 1; i++) { + lines.push(""); + } + // Move cursor up to first row, then output image + const moveUp = result.rows > 1 ? 
`\x1b[${result.rows - 1}A` : ""; + lines.push(moveUp + result.sequence); + } else { + const fallback = imageFallback(this.mimeType, this.dimensions, this.options.filename); + lines = [this.theme.fallbackColor(fallback)]; + } + } else { + const fallback = imageFallback(this.mimeType, this.dimensions, this.options.filename); + lines = [this.theme.fallbackColor(fallback)]; + } + + this.cachedLines = lines; + this.cachedWidth = width; + + return lines; + } +} diff --git a/packages/pi-tui/src/components/input.ts b/packages/pi-tui/src/components/input.ts new file mode 100644 index 000000000..e5c3b4f7f --- /dev/null +++ b/packages/pi-tui/src/components/input.ts @@ -0,0 +1,521 @@ +import { getEditorKeybindings } from "../keybindings.js"; +import { decodeKittyPrintable } from "../keys.js"; +import { KillRing } from "../kill-ring.js"; +import { type Component, CURSOR_MARKER, type Focusable } from "../tui.js"; +import { UndoStack } from "../undo-stack.js"; +import { getSegmenter, isPunctuationChar, isWhitespaceChar, visibleWidth } from "../utils.js"; + +const segmenter = getSegmenter(); + +interface InputState { + value: string; + cursor: number; +} + +/** + * Input component - single-line text input with horizontal scrolling + */ +export class Input implements Component, Focusable { + private value: string = ""; + private cursor: number = 0; // Cursor position in the value + public onSubmit?: (value: string) => void; + public onEscape?: () => void; + + /** Focusable interface - set by TUI when focus changes */ + focused: boolean = false; + + // Bracketed paste mode buffering + private pasteBuffer: string = ""; + private isInPaste: boolean = false; + + // Kill ring for Emacs-style kill/yank operations + private killRing = new KillRing(); + private lastAction: "kill" | "yank" | "type-word" | null = null; + + // Undo support + private undoStack = new UndoStack(); + + getValue(): string { + return this.value; + } + + setValue(value: string): void { + this.value = value; + 
this.cursor = Math.min(this.cursor, value.length); + } + + handleInput(data: string): void { + // Handle bracketed paste mode + // Start of paste: \x1b[200~ + // End of paste: \x1b[201~ + + // Check if we're starting a bracketed paste + if (data.includes("\x1b[200~")) { + this.isInPaste = true; + this.pasteBuffer = ""; + data = data.replace("\x1b[200~", ""); + } + + // If we're in a paste, buffer the data + if (this.isInPaste) { + // Check if this chunk contains the end marker + this.pasteBuffer += data; + + const endIndex = this.pasteBuffer.indexOf("\x1b[201~"); + if (endIndex !== -1) { + // Extract the pasted content + const pasteContent = this.pasteBuffer.substring(0, endIndex); + + // Process the complete paste + this.handlePaste(pasteContent); + + // Reset paste state + this.isInPaste = false; + + // Handle any remaining input after the paste marker + const remaining = this.pasteBuffer.substring(endIndex + 6); // 6 = length of \x1b[201~ + this.pasteBuffer = ""; + if (remaining) { + this.handleInput(remaining); + } + } + return; + } + + const kb = getEditorKeybindings(); + + // Escape/Cancel + if (kb.matches(data, "selectCancel")) { + if (this.onEscape) this.onEscape(); + return; + } + + // Undo + if (kb.matches(data, "undo")) { + this.undo(); + return; + } + + // Submit + if (kb.matches(data, "submit") || data === "\n") { + if (this.onSubmit) this.onSubmit(this.value); + return; + } + + // Deletion + if (kb.matches(data, "deleteCharBackward")) { + this.handleBackspace(); + return; + } + + if (kb.matches(data, "deleteCharForward")) { + this.handleForwardDelete(); + return; + } + + if (kb.matches(data, "deleteWordBackward")) { + this.deleteWordBackwards(); + return; + } + + if (kb.matches(data, "deleteWordForward")) { + this.deleteWordForward(); + return; + } + + if (kb.matches(data, "deleteToLineStart")) { + this.deleteToLineStart(); + return; + } + + if (kb.matches(data, "deleteToLineEnd")) { + this.deleteToLineEnd(); + return; + } + + // Kill ring actions + 
if (kb.matches(data, "yank")) { + this.yank(); + return; + } + if (kb.matches(data, "yankPop")) { + this.yankPop(); + return; + } + + // Cursor movement + if (kb.matches(data, "cursorLeft")) { + this.lastAction = null; + if (this.cursor > 0) { + const beforeCursor = this.value.slice(0, this.cursor); + const graphemes = [...segmenter.segment(beforeCursor)]; + const lastGrapheme = graphemes[graphemes.length - 1]; + this.cursor -= lastGrapheme ? lastGrapheme.segment.length : 1; + } + return; + } + + if (kb.matches(data, "cursorRight")) { + this.lastAction = null; + if (this.cursor < this.value.length) { + const afterCursor = this.value.slice(this.cursor); + const graphemes = [...segmenter.segment(afterCursor)]; + const firstGrapheme = graphemes[0]; + this.cursor += firstGrapheme ? firstGrapheme.segment.length : 1; + } + return; + } + + if (kb.matches(data, "cursorLineStart")) { + this.lastAction = null; + this.cursor = 0; + return; + } + + if (kb.matches(data, "cursorLineEnd")) { + this.lastAction = null; + this.cursor = this.value.length; + return; + } + + if (kb.matches(data, "cursorWordLeft")) { + this.moveWordBackwards(); + return; + } + + if (kb.matches(data, "cursorWordRight")) { + this.moveWordForwards(); + return; + } + + // Kitty CSI-u printable character (e.g. \x1b[97u for 'a'). + // Terminals with Kitty protocol flag 1 (disambiguate) send CSI-u for all keys, + // including plain printable characters. Decode before the control-char check + // since CSI-u sequences contain \x1b which would be rejected. 
+ const kittyPrintable = decodeKittyPrintable(data); + if (kittyPrintable !== undefined) { + this.insertCharacter(kittyPrintable); + return; + } + + // Regular character input - accept printable characters including Unicode, + // but reject control characters (C0: 0x00-0x1F, DEL: 0x7F, C1: 0x80-0x9F) + const hasControlChars = [...data].some((ch) => { + const code = ch.charCodeAt(0); + return code < 32 || code === 0x7f || (code >= 0x80 && code <= 0x9f); + }); + if (!hasControlChars) { + this.insertCharacter(data); + } + } + + private insertCharacter(char: string): void { + // Undo coalescing: consecutive word chars coalesce into one undo unit + if (isWhitespaceChar(char) || this.lastAction !== "type-word") { + this.pushUndo(); + } + this.lastAction = "type-word"; + + this.value = this.value.slice(0, this.cursor) + char + this.value.slice(this.cursor); + this.cursor += char.length; + } + + private handleBackspace(): void { + this.lastAction = null; + if (this.cursor > 0) { + this.pushUndo(); + const beforeCursor = this.value.slice(0, this.cursor); + const graphemes = [...segmenter.segment(beforeCursor)]; + const lastGrapheme = graphemes[graphemes.length - 1]; + const graphemeLength = lastGrapheme ? lastGrapheme.segment.length : 1; + this.value = this.value.slice(0, this.cursor - graphemeLength) + this.value.slice(this.cursor); + this.cursor -= graphemeLength; + } + } + + private handleForwardDelete(): void { + this.lastAction = null; + if (this.cursor < this.value.length) { + this.pushUndo(); + const afterCursor = this.value.slice(this.cursor); + const graphemes = [...segmenter.segment(afterCursor)]; + const firstGrapheme = graphemes[0]; + const graphemeLength = firstGrapheme ? 
firstGrapheme.segment.length : 1; + this.value = this.value.slice(0, this.cursor) + this.value.slice(this.cursor + graphemeLength); + } + } + + private deleteToLineStart(): void { + if (this.cursor === 0) return; + this.pushUndo(); + const deletedText = this.value.slice(0, this.cursor); + this.killRing.push(deletedText, { prepend: true, accumulate: this.lastAction === "kill" }); + this.lastAction = "kill"; + this.value = this.value.slice(this.cursor); + this.cursor = 0; + } + + private deleteToLineEnd(): void { + if (this.cursor >= this.value.length) return; + this.pushUndo(); + const deletedText = this.value.slice(this.cursor); + this.killRing.push(deletedText, { prepend: false, accumulate: this.lastAction === "kill" }); + this.lastAction = "kill"; + this.value = this.value.slice(0, this.cursor); + } + + private deleteWordBackwards(): void { + if (this.cursor === 0) return; + + // Save lastAction before cursor movement (moveWordBackwards resets it) + const wasKill = this.lastAction === "kill"; + + this.pushUndo(); + + const oldCursor = this.cursor; + this.moveWordBackwards(); + const deleteFrom = this.cursor; + this.cursor = oldCursor; + + const deletedText = this.value.slice(deleteFrom, this.cursor); + this.killRing.push(deletedText, { prepend: true, accumulate: wasKill }); + this.lastAction = "kill"; + + this.value = this.value.slice(0, deleteFrom) + this.value.slice(this.cursor); + this.cursor = deleteFrom; + } + + private deleteWordForward(): void { + if (this.cursor >= this.value.length) return; + + // Save lastAction before cursor movement (moveWordForwards resets it) + const wasKill = this.lastAction === "kill"; + + this.pushUndo(); + + const oldCursor = this.cursor; + this.moveWordForwards(); + const deleteTo = this.cursor; + this.cursor = oldCursor; + + const deletedText = this.value.slice(this.cursor, deleteTo); + this.killRing.push(deletedText, { prepend: false, accumulate: wasKill }); + this.lastAction = "kill"; + + this.value = this.value.slice(0, 
this.cursor) + this.value.slice(deleteTo); + } + + private yank(): void { + const text = this.killRing.peek(); + if (!text) return; + + this.pushUndo(); + + this.value = this.value.slice(0, this.cursor) + text + this.value.slice(this.cursor); + this.cursor += text.length; + this.lastAction = "yank"; + } + + private yankPop(): void { + if (this.lastAction !== "yank" || this.killRing.length <= 1) return; + + this.pushUndo(); + + // Delete the previously yanked text (still at end of ring before rotation) + const prevText = this.killRing.peek() || ""; + this.value = this.value.slice(0, this.cursor - prevText.length) + this.value.slice(this.cursor); + this.cursor -= prevText.length; + + // Rotate and insert new entry + this.killRing.rotate(); + const text = this.killRing.peek() || ""; + this.value = this.value.slice(0, this.cursor) + text + this.value.slice(this.cursor); + this.cursor += text.length; + this.lastAction = "yank"; + } + + private pushUndo(): void { + this.undoStack.push({ value: this.value, cursor: this.cursor }); + } + + private undo(): void { + const snapshot = this.undoStack.pop(); + if (!snapshot) return; + this.value = snapshot.value; + this.cursor = snapshot.cursor; + this.lastAction = null; + } + + private moveWordBackwards(): void { + if (this.cursor === 0) { + return; + } + + this.lastAction = null; + const textBeforeCursor = this.value.slice(0, this.cursor); + const graphemes = [...segmenter.segment(textBeforeCursor)]; + + // Skip trailing whitespace + while (graphemes.length > 0 && isWhitespaceChar(graphemes[graphemes.length - 1]?.segment || "")) { + this.cursor -= graphemes.pop()?.segment.length || 0; + } + + if (graphemes.length > 0) { + const lastGrapheme = graphemes[graphemes.length - 1]?.segment || ""; + if (isPunctuationChar(lastGrapheme)) { + // Skip punctuation run + while (graphemes.length > 0 && isPunctuationChar(graphemes[graphemes.length - 1]?.segment || "")) { + this.cursor -= graphemes.pop()?.segment.length || 0; + } + } else { + 
// Skip word run + while ( + graphemes.length > 0 && + !isWhitespaceChar(graphemes[graphemes.length - 1]?.segment || "") && + !isPunctuationChar(graphemes[graphemes.length - 1]?.segment || "") + ) { + this.cursor -= graphemes.pop()?.segment.length || 0; + } + } + } + } + + private moveWordForwards(): void { + if (this.cursor >= this.value.length) { + return; + } + + this.lastAction = null; + const textAfterCursor = this.value.slice(this.cursor); + const segments = segmenter.segment(textAfterCursor); + const iterator = segments[Symbol.iterator](); + let next = iterator.next(); + + // Skip leading whitespace + while (!next.done && isWhitespaceChar(next.value.segment)) { + this.cursor += next.value.segment.length; + next = iterator.next(); + } + + if (!next.done) { + const firstGrapheme = next.value.segment; + if (isPunctuationChar(firstGrapheme)) { + // Skip punctuation run + while (!next.done && isPunctuationChar(next.value.segment)) { + this.cursor += next.value.segment.length; + next = iterator.next(); + } + } else { + // Skip word run + while (!next.done && !isWhitespaceChar(next.value.segment) && !isPunctuationChar(next.value.segment)) { + this.cursor += next.value.segment.length; + next = iterator.next(); + } + } + } + } + + private handlePaste(pastedText: string): void { + this.lastAction = null; + this.pushUndo(); + + // Clean the pasted text - remove newlines and carriage returns + const cleanText = pastedText.replace(/\r\n/g, "").replace(/\r/g, "").replace(/\n/g, ""); + + // Insert at cursor position + this.value = this.value.slice(0, this.cursor) + cleanText + this.value.slice(this.cursor); + this.cursor += cleanText.length; + } + + invalidate(): void { + // No cached state to invalidate currently + } + + render(width: number): string[] { + // Calculate visible window + const prompt = "> "; + const availableWidth = width - prompt.length; + + if (availableWidth <= 0) { + return [prompt]; + } + + let visibleText = ""; + let cursorDisplay = this.cursor; + + 
if (this.value.length < availableWidth) { + // Everything fits (leave room for cursor at end) + visibleText = this.value; + } else { + // Need horizontal scrolling + // Reserve one character for cursor if it's at the end + const scrollWidth = this.cursor === this.value.length ? availableWidth - 1 : availableWidth; + const halfWidth = Math.floor(scrollWidth / 2); + + const findValidStart = (start: number) => { + while (start < this.value.length) { + const charCode = this.value.charCodeAt(start); + // this is low surrogate, not a valid start + if (charCode >= 0xdc00 && charCode < 0xe000) { + start++; + continue; + } + break; + } + return start; + }; + + const findValidEnd = (end: number) => { + while (end > 0) { + const charCode = this.value.charCodeAt(end - 1); + // this is high surrogate, might be split. + if (charCode >= 0xd800 && charCode < 0xdc00) { + end--; + continue; + } + break; + } + return end; + }; + + if (this.cursor < halfWidth) { + // Cursor near start + visibleText = this.value.slice(0, findValidEnd(scrollWidth)); + cursorDisplay = this.cursor; + } else if (this.cursor > this.value.length - halfWidth) { + // Cursor near end + const start = findValidStart(this.value.length - scrollWidth); + visibleText = this.value.slice(start); + cursorDisplay = this.cursor - start; + } else { + // Cursor in middle + const start = findValidStart(this.cursor - halfWidth); + visibleText = this.value.slice(start, findValidEnd(start + scrollWidth)); + cursorDisplay = halfWidth; + } + } + + // Build line with fake cursor + // Insert cursor character at cursor position + const graphemes = [...segmenter.segment(visibleText.slice(cursorDisplay))]; + const cursorGrapheme = graphemes[0]; + + const beforeCursor = visibleText.slice(0, cursorDisplay); + const atCursor = cursorGrapheme?.segment ?? 
" "; // Character at cursor, or space if at end + const afterCursor = visibleText.slice(cursorDisplay + atCursor.length); + + // Hardware cursor marker (zero-width, emitted before fake cursor for IME positioning) + const marker = this.focused ? CURSOR_MARKER : ""; + + // Use inverse video to show cursor + const cursorChar = `\x1b[7m${atCursor}\x1b[27m`; // ESC[7m = reverse video, ESC[27m = normal + const textWithCursor = beforeCursor + marker + cursorChar + afterCursor; + + // Calculate visual width + const visualLength = visibleWidth(textWithCursor); + const padding = " ".repeat(Math.max(0, availableWidth - visualLength)); + const line = prompt + textWithCursor + padding; + + return [line]; + } +} diff --git a/packages/pi-tui/src/components/loader.ts b/packages/pi-tui/src/components/loader.ts new file mode 100644 index 000000000..b071e8ee2 --- /dev/null +++ b/packages/pi-tui/src/components/loader.ts @@ -0,0 +1,55 @@ +import type { TUI } from "../tui.js"; +import { Text } from "./text.js"; + +/** + * Loader component that updates every 80ms with spinning animation + */ +export class Loader extends Text { + private frames = ["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]; + private currentFrame = 0; + private intervalId: NodeJS.Timeout | null = null; + private ui: TUI | null = null; + + constructor( + ui: TUI, + private spinnerColorFn: (str: string) => string, + private messageColorFn: (str: string) => string, + private message: string = "Loading...", + ) { + super("", 1, 0); + this.ui = ui; + this.start(); + } + + render(width: number): string[] { + return ["", ...super.render(width)]; + } + + start() { + this.updateDisplay(); + this.intervalId = setInterval(() => { + this.currentFrame = (this.currentFrame + 1) % this.frames.length; + this.updateDisplay(); + }, 80); + } + + stop() { + if (this.intervalId) { + clearInterval(this.intervalId); + this.intervalId = null; + } + } + + setMessage(message: string) { + this.message = message; + this.updateDisplay(); + } + 
+ private updateDisplay() { + const frame = this.frames[this.currentFrame]; + this.setText(`${this.spinnerColorFn(frame)} ${this.messageColorFn(this.message)}`); + if (this.ui) { + this.ui.requestRender(); + } + } +} diff --git a/packages/pi-tui/src/components/markdown.ts b/packages/pi-tui/src/components/markdown.ts new file mode 100644 index 000000000..7ab926896 --- /dev/null +++ b/packages/pi-tui/src/components/markdown.ts @@ -0,0 +1,806 @@ +import { marked, type Token } from "marked"; +import { isImageLine } from "../terminal-image.js"; +import type { Component } from "../tui.js"; +import { applyBackgroundToLine, visibleWidth, wrapTextWithAnsi } from "../utils.js"; + +/** + * Default text styling for markdown content. + * Applied to all text unless overridden by markdown formatting. + */ +export interface DefaultTextStyle { + /** Foreground color function */ + color?: (text: string) => string; + /** Background color function */ + bgColor?: (text: string) => string; + /** Bold text */ + bold?: boolean; + /** Italic text */ + italic?: boolean; + /** Strikethrough text */ + strikethrough?: boolean; + /** Underline text */ + underline?: boolean; +} + +/** + * Theme functions for markdown elements. + * Each function takes text and returns styled text with ANSI codes. 
+ */ +export interface MarkdownTheme { + heading: (text: string) => string; + link: (text: string) => string; + linkUrl: (text: string) => string; + code: (text: string) => string; + codeBlock: (text: string) => string; + codeBlockBorder: (text: string) => string; + quote: (text: string) => string; + quoteBorder: (text: string) => string; + hr: (text: string) => string; + listBullet: (text: string) => string; + bold: (text: string) => string; + italic: (text: string) => string; + strikethrough: (text: string) => string; + underline: (text: string) => string; + highlightCode?: (code: string, lang?: string) => string[]; + /** Prefix applied to each rendered code block line (default: " ") */ + codeBlockIndent?: string; +} + +interface InlineStyleContext { + applyText: (text: string) => string; + stylePrefix: string; +} + +export class Markdown implements Component { + private text: string; + private paddingX: number; // Left/right padding + private paddingY: number; // Top/bottom padding + private defaultTextStyle?: DefaultTextStyle; + private theme: MarkdownTheme; + private defaultStylePrefix?: string; + + // Cache for rendered output + private cachedText?: string; + private cachedWidth?: number; + private cachedLines?: string[]; + + constructor( + text: string, + paddingX: number, + paddingY: number, + theme: MarkdownTheme, + defaultTextStyle?: DefaultTextStyle, + ) { + this.text = text; + this.paddingX = paddingX; + this.paddingY = paddingY; + this.theme = theme; + this.defaultTextStyle = defaultTextStyle; + } + + setText(text: string): void { + this.text = text; + this.invalidate(); + } + + invalidate(): void { + this.cachedText = undefined; + this.cachedWidth = undefined; + this.cachedLines = undefined; + } + + render(width: number): string[] { + // Check cache + if (this.cachedLines && this.cachedText === this.text && this.cachedWidth === width) { + return this.cachedLines; + } + + // Calculate available width for content (subtract horizontal padding) + const 
contentWidth = Math.max(1, width - this.paddingX * 2); + + // Don't render anything if there's no actual text + if (!this.text || this.text.trim() === "") { + const result: string[] = []; + // Update cache + this.cachedText = this.text; + this.cachedWidth = width; + this.cachedLines = result; + return result; + } + + // Replace tabs with 3 spaces for consistent rendering + const normalizedText = this.text.replace(/\t/g, " "); + + // Parse markdown to HTML-like tokens + const tokens = marked.lexer(normalizedText); + + // Convert tokens to styled terminal output + const renderedLines: string[] = []; + + for (let i = 0; i < tokens.length; i++) { + const token = tokens[i]; + const nextToken = tokens[i + 1]; + const tokenLines = this.renderToken(token, contentWidth, nextToken?.type); + renderedLines.push(...tokenLines); + } + + // Wrap lines (NO padding, NO background yet) + const wrappedLines: string[] = []; + for (const line of renderedLines) { + if (isImageLine(line)) { + wrappedLines.push(line); + } else { + wrappedLines.push(...wrapTextWithAnsi(line, contentWidth)); + } + } + + // Add margins and background to each wrapped line + const leftMargin = " ".repeat(this.paddingX); + const rightMargin = " ".repeat(this.paddingX); + const bgFn = this.defaultTextStyle?.bgColor; + const contentLines: string[] = []; + + for (const line of wrappedLines) { + if (isImageLine(line)) { + contentLines.push(line); + continue; + } + + const lineWithMargins = leftMargin + line + rightMargin; + + if (bgFn) { + contentLines.push(applyBackgroundToLine(lineWithMargins, width, bgFn)); + } else { + // No background - just pad to width + const visibleLen = visibleWidth(lineWithMargins); + const paddingNeeded = Math.max(0, width - visibleLen); + contentLines.push(lineWithMargins + " ".repeat(paddingNeeded)); + } + } + + // Add top/bottom padding (empty lines) + const emptyLine = " ".repeat(width); + const emptyLines: string[] = []; + for (let i = 0; i < this.paddingY; i++) { + const line = 
bgFn ? applyBackgroundToLine(emptyLine, width, bgFn) : emptyLine; + emptyLines.push(line); + } + + // Combine top padding, content, and bottom padding + const result = [...emptyLines, ...contentLines, ...emptyLines]; + + // Update cache + this.cachedText = this.text; + this.cachedWidth = width; + this.cachedLines = result; + + return result.length > 0 ? result : [""]; + } + + /** + * Apply default text style to a string. + * This is the base styling applied to all text content. + * NOTE: Background color is NOT applied here - it's applied at the padding stage + * to ensure it extends to the full line width. + */ + private applyDefaultStyle(text: string): string { + if (!this.defaultTextStyle) { + return text; + } + + let styled = text; + + // Apply foreground color (NOT background - that's applied at padding stage) + if (this.defaultTextStyle.color) { + styled = this.defaultTextStyle.color(styled); + } + + // Apply text decorations using this.theme + if (this.defaultTextStyle.bold) { + styled = this.theme.bold(styled); + } + if (this.defaultTextStyle.italic) { + styled = this.theme.italic(styled); + } + if (this.defaultTextStyle.strikethrough) { + styled = this.theme.strikethrough(styled); + } + if (this.defaultTextStyle.underline) { + styled = this.theme.underline(styled); + } + + return styled; + } + + private getDefaultStylePrefix(): string { + if (!this.defaultTextStyle) { + return ""; + } + + if (this.defaultStylePrefix !== undefined) { + return this.defaultStylePrefix; + } + + const sentinel = "\u0000"; + let styled = sentinel; + + if (this.defaultTextStyle.color) { + styled = this.defaultTextStyle.color(styled); + } + + if (this.defaultTextStyle.bold) { + styled = this.theme.bold(styled); + } + if (this.defaultTextStyle.italic) { + styled = this.theme.italic(styled); + } + if (this.defaultTextStyle.strikethrough) { + styled = this.theme.strikethrough(styled); + } + if (this.defaultTextStyle.underline) { + styled = this.theme.underline(styled); + } + + const 
sentinelIndex = styled.indexOf(sentinel); + this.defaultStylePrefix = sentinelIndex >= 0 ? styled.slice(0, sentinelIndex) : ""; + return this.defaultStylePrefix; + } + + private getStylePrefix(styleFn: (text: string) => string): string { + const sentinel = "\u0000"; + const styled = styleFn(sentinel); + const sentinelIndex = styled.indexOf(sentinel); + return sentinelIndex >= 0 ? styled.slice(0, sentinelIndex) : ""; + } + + private getDefaultInlineStyleContext(): InlineStyleContext { + return { + applyText: (text: string) => this.applyDefaultStyle(text), + stylePrefix: this.getDefaultStylePrefix(), + }; + } + + private renderToken( + token: Token, + width: number, + nextTokenType?: string, + styleContext?: InlineStyleContext, + ): string[] { + const lines: string[] = []; + + switch (token.type) { + case "heading": { + const headingLevel = token.depth; + const headingPrefix = `${"#".repeat(headingLevel)} `; + const headingText = this.renderInlineTokens(token.tokens || [], styleContext); + let styledHeading: string; + if (headingLevel === 1) { + styledHeading = this.theme.heading(this.theme.bold(this.theme.underline(headingText))); + } else if (headingLevel === 2) { + styledHeading = this.theme.heading(this.theme.bold(headingText)); + } else { + styledHeading = this.theme.heading(this.theme.bold(headingPrefix + headingText)); + } + lines.push(styledHeading); + if (nextTokenType !== "space") { + lines.push(""); // Add spacing after headings (unless space token follows) + } + break; + } + + case "paragraph": { + const paragraphText = this.renderInlineTokens(token.tokens || [], styleContext); + lines.push(paragraphText); + // Don't add spacing if next token is space or list + if (nextTokenType && nextTokenType !== "list" && nextTokenType !== "space") { + lines.push(""); + } + break; + } + + case "code": { + const indent = this.theme.codeBlockIndent ?? 
" "; + lines.push(this.theme.codeBlockBorder(`\`\`\`${token.lang || ""}`)); + if (this.theme.highlightCode) { + const highlightedLines = this.theme.highlightCode(token.text, token.lang); + for (const hlLine of highlightedLines) { + lines.push(`${indent}${hlLine}`); + } + } else { + // Split code by newlines and style each line + const codeLines = token.text.split("\n"); + for (const codeLine of codeLines) { + lines.push(`${indent}${this.theme.codeBlock(codeLine)}`); + } + } + lines.push(this.theme.codeBlockBorder("```")); + if (nextTokenType !== "space") { + lines.push(""); // Add spacing after code blocks (unless space token follows) + } + break; + } + + case "list": { + const listLines = this.renderList(token as any, 0, styleContext); + lines.push(...listLines); + // Don't add spacing after lists if a space token follows + // (the space token will handle it) + break; + } + + case "table": { + const tableLines = this.renderTable(token as any, width, styleContext); + lines.push(...tableLines); + break; + } + + case "blockquote": { + const quoteStyle = (text: string) => this.theme.quote(this.theme.italic(text)); + const quoteStylePrefix = this.getStylePrefix(quoteStyle); + const applyQuoteStyle = (line: string): string => { + if (!quoteStylePrefix) { + return quoteStyle(line); + } + const lineWithReappliedStyle = line.replace(/\x1b\[0m/g, `\x1b[0m${quoteStylePrefix}`); + return quoteStyle(lineWithReappliedStyle); + }; + + // Calculate available width for quote content (subtract border "│ " = 2 chars) + const quoteContentWidth = Math.max(1, width - 2); + + // Blockquotes contain block-level tokens (paragraph, list, code, etc.), so render + // children with renderToken() instead of renderInlineTokens(). + // Default message style should not apply inside blockquotes. 
+ const quoteInlineStyleContext: InlineStyleContext = { + applyText: (text: string) => text, + stylePrefix: "", + }; + const quoteTokens = token.tokens || []; + const renderedQuoteLines: string[] = []; + for (let i = 0; i < quoteTokens.length; i++) { + const quoteToken = quoteTokens[i]; + const nextQuoteToken = quoteTokens[i + 1]; + renderedQuoteLines.push( + ...this.renderToken(quoteToken, quoteContentWidth, nextQuoteToken?.type, quoteInlineStyleContext), + ); + } + + // Avoid rendering an extra empty quote line before the outer blockquote spacing. + while (renderedQuoteLines.length > 0 && renderedQuoteLines[renderedQuoteLines.length - 1] === "") { + renderedQuoteLines.pop(); + } + + for (const quoteLine of renderedQuoteLines) { + const styledLine = applyQuoteStyle(quoteLine); + const wrappedLines = wrapTextWithAnsi(styledLine, quoteContentWidth); + for (const wrappedLine of wrappedLines) { + lines.push(this.theme.quoteBorder("│ ") + wrappedLine); + } + } + if (nextTokenType !== "space") { + lines.push(""); // Add spacing after blockquotes (unless space token follows) + } + break; + } + + case "hr": + lines.push(this.theme.hr("─".repeat(Math.min(width, 80)))); + if (nextTokenType !== "space") { + lines.push(""); // Add spacing after horizontal rules (unless space token follows) + } + break; + + case "html": + // Render HTML as plain text (escaped for terminal) + if ("raw" in token && typeof token.raw === "string") { + lines.push(this.applyDefaultStyle(token.raw.trim())); + } + break; + + case "space": + // Space tokens represent blank lines in markdown + lines.push(""); + break; + + default: + // Handle any other token types as plain text + if ("text" in token && typeof token.text === "string") { + lines.push(token.text); + } + } + + return lines; + } + + private renderInlineTokens(tokens: Token[], styleContext?: InlineStyleContext): string { + let result = ""; + const resolvedStyleContext = styleContext ?? 
this.getDefaultInlineStyleContext(); + const { applyText, stylePrefix } = resolvedStyleContext; + const applyTextWithNewlines = (text: string): string => { + const segments: string[] = text.split("\n"); + return segments.map((segment: string) => applyText(segment)).join("\n"); + }; + + for (const token of tokens) { + switch (token.type) { + case "text": + // Text tokens in list items can have nested tokens for inline formatting + if (token.tokens && token.tokens.length > 0) { + result += this.renderInlineTokens(token.tokens, resolvedStyleContext); + } else { + result += applyTextWithNewlines(token.text); + } + break; + + case "paragraph": + // Paragraph tokens contain nested inline tokens + result += this.renderInlineTokens(token.tokens || [], resolvedStyleContext); + break; + + case "strong": { + const boldContent = this.renderInlineTokens(token.tokens || [], resolvedStyleContext); + result += this.theme.bold(boldContent) + stylePrefix; + break; + } + + case "em": { + const italicContent = this.renderInlineTokens(token.tokens || [], resolvedStyleContext); + result += this.theme.italic(italicContent) + stylePrefix; + break; + } + + case "codespan": + result += this.theme.code(token.text) + stylePrefix; + break; + + case "link": { + const linkText = this.renderInlineTokens(token.tokens || [], resolvedStyleContext); + // If link text matches href, only show the link once + // Compare raw text (token.text) not styled text (linkText) since linkText has ANSI codes + // For mailto: links, strip the prefix before comparing (autolinked emails have + // text="foo@bar.com" but href="mailto:foo@bar.com") + const hrefForComparison = token.href.startsWith("mailto:") ? 
token.href.slice(7) : token.href; + if (token.text === token.href || token.text === hrefForComparison) { + result += this.theme.link(this.theme.underline(linkText)) + stylePrefix; + } else { + result += + this.theme.link(this.theme.underline(linkText)) + + this.theme.linkUrl(` (${token.href})`) + + stylePrefix; + } + break; + } + + case "br": + result += "\n"; + break; + + case "del": { + const delContent = this.renderInlineTokens(token.tokens || [], resolvedStyleContext); + result += this.theme.strikethrough(delContent) + stylePrefix; + break; + } + + case "html": + // Render inline HTML as plain text + if ("raw" in token && typeof token.raw === "string") { + result += applyTextWithNewlines(token.raw); + } + break; + + default: + // Handle any other inline token types as plain text + if ("text" in token && typeof token.text === "string") { + result += applyTextWithNewlines(token.text); + } + } + } + + return result; + } + + /** + * Render a list with proper nesting support + */ + private renderList( + token: Token & { items: any[]; ordered: boolean; start?: number }, + depth: number, + styleContext?: InlineStyleContext, + ): string[] { + const lines: string[] = []; + const indent = " ".repeat(depth); + // Use the list's start property (defaults to 1 for ordered lists) + const startNumber = token.start ?? 1; + + for (let i = 0; i < token.items.length; i++) { + const item = token.items[i]; + const bullet = token.ordered ? `${startNumber + i}. 
` : "- "; + + // Process item tokens to handle nested lists + const itemLines = this.renderListItem(item.tokens || [], depth, styleContext); + + if (itemLines.length > 0) { + // First line - check if it's a nested list + // A nested list will start with indent (spaces) followed by cyan bullet + const firstLine = itemLines[0]; + const isNestedList = /^\s+\x1b\[36m[-\d]/.test(firstLine); // starts with spaces + cyan + bullet char + + if (isNestedList) { + // This is a nested list, just add it as-is (already has full indent) + lines.push(firstLine); + } else { + // Regular text content - add indent and bullet + lines.push(indent + this.theme.listBullet(bullet) + firstLine); + } + + // Rest of the lines + for (let j = 1; j < itemLines.length; j++) { + const line = itemLines[j]; + const isNestedListLine = /^\s+\x1b\[36m[-\d]/.test(line); // starts with spaces + cyan + bullet char + + if (isNestedListLine) { + // Nested list line - already has full indent + lines.push(line); + } else { + // Regular content - add parent indent + 2 spaces for continuation + lines.push(`${indent} ${line}`); + } + } + } else { + lines.push(indent + this.theme.listBullet(bullet)); + } + } + + return lines; + } + + /** + * Render list item tokens, handling nested lists + * Returns lines WITHOUT the parent indent (renderList will add it) + */ + private renderListItem(tokens: Token[], parentDepth: number, styleContext?: InlineStyleContext): string[] { + const lines: string[] = []; + + for (const token of tokens) { + if (token.type === "list") { + // Nested list - render with one additional indent level + // These lines will have their own indent, so we just add them as-is + const nestedLines = this.renderList(token as any, parentDepth + 1, styleContext); + lines.push(...nestedLines); + } else if (token.type === "text") { + // Text content (may have inline tokens) + const text = + token.tokens && token.tokens.length > 0 + ? 
this.renderInlineTokens(token.tokens, styleContext) + : token.text || ""; + lines.push(text); + } else if (token.type === "paragraph") { + // Paragraph in list item + const text = this.renderInlineTokens(token.tokens || [], styleContext); + lines.push(text); + } else if (token.type === "code") { + // Code block in list item + const indent = this.theme.codeBlockIndent ?? " "; + lines.push(this.theme.codeBlockBorder(`\`\`\`${token.lang || ""}`)); + if (this.theme.highlightCode) { + const highlightedLines = this.theme.highlightCode(token.text, token.lang); + for (const hlLine of highlightedLines) { + lines.push(`${indent}${hlLine}`); + } + } else { + const codeLines = token.text.split("\n"); + for (const codeLine of codeLines) { + lines.push(`${indent}${this.theme.codeBlock(codeLine)}`); + } + } + lines.push(this.theme.codeBlockBorder("```")); + } else { + // Other token types - try to render as inline + const text = this.renderInlineTokens([token], styleContext); + if (text) { + lines.push(text); + } + } + } + + return lines; + } + + /** + * Get the visible width of the longest word in a string. + */ + private getLongestWordWidth(text: string, maxWidth?: number): number { + const words = text.split(/\s+/).filter((word) => word.length > 0); + let longest = 0; + for (const word of words) { + longest = Math.max(longest, visibleWidth(word)); + } + if (maxWidth === undefined) { + return longest; + } + return Math.min(longest, maxWidth); + } + + /** + * Wrap a table cell to fit into a column. + * + * Delegates to wrapTextWithAnsi() so ANSI codes + long tokens are handled + * consistently with the rest of the renderer. + */ + private wrapCellText(text: string, maxWidth: number): string[] { + return wrapTextWithAnsi(text, Math.max(1, maxWidth)); + } + + /** + * Render a table with width-aware cell wrapping. + * Cells that don't fit are wrapped to multiple lines. 
+ */ + private renderTable( + token: Token & { header: any[]; rows: any[][]; raw?: string }, + availableWidth: number, + styleContext?: InlineStyleContext, + ): string[] { + const lines: string[] = []; + const numCols = token.header.length; + + if (numCols === 0) { + return lines; + } + + // Calculate border overhead: "│ " + (n-1) * " │ " + " │" + // = 2 + (n-1) * 3 + 2 = 3n + 1 + const borderOverhead = 3 * numCols + 1; + const availableForCells = availableWidth - borderOverhead; + if (availableForCells < numCols) { + // Too narrow to render a stable table. Fall back to raw markdown. + const fallbackLines = token.raw ? wrapTextWithAnsi(token.raw, availableWidth) : []; + fallbackLines.push(""); + return fallbackLines; + } + + const maxUnbrokenWordWidth = 30; + + // Calculate natural column widths (what each column needs without constraints) + const naturalWidths: number[] = []; + const minWordWidths: number[] = []; + for (let i = 0; i < numCols; i++) { + const headerText = this.renderInlineTokens(token.header[i].tokens || [], styleContext); + naturalWidths[i] = visibleWidth(headerText); + minWordWidths[i] = Math.max(1, this.getLongestWordWidth(headerText, maxUnbrokenWordWidth)); + } + for (const row of token.rows) { + for (let i = 0; i < row.length; i++) { + const cellText = this.renderInlineTokens(row[i].tokens || [], styleContext); + naturalWidths[i] = Math.max(naturalWidths[i] || 0, visibleWidth(cellText)); + minWordWidths[i] = Math.max( + minWordWidths[i] || 1, + this.getLongestWordWidth(cellText, maxUnbrokenWordWidth), + ); + } + } + + let minColumnWidths = minWordWidths; + let minCellsWidth = minColumnWidths.reduce((a, b) => a + b, 0); + + if (minCellsWidth > availableForCells) { + minColumnWidths = new Array(numCols).fill(1); + const remaining = availableForCells - numCols; + + if (remaining > 0) { + const totalWeight = minWordWidths.reduce((total, width) => total + Math.max(0, width - 1), 0); + const growth = minWordWidths.map((width) => { + const weight = 
Math.max(0, width - 1); + return totalWeight > 0 ? Math.floor((weight / totalWeight) * remaining) : 0; + }); + + for (let i = 0; i < numCols; i++) { + minColumnWidths[i] += growth[i] ?? 0; + } + + const allocated = growth.reduce((total, width) => total + width, 0); + let leftover = remaining - allocated; + for (let i = 0; leftover > 0 && i < numCols; i++) { + minColumnWidths[i]++; + leftover--; + } + } + + minCellsWidth = minColumnWidths.reduce((a, b) => a + b, 0); + } + + // Calculate column widths that fit within available width + const totalNaturalWidth = naturalWidths.reduce((a, b) => a + b, 0) + borderOverhead; + let columnWidths: number[]; + + if (totalNaturalWidth <= availableWidth) { + // Everything fits naturally + columnWidths = naturalWidths.map((width, index) => Math.max(width, minColumnWidths[index])); + } else { + // Need to shrink columns to fit + const totalGrowPotential = naturalWidths.reduce((total, width, index) => { + return total + Math.max(0, width - minColumnWidths[index]); + }, 0); + const extraWidth = Math.max(0, availableForCells - minCellsWidth); + columnWidths = minColumnWidths.map((minWidth, index) => { + const naturalWidth = naturalWidths[index]; + const minWidthDelta = Math.max(0, naturalWidth - minWidth); + let grow = 0; + if (totalGrowPotential > 0) { + grow = Math.floor((minWidthDelta / totalGrowPotential) * extraWidth); + } + return minWidth + grow; + }); + + // Adjust for rounding errors - distribute remaining space + const allocated = columnWidths.reduce((a, b) => a + b, 0); + let remaining = availableForCells - allocated; + while (remaining > 0) { + let grew = false; + for (let i = 0; i < numCols && remaining > 0; i++) { + if (columnWidths[i] < naturalWidths[i]) { + columnWidths[i]++; + remaining--; + grew = true; + } + } + if (!grew) { + break; + } + } + } + + // Render top border + const topBorderCells = columnWidths.map((w) => "─".repeat(w)); + lines.push(`┌─${topBorderCells.join("─┬─")}─┐`); + + // Render header with 
wrapping + const headerCellLines: string[][] = token.header.map((cell, i) => { + const text = this.renderInlineTokens(cell.tokens || [], styleContext); + return this.wrapCellText(text, columnWidths[i]); + }); + const headerLineCount = Math.max(...headerCellLines.map((c) => c.length)); + + for (let lineIdx = 0; lineIdx < headerLineCount; lineIdx++) { + const rowParts = headerCellLines.map((cellLines, colIdx) => { + const text = cellLines[lineIdx] || ""; + const padded = text + " ".repeat(Math.max(0, columnWidths[colIdx] - visibleWidth(text))); + return this.theme.bold(padded); + }); + lines.push(`│ ${rowParts.join(" │ ")} │`); + } + + // Render separator + const separatorCells = columnWidths.map((w) => "─".repeat(w)); + const separatorLine = `├─${separatorCells.join("─┼─")}─┤`; + lines.push(separatorLine); + + // Render rows with wrapping + for (let rowIndex = 0; rowIndex < token.rows.length; rowIndex++) { + const row = token.rows[rowIndex]; + const rowCellLines: string[][] = row.map((cell, i) => { + const text = this.renderInlineTokens(cell.tokens || [], styleContext); + return this.wrapCellText(text, columnWidths[i]); + }); + const rowLineCount = Math.max(...rowCellLines.map((c) => c.length)); + + for (let lineIdx = 0; lineIdx < rowLineCount; lineIdx++) { + const rowParts = rowCellLines.map((cellLines, colIdx) => { + const text = cellLines[lineIdx] || ""; + return text + " ".repeat(Math.max(0, columnWidths[colIdx] - visibleWidth(text))); + }); + lines.push(`│ ${rowParts.join(" │ ")} │`); + } + + if (rowIndex < token.rows.length - 1) { + lines.push(separatorLine); + } + } + + // Render bottom border + const bottomBorderCells = columnWidths.map((w) => "─".repeat(w)); + lines.push(`└─${bottomBorderCells.join("─┴─")}─┘`); + + lines.push(""); // Add spacing after table + return lines; + } +} diff --git a/packages/pi-tui/src/components/select-list.ts b/packages/pi-tui/src/components/select-list.ts new file mode 100644 index 000000000..e4664a051 --- /dev/null +++ 
b/packages/pi-tui/src/components/select-list.ts @@ -0,0 +1,188 @@ +import { getEditorKeybindings } from "../keybindings.js"; +import type { Component } from "../tui.js"; +import { truncateToWidth } from "../utils.js"; + +const normalizeToSingleLine = (text: string): string => text.replace(/[\r\n]+/g, " ").trim(); + +export interface SelectItem { + value: string; + label: string; + description?: string; +} + +export interface SelectListTheme { + selectedPrefix: (text: string) => string; + selectedText: (text: string) => string; + description: (text: string) => string; + scrollInfo: (text: string) => string; + noMatch: (text: string) => string; +} + +export class SelectList implements Component { + private items: SelectItem[] = []; + private filteredItems: SelectItem[] = []; + private selectedIndex: number = 0; + private maxVisible: number = 5; + private theme: SelectListTheme; + + public onSelect?: (item: SelectItem) => void; + public onCancel?: () => void; + public onSelectionChange?: (item: SelectItem) => void; + + constructor(items: SelectItem[], maxVisible: number, theme: SelectListTheme) { + this.items = items; + this.filteredItems = items; + this.maxVisible = maxVisible; + this.theme = theme; + } + + setFilter(filter: string): void { + this.filteredItems = this.items.filter((item) => item.value.toLowerCase().startsWith(filter.toLowerCase())); + // Reset selection when filter changes + this.selectedIndex = 0; + } + + setSelectedIndex(index: number): void { + this.selectedIndex = Math.max(0, Math.min(index, this.filteredItems.length - 1)); + } + + invalidate(): void { + // No cached state to invalidate currently + } + + render(width: number): string[] { + const lines: string[] = []; + + // If no items match filter, show message + if (this.filteredItems.length === 0) { + lines.push(this.theme.noMatch(" No matching commands")); + return lines; + } + + // Calculate visible range with scrolling + const startIndex = Math.max( + 0, + Math.min(this.selectedIndex - 
Math.floor(this.maxVisible / 2), this.filteredItems.length - this.maxVisible), + ); + const endIndex = Math.min(startIndex + this.maxVisible, this.filteredItems.length); + + // Render visible items + for (let i = startIndex; i < endIndex; i++) { + const item = this.filteredItems[i]; + if (!item) continue; + + const isSelected = i === this.selectedIndex; + const descriptionSingleLine = item.description ? normalizeToSingleLine(item.description) : undefined; + + let line = ""; + if (isSelected) { + // Use arrow indicator for selection - entire line uses selectedText color + const prefixWidth = 2; // "→ " is 2 characters visually + const displayValue = item.label || item.value; + + if (descriptionSingleLine && width > 40) { + // Calculate how much space we have for value + description + const maxValueWidth = Math.min(30, width - prefixWidth - 4); + const truncatedValue = truncateToWidth(displayValue, maxValueWidth, ""); + const spacing = " ".repeat(Math.max(1, 32 - truncatedValue.length)); + + // Calculate remaining space for description using visible widths + const descriptionStart = prefixWidth + truncatedValue.length + spacing.length; + const remainingWidth = width - descriptionStart - 2; // -2 for safety + + if (remainingWidth > 10) { + const truncatedDesc = truncateToWidth(descriptionSingleLine, remainingWidth, ""); + // Apply selectedText to entire line content + line = this.theme.selectedText(`→ ${truncatedValue}${spacing}${truncatedDesc}`); + } else { + // Not enough space for description + const maxWidth = width - prefixWidth - 2; + line = this.theme.selectedText(`→ ${truncateToWidth(displayValue, maxWidth, "")}`); + } + } else { + // No description or not enough width + const maxWidth = width - prefixWidth - 2; + line = this.theme.selectedText(`→ ${truncateToWidth(displayValue, maxWidth, "")}`); + } + } else { + const displayValue = item.label || item.value; + const prefix = " "; + + if (descriptionSingleLine && width > 40) { + // Calculate how much space we 
have for value + description + const maxValueWidth = Math.min(30, width - prefix.length - 4); + const truncatedValue = truncateToWidth(displayValue, maxValueWidth, ""); + const spacing = " ".repeat(Math.max(1, 32 - truncatedValue.length)); + + // Calculate remaining space for description + const descriptionStart = prefix.length + truncatedValue.length + spacing.length; + const remainingWidth = width - descriptionStart - 2; // -2 for safety + + if (remainingWidth > 10) { + const truncatedDesc = truncateToWidth(descriptionSingleLine, remainingWidth, ""); + const descText = this.theme.description(spacing + truncatedDesc); + line = prefix + truncatedValue + descText; + } else { + // Not enough space for description + const maxWidth = width - prefix.length - 2; + line = prefix + truncateToWidth(displayValue, maxWidth, ""); + } + } else { + // No description or not enough width + const maxWidth = width - prefix.length - 2; + line = prefix + truncateToWidth(displayValue, maxWidth, ""); + } + } + + lines.push(line); + } + + // Add scroll indicators if needed + if (startIndex > 0 || endIndex < this.filteredItems.length) { + const scrollText = ` (${this.selectedIndex + 1}/${this.filteredItems.length})`; + // Truncate if too long for terminal + lines.push(this.theme.scrollInfo(truncateToWidth(scrollText, width - 2, ""))); + } + + return lines; + } + + handleInput(keyData: string): void { + const kb = getEditorKeybindings(); + // Up arrow - wrap to bottom when at top + if (kb.matches(keyData, "selectUp")) { + this.selectedIndex = this.selectedIndex === 0 ? this.filteredItems.length - 1 : this.selectedIndex - 1; + this.notifySelectionChange(); + } + // Down arrow - wrap to top when at bottom + else if (kb.matches(keyData, "selectDown")) { + this.selectedIndex = this.selectedIndex === this.filteredItems.length - 1 ? 
0 : this.selectedIndex + 1; + this.notifySelectionChange(); + } + // Enter + else if (kb.matches(keyData, "selectConfirm")) { + const selectedItem = this.filteredItems[this.selectedIndex]; + if (selectedItem && this.onSelect) { + this.onSelect(selectedItem); + } + } + // Escape or Ctrl+C + else if (kb.matches(keyData, "selectCancel")) { + if (this.onCancel) { + this.onCancel(); + } + } + } + + private notifySelectionChange(): void { + const selectedItem = this.filteredItems[this.selectedIndex]; + if (selectedItem && this.onSelectionChange) { + this.onSelectionChange(selectedItem); + } + } + + getSelectedItem(): SelectItem | null { + const item = this.filteredItems[this.selectedIndex]; + return item || null; + } +} diff --git a/packages/pi-tui/src/components/settings-list.ts b/packages/pi-tui/src/components/settings-list.ts new file mode 100644 index 000000000..e6d01348c --- /dev/null +++ b/packages/pi-tui/src/components/settings-list.ts @@ -0,0 +1,250 @@ +import { fuzzyFilter } from "../fuzzy.js"; +import { getEditorKeybindings } from "../keybindings.js"; +import type { Component } from "../tui.js"; +import { truncateToWidth, visibleWidth, wrapTextWithAnsi } from "../utils.js"; +import { Input } from "./input.js"; + +export interface SettingItem { + /** Unique identifier for this setting */ + id: string; + /** Display label (left side) */ + label: string; + /** Optional description shown when selected */ + description?: string; + /** Current value to display (right side) */ + currentValue: string; + /** If provided, Enter/Space cycles through these values */ + values?: string[]; + /** If provided, Enter opens this submenu. Receives current value and done callback. 
*/ + submenu?: (currentValue: string, done: (selectedValue?: string) => void) => Component; +} + +export interface SettingsListTheme { + label: (text: string, selected: boolean) => string; + value: (text: string, selected: boolean) => string; + description: (text: string) => string; + cursor: string; + hint: (text: string) => string; +} + +export interface SettingsListOptions { + enableSearch?: boolean; +} + +export class SettingsList implements Component { + private items: SettingItem[]; + private filteredItems: SettingItem[]; + private theme: SettingsListTheme; + private selectedIndex = 0; + private maxVisible: number; + private onChange: (id: string, newValue: string) => void; + private onCancel: () => void; + private searchInput?: Input; + private searchEnabled: boolean; + + // Submenu state + private submenuComponent: Component | null = null; + private submenuItemIndex: number | null = null; + + constructor( + items: SettingItem[], + maxVisible: number, + theme: SettingsListTheme, + onChange: (id: string, newValue: string) => void, + onCancel: () => void, + options: SettingsListOptions = {}, + ) { + this.items = items; + this.filteredItems = items; + this.maxVisible = maxVisible; + this.theme = theme; + this.onChange = onChange; + this.onCancel = onCancel; + this.searchEnabled = options.enableSearch ?? 
false; + if (this.searchEnabled) { + this.searchInput = new Input(); + } + } + + /** Update an item's currentValue */ + updateValue(id: string, newValue: string): void { + const item = this.items.find((i) => i.id === id); + if (item) { + item.currentValue = newValue; + } + } + + invalidate(): void { + this.submenuComponent?.invalidate?.(); + } + + render(width: number): string[] { + // If submenu is active, render it instead + if (this.submenuComponent) { + return this.submenuComponent.render(width); + } + + return this.renderMainList(width); + } + + private renderMainList(width: number): string[] { + const lines: string[] = []; + + if (this.searchEnabled && this.searchInput) { + lines.push(...this.searchInput.render(width)); + lines.push(""); + } + + if (this.items.length === 0) { + lines.push(this.theme.hint(" No settings available")); + if (this.searchEnabled) { + this.addHintLine(lines, width); + } + return lines; + } + + const displayItems = this.searchEnabled ? this.filteredItems : this.items; + if (displayItems.length === 0) { + lines.push(truncateToWidth(this.theme.hint(" No matching settings"), width)); + this.addHintLine(lines, width); + return lines; + } + + // Calculate visible range with scrolling + const startIndex = Math.max( + 0, + Math.min(this.selectedIndex - Math.floor(this.maxVisible / 2), displayItems.length - this.maxVisible), + ); + const endIndex = Math.min(startIndex + this.maxVisible, displayItems.length); + + // Calculate max label width for alignment + const maxLabelWidth = Math.min(30, Math.max(...this.items.map((item) => visibleWidth(item.label)))); + + // Render visible items + for (let i = startIndex; i < endIndex; i++) { + const item = displayItems[i]; + if (!item) continue; + + const isSelected = i === this.selectedIndex; + const prefix = isSelected ? 
this.theme.cursor : " "; + const prefixWidth = visibleWidth(prefix); + + // Pad label to align values + const labelPadded = item.label + " ".repeat(Math.max(0, maxLabelWidth - visibleWidth(item.label))); + const labelText = this.theme.label(labelPadded, isSelected); + + // Calculate space for value + const separator = " "; + const usedWidth = prefixWidth + maxLabelWidth + visibleWidth(separator); + const valueMaxWidth = width - usedWidth - 2; + + const valueText = this.theme.value(truncateToWidth(item.currentValue, valueMaxWidth, ""), isSelected); + + lines.push(truncateToWidth(prefix + labelText + separator + valueText, width)); + } + + // Add scroll indicator if needed + if (startIndex > 0 || endIndex < displayItems.length) { + const scrollText = ` (${this.selectedIndex + 1}/${displayItems.length})`; + lines.push(this.theme.hint(truncateToWidth(scrollText, width - 2, ""))); + } + + // Add description for selected item + const selectedItem = displayItems[this.selectedIndex]; + if (selectedItem?.description) { + lines.push(""); + const wrappedDesc = wrapTextWithAnsi(selectedItem.description, width - 4); + for (const line of wrappedDesc) { + lines.push(this.theme.description(` ${line}`)); + } + } + + // Add hint + this.addHintLine(lines, width); + + return lines; + } + + handleInput(data: string): void { + // If submenu is active, delegate all input to it + // The submenu's onCancel (triggered by escape) will call done() which closes it + if (this.submenuComponent) { + this.submenuComponent.handleInput?.(data); + return; + } + + // Main list input handling + const kb = getEditorKeybindings(); + const displayItems = this.searchEnabled ? this.filteredItems : this.items; + if (kb.matches(data, "selectUp")) { + if (displayItems.length === 0) return; + this.selectedIndex = this.selectedIndex === 0 ? 
displayItems.length - 1 : this.selectedIndex - 1; + } else if (kb.matches(data, "selectDown")) { + if (displayItems.length === 0) return; + this.selectedIndex = this.selectedIndex === displayItems.length - 1 ? 0 : this.selectedIndex + 1; + } else if (kb.matches(data, "selectConfirm") || data === " ") { + this.activateItem(); + } else if (kb.matches(data, "selectCancel")) { + this.onCancel(); + } else if (this.searchEnabled && this.searchInput) { + const sanitized = data.replace(/ /g, ""); + if (!sanitized) { + return; + } + this.searchInput.handleInput(sanitized); + this.applyFilter(this.searchInput.getValue()); + } + } + + private activateItem(): void { + const item = this.searchEnabled ? this.filteredItems[this.selectedIndex] : this.items[this.selectedIndex]; + if (!item) return; + + if (item.submenu) { + // Open submenu, passing current value so it can pre-select correctly + this.submenuItemIndex = this.selectedIndex; + this.submenuComponent = item.submenu(item.currentValue, (selectedValue?: string) => { + if (selectedValue !== undefined) { + item.currentValue = selectedValue; + this.onChange(item.id, selectedValue); + } + this.closeSubmenu(); + }); + } else if (item.values && item.values.length > 0) { + // Cycle through values + const currentIndex = item.values.indexOf(item.currentValue); + const nextIndex = (currentIndex + 1) % item.values.length; + const newValue = item.values[nextIndex]; + item.currentValue = newValue; + this.onChange(item.id, newValue); + } + } + + private closeSubmenu(): void { + this.submenuComponent = null; + // Restore selection to the item that opened the submenu + if (this.submenuItemIndex !== null) { + this.selectedIndex = this.submenuItemIndex; + this.submenuItemIndex = null; + } + } + + private applyFilter(query: string): void { + this.filteredItems = fuzzyFilter(this.items, query, (item) => item.label); + this.selectedIndex = 0; + } + + private addHintLine(lines: string[], width: number): void { + lines.push(""); + lines.push( + 
truncateToWidth( + this.theme.hint( + this.searchEnabled + ? " Type to search · Enter/Space to change · Esc to cancel" + : " Enter/Space to change · Esc to cancel", + ), + width, + ), + ); + } +} diff --git a/packages/pi-tui/src/components/spacer.ts b/packages/pi-tui/src/components/spacer.ts new file mode 100644 index 000000000..8c63d3c2e --- /dev/null +++ b/packages/pi-tui/src/components/spacer.ts @@ -0,0 +1,28 @@ +import type { Component } from "../tui.js"; + +/** + * Spacer component that renders empty lines + */ +export class Spacer implements Component { + private lines: number; + + constructor(lines: number = 1) { + this.lines = lines; + } + + setLines(lines: number): void { + this.lines = lines; + } + + invalidate(): void { + // No cached state to invalidate currently + } + + render(_width: number): string[] { + const result: string[] = []; + for (let i = 0; i < this.lines; i++) { + result.push(""); + } + return result; + } +} diff --git a/packages/pi-tui/src/components/text.ts b/packages/pi-tui/src/components/text.ts new file mode 100644 index 000000000..efcf25b45 --- /dev/null +++ b/packages/pi-tui/src/components/text.ts @@ -0,0 +1,106 @@ +import type { Component } from "../tui.js"; +import { applyBackgroundToLine, visibleWidth, wrapTextWithAnsi } from "../utils.js"; + +/** + * Text component - displays multi-line text with word wrapping + */ +export class Text implements Component { + private text: string; + private paddingX: number; // Left/right padding + private paddingY: number; // Top/bottom padding + private customBgFn?: (text: string) => string; + + // Cache for rendered output + private cachedText?: string; + private cachedWidth?: number; + private cachedLines?: string[]; + + constructor(text: string = "", paddingX: number = 1, paddingY: number = 1, customBgFn?: (text: string) => string) { + this.text = text; + this.paddingX = paddingX; + this.paddingY = paddingY; + this.customBgFn = customBgFn; + } + + setText(text: string): void { + this.text = 
text; + this.cachedText = undefined; + this.cachedWidth = undefined; + this.cachedLines = undefined; + } + + setCustomBgFn(customBgFn?: (text: string) => string): void { + this.customBgFn = customBgFn; + this.cachedText = undefined; + this.cachedWidth = undefined; + this.cachedLines = undefined; + } + + invalidate(): void { + this.cachedText = undefined; + this.cachedWidth = undefined; + this.cachedLines = undefined; + } + + render(width: number): string[] { + // Check cache + if (this.cachedLines && this.cachedText === this.text && this.cachedWidth === width) { + return this.cachedLines; + } + + // Don't render anything if there's no actual text + if (!this.text || this.text.trim() === "") { + const result: string[] = []; + this.cachedText = this.text; + this.cachedWidth = width; + this.cachedLines = result; + return result; + } + + // Replace tabs with 3 spaces + const normalizedText = this.text.replace(/\t/g, " "); + + // Calculate content width (subtract left/right margins) + const contentWidth = Math.max(1, width - this.paddingX * 2); + + // Wrap text (this preserves ANSI codes but does NOT pad) + const wrappedLines = wrapTextWithAnsi(normalizedText, contentWidth); + + // Add margins and background to each line + const leftMargin = " ".repeat(this.paddingX); + const rightMargin = " ".repeat(this.paddingX); + const contentLines: string[] = []; + + for (const line of wrappedLines) { + // Add margins + const lineWithMargins = leftMargin + line + rightMargin; + + // Apply background if specified (this also pads to full width) + if (this.customBgFn) { + contentLines.push(applyBackgroundToLine(lineWithMargins, width, this.customBgFn)); + } else { + // No background - just pad to width with spaces + const visibleLen = visibleWidth(lineWithMargins); + const paddingNeeded = Math.max(0, width - visibleLen); + contentLines.push(lineWithMargins + " ".repeat(paddingNeeded)); + } + } + + // Add top/bottom padding (empty lines) + const emptyLine = " ".repeat(width); + const 
emptyLines: string[] = []; + for (let i = 0; i < this.paddingY; i++) { + const line = this.customBgFn ? applyBackgroundToLine(emptyLine, width, this.customBgFn) : emptyLine; + emptyLines.push(line); + } + + const result = [...emptyLines, ...contentLines, ...emptyLines]; + + // Update cache + this.cachedText = this.text; + this.cachedWidth = width; + this.cachedLines = result; + + return result.length > 0 ? result : [""]; + } +} diff --git a/packages/pi-tui/src/components/truncated-text.ts b/packages/pi-tui/src/components/truncated-text.ts new file mode 100644 index 000000000..12eac558d --- /dev/null +++ b/packages/pi-tui/src/components/truncated-text.ts @@ -0,0 +1,65 @@ +import type { Component } from "../tui.js"; +import { truncateToWidth, visibleWidth } from "../utils.js"; + +/** + * Text component that truncates to fit viewport width + */ +export class TruncatedText implements Component { + private text: string; + private paddingX: number; + private paddingY: number; + + constructor(text: string, paddingX: number = 0, paddingY: number = 0) { + this.text = text; + this.paddingX = paddingX; + this.paddingY = paddingY; + } + + invalidate(): void { + // No cached state to invalidate currently + } + + render(width: number): string[] { + const result: string[] = []; + + // Empty line padded to width + const emptyLine = " ".repeat(width); + + // Add vertical padding above + for (let i = 0; i < this.paddingY; i++) { + result.push(emptyLine); + } + + // Calculate available width after horizontal padding + const availableWidth = Math.max(1, width - this.paddingX * 2); + + // Take only the first line (stop at newline) + let singleLineText = this.text; + const newlineIndex = this.text.indexOf("\n"); + if (newlineIndex !== -1) { + singleLineText = this.text.substring(0, newlineIndex); + } + + // Truncate text if needed (accounting for ANSI codes) + const displayText = truncateToWidth(singleLineText, availableWidth); + + // Add horizontal padding + const leftPadding = " 
".repeat(this.paddingX); + const rightPadding = " ".repeat(this.paddingX); + const lineWithPadding = leftPadding + displayText + rightPadding; + + // Pad line to exactly width characters + const lineVisibleWidth = visibleWidth(lineWithPadding); + const paddingNeeded = Math.max(0, width - lineVisibleWidth); + const finalLine = lineWithPadding + " ".repeat(paddingNeeded); + + result.push(finalLine); + + // Add vertical padding below + for (let i = 0; i < this.paddingY; i++) { + result.push(emptyLine); + } + + return result; + } +} diff --git a/packages/pi-tui/src/editor-component.ts b/packages/pi-tui/src/editor-component.ts new file mode 100644 index 000000000..c6b6c43da --- /dev/null +++ b/packages/pi-tui/src/editor-component.ts @@ -0,0 +1,74 @@ +import type { AutocompleteProvider } from "./autocomplete.js"; +import type { Component } from "./tui.js"; + +/** + * Interface for custom editor components. + * + * This allows extensions to provide their own editor implementation + * (e.g., vim mode, emacs mode, custom keybindings) while maintaining + * compatibility with the core application. + */ +export interface EditorComponent extends Component { + // ========================================================================= + // Core text access (required) + // ========================================================================= + + /** Get the current text content */ + getText(): string; + + /** Set the text content */ + setText(text: string): void; + + /** Handle raw terminal input (key presses, paste sequences, etc.) 
*/ + handleInput(data: string): void; + + // ========================================================================= + // Callbacks (required) + // ========================================================================= + + /** Called when user submits (e.g., Enter key) */ + onSubmit?: (text: string) => void; + + /** Called when text changes */ + onChange?: (text: string) => void; + + // ========================================================================= + // History support (optional) + // ========================================================================= + + /** Add text to history for up/down navigation */ + addToHistory?(text: string): void; + + // ========================================================================= + // Advanced text manipulation (optional) + // ========================================================================= + + /** Insert text at current cursor position */ + insertTextAtCursor?(text: string): void; + + /** + * Get text with any markers expanded (e.g., paste markers). + * Falls back to getText() if not implemented. 
+ */ + getExpandedText?(): string; + + // ========================================================================= + // Autocomplete support (optional) + // ========================================================================= + + /** Set the autocomplete provider */ + setAutocompleteProvider?(provider: AutocompleteProvider): void; + + // ========================================================================= + // Appearance (optional) + // ========================================================================= + + /** Border color function */ + borderColor?: (str: string) => string; + + /** Set horizontal padding */ + setPaddingX?(padding: number): void; + + /** Set max visible items in autocomplete dropdown */ + setAutocompleteMaxVisible?(maxVisible: number): void; +} diff --git a/packages/pi-tui/src/fuzzy.ts b/packages/pi-tui/src/fuzzy.ts new file mode 100644 index 000000000..09410238f --- /dev/null +++ b/packages/pi-tui/src/fuzzy.ts @@ -0,0 +1,133 @@ +/** + * Fuzzy matching utilities. + * Matches if all query characters appear in order (not necessarily consecutive). + * Lower score = better match. 
+ */ + +export interface FuzzyMatch { + matches: boolean; + score: number; +} + +export function fuzzyMatch(query: string, text: string): FuzzyMatch { + const queryLower = query.toLowerCase(); + const textLower = text.toLowerCase(); + + const matchQuery = (normalizedQuery: string): FuzzyMatch => { + if (normalizedQuery.length === 0) { + return { matches: true, score: 0 }; + } + + if (normalizedQuery.length > textLower.length) { + return { matches: false, score: 0 }; + } + + let queryIndex = 0; + let score = 0; + let lastMatchIndex = -1; + let consecutiveMatches = 0; + + for (let i = 0; i < textLower.length && queryIndex < normalizedQuery.length; i++) { + if (textLower[i] === normalizedQuery[queryIndex]) { + const isWordBoundary = i === 0 || /[\s\-_./:]/.test(textLower[i - 1]!); + + // Reward consecutive matches + if (lastMatchIndex === i - 1) { + consecutiveMatches++; + score -= consecutiveMatches * 5; + } else { + consecutiveMatches = 0; + // Penalize gaps + if (lastMatchIndex >= 0) { + score += (i - lastMatchIndex - 1) * 2; + } + } + + // Reward word boundary matches + if (isWordBoundary) { + score -= 10; + } + + // Slight penalty for later matches + score += i * 0.1; + + lastMatchIndex = i; + queryIndex++; + } + } + + if (queryIndex < normalizedQuery.length) { + return { matches: false, score: 0 }; + } + + return { matches: true, score }; + }; + + const primaryMatch = matchQuery(queryLower); + if (primaryMatch.matches) { + return primaryMatch; + } + + const alphaNumericMatch = queryLower.match(/^(?[a-z]+)(?[0-9]+)$/); + const numericAlphaMatch = queryLower.match(/^(?[0-9]+)(?[a-z]+)$/); + const swappedQuery = alphaNumericMatch + ? `${alphaNumericMatch.groups?.digits ?? ""}${alphaNumericMatch.groups?.letters ?? ""}` + : numericAlphaMatch + ? `${numericAlphaMatch.groups?.letters ?? ""}${numericAlphaMatch.groups?.digits ?? 
""}` + : ""; + + if (!swappedQuery) { + return primaryMatch; + } + + const swappedMatch = matchQuery(swappedQuery); + if (!swappedMatch.matches) { + return primaryMatch; + } + + return { matches: true, score: swappedMatch.score + 5 }; +} + +/** + * Filter and sort items by fuzzy match quality (best matches first). + * Supports space-separated tokens: all tokens must match. + */ +export function fuzzyFilter(items: T[], query: string, getText: (item: T) => string): T[] { + if (!query.trim()) { + return items; + } + + const tokens = query + .trim() + .split(/\s+/) + .filter((t) => t.length > 0); + + if (tokens.length === 0) { + return items; + } + + const results: { item: T; totalScore: number }[] = []; + + for (const item of items) { + const text = getText(item); + let totalScore = 0; + let allMatch = true; + + for (const token of tokens) { + const match = fuzzyMatch(token, text); + if (match.matches) { + totalScore += match.score; + } else { + allMatch = false; + break; + } + } + + if (allMatch) { + results.push({ item, totalScore }); + } + } + + results.sort((a, b) => a.totalScore - b.totalScore); + return results.map((r) => r.item); +} diff --git a/packages/pi-tui/src/index.ts b/packages/pi-tui/src/index.ts new file mode 100644 index 000000000..5e8c90ab6 --- /dev/null +++ b/packages/pi-tui/src/index.ts @@ -0,0 +1,93 @@ +// Core TUI interfaces and classes + +// Autocomplete support +export { + type AutocompleteItem, + type AutocompleteProvider, + CombinedAutocompleteProvider, + type SlashCommand, +} from "./autocomplete.js"; +// Components +export { Box } from "./components/box.js"; +export { CancellableLoader } from "./components/cancellable-loader.js"; +export { Editor, type EditorOptions, type EditorTheme } from "./components/editor.js"; +export { Image, type ImageOptions, type ImageTheme } from "./components/image.js"; +export { Input } from "./components/input.js"; +export { Loader } from "./components/loader.js"; +export { type DefaultTextStyle, Markdown, 
type MarkdownTheme } from "./components/markdown.js"; +export { type SelectItem, SelectList, type SelectListTheme } from "./components/select-list.js"; +export { type SettingItem, SettingsList, type SettingsListTheme } from "./components/settings-list.js"; +export { Spacer } from "./components/spacer.js"; +export { Text } from "./components/text.js"; +export { TruncatedText } from "./components/truncated-text.js"; +// Editor component interface (for custom editors) +export type { EditorComponent } from "./editor-component.js"; +// Fuzzy matching +export { type FuzzyMatch, fuzzyFilter, fuzzyMatch } from "./fuzzy.js"; +// Keybindings +export { + DEFAULT_EDITOR_KEYBINDINGS, + type EditorAction, + type EditorKeybindingsConfig, + EditorKeybindingsManager, + getEditorKeybindings, + setEditorKeybindings, +} from "./keybindings.js"; +// Keyboard input handling +export { + decodeKittyPrintable, + isKeyRelease, + isKeyRepeat, + isKittyProtocolActive, + Key, + type KeyEventType, + type KeyId, + matchesKey, + parseKey, + setKittyProtocolActive, +} from "./keys.js"; +// Input buffering for batch splitting +export { StdinBuffer, type StdinBufferEventMap, type StdinBufferOptions } from "./stdin-buffer.js"; +// Terminal interface and implementations +export { ProcessTerminal, type Terminal } from "./terminal.js"; +// Terminal image support +export { + allocateImageId, + type CellDimensions, + calculateImageRows, + deleteAllKittyImages, + deleteKittyImage, + detectCapabilities, + encodeITerm2, + encodeKitty, + getCapabilities, + getCellDimensions, + getGifDimensions, + getImageDimensions, + getJpegDimensions, + getPngDimensions, + getWebpDimensions, + type ImageDimensions, + type ImageProtocol, + type ImageRenderOptions, + imageFallback, + renderImage, + resetCapabilitiesCache, + setCellDimensions, + type TerminalCapabilities, +} from "./terminal-image.js"; +export { + type Component, + Container, + CURSOR_MARKER, + type Focusable, + isFocusable, + type OverlayAnchor, + type 
OverlayHandle, + type OverlayMargin, + type OverlayOptions, + type SizeValue, + TUI, +} from "./tui.js"; +// Utilities +export { truncateToWidth, visibleWidth, wrapTextWithAnsi } from "./utils.js"; diff --git a/packages/pi-tui/src/keybindings.ts b/packages/pi-tui/src/keybindings.ts new file mode 100644 index 000000000..cafc9e235 --- /dev/null +++ b/packages/pi-tui/src/keybindings.ts @@ -0,0 +1,189 @@ +import { type KeyId, matchesKey } from "./keys.js"; + +/** + * Editor actions that can be bound to keys. + */ +export type EditorAction = + // Cursor movement + | "cursorUp" + | "cursorDown" + | "cursorLeft" + | "cursorRight" + | "cursorWordLeft" + | "cursorWordRight" + | "cursorLineStart" + | "cursorLineEnd" + | "jumpForward" + | "jumpBackward" + | "pageUp" + | "pageDown" + // Deletion + | "deleteCharBackward" + | "deleteCharForward" + | "deleteWordBackward" + | "deleteWordForward" + | "deleteToLineStart" + | "deleteToLineEnd" + // Text input + | "newLine" + | "submit" + | "tab" + // Selection/autocomplete + | "selectUp" + | "selectDown" + | "selectPageUp" + | "selectPageDown" + | "selectConfirm" + | "selectCancel" + // Clipboard + | "copy" + // Kill ring + | "yank" + | "yankPop" + // Undo + | "undo" + // Tool output + | "expandTools" + // Tree navigation + | "treeFoldOrUp" + | "treeUnfoldOrDown" + // Session + | "toggleSessionPath" + | "toggleSessionSort" + | "renameSession" + | "deleteSession" + | "deleteSessionNoninvasive"; + +// Re-export KeyId from keys.ts +export type { KeyId }; + +/** + * Editor keybindings configuration. + */ +export type EditorKeybindingsConfig = { + [K in EditorAction]?: KeyId | KeyId[]; +}; + +/** + * Default editor keybindings. 
+ */ +export const DEFAULT_EDITOR_KEYBINDINGS: Required = { + // Cursor movement + cursorUp: "up", + cursorDown: "down", + cursorLeft: ["left", "ctrl+b"], + cursorRight: ["right", "ctrl+f"], + cursorWordLeft: ["alt+left", "ctrl+left", "alt+b"], + cursorWordRight: ["alt+right", "ctrl+right", "alt+f"], + cursorLineStart: ["home", "ctrl+a"], + cursorLineEnd: ["end", "ctrl+e"], + jumpForward: "ctrl+]", + jumpBackward: "ctrl+alt+]", + pageUp: "pageUp", + pageDown: "pageDown", + // Deletion + deleteCharBackward: "backspace", + deleteCharForward: ["delete", "ctrl+d"], + deleteWordBackward: ["ctrl+w", "alt+backspace"], + deleteWordForward: ["alt+d", "alt+delete"], + deleteToLineStart: "ctrl+u", + deleteToLineEnd: "ctrl+k", + // Text input + newLine: "shift+enter", + submit: "enter", + tab: "tab", + // Selection/autocomplete + selectUp: "up", + selectDown: "down", + selectPageUp: "pageUp", + selectPageDown: "pageDown", + selectConfirm: "enter", + selectCancel: ["escape", "ctrl+c"], + // Clipboard + copy: "ctrl+c", + // Kill ring + yank: "ctrl+y", + yankPop: "alt+y", + // Undo + undo: "ctrl+-", + // Tool output + expandTools: "ctrl+o", + // Tree navigation + treeFoldOrUp: ["ctrl+left", "alt+left"], + treeUnfoldOrDown: ["ctrl+right", "alt+right"], + // Session + toggleSessionPath: "ctrl+p", + toggleSessionSort: "ctrl+s", + renameSession: "ctrl+r", + deleteSession: "ctrl+d", + deleteSessionNoninvasive: "ctrl+backspace", +}; + +/** + * Manages keybindings for the editor. + */ +export class EditorKeybindingsManager { + private actionToKeys: Map; + + constructor(config: EditorKeybindingsConfig = {}) { + this.actionToKeys = new Map(); + this.buildMaps(config); + } + + private buildMaps(config: EditorKeybindingsConfig): void { + this.actionToKeys.clear(); + + // Start with defaults + for (const [action, keys] of Object.entries(DEFAULT_EDITOR_KEYBINDINGS)) { + const keyArray = Array.isArray(keys) ? 
keys : [keys]; + this.actionToKeys.set(action as EditorAction, [...keyArray]); + } + + // Override with user config + for (const [action, keys] of Object.entries(config)) { + if (keys === undefined) continue; + const keyArray = Array.isArray(keys) ? keys : [keys]; + this.actionToKeys.set(action as EditorAction, keyArray); + } + } + + /** + * Check if input matches a specific action. + */ + matches(data: string, action: EditorAction): boolean { + const keys = this.actionToKeys.get(action); + if (!keys) return false; + for (const key of keys) { + if (matchesKey(data, key)) return true; + } + return false; + } + + /** + * Get keys bound to an action. + */ + getKeys(action: EditorAction): KeyId[] { + return this.actionToKeys.get(action) ?? []; + } + + /** + * Update configuration. + */ + setConfig(config: EditorKeybindingsConfig): void { + this.buildMaps(config); + } +} + +// Global instance +let globalEditorKeybindings: EditorKeybindingsManager | null = null; + +export function getEditorKeybindings(): EditorKeybindingsManager { + if (!globalEditorKeybindings) { + globalEditorKeybindings = new EditorKeybindingsManager(); + } + return globalEditorKeybindings; +} + +export function setEditorKeybindings(manager: EditorKeybindingsManager): void { + globalEditorKeybindings = manager; +} diff --git a/packages/pi-tui/src/keys.ts b/packages/pi-tui/src/keys.ts new file mode 100644 index 000000000..57366f6f5 --- /dev/null +++ b/packages/pi-tui/src/keys.ts @@ -0,0 +1,1255 @@ +/** + * Keyboard input handling for terminal applications. + * + * Supports both legacy terminal sequences and Kitty keyboard protocol. + * See: https://sw.kovidgoyal.net/kitty/keyboard-protocol/ + * Reference: https://github.com/sst/opentui/blob/7da92b4088aebfe27b9f691c04163a48821e49fd/packages/core/src/lib/parse.keypress.ts + * + * Symbol keys are also supported, however some ctrl+symbol combos + * overlap with ASCII codes, e.g. ctrl+[ = ESC. 
+ * See: https://sw.kovidgoyal.net/kitty/keyboard-protocol/#legacy-ctrl-mapping-of-ascii-keys + * Those can still be * used for ctrl+shift combos + * + * API: + * - matchesKey(data, keyId) - Check if input matches a key identifier + * - parseKey(data) - Parse input and return the key identifier + * - Key - Helper object for creating typed key identifiers + * - setKittyProtocolActive(active) - Set global Kitty protocol state + * - isKittyProtocolActive() - Query global Kitty protocol state + */ + +// ============================================================================= +// Global Kitty Protocol State +// ============================================================================= + +let _kittyProtocolActive = false; + +/** + * Set the global Kitty keyboard protocol state. + * Called by ProcessTerminal after detecting protocol support. + */ +export function setKittyProtocolActive(active: boolean): void { + _kittyProtocolActive = active; +} + +/** + * Query whether Kitty keyboard protocol is currently active. + */ +export function isKittyProtocolActive(): boolean { + return _kittyProtocolActive; +} + +// ============================================================================= +// Type-Safe Key Identifiers +// ============================================================================= + +type Letter = + | "a" + | "b" + | "c" + | "d" + | "e" + | "f" + | "g" + | "h" + | "i" + | "j" + | "k" + | "l" + | "m" + | "n" + | "o" + | "p" + | "q" + | "r" + | "s" + | "t" + | "u" + | "v" + | "w" + | "x" + | "y" + | "z"; + +type Digit = "0" | "1" | "2" | "3" | "4" | "5" | "6" | "7" | "8" | "9"; + +type SymbolKey = + | "`" + | "-" + | "=" + | "[" + | "]" + | "\\" + | ";" + | "'" + | "," + | "." + | "/" + | "!" 
+ | "@" + | "#" + | "$" + | "%" + | "^" + | "&" + | "*" + | "(" + | ")" + | "_" + | "+" + | "|" + | "~" + | "{" + | "}" + | ":" + | "<" + | ">" + | "?"; + +type SpecialKey = + | "escape" + | "esc" + | "enter" + | "return" + | "tab" + | "space" + | "backspace" + | "delete" + | "insert" + | "clear" + | "home" + | "end" + | "pageUp" + | "pageDown" + | "up" + | "down" + | "left" + | "right" + | "f1" + | "f2" + | "f3" + | "f4" + | "f5" + | "f6" + | "f7" + | "f8" + | "f9" + | "f10" + | "f11" + | "f12"; + +type BaseKey = Letter | Digit | SymbolKey | SpecialKey; + +/** + * Union type of all valid key identifiers. + * Provides autocomplete and catches typos at compile time. + */ +export type KeyId = + | BaseKey + | `ctrl+${BaseKey}` + | `shift+${BaseKey}` + | `alt+${BaseKey}` + | `ctrl+shift+${BaseKey}` + | `shift+ctrl+${BaseKey}` + | `ctrl+alt+${BaseKey}` + | `alt+ctrl+${BaseKey}` + | `shift+alt+${BaseKey}` + | `alt+shift+${BaseKey}` + | `ctrl+shift+alt+${BaseKey}` + | `ctrl+alt+shift+${BaseKey}` + | `shift+ctrl+alt+${BaseKey}` + | `shift+alt+ctrl+${BaseKey}` + | `alt+ctrl+shift+${BaseKey}` + | `alt+shift+ctrl+${BaseKey}`; + +/** + * Helper object for creating typed key identifiers with autocomplete. + * + * Usage: + * - Key.escape, Key.enter, Key.tab, etc. for special keys + * - Key.backtick, Key.comma, Key.period, etc. 
for symbol keys + * - Key.ctrl("c"), Key.alt("x") for single modifier + * - Key.ctrlShift("p"), Key.ctrlAlt("x") for combined modifiers + */ +export const Key = { + // Special keys + escape: "escape" as const, + esc: "esc" as const, + enter: "enter" as const, + return: "return" as const, + tab: "tab" as const, + space: "space" as const, + backspace: "backspace" as const, + delete: "delete" as const, + insert: "insert" as const, + clear: "clear" as const, + home: "home" as const, + end: "end" as const, + pageUp: "pageUp" as const, + pageDown: "pageDown" as const, + up: "up" as const, + down: "down" as const, + left: "left" as const, + right: "right" as const, + f1: "f1" as const, + f2: "f2" as const, + f3: "f3" as const, + f4: "f4" as const, + f5: "f5" as const, + f6: "f6" as const, + f7: "f7" as const, + f8: "f8" as const, + f9: "f9" as const, + f10: "f10" as const, + f11: "f11" as const, + f12: "f12" as const, + + // Symbol keys + backtick: "`" as const, + hyphen: "-" as const, + equals: "=" as const, + leftbracket: "[" as const, + rightbracket: "]" as const, + backslash: "\\" as const, + semicolon: ";" as const, + quote: "'" as const, + comma: "," as const, + period: "." as const, + slash: "/" as const, + exclamation: "!" as const, + at: "@" as const, + hash: "#" as const, + dollar: "$" as const, + percent: "%" as const, + caret: "^" as const, + ampersand: "&" as const, + asterisk: "*" as const, + leftparen: "(" as const, + rightparen: ")" as const, + underscore: "_" as const, + plus: "+" as const, + pipe: "|" as const, + tilde: "~" as const, + leftbrace: "{" as const, + rightbrace: "}" as const, + colon: ":" as const, + lessthan: "<" as const, + greaterthan: ">" as const, + question: "?" 
as const, + + // Single modifiers + ctrl: (key: K): `ctrl+${K}` => `ctrl+${key}`, + shift: (key: K): `shift+${K}` => `shift+${key}`, + alt: (key: K): `alt+${K}` => `alt+${key}`, + + // Combined modifiers + ctrlShift: (key: K): `ctrl+shift+${K}` => `ctrl+shift+${key}`, + shiftCtrl: (key: K): `shift+ctrl+${K}` => `shift+ctrl+${key}`, + ctrlAlt: (key: K): `ctrl+alt+${K}` => `ctrl+alt+${key}`, + altCtrl: (key: K): `alt+ctrl+${K}` => `alt+ctrl+${key}`, + shiftAlt: (key: K): `shift+alt+${K}` => `shift+alt+${key}`, + altShift: (key: K): `alt+shift+${K}` => `alt+shift+${key}`, + + // Triple modifiers + ctrlShiftAlt: (key: K): `ctrl+shift+alt+${K}` => `ctrl+shift+alt+${key}`, +} as const; + +// ============================================================================= +// Constants +// ============================================================================= + +const SYMBOL_KEYS = new Set([ + "`", + "-", + "=", + "[", + "]", + "\\", + ";", + "'", + ",", + ".", + "/", + "!", + "@", + "#", + "$", + "%", + "^", + "&", + "*", + "(", + ")", + "_", + "+", + "|", + "~", + "{", + "}", + ":", + "<", + ">", + "?", +]); + +const MODIFIERS = { + shift: 1, + alt: 2, + ctrl: 4, +} as const; + +const LOCK_MASK = 64 + 128; // Caps Lock + Num Lock + +const CODEPOINTS = { + escape: 27, + tab: 9, + enter: 13, + space: 32, + backspace: 127, + kpEnter: 57414, // Numpad Enter (Kitty protocol) +} as const; + +const ARROW_CODEPOINTS = { + up: -1, + down: -2, + right: -3, + left: -4, +} as const; + +const FUNCTIONAL_CODEPOINTS = { + delete: -10, + insert: -11, + pageUp: -12, + pageDown: -13, + home: -14, + end: -15, +} as const; + +const LEGACY_KEY_SEQUENCES = { + up: ["\x1b[A", "\x1bOA"], + down: ["\x1b[B", "\x1bOB"], + right: ["\x1b[C", "\x1bOC"], + left: ["\x1b[D", "\x1bOD"], + home: ["\x1b[H", "\x1bOH", "\x1b[1~", "\x1b[7~"], + end: ["\x1b[F", "\x1bOF", "\x1b[4~", "\x1b[8~"], + insert: ["\x1b[2~"], + delete: ["\x1b[3~"], + pageUp: ["\x1b[5~", "\x1b[[5~"], + pageDown: ["\x1b[6~", 
"\x1b[[6~"], + clear: ["\x1b[E", "\x1bOE"], + f1: ["\x1bOP", "\x1b[11~", "\x1b[[A"], + f2: ["\x1bOQ", "\x1b[12~", "\x1b[[B"], + f3: ["\x1bOR", "\x1b[13~", "\x1b[[C"], + f4: ["\x1bOS", "\x1b[14~", "\x1b[[D"], + f5: ["\x1b[15~", "\x1b[[E"], + f6: ["\x1b[17~"], + f7: ["\x1b[18~"], + f8: ["\x1b[19~"], + f9: ["\x1b[20~"], + f10: ["\x1b[21~"], + f11: ["\x1b[23~"], + f12: ["\x1b[24~"], +} as const; + +const LEGACY_SHIFT_SEQUENCES = { + up: ["\x1b[a"], + down: ["\x1b[b"], + right: ["\x1b[c"], + left: ["\x1b[d"], + clear: ["\x1b[e"], + insert: ["\x1b[2$"], + delete: ["\x1b[3$"], + pageUp: ["\x1b[5$"], + pageDown: ["\x1b[6$"], + home: ["\x1b[7$"], + end: ["\x1b[8$"], +} as const; + +const LEGACY_CTRL_SEQUENCES = { + up: ["\x1bOa"], + down: ["\x1bOb"], + right: ["\x1bOc"], + left: ["\x1bOd"], + clear: ["\x1bOe"], + insert: ["\x1b[2^"], + delete: ["\x1b[3^"], + pageUp: ["\x1b[5^"], + pageDown: ["\x1b[6^"], + home: ["\x1b[7^"], + end: ["\x1b[8^"], +} as const; + +const LEGACY_SEQUENCE_KEY_IDS: Record = { + "\x1bOA": "up", + "\x1bOB": "down", + "\x1bOC": "right", + "\x1bOD": "left", + "\x1bOH": "home", + "\x1bOF": "end", + "\x1b[E": "clear", + "\x1bOE": "clear", + "\x1bOe": "ctrl+clear", + "\x1b[e": "shift+clear", + "\x1b[2~": "insert", + "\x1b[2$": "shift+insert", + "\x1b[2^": "ctrl+insert", + "\x1b[3$": "shift+delete", + "\x1b[3^": "ctrl+delete", + "\x1b[[5~": "pageUp", + "\x1b[[6~": "pageDown", + "\x1b[a": "shift+up", + "\x1b[b": "shift+down", + "\x1b[c": "shift+right", + "\x1b[d": "shift+left", + "\x1bOa": "ctrl+up", + "\x1bOb": "ctrl+down", + "\x1bOc": "ctrl+right", + "\x1bOd": "ctrl+left", + "\x1b[5$": "shift+pageUp", + "\x1b[6$": "shift+pageDown", + "\x1b[7$": "shift+home", + "\x1b[8$": "shift+end", + "\x1b[5^": "ctrl+pageUp", + "\x1b[6^": "ctrl+pageDown", + "\x1b[7^": "ctrl+home", + "\x1b[8^": "ctrl+end", + "\x1bOP": "f1", + "\x1bOQ": "f2", + "\x1bOR": "f3", + "\x1bOS": "f4", + "\x1b[11~": "f1", + "\x1b[12~": "f2", + "\x1b[13~": "f3", + "\x1b[14~": "f4", + "\x1b[[A": 
"f1", + "\x1b[[B": "f2", + "\x1b[[C": "f3", + "\x1b[[D": "f4", + "\x1b[[E": "f5", + "\x1b[15~": "f5", + "\x1b[17~": "f6", + "\x1b[18~": "f7", + "\x1b[19~": "f8", + "\x1b[20~": "f9", + "\x1b[21~": "f10", + "\x1b[23~": "f11", + "\x1b[24~": "f12", + "\x1bb": "alt+left", + "\x1bf": "alt+right", + "\x1bp": "alt+up", + "\x1bn": "alt+down", +} as const; + +type LegacyModifierKey = keyof typeof LEGACY_SHIFT_SEQUENCES; + +const matchesLegacySequence = (data: string, sequences: readonly string[]): boolean => sequences.includes(data); + +const matchesLegacyModifierSequence = (data: string, key: LegacyModifierKey, modifier: number): boolean => { + if (modifier === MODIFIERS.shift) { + return matchesLegacySequence(data, LEGACY_SHIFT_SEQUENCES[key]); + } + if (modifier === MODIFIERS.ctrl) { + return matchesLegacySequence(data, LEGACY_CTRL_SEQUENCES[key]); + } + return false; +}; + +// ============================================================================= +// Kitty Protocol Parsing +// ============================================================================= + +/** + * Event types from Kitty keyboard protocol (flag 2) + * 1 = key press, 2 = key repeat, 3 = key release + */ +export type KeyEventType = "press" | "repeat" | "release"; + +interface ParsedKittySequence { + codepoint: number; + shiftedKey?: number; // Shifted version of the key (when shift is pressed) + baseLayoutKey?: number; // Key in standard PC-101 layout (for non-Latin layouts) + modifier: number; + eventType: KeyEventType; +} + +interface ParsedModifyOtherKeysSequence { + codepoint: number; + modifier: number; +} + +// Store the last parsed event type for isKeyRelease() to query +let _lastEventType: KeyEventType = "press"; + +/** + * Check if the last parsed key event was a key release. + * Only meaningful when Kitty keyboard protocol with flag 2 is active. 
+ */ +export function isKeyRelease(data: string): boolean { + // Don't treat bracketed paste content as key release, even if it contains + // patterns like ":3F" (e.g., bluetooth MAC addresses like "90:62:3F:A5"). + // Terminal.ts re-wraps paste content with bracketed paste markers before + // passing to TUI, so pasted data will always contain \x1b[200~. + if (data.includes("\x1b[200~")) { + return false; + } + + // Quick check: release events with flag 2 contain ":3" + // Format: \x1b[;:3u + if ( + data.includes(":3u") || + data.includes(":3~") || + data.includes(":3A") || + data.includes(":3B") || + data.includes(":3C") || + data.includes(":3D") || + data.includes(":3H") || + data.includes(":3F") + ) { + return true; + } + return false; +} + +/** + * Check if the last parsed key event was a key repeat. + * Only meaningful when Kitty keyboard protocol with flag 2 is active. + */ +export function isKeyRepeat(data: string): boolean { + // Don't treat bracketed paste content as key repeat, even if it contains + // patterns like ":2F". See isKeyRelease() for details. 
+ if (data.includes("\x1b[200~")) { + return false; + } + + if ( + data.includes(":2u") || + data.includes(":2~") || + data.includes(":2A") || + data.includes(":2B") || + data.includes(":2C") || + data.includes(":2D") || + data.includes(":2H") || + data.includes(":2F") + ) { + return true; + } + return false; +} + +function parseEventType(eventTypeStr: string | undefined): KeyEventType { + if (!eventTypeStr) return "press"; + const eventType = parseInt(eventTypeStr, 10); + if (eventType === 2) return "repeat"; + if (eventType === 3) return "release"; + return "press"; +} + +function parseKittySequence(data: string): ParsedKittySequence | null { + // CSI u format with alternate keys (flag 4): + // \x1b[u + // \x1b[;u + // \x1b[;:u + // \x1b[:;u + // \x1b[::;u + // \x1b[::;u (no shifted key, only base) + // + // With flag 2, event type is appended after modifier colon: 1=press, 2=repeat, 3=release + // With flag 4, alternate keys are appended after codepoint with colons + const csiUMatch = data.match(/^\x1b\[(\d+)(?::(\d*))?(?::(\d+))?(?:;(\d+))?(?::(\d+))?u$/); + if (csiUMatch) { + const codepoint = parseInt(csiUMatch[1]!, 10); + const shiftedKey = csiUMatch[2] && csiUMatch[2].length > 0 ? parseInt(csiUMatch[2], 10) : undefined; + const baseLayoutKey = csiUMatch[3] ? parseInt(csiUMatch[3], 10) : undefined; + const modValue = csiUMatch[4] ? 
parseInt(csiUMatch[4], 10) : 1; + const eventType = parseEventType(csiUMatch[5]); + _lastEventType = eventType; + return { codepoint, shiftedKey, baseLayoutKey, modifier: modValue - 1, eventType }; + } + + // Arrow keys with modifier: \x1b[1;A/B/C/D or \x1b[1;:A/B/C/D + const arrowMatch = data.match(/^\x1b\[1;(\d+)(?::(\d+))?([ABCD])$/); + if (arrowMatch) { + const modValue = parseInt(arrowMatch[1]!, 10); + const eventType = parseEventType(arrowMatch[2]); + const arrowCodes: Record = { A: -1, B: -2, C: -3, D: -4 }; + _lastEventType = eventType; + return { codepoint: arrowCodes[arrowMatch[3]!]!, modifier: modValue - 1, eventType }; + } + + // Functional keys: \x1b[~ or \x1b[;~ or \x1b[;:~ + const funcMatch = data.match(/^\x1b\[(\d+)(?:;(\d+))?(?::(\d+))?~$/); + if (funcMatch) { + const keyNum = parseInt(funcMatch[1]!, 10); + const modValue = funcMatch[2] ? parseInt(funcMatch[2], 10) : 1; + const eventType = parseEventType(funcMatch[3]); + const funcCodes: Record = { + 2: FUNCTIONAL_CODEPOINTS.insert, + 3: FUNCTIONAL_CODEPOINTS.delete, + 5: FUNCTIONAL_CODEPOINTS.pageUp, + 6: FUNCTIONAL_CODEPOINTS.pageDown, + 7: FUNCTIONAL_CODEPOINTS.home, + 8: FUNCTIONAL_CODEPOINTS.end, + }; + const codepoint = funcCodes[keyNum]; + if (codepoint !== undefined) { + _lastEventType = eventType; + return { codepoint, modifier: modValue - 1, eventType }; + } + } + + // Home/End with modifier: \x1b[1;H/F or \x1b[1;:H/F + const homeEndMatch = data.match(/^\x1b\[1;(\d+)(?::(\d+))?([HF])$/); + if (homeEndMatch) { + const modValue = parseInt(homeEndMatch[1]!, 10); + const eventType = parseEventType(homeEndMatch[2]); + const codepoint = homeEndMatch[3] === "H" ? 
FUNCTIONAL_CODEPOINTS.home : FUNCTIONAL_CODEPOINTS.end; + _lastEventType = eventType; + return { codepoint, modifier: modValue - 1, eventType }; + } + + return null; +} + +function matchesKittySequence(data: string, expectedCodepoint: number, expectedModifier: number): boolean { + const parsed = parseKittySequence(data); + if (!parsed) return false; + const actualMod = parsed.modifier & ~LOCK_MASK; + const expectedMod = expectedModifier & ~LOCK_MASK; + + // Check if modifiers match + if (actualMod !== expectedMod) return false; + + // Primary match: codepoint matches directly + if (parsed.codepoint === expectedCodepoint) return true; + + // Alternate match: use base layout key for non-Latin keyboard layouts. + // This allows Ctrl+С (Cyrillic) to match Ctrl+c (Latin) when terminal reports + // the base layout key (the key in standard PC-101 layout). + // + // Only fall back to base layout key when the codepoint is NOT already a + // recognized Latin letter (a-z) or symbol (e.g., /, -, [, ;, etc.). + // When the codepoint is a recognized key, it is authoritative regardless + // of physical key position. This prevents remapped layouts (Dvorak, Colemak, + // xremap, etc.) from causing false matches: both letters and symbols move + // to different physical positions, so Ctrl+K could falsely match Ctrl+V + // (letter remapping) and Ctrl+/ could falsely match Ctrl+[ (symbol remapping) + // if the base layout key were always considered. 
+ if (parsed.baseLayoutKey !== undefined && parsed.baseLayoutKey === expectedCodepoint) { + const cp = parsed.codepoint; + const isLatinLetter = cp >= 97 && cp <= 122; // a-z + const isKnownSymbol = SYMBOL_KEYS.has(String.fromCharCode(cp)); + if (!isLatinLetter && !isKnownSymbol) return true; + } + + return false; +} + +function parseModifyOtherKeysSequence(data: string): ParsedModifyOtherKeysSequence | null { + const match = data.match(/^\x1b\[27;(\d+);(\d+)~$/); + if (!match) return null; + const modValue = parseInt(match[1]!, 10); + const codepoint = parseInt(match[2]!, 10); + return { codepoint, modifier: modValue - 1 }; +} + +/** + * Match xterm modifyOtherKeys format: CSI 27 ; modifiers ; keycode ~ + * This is used by terminals when Kitty protocol is not enabled. + * Modifier values are 1-indexed: 2=shift, 3=alt, 5=ctrl, etc. + */ +function matchesModifyOtherKeys(data: string, expectedKeycode: number, expectedModifier: number): boolean { + const parsed = parseModifyOtherKeysSequence(data); + if (!parsed) return false; + return parsed.codepoint === expectedKeycode && parsed.modifier === expectedModifier; +} + +// ============================================================================= +// Generic Key Matching +// ============================================================================= + +/** + * Get the control character for a key. 
+ * Uses the universal formula: code & 0x1f (mask to lower 5 bits) + * + * Works for: + * - Letters a-z → 1-26 + * - Symbols [\]_ → 27, 28, 29, 31 + * - Also maps - to same as _ (same physical key on US keyboards) + */ +function rawCtrlChar(key: string): string | null { + const char = key.toLowerCase(); + const code = char.charCodeAt(0); + if ((code >= 97 && code <= 122) || char === "[" || char === "\\" || char === "]" || char === "_") { + return String.fromCharCode(code & 0x1f); + } + // Handle - as _ (same physical key on US keyboards) + if (char === "-") { + return String.fromCharCode(31); // Same as Ctrl+_ + } + return null; +} + +function isDigitKey(key: string): boolean { + return key >= "0" && key <= "9"; +} + +function matchesPrintableModifyOtherKeys(data: string, expectedKeycode: number, expectedModifier: number): boolean { + if (expectedModifier === 0) return false; + return matchesModifyOtherKeys(data, expectedKeycode, expectedModifier); +} + +function formatKeyNameWithModifiers(keyName: string, modifier: number): string | undefined { + const mods: string[] = []; + const effectiveMod = modifier & ~LOCK_MASK; + const supportedModifierMask = MODIFIERS.shift | MODIFIERS.ctrl | MODIFIERS.alt; + if ((effectiveMod & ~supportedModifierMask) !== 0) return undefined; + if (effectiveMod & MODIFIERS.shift) mods.push("shift"); + if (effectiveMod & MODIFIERS.ctrl) mods.push("ctrl"); + if (effectiveMod & MODIFIERS.alt) mods.push("alt"); + return mods.length > 0 ? `${mods.join("+")}+${keyName}` : keyName; +} + +function parseKeyId(keyId: string): { key: string; ctrl: boolean; shift: boolean; alt: boolean } | null { + const parts = keyId.toLowerCase().split("+"); + const key = parts[parts.length - 1]; + if (!key) return null; + return { + key, + ctrl: parts.includes("ctrl"), + shift: parts.includes("shift"), + alt: parts.includes("alt"), + }; +} + +/** + * Match input data against a key identifier string. 
+ * + * Supported key identifiers: + * - Single keys: "escape", "tab", "enter", "backspace", "delete", "home", "end", "space" + * - Arrow keys: "up", "down", "left", "right" + * - Ctrl combinations: "ctrl+c", "ctrl+z", etc. + * - Shift combinations: "shift+tab", "shift+enter" + * - Alt combinations: "alt+enter", "alt+backspace" + * - Combined modifiers: "shift+ctrl+p", "ctrl+alt+x" + * + * Use the Key helper for autocomplete: Key.ctrl("c"), Key.escape, Key.ctrlShift("p") + * + * @param data - Raw input data from terminal + * @param keyId - Key identifier (e.g., "ctrl+c", "escape", Key.ctrl("c")) + */ +export function matchesKey(data: string, keyId: KeyId): boolean { + const parsed = parseKeyId(keyId); + if (!parsed) return false; + + const { key, ctrl, shift, alt } = parsed; + let modifier = 0; + if (shift) modifier |= MODIFIERS.shift; + if (alt) modifier |= MODIFIERS.alt; + if (ctrl) modifier |= MODIFIERS.ctrl; + + switch (key) { + case "escape": + case "esc": + if (modifier !== 0) return false; + return data === "\x1b" || matchesKittySequence(data, CODEPOINTS.escape, 0); + + case "space": + if (!_kittyProtocolActive) { + if (ctrl && !alt && !shift && data === "\x00") { + return true; + } + if (alt && !ctrl && !shift && data === "\x1b ") { + return true; + } + } + if (modifier === 0) { + return data === " " || matchesKittySequence(data, CODEPOINTS.space, 0); + } + return matchesKittySequence(data, CODEPOINTS.space, modifier); + + case "tab": + if (shift && !ctrl && !alt) { + return data === "\x1b[Z" || matchesKittySequence(data, CODEPOINTS.tab, MODIFIERS.shift); + } + if (modifier === 0) { + return data === "\t" || matchesKittySequence(data, CODEPOINTS.tab, 0); + } + return matchesKittySequence(data, CODEPOINTS.tab, modifier); + + case "enter": + case "return": + if (shift && !ctrl && !alt) { + // CSI u sequences (standard Kitty protocol) + if ( + matchesKittySequence(data, CODEPOINTS.enter, MODIFIERS.shift) || + matchesKittySequence(data, CODEPOINTS.kpEnter, 
MODIFIERS.shift) + ) { + return true; + } + // xterm modifyOtherKeys format (fallback when Kitty protocol not enabled) + if (matchesModifyOtherKeys(data, CODEPOINTS.enter, MODIFIERS.shift)) { + return true; + } + // When Kitty protocol is active, legacy sequences are custom terminal mappings + // \x1b\r = Kitty's "map shift+enter send_text all \e\r" + // \n = Ghostty's "keybind = shift+enter=text:\n" + if (_kittyProtocolActive) { + return data === "\x1b\r" || data === "\n"; + } + return false; + } + if (alt && !ctrl && !shift) { + // CSI u sequences (standard Kitty protocol) + if ( + matchesKittySequence(data, CODEPOINTS.enter, MODIFIERS.alt) || + matchesKittySequence(data, CODEPOINTS.kpEnter, MODIFIERS.alt) + ) { + return true; + } + // xterm modifyOtherKeys format (fallback when Kitty protocol not enabled) + if (matchesModifyOtherKeys(data, CODEPOINTS.enter, MODIFIERS.alt)) { + return true; + } + // \x1b\r is alt+enter only in legacy mode (no Kitty protocol) + // When Kitty protocol is active, alt+enter comes as CSI u sequence + if (!_kittyProtocolActive) { + return data === "\x1b\r"; + } + return false; + } + if (modifier === 0) { + return ( + data === "\r" || + (!_kittyProtocolActive && data === "\n") || + data === "\x1bOM" || // SS3 M (numpad enter in some terminals) + matchesKittySequence(data, CODEPOINTS.enter, 0) || + matchesKittySequence(data, CODEPOINTS.kpEnter, 0) + ); + } + return ( + matchesKittySequence(data, CODEPOINTS.enter, modifier) || + matchesKittySequence(data, CODEPOINTS.kpEnter, modifier) || + matchesModifyOtherKeys(data, CODEPOINTS.enter, modifier) + ); + + case "backspace": + if (alt && !ctrl && !shift) { + if (data === "\x1b\x7f" || data === "\x1b\b") { + return true; + } + return matchesKittySequence(data, CODEPOINTS.backspace, MODIFIERS.alt); + } + if (modifier === 0) { + return data === "\x7f" || data === "\x08" || matchesKittySequence(data, CODEPOINTS.backspace, 0); + } + return matchesKittySequence(data, CODEPOINTS.backspace, 
modifier); + + case "insert": + if (modifier === 0) { + return ( + matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.insert) || + matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.insert, 0) + ); + } + if (matchesLegacyModifierSequence(data, "insert", modifier)) { + return true; + } + return matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.insert, modifier); + + case "delete": + if (modifier === 0) { + return ( + matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.delete) || + matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.delete, 0) + ); + } + if (matchesLegacyModifierSequence(data, "delete", modifier)) { + return true; + } + return matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.delete, modifier); + + case "clear": + if (modifier === 0) { + return matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.clear); + } + return matchesLegacyModifierSequence(data, "clear", modifier); + + case "home": + if (modifier === 0) { + return ( + matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.home) || + matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.home, 0) + ); + } + if (matchesLegacyModifierSequence(data, "home", modifier)) { + return true; + } + return matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.home, modifier); + + case "end": + if (modifier === 0) { + return ( + matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.end) || + matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.end, 0) + ); + } + if (matchesLegacyModifierSequence(data, "end", modifier)) { + return true; + } + return matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.end, modifier); + + case "pageup": + if (modifier === 0) { + return ( + matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.pageUp) || + matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.pageUp, 0) + ); + } + if (matchesLegacyModifierSequence(data, "pageUp", modifier)) { + return true; + } + return matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.pageUp, modifier); + + case "pagedown": + if (modifier === 0) { + return ( + matchesLegacySequence(data, 
LEGACY_KEY_SEQUENCES.pageDown) || + matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.pageDown, 0) + ); + } + if (matchesLegacyModifierSequence(data, "pageDown", modifier)) { + return true; + } + return matchesKittySequence(data, FUNCTIONAL_CODEPOINTS.pageDown, modifier); + + case "up": + if (alt && !ctrl && !shift) { + return data === "\x1bp" || matchesKittySequence(data, ARROW_CODEPOINTS.up, MODIFIERS.alt); + } + if (modifier === 0) { + return ( + matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.up) || + matchesKittySequence(data, ARROW_CODEPOINTS.up, 0) + ); + } + if (matchesLegacyModifierSequence(data, "up", modifier)) { + return true; + } + return matchesKittySequence(data, ARROW_CODEPOINTS.up, modifier); + + case "down": + if (alt && !ctrl && !shift) { + return data === "\x1bn" || matchesKittySequence(data, ARROW_CODEPOINTS.down, MODIFIERS.alt); + } + if (modifier === 0) { + return ( + matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.down) || + matchesKittySequence(data, ARROW_CODEPOINTS.down, 0) + ); + } + if (matchesLegacyModifierSequence(data, "down", modifier)) { + return true; + } + return matchesKittySequence(data, ARROW_CODEPOINTS.down, modifier); + + case "left": + if (alt && !ctrl && !shift) { + return ( + data === "\x1b[1;3D" || + (!_kittyProtocolActive && data === "\x1bB") || + data === "\x1bb" || + matchesKittySequence(data, ARROW_CODEPOINTS.left, MODIFIERS.alt) + ); + } + if (ctrl && !alt && !shift) { + return ( + data === "\x1b[1;5D" || + matchesLegacyModifierSequence(data, "left", MODIFIERS.ctrl) || + matchesKittySequence(data, ARROW_CODEPOINTS.left, MODIFIERS.ctrl) + ); + } + if (modifier === 0) { + return ( + matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.left) || + matchesKittySequence(data, ARROW_CODEPOINTS.left, 0) + ); + } + if (matchesLegacyModifierSequence(data, "left", modifier)) { + return true; + } + return matchesKittySequence(data, ARROW_CODEPOINTS.left, modifier); + + case "right": + if (alt && !ctrl && !shift) { + return ( + 
data === "\x1b[1;3C" || + (!_kittyProtocolActive && data === "\x1bF") || + data === "\x1bf" || + matchesKittySequence(data, ARROW_CODEPOINTS.right, MODIFIERS.alt) + ); + } + if (ctrl && !alt && !shift) { + return ( + data === "\x1b[1;5C" || + matchesLegacyModifierSequence(data, "right", MODIFIERS.ctrl) || + matchesKittySequence(data, ARROW_CODEPOINTS.right, MODIFIERS.ctrl) + ); + } + if (modifier === 0) { + return ( + matchesLegacySequence(data, LEGACY_KEY_SEQUENCES.right) || + matchesKittySequence(data, ARROW_CODEPOINTS.right, 0) + ); + } + if (matchesLegacyModifierSequence(data, "right", modifier)) { + return true; + } + return matchesKittySequence(data, ARROW_CODEPOINTS.right, modifier); + + case "f1": + case "f2": + case "f3": + case "f4": + case "f5": + case "f6": + case "f7": + case "f8": + case "f9": + case "f10": + case "f11": + case "f12": { + if (modifier !== 0) { + return false; + } + const functionKey = key as keyof typeof LEGACY_KEY_SEQUENCES; + return matchesLegacySequence(data, LEGACY_KEY_SEQUENCES[functionKey]); + } + } + + // Handle single letter/digit keys and symbols + if (key.length === 1 && ((key >= "a" && key <= "z") || isDigitKey(key) || SYMBOL_KEYS.has(key))) { + const codepoint = key.charCodeAt(0); + const rawCtrl = rawCtrlChar(key); + const isLetter = key >= "a" && key <= "z"; + const isDigit = isDigitKey(key); + + if (ctrl && alt && !shift && !_kittyProtocolActive && rawCtrl) { + // Legacy: ctrl+alt+key is ESC followed by the control character + return data === `\x1b${rawCtrl}`; + } + + if (alt && !ctrl && !shift && !_kittyProtocolActive && (isLetter || isDigit)) { + // Legacy: alt+letter/digit is ESC followed by the key + if (data === `\x1b${key}`) return true; + } + + if (ctrl && !shift && !alt) { + // Legacy: ctrl+key sends the control character + if (rawCtrl && data === rawCtrl) return true; + return ( + matchesKittySequence(data, codepoint, MODIFIERS.ctrl) || + matchesPrintableModifyOtherKeys(data, codepoint, MODIFIERS.ctrl) + ); + } 
+ + if (ctrl && shift && !alt) { + return ( + matchesKittySequence(data, codepoint, MODIFIERS.shift + MODIFIERS.ctrl) || + matchesPrintableModifyOtherKeys(data, codepoint, MODIFIERS.shift + MODIFIERS.ctrl) + ); + } + + if (shift && !ctrl && !alt) { + // Legacy: shift+letter produces uppercase + if (isLetter && data === key.toUpperCase()) return true; + return ( + matchesKittySequence(data, codepoint, MODIFIERS.shift) || + matchesPrintableModifyOtherKeys(data, codepoint, MODIFIERS.shift) + ); + } + + if (modifier !== 0) { + return ( + matchesKittySequence(data, codepoint, modifier) || + matchesPrintableModifyOtherKeys(data, codepoint, modifier) + ); + } + + // Check both raw char and Kitty sequence (needed for release events) + return data === key || matchesKittySequence(data, codepoint, 0); + } + + return false; +} + +/** + * Parse input data and return the key identifier if recognized. + * + * @param data - Raw input data from terminal + * @returns Key identifier string (e.g., "ctrl+c") or undefined + */ +function formatParsedKey(codepoint: number, modifier: number, baseLayoutKey?: number): string | undefined { + // Use base layout key only when codepoint is not a recognized Latin + // letter (a-z), digit (0-9), or symbol (/, -, [, ;, etc.). For those, + // the codepoint is authoritative regardless of physical key position. + // This prevents remapped layouts (Dvorak, Colemak, xremap, etc.) from + // reporting the wrong key name based on the QWERTY physical position. + const isLatinLetter = codepoint >= 97 && codepoint <= 122; // a-z + const isDigit = codepoint >= 48 && codepoint <= 57; // 0-9 + const isKnownSymbol = SYMBOL_KEYS.has(String.fromCharCode(codepoint)); + const effectiveCodepoint = isLatinLetter || isDigit || isKnownSymbol ? codepoint : (baseLayoutKey ?? 
codepoint); + + let keyName: string | undefined; + if (effectiveCodepoint === CODEPOINTS.escape) keyName = "escape"; + else if (effectiveCodepoint === CODEPOINTS.tab) keyName = "tab"; + else if (effectiveCodepoint === CODEPOINTS.enter || effectiveCodepoint === CODEPOINTS.kpEnter) keyName = "enter"; + else if (effectiveCodepoint === CODEPOINTS.space) keyName = "space"; + else if (effectiveCodepoint === CODEPOINTS.backspace) keyName = "backspace"; + else if (effectiveCodepoint === FUNCTIONAL_CODEPOINTS.delete) keyName = "delete"; + else if (effectiveCodepoint === FUNCTIONAL_CODEPOINTS.insert) keyName = "insert"; + else if (effectiveCodepoint === FUNCTIONAL_CODEPOINTS.home) keyName = "home"; + else if (effectiveCodepoint === FUNCTIONAL_CODEPOINTS.end) keyName = "end"; + else if (effectiveCodepoint === FUNCTIONAL_CODEPOINTS.pageUp) keyName = "pageUp"; + else if (effectiveCodepoint === FUNCTIONAL_CODEPOINTS.pageDown) keyName = "pageDown"; + else if (effectiveCodepoint === ARROW_CODEPOINTS.up) keyName = "up"; + else if (effectiveCodepoint === ARROW_CODEPOINTS.down) keyName = "down"; + else if (effectiveCodepoint === ARROW_CODEPOINTS.left) keyName = "left"; + else if (effectiveCodepoint === ARROW_CODEPOINTS.right) keyName = "right"; + else if (effectiveCodepoint >= 48 && effectiveCodepoint <= 57) keyName = String.fromCharCode(effectiveCodepoint); + else if (effectiveCodepoint >= 97 && effectiveCodepoint <= 122) keyName = String.fromCharCode(effectiveCodepoint); + else if (SYMBOL_KEYS.has(String.fromCharCode(effectiveCodepoint))) keyName = String.fromCharCode(effectiveCodepoint); + + if (!keyName) return undefined; + return formatKeyNameWithModifiers(keyName, modifier); +} + +export function parseKey(data: string): string | undefined { + const kitty = parseKittySequence(data); + if (kitty) { + return formatParsedKey(kitty.codepoint, kitty.modifier, kitty.baseLayoutKey); + } + + const modifyOtherKeys = parseModifyOtherKeysSequence(data); + if (modifyOtherKeys) { + return 
formatParsedKey(modifyOtherKeys.codepoint, modifyOtherKeys.modifier); + } + + // Mode-aware legacy sequences + // When Kitty protocol is active, ambiguous sequences are interpreted as custom terminal mappings: + // - \x1b\r = shift+enter (Kitty mapping), not alt+enter + // - \n = shift+enter (Ghostty mapping) + if (_kittyProtocolActive) { + if (data === "\x1b\r" || data === "\n") return "shift+enter"; + } + + const legacySequenceKeyId = LEGACY_SEQUENCE_KEY_IDS[data]; + if (legacySequenceKeyId) return legacySequenceKeyId; + + // Legacy sequences (used when Kitty protocol is not active, or for unambiguous sequences) + if (data === "\x1b") return "escape"; + if (data === "\x1c") return "ctrl+\\"; + if (data === "\x1d") return "ctrl+]"; + if (data === "\x1f") return "ctrl+-"; + if (data === "\x1b\x1b") return "ctrl+alt+["; + if (data === "\x1b\x1c") return "ctrl+alt+\\"; + if (data === "\x1b\x1d") return "ctrl+alt+]"; + if (data === "\x1b\x1f") return "ctrl+alt+-"; + if (data === "\t") return "tab"; + if (data === "\r" || (!_kittyProtocolActive && data === "\n") || data === "\x1bOM") return "enter"; + if (data === "\x00") return "ctrl+space"; + if (data === " ") return "space"; + if (data === "\x7f" || data === "\x08") return "backspace"; + if (data === "\x1b[Z") return "shift+tab"; + if (!_kittyProtocolActive && data === "\x1b\r") return "alt+enter"; + if (!_kittyProtocolActive && data === "\x1b ") return "alt+space"; + if (data === "\x1b\x7f" || data === "\x1b\b") return "alt+backspace"; + if (!_kittyProtocolActive && data === "\x1bB") return "alt+left"; + if (!_kittyProtocolActive && data === "\x1bF") return "alt+right"; + if (!_kittyProtocolActive && data.length === 2 && data[0] === "\x1b") { + const code = data.charCodeAt(1); + if (code >= 1 && code <= 26) { + return `ctrl+alt+${String.fromCharCode(code + 96)}`; + } + // Legacy alt+letter/digit (ESC followed by the key) + if ((code >= 97 && code <= 122) || (code >= 48 && code <= 57)) { + return 
`alt+${String.fromCharCode(code)}`; + } + } + if (data === "\x1b[A") return "up"; + if (data === "\x1b[B") return "down"; + if (data === "\x1b[C") return "right"; + if (data === "\x1b[D") return "left"; + if (data === "\x1b[H" || data === "\x1bOH") return "home"; + if (data === "\x1b[F" || data === "\x1bOF") return "end"; + if (data === "\x1b[3~") return "delete"; + if (data === "\x1b[5~") return "pageUp"; + if (data === "\x1b[6~") return "pageDown"; + + // Raw Ctrl+letter + if (data.length === 1) { + const code = data.charCodeAt(0); + if (code >= 1 && code <= 26) { + return `ctrl+${String.fromCharCode(code + 96)}`; + } + if (code >= 32 && code <= 126) { + return data; + } + } + + return undefined; +} + +// ============================================================================= +// Kitty CSI-u Printable Decoding +// ============================================================================= + +const KITTY_CSI_U_REGEX = /^\x1b\[(\d+)(?::(\d*))?(?::(\d+))?(?:;(\d+))?(?::(\d+))?u$/; +const KITTY_PRINTABLE_ALLOWED_MODIFIERS = MODIFIERS.shift | LOCK_MASK; + +/** + * Decode a Kitty CSI-u sequence into a printable character, if applicable. + * + * When Kitty keyboard protocol flag 1 (disambiguate) is active, terminals send + * CSI-u sequences for all keys, including plain printable characters. This + * function extracts the printable character from such sequences. + * + * Only accepts plain or Shift-modified keys. Rejects Ctrl, Alt, and unsupported + * modifier combinations (those are handled by keybinding matching instead). + * Prefers the shifted keycode when Shift is held and a shifted key is reported. 
+ * + * @param data - Raw input data from terminal + * @returns The printable character, or undefined if not a printable CSI-u sequence + */ +export function decodeKittyPrintable(data: string): string | undefined { + const match = data.match(KITTY_CSI_U_REGEX); + if (!match) return undefined; + + // CSI-u groups: [:[:]];[:]u + const codepoint = Number.parseInt(match[1] ?? "", 10); + if (!Number.isFinite(codepoint)) return undefined; + + const shiftedKey = match[2] && match[2].length > 0 ? Number.parseInt(match[2], 10) : undefined; + const modValue = match[4] ? Number.parseInt(match[4], 10) : 1; + // Modifiers are 1-indexed in CSI-u; normalize to our bitmask. + const modifier = Number.isFinite(modValue) ? modValue - 1 : 0; + + // Only accept printable CSI-u input for plain or Shift-modified text keys. + // Reject unsupported modifier bits (e.g. Super/Meta) to avoid inserting + // characters from modifier-only terminal events. + if ((modifier & ~KITTY_PRINTABLE_ALLOWED_MODIFIERS) !== 0) return undefined; + if (modifier & (MODIFIERS.alt | MODIFIERS.ctrl)) return undefined; + + // Prefer the shifted keycode when Shift is held. + let effectiveCodepoint = codepoint; + if (modifier & MODIFIERS.shift && typeof shiftedKey === "number") { + effectiveCodepoint = shiftedKey; + } + // Drop control characters or invalid codepoints. + if (!Number.isFinite(effectiveCodepoint) || effectiveCodepoint < 32) return undefined; + + try { + return String.fromCodePoint(effectiveCodepoint); + } catch { + return undefined; + } +} diff --git a/packages/pi-tui/src/kill-ring.ts b/packages/pi-tui/src/kill-ring.ts new file mode 100644 index 000000000..2292f91aa --- /dev/null +++ b/packages/pi-tui/src/kill-ring.ts @@ -0,0 +1,46 @@ +/** + * Ring buffer for Emacs-style kill/yank operations. + * + * Tracks killed (deleted) text entries. Consecutive kills can accumulate + * into a single entry. Supports yank (paste most recent) and yank-pop + * (cycle through older entries). 
+ */ +export class KillRing { + private ring: string[] = []; + + /** + * Add text to the kill ring. + * + * @param text - The killed text to add + * @param opts - Push options + * @param opts.prepend - If accumulating, prepend (backward deletion) or append (forward deletion) + * @param opts.accumulate - Merge with the most recent entry instead of creating a new one + */ + push(text: string, opts: { prepend: boolean; accumulate?: boolean }): void { + if (!text) return; + + if (opts.accumulate && this.ring.length > 0) { + const last = this.ring.pop()!; + this.ring.push(opts.prepend ? text + last : last + text); + } else { + this.ring.push(text); + } + } + + /** Get most recent entry without modifying the ring. */ + peek(): string | undefined { + return this.ring.length > 0 ? this.ring[this.ring.length - 1] : undefined; + } + + /** Move last entry to front (for yank-pop cycling). */ + rotate(): void { + if (this.ring.length > 1) { + const last = this.ring.pop()!; + this.ring.unshift(last); + } + } + + get length(): number { + return this.ring.length; + } +} diff --git a/packages/pi-tui/src/stdin-buffer.ts b/packages/pi-tui/src/stdin-buffer.ts new file mode 100644 index 000000000..5b2f977b0 --- /dev/null +++ b/packages/pi-tui/src/stdin-buffer.ts @@ -0,0 +1,386 @@ +/** + * StdinBuffer buffers input and emits complete sequences. + * + * This is necessary because stdin data events can arrive in partial chunks, + * especially for escape sequences like mouse events. Without buffering, + * partial sequences can be misinterpreted as regular keypresses. + * + * For example, the mouse SGR sequence `\x1b[<35;20;5m` might arrive as: + * - Event 1: `\x1b` + * - Event 2: `[<35` + * - Event 3: `;20;5m` + * + * The buffer accumulates these until a complete sequence is detected. + * Call the `process()` method to feed input data. 
+ * + * Based on code from OpenTUI (https://github.com/anomalyco/opentui) + * MIT License - Copyright (c) 2025 opentui + */ + +import { EventEmitter } from "events"; + +const ESC = "\x1b"; +const BRACKETED_PASTE_START = "\x1b[200~"; +const BRACKETED_PASTE_END = "\x1b[201~"; + +/** + * Check if a string is a complete escape sequence or needs more data + */ +function isCompleteSequence(data: string): "complete" | "incomplete" | "not-escape" { + if (!data.startsWith(ESC)) { + return "not-escape"; + } + + if (data.length === 1) { + return "incomplete"; + } + + const afterEsc = data.slice(1); + + // CSI sequences: ESC [ + if (afterEsc.startsWith("[")) { + // Check for old-style mouse sequence: ESC[M + 3 bytes + if (afterEsc.startsWith("[M")) { + // Old-style mouse needs ESC[M + 3 bytes = 6 total + return data.length >= 6 ? "complete" : "incomplete"; + } + return isCompleteCsiSequence(data); + } + + // OSC sequences: ESC ] + if (afterEsc.startsWith("]")) { + return isCompleteOscSequence(data); + } + + // DCS sequences: ESC P ... ESC \ (includes XTVersion responses) + if (afterEsc.startsWith("P")) { + return isCompleteDcsSequence(data); + } + + // APC sequences: ESC _ ... ESC \ (includes Kitty graphics responses) + if (afterEsc.startsWith("_")) { + return isCompleteApcSequence(data); + } + + // SS3 sequences: ESC O + if (afterEsc.startsWith("O")) { + // ESC O followed by a single character + return afterEsc.length >= 2 ? "complete" : "incomplete"; + } + + // Meta key sequences: ESC followed by a single character + if (afterEsc.length === 1) { + return "complete"; + } + + // Unknown escape sequence - treat as complete + return "complete"; +} + +/** + * Check if CSI sequence is complete + * CSI sequences: ESC [ ... 
followed by a final byte (0x40-0x7E) + */ +function isCompleteCsiSequence(data: string): "complete" | "incomplete" { + if (!data.startsWith(`${ESC}[`)) { + return "complete"; + } + + // Need at least ESC [ and one more character + if (data.length < 3) { + return "incomplete"; + } + + const payload = data.slice(2); + + // CSI sequences end with a byte in the range 0x40-0x7E (@-~) + // This includes all letters and several special characters + const lastChar = payload[payload.length - 1]; + const lastCharCode = lastChar.charCodeAt(0); + + if (lastCharCode >= 0x40 && lastCharCode <= 0x7e) { + // Special handling for SGR mouse sequences + // Format: ESC[ /^\d+$/.test(p))) { + return "complete"; + } + } + + return "incomplete"; + } + + return "complete"; + } + + return "incomplete"; +} + +/** + * Check if OSC sequence is complete + * OSC sequences: ESC ] ... ST (where ST is ESC \ or BEL) + */ +function isCompleteOscSequence(data: string): "complete" | "incomplete" { + if (!data.startsWith(`${ESC}]`)) { + return "complete"; + } + + // OSC sequences end with ST (ESC \) or BEL (\x07) + if (data.endsWith(`${ESC}\\`) || data.endsWith("\x07")) { + return "complete"; + } + + return "incomplete"; +} + +/** + * Check if DCS (Device Control String) sequence is complete + * DCS sequences: ESC P ... ST (where ST is ESC \) + * Used for XTVersion responses like ESC P >| ... ESC \ + */ +function isCompleteDcsSequence(data: string): "complete" | "incomplete" { + if (!data.startsWith(`${ESC}P`)) { + return "complete"; + } + + // DCS sequences end with ST (ESC \) + if (data.endsWith(`${ESC}\\`)) { + return "complete"; + } + + return "incomplete"; +} + +/** + * Check if APC (Application Program Command) sequence is complete + * APC sequences: ESC _ ... ST (where ST is ESC \) + * Used for Kitty graphics responses like ESC _ G ... 
ESC \ + */ +function isCompleteApcSequence(data: string): "complete" | "incomplete" { + if (!data.startsWith(`${ESC}_`)) { + return "complete"; + } + + // APC sequences end with ST (ESC \) + if (data.endsWith(`${ESC}\\`)) { + return "complete"; + } + + return "incomplete"; +} + +/** + * Split accumulated buffer into complete sequences + */ +function extractCompleteSequences(buffer: string): { sequences: string[]; remainder: string } { + const sequences: string[] = []; + let pos = 0; + + while (pos < buffer.length) { + const remaining = buffer.slice(pos); + + // Try to extract a sequence starting at this position + if (remaining.startsWith(ESC)) { + // Find the end of this escape sequence + let seqEnd = 1; + while (seqEnd <= remaining.length) { + const candidate = remaining.slice(0, seqEnd); + const status = isCompleteSequence(candidate); + + if (status === "complete") { + sequences.push(candidate); + pos += seqEnd; + break; + } else if (status === "incomplete") { + seqEnd++; + } else { + // Should not happen when starting with ESC + sequences.push(candidate); + pos += seqEnd; + break; + } + } + + if (seqEnd > remaining.length) { + return { sequences, remainder: remaining }; + } + } else { + // Not an escape sequence - take a single character + sequences.push(remaining[0]!); + pos++; + } + } + + return { sequences, remainder: "" }; +} + +export type StdinBufferOptions = { + /** + * Maximum time to wait for sequence completion (default: 10ms) + * After this time, the buffer is flushed even if incomplete + */ + timeout?: number; +}; + +export type StdinBufferEventMap = { + data: [string]; + paste: [string]; +}; + +/** + * Buffers stdin input and emits complete sequences via the 'data' event. + * Handles partial escape sequences that arrive across multiple chunks. 
+ */ +export class StdinBuffer extends EventEmitter { + private buffer: string = ""; + private timeout: ReturnType | null = null; + private readonly timeoutMs: number; + private pasteMode: boolean = false; + private pasteBuffer: string = ""; + + constructor(options: StdinBufferOptions = {}) { + super(); + this.timeoutMs = options.timeout ?? 10; + } + + public process(data: string | Buffer): void { + // Clear any pending timeout + if (this.timeout) { + clearTimeout(this.timeout); + this.timeout = null; + } + + // Handle high-byte conversion (for compatibility with parseKeypress) + // If buffer has single byte > 127, convert to ESC + (byte - 128) + let str: string; + if (Buffer.isBuffer(data)) { + if (data.length === 1 && data[0]! > 127) { + const byte = data[0]! - 128; + str = `\x1b${String.fromCharCode(byte)}`; + } else { + str = data.toString(); + } + } else { + str = data; + } + + if (str.length === 0 && this.buffer.length === 0) { + this.emit("data", ""); + return; + } + + this.buffer += str; + + if (this.pasteMode) { + this.pasteBuffer += this.buffer; + this.buffer = ""; + + const endIndex = this.pasteBuffer.indexOf(BRACKETED_PASTE_END); + if (endIndex !== -1) { + const pastedContent = this.pasteBuffer.slice(0, endIndex); + const remaining = this.pasteBuffer.slice(endIndex + BRACKETED_PASTE_END.length); + + this.pasteMode = false; + this.pasteBuffer = ""; + + this.emit("paste", pastedContent); + + if (remaining.length > 0) { + this.process(remaining); + } + } + return; + } + + const startIndex = this.buffer.indexOf(BRACKETED_PASTE_START); + if (startIndex !== -1) { + if (startIndex > 0) { + const beforePaste = this.buffer.slice(0, startIndex); + const result = extractCompleteSequences(beforePaste); + for (const sequence of result.sequences) { + this.emit("data", sequence); + } + } + + this.buffer = this.buffer.slice(startIndex + BRACKETED_PASTE_START.length); + this.pasteMode = true; + this.pasteBuffer = this.buffer; + this.buffer = ""; + + const endIndex = 
this.pasteBuffer.indexOf(BRACKETED_PASTE_END); + if (endIndex !== -1) { + const pastedContent = this.pasteBuffer.slice(0, endIndex); + const remaining = this.pasteBuffer.slice(endIndex + BRACKETED_PASTE_END.length); + + this.pasteMode = false; + this.pasteBuffer = ""; + + this.emit("paste", pastedContent); + + if (remaining.length > 0) { + this.process(remaining); + } + } + return; + } + + const result = extractCompleteSequences(this.buffer); + this.buffer = result.remainder; + + for (const sequence of result.sequences) { + this.emit("data", sequence); + } + + if (this.buffer.length > 0) { + this.timeout = setTimeout(() => { + const flushed = this.flush(); + + for (const sequence of flushed) { + this.emit("data", sequence); + } + }, this.timeoutMs); + } + } + + flush(): string[] { + if (this.timeout) { + clearTimeout(this.timeout); + this.timeout = null; + } + + if (this.buffer.length === 0) { + return []; + } + + const sequences = [this.buffer]; + this.buffer = ""; + return sequences; + } + + clear(): void { + if (this.timeout) { + clearTimeout(this.timeout); + this.timeout = null; + } + this.buffer = ""; + this.pasteMode = false; + this.pasteBuffer = ""; + } + + getBuffer(): string { + return this.buffer; + } + + destroy(): void { + this.clear(); + } +} diff --git a/packages/pi-tui/src/terminal-image.ts b/packages/pi-tui/src/terminal-image.ts new file mode 100644 index 000000000..e706fedcf --- /dev/null +++ b/packages/pi-tui/src/terminal-image.ts @@ -0,0 +1,381 @@ +export type ImageProtocol = "kitty" | "iterm2" | null; + +export interface TerminalCapabilities { + images: ImageProtocol; + trueColor: boolean; + hyperlinks: boolean; +} + +export interface CellDimensions { + widthPx: number; + heightPx: number; +} + +export interface ImageDimensions { + widthPx: number; + heightPx: number; +} + +export interface ImageRenderOptions { + maxWidthCells?: number; + maxHeightCells?: number; + preserveAspectRatio?: boolean; + /** Kitty image ID. 
If provided, reuses/replaces existing image with this ID. */ + imageId?: number; +} + +let cachedCapabilities: TerminalCapabilities | null = null; + +// Default cell dimensions - updated by TUI when terminal responds to query +let cellDimensions: CellDimensions = { widthPx: 9, heightPx: 18 }; + +export function getCellDimensions(): CellDimensions { + return cellDimensions; +} + +export function setCellDimensions(dims: CellDimensions): void { + cellDimensions = dims; +} + +export function detectCapabilities(): TerminalCapabilities { + const termProgram = process.env.TERM_PROGRAM?.toLowerCase() || ""; + const term = process.env.TERM?.toLowerCase() || ""; + const colorTerm = process.env.COLORTERM?.toLowerCase() || ""; + + if (process.env.KITTY_WINDOW_ID || termProgram === "kitty") { + return { images: "kitty", trueColor: true, hyperlinks: true }; + } + + if (termProgram === "ghostty" || term.includes("ghostty") || process.env.GHOSTTY_RESOURCES_DIR) { + return { images: "kitty", trueColor: true, hyperlinks: true }; + } + + if (process.env.WEZTERM_PANE || termProgram === "wezterm") { + return { images: "kitty", trueColor: true, hyperlinks: true }; + } + + if (process.env.ITERM_SESSION_ID || termProgram === "iterm.app") { + return { images: "iterm2", trueColor: true, hyperlinks: true }; + } + + if (termProgram === "vscode") { + return { images: null, trueColor: true, hyperlinks: true }; + } + + if (termProgram === "alacritty") { + return { images: null, trueColor: true, hyperlinks: true }; + } + + const trueColor = colorTerm === "truecolor" || colorTerm === "24bit"; + return { images: null, trueColor, hyperlinks: true }; +} + +export function getCapabilities(): TerminalCapabilities { + if (!cachedCapabilities) { + cachedCapabilities = detectCapabilities(); + } + return cachedCapabilities; +} + +export function resetCapabilitiesCache(): void { + cachedCapabilities = null; +} + +const KITTY_PREFIX = "\x1b_G"; +const ITERM2_PREFIX = "\x1b]1337;File="; + +export function 
isImageLine(line: string): boolean { + // Fast path: sequence at line start (single-row images) + if (line.startsWith(KITTY_PREFIX) || line.startsWith(ITERM2_PREFIX)) { + return true; + } + // Slow path: sequence elsewhere (multi-row images have cursor-up prefix) + return line.includes(KITTY_PREFIX) || line.includes(ITERM2_PREFIX); +} + +/** + * Generate a random image ID for Kitty graphics protocol. + * Uses random IDs to avoid collisions between different module instances + * (e.g., main app vs extensions). + */ +export function allocateImageId(): number { + // Use random ID in range [1, 0xffffffff] to avoid collisions + return Math.floor(Math.random() * 0xfffffffe) + 1; +} + +export function encodeKitty( + base64Data: string, + options: { + columns?: number; + rows?: number; + imageId?: number; + } = {}, +): string { + const CHUNK_SIZE = 4096; + + const params: string[] = ["a=T", "f=100", "q=2"]; + + if (options.columns) params.push(`c=${options.columns}`); + if (options.rows) params.push(`r=${options.rows}`); + if (options.imageId) params.push(`i=${options.imageId}`); + + if (base64Data.length <= CHUNK_SIZE) { + return `\x1b_G${params.join(",")};${base64Data}\x1b\\`; + } + + const chunks: string[] = []; + let offset = 0; + let isFirst = true; + + while (offset < base64Data.length) { + const chunk = base64Data.slice(offset, offset + CHUNK_SIZE); + const isLast = offset + CHUNK_SIZE >= base64Data.length; + + if (isFirst) { + chunks.push(`\x1b_G${params.join(",")},m=1;${chunk}\x1b\\`); + isFirst = false; + } else if (isLast) { + chunks.push(`\x1b_Gm=0;${chunk}\x1b\\`); + } else { + chunks.push(`\x1b_Gm=1;${chunk}\x1b\\`); + } + + offset += CHUNK_SIZE; + } + + return chunks.join(""); +} + +/** + * Delete a Kitty graphics image by ID. + * Uses uppercase 'I' to also free the image data. + */ +export function deleteKittyImage(imageId: number): string { + return `\x1b_Ga=d,d=I,i=${imageId}\x1b\\`; +} + +/** + * Delete all visible Kitty graphics images. 
+ * Uses uppercase 'A' to also free the image data. + */ +export function deleteAllKittyImages(): string { + return `\x1b_Ga=d,d=A\x1b\\`; +} + +export function encodeITerm2( + base64Data: string, + options: { + width?: number | string; + height?: number | string; + name?: string; + preserveAspectRatio?: boolean; + inline?: boolean; + } = {}, +): string { + const params: string[] = [`inline=${options.inline !== false ? 1 : 0}`]; + + if (options.width !== undefined) params.push(`width=${options.width}`); + if (options.height !== undefined) params.push(`height=${options.height}`); + if (options.name) { + const nameBase64 = Buffer.from(options.name).toString("base64"); + params.push(`name=${nameBase64}`); + } + if (options.preserveAspectRatio === false) { + params.push("preserveAspectRatio=0"); + } + + return `\x1b]1337;File=${params.join(";")}:${base64Data}\x07`; +} + +export function calculateImageRows( + imageDimensions: ImageDimensions, + targetWidthCells: number, + cellDimensions: CellDimensions = { widthPx: 9, heightPx: 18 }, +): number { + const targetWidthPx = targetWidthCells * cellDimensions.widthPx; + const scale = targetWidthPx / imageDimensions.widthPx; + const scaledHeightPx = imageDimensions.heightPx * scale; + const rows = Math.ceil(scaledHeightPx / cellDimensions.heightPx); + return Math.max(1, rows); +} + +export function getPngDimensions(base64Data: string): ImageDimensions | null { + try { + const buffer = Buffer.from(base64Data, "base64"); + + if (buffer.length < 24) { + return null; + } + + if (buffer[0] !== 0x89 || buffer[1] !== 0x50 || buffer[2] !== 0x4e || buffer[3] !== 0x47) { + return null; + } + + const width = buffer.readUInt32BE(16); + const height = buffer.readUInt32BE(20); + + return { widthPx: width, heightPx: height }; + } catch { + return null; + } +} + +export function getJpegDimensions(base64Data: string): ImageDimensions | null { + try { + const buffer = Buffer.from(base64Data, "base64"); + + if (buffer.length < 2) { + return 
null; + } + + if (buffer[0] !== 0xff || buffer[1] !== 0xd8) { + return null; + } + + let offset = 2; + while (offset < buffer.length - 9) { + if (buffer[offset] !== 0xff) { + offset++; + continue; + } + + const marker = buffer[offset + 1]; + + if (marker >= 0xc0 && marker <= 0xc2) { + const height = buffer.readUInt16BE(offset + 5); + const width = buffer.readUInt16BE(offset + 7); + return { widthPx: width, heightPx: height }; + } + + if (offset + 3 >= buffer.length) { + return null; + } + const length = buffer.readUInt16BE(offset + 2); + if (length < 2) { + return null; + } + offset += 2 + length; + } + + return null; + } catch { + return null; + } +} + +export function getGifDimensions(base64Data: string): ImageDimensions | null { + try { + const buffer = Buffer.from(base64Data, "base64"); + + if (buffer.length < 10) { + return null; + } + + const sig = buffer.slice(0, 6).toString("ascii"); + if (sig !== "GIF87a" && sig !== "GIF89a") { + return null; + } + + const width = buffer.readUInt16LE(6); + const height = buffer.readUInt16LE(8); + + return { widthPx: width, heightPx: height }; + } catch { + return null; + } +} + +export function getWebpDimensions(base64Data: string): ImageDimensions | null { + try { + const buffer = Buffer.from(base64Data, "base64"); + + if (buffer.length < 30) { + return null; + } + + const riff = buffer.slice(0, 4).toString("ascii"); + const webp = buffer.slice(8, 12).toString("ascii"); + if (riff !== "RIFF" || webp !== "WEBP") { + return null; + } + + const chunk = buffer.slice(12, 16).toString("ascii"); + if (chunk === "VP8 ") { + if (buffer.length < 30) return null; + const width = buffer.readUInt16LE(26) & 0x3fff; + const height = buffer.readUInt16LE(28) & 0x3fff; + return { widthPx: width, heightPx: height }; + } else if (chunk === "VP8L") { + if (buffer.length < 25) return null; + const bits = buffer.readUInt32LE(21); + const width = (bits & 0x3fff) + 1; + const height = ((bits >> 14) & 0x3fff) + 1; + return { widthPx: width, 
heightPx: height }; + } else if (chunk === "VP8X") { + if (buffer.length < 30) return null; + const width = (buffer[24] | (buffer[25] << 8) | (buffer[26] << 16)) + 1; + const height = (buffer[27] | (buffer[28] << 8) | (buffer[29] << 16)) + 1; + return { widthPx: width, heightPx: height }; + } + + return null; + } catch { + return null; + } +} + +export function getImageDimensions(base64Data: string, mimeType: string): ImageDimensions | null { + if (mimeType === "image/png") { + return getPngDimensions(base64Data); + } + if (mimeType === "image/jpeg") { + return getJpegDimensions(base64Data); + } + if (mimeType === "image/gif") { + return getGifDimensions(base64Data); + } + if (mimeType === "image/webp") { + return getWebpDimensions(base64Data); + } + return null; +} + +export function renderImage( + base64Data: string, + imageDimensions: ImageDimensions, + options: ImageRenderOptions = {}, +): { sequence: string; rows: number; imageId?: number } | null { + const caps = getCapabilities(); + + if (!caps.images) { + return null; + } + + const maxWidth = options.maxWidthCells ?? 80; + const rows = calculateImageRows(imageDimensions, maxWidth, getCellDimensions()); + + if (caps.images === "kitty") { + // Only use imageId if explicitly provided - static images don't need IDs + const sequence = encodeKitty(base64Data, { columns: maxWidth, rows, imageId: options.imageId }); + return { sequence, rows, imageId: options.imageId }; + } + + if (caps.images === "iterm2") { + const sequence = encodeITerm2(base64Data, { + width: maxWidth, + height: "auto", + preserveAspectRatio: options.preserveAspectRatio ?? 
true, + }); + return { sequence, rows }; + } + + return null; +} + +export function imageFallback(mimeType: string, dimensions?: ImageDimensions, filename?: string): string { + const parts: string[] = []; + if (filename) parts.push(filename); + parts.push(`[${mimeType}]`); + if (dimensions) parts.push(`${dimensions.widthPx}x${dimensions.heightPx}`); + return `[Image: ${parts.join(" ")}]`; +} diff --git a/packages/pi-tui/src/terminal.ts b/packages/pi-tui/src/terminal.ts new file mode 100644 index 000000000..9f5cc17d9 --- /dev/null +++ b/packages/pi-tui/src/terminal.ts @@ -0,0 +1,349 @@ +import * as fs from "node:fs"; +import { createRequire } from "node:module"; +import { setKittyProtocolActive } from "./keys.js"; +import { StdinBuffer } from "./stdin-buffer.js"; + +const cjsRequire = createRequire(import.meta.url); + +/** + * Minimal terminal interface for TUI + */ +export interface Terminal { + // Start the terminal with input and resize handlers + start(onInput: (data: string) => void, onResize: () => void): void; + + // Stop the terminal and restore state + stop(): void; + + /** + * Drain stdin before exiting to prevent Kitty key release events from + * leaking to the parent shell over slow SSH connections. 
+ * @param maxMs - Maximum time to drain (default: 1000ms) + * @param idleMs - Exit early if no input arrives within this time (default: 50ms) + */ + drainInput(maxMs?: number, idleMs?: number): Promise; + + // Write output to terminal + write(data: string): void; + + // Get terminal dimensions + get columns(): number; + get rows(): number; + + // Whether Kitty keyboard protocol is active + get kittyProtocolActive(): boolean; + + // Cursor positioning (relative to current position) + moveBy(lines: number): void; // Move cursor up (negative) or down (positive) by N lines + + // Cursor visibility + hideCursor(): void; // Hide the cursor + showCursor(): void; // Show the cursor + + // Clear operations + clearLine(): void; // Clear current line + clearFromCursor(): void; // Clear from cursor to end of screen + clearScreen(): void; // Clear entire screen and move cursor to (0,0) + + // Title operations + setTitle(title: string): void; // Set terminal window title +} + +/** + * Real terminal using process.stdin/stdout + */ +export class ProcessTerminal implements Terminal { + private static _vtHandles: { GetConsoleMode: any; SetConsoleMode: any; handle: any } | null = null; + private wasRaw = false; + private inputHandler?: (data: string) => void; + private resizeHandler?: () => void; + private _kittyProtocolActive = false; + private _modifyOtherKeysActive = false; + private stdinBuffer?: StdinBuffer; + private stdinDataHandler?: (data: string) => void; + private writeLogPath = process.env.PI_TUI_WRITE_LOG || ""; + + get kittyProtocolActive(): boolean { + return this._kittyProtocolActive; + } + + start(onInput: (data: string) => void, onResize: () => void): void { + this.inputHandler = onInput; + this.resizeHandler = onResize; + + // Save previous state and enable raw mode + this.wasRaw = process.stdin.isRaw || false; + if (process.stdin.setRawMode) { + process.stdin.setRawMode(true); + } + process.stdin.setEncoding("utf8"); + process.stdin.resume(); + + // Enable 
bracketed paste mode - terminal will wrap pastes in \x1b[200~ ... \x1b[201~ + process.stdout.write("\x1b[?2004h"); + + // Set up resize handler immediately + process.stdout.on("resize", this.resizeHandler); + + // Refresh terminal dimensions - they may be stale after suspend/resume + // (SIGWINCH is lost while process is stopped). Unix only. + if (process.platform !== "win32") { + process.kill(process.pid, "SIGWINCH"); + } + + // On Windows, enable ENABLE_VIRTUAL_TERMINAL_INPUT so the console sends + // VT escape sequences (e.g. \x1b[Z for Shift+Tab) instead of raw console + // events that lose modifier information. Must run AFTER setRawMode(true) + // since that resets console mode flags. + this.enableWindowsVTInput(); + + // Query and enable Kitty keyboard protocol + // The query handler intercepts input temporarily, then installs the user's handler + // See: https://sw.kovidgoyal.net/kitty/keyboard-protocol/ + this.queryAndEnableKittyProtocol(); + } + + /** + * Set up StdinBuffer to split batched input into individual sequences. + * This ensures components receive single events, making matchesKey/isKeyRelease work correctly. + * + * Also watches for Kitty protocol response and enables it when detected. + * This is done here (after stdinBuffer parsing) rather than on raw stdin + * to handle the case where the response arrives split across multiple events. 
+ */ + private setupStdinBuffer(): void { + this.stdinBuffer = new StdinBuffer({ timeout: 10 }); + + // Kitty protocol response pattern: \x1b[?u + const kittyResponsePattern = /^\x1b\[\?(\d+)u$/; + + // Forward individual sequences to the input handler + this.stdinBuffer.on("data", (sequence) => { + // Check for Kitty protocol response (only if not already enabled) + if (!this._kittyProtocolActive) { + const match = sequence.match(kittyResponsePattern); + if (match) { + this._kittyProtocolActive = true; + setKittyProtocolActive(true); + + // Enable Kitty keyboard protocol (push flags) + // Flag 1 = disambiguate escape codes + // Flag 2 = report event types (press/repeat/release) + // Flag 4 = report alternate keys (shifted key, base layout key) + // Base layout key enables shortcuts to work with non-Latin keyboard layouts + process.stdout.write("\x1b[>7u"); + return; // Don't forward protocol response to TUI + } + } + + if (this.inputHandler) { + this.inputHandler(sequence); + } + }); + + // Re-wrap paste content with bracketed paste markers for existing editor handling + this.stdinBuffer.on("paste", (content) => { + if (this.inputHandler) { + this.inputHandler(`\x1b[200~${content}\x1b[201~`); + } + }); + + // Handler that pipes stdin data through the buffer + this.stdinDataHandler = (data: string) => { + this.stdinBuffer!.process(data); + }; + } + + /** + * Query terminal for Kitty keyboard protocol support and enable if available. + * + * Sends CSI ? u to query current flags. If terminal responds with CSI ? u, + * it supports the protocol and we enable it with CSI > 1 u. + * + * If no Kitty response arrives shortly after startup, fall back to enabling + * xterm modifyOtherKeys mode 2. This is needed for tmux, which can forward + * modified enter keys as CSI-u when extended-keys is enabled, but may not + * answer the Kitty protocol query. 
+ * + * The response is detected in setupStdinBuffer's data handler, which properly + * handles the case where the response arrives split across multiple stdin events. + */ + private queryAndEnableKittyProtocol(): void { + this.setupStdinBuffer(); + process.stdin.on("data", this.stdinDataHandler!); + process.stdout.write("\x1b[?u"); + setTimeout(() => { + if (!this._kittyProtocolActive && !this._modifyOtherKeysActive) { + process.stdout.write("\x1b[>4;2m"); + this._modifyOtherKeysActive = true; + } + }, 150); + } + + /** + * On Windows, add ENABLE_VIRTUAL_TERMINAL_INPUT (0x0200) to the stdin + * console handle so the terminal sends VT sequences for modified keys + * (e.g. \x1b[Z for Shift+Tab). Without this, libuv's ReadConsoleInputW + * discards modifier state and Shift+Tab arrives as plain \t. + */ + private enableWindowsVTInput(): void { + if (process.platform !== "win32") return; + try { + if (!ProcessTerminal._vtHandles) { + const koffi = cjsRequire("koffi"); + const k32 = koffi.load("kernel32.dll"); + const GetStdHandle = k32.func("void* __stdcall GetStdHandle(int)"); + const GetConsoleMode = k32.func("bool __stdcall GetConsoleMode(void*, _Out_ uint32_t*)"); + const SetConsoleMode = k32.func("bool __stdcall SetConsoleMode(void*, uint32_t)"); + const STD_INPUT_HANDLE = -10; + const handle = GetStdHandle(STD_INPUT_HANDLE); + ProcessTerminal._vtHandles = { GetConsoleMode, SetConsoleMode, handle }; + } + const ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200; + const { GetConsoleMode, SetConsoleMode, handle } = ProcessTerminal._vtHandles; + const mode = new Uint32Array(1); + GetConsoleMode(handle, mode); + if (!(mode[0]! & ENABLE_VIRTUAL_TERMINAL_INPUT)) { + SetConsoleMode(handle, mode[0]! 
| ENABLE_VIRTUAL_TERMINAL_INPUT); + } + } catch { + // koffi not available — Shift+Tab won't be distinguishable from Tab + } + } + + async drainInput(maxMs = 1000, idleMs = 50): Promise { + if (this._kittyProtocolActive) { + // Disable Kitty keyboard protocol first so any late key releases + // do not generate new Kitty escape sequences. + process.stdout.write("\x1b[4;0m"); + this._modifyOtherKeysActive = false; + } + + const previousHandler = this.inputHandler; + this.inputHandler = undefined; + + let lastDataTime = Date.now(); + const onData = () => { + lastDataTime = Date.now(); + }; + + process.stdin.on("data", onData); + const endTime = Date.now() + maxMs; + + try { + while (true) { + const now = Date.now(); + const timeLeft = endTime - now; + if (timeLeft <= 0) break; + if (now - lastDataTime >= idleMs) break; + await new Promise((resolve) => setTimeout(resolve, Math.min(idleMs, timeLeft))); + } + } finally { + process.stdin.removeListener("data", onData); + this.inputHandler = previousHandler; + } + } + + stop(): void { + // Disable bracketed paste mode + process.stdout.write("\x1b[?2004l"); + + // Disable Kitty keyboard protocol if not already done by drainInput() + if (this._kittyProtocolActive) { + process.stdout.write("\x1b[4;0m"); + this._modifyOtherKeysActive = false; + } + + // Clean up StdinBuffer + if (this.stdinBuffer) { + this.stdinBuffer.destroy(); + this.stdinBuffer = undefined; + } + + // Remove event handlers + if (this.stdinDataHandler) { + process.stdin.removeListener("data", this.stdinDataHandler); + this.stdinDataHandler = undefined; + } + this.inputHandler = undefined; + if (this.resizeHandler) { + process.stdout.removeListener("resize", this.resizeHandler); + this.resizeHandler = undefined; + } + + // Pause stdin to prevent any buffered input (e.g., Ctrl+D) from being + // re-interpreted after raw mode is disabled. This fixes a race condition + // where Ctrl+D could close the parent shell over SSH. 
+ process.stdin.pause(); + + // Restore raw mode state + if (process.stdin.setRawMode) { + process.stdin.setRawMode(this.wasRaw); + } + } + + write(data: string): void { + process.stdout.write(data); + if (this.writeLogPath) { + try { + fs.appendFileSync(this.writeLogPath, data, { encoding: "utf8" }); + } catch { + // Ignore logging errors + } + } + } + + get columns(): number { + return process.stdout.columns || 80; + } + + get rows(): number { + return process.stdout.rows || 24; + } + + moveBy(lines: number): void { + if (lines > 0) { + // Move down + process.stdout.write(`\x1b[${lines}B`); + } else if (lines < 0) { + // Move up + process.stdout.write(`\x1b[${-lines}A`); + } + // lines === 0: no movement + } + + hideCursor(): void { + process.stdout.write("\x1b[?25l"); + } + + showCursor(): void { + process.stdout.write("\x1b[?25h"); + } + + clearLine(): void { + process.stdout.write("\x1b[K"); + } + + clearFromCursor(): void { + process.stdout.write("\x1b[J"); + } + + clearScreen(): void { + process.stdout.write("\x1b[2J\x1b[H"); // Clear screen and move to home (1,1) + } + + setTitle(title: string): void { + // OSC 0;title BEL - set terminal window title + process.stdout.write(`\x1b]0;${title}\x07`); + } +} diff --git a/packages/pi-tui/src/tui.ts b/packages/pi-tui/src/tui.ts new file mode 100644 index 000000000..7f4bb999c --- /dev/null +++ b/packages/pi-tui/src/tui.ts @@ -0,0 +1,1212 @@ +/** + * Minimal TUI implementation with differential rendering + */ + +import * as fs from "node:fs"; +import * as os from "node:os"; +import * as path from "node:path"; +import { isKeyRelease, matchesKey } from "./keys.js"; +import type { Terminal } from "./terminal.js"; +import { getCapabilities, isImageLine, setCellDimensions } from "./terminal-image.js"; +import { extractSegments, sliceByColumn, sliceWithWidth, visibleWidth } from "./utils.js"; + +/** + * Component interface - all components must implement this + */ +export interface Component { + /** + * Render the 
component to lines for the given viewport width + * @param width - Current viewport width + * @returns Array of strings, each representing a line + */ + render(width: number): string[]; + + /** + * Optional handler for keyboard input when component has focus + */ + handleInput?(data: string): void; + + /** + * If true, component receives key release events (Kitty protocol). + * Default is false - release events are filtered out. + */ + wantsKeyRelease?: boolean; + + /** + * Invalidate any cached rendering state. + * Called when theme changes or when component needs to re-render from scratch. + */ + invalidate(): void; +} + +type InputListenerResult = { consume?: boolean; data?: string } | undefined; +type InputListener = (data: string) => InputListenerResult; + +/** + * Interface for components that can receive focus and display a hardware cursor. + * When focused, the component should emit CURSOR_MARKER at the cursor position + * in its render output. TUI will find this marker and position the hardware + * cursor there for proper IME candidate window positioning. + */ +export interface Focusable { + /** Set by TUI when focus changes. Component should emit CURSOR_MARKER when true. */ + focused: boolean; +} + +/** Type guard to check if a component implements Focusable */ +export function isFocusable(component: Component | null): component is Component & Focusable { + return component !== null && "focused" in component; +} + +/** + * Cursor position marker - APC (Application Program Command) sequence. + * This is a zero-width escape sequence that terminals ignore. + * Components emit this at the cursor position when focused. + * TUI finds and strips this marker, then positions the hardware cursor there. 
+ */ +export const CURSOR_MARKER = "\x1b_pi:c\x07"; + +export { visibleWidth }; + +/** + * Anchor position for overlays + */ +export type OverlayAnchor = + | "center" + | "top-left" + | "top-right" + | "bottom-left" + | "bottom-right" + | "top-center" + | "bottom-center" + | "left-center" + | "right-center"; + +/** + * Margin configuration for overlays + */ +export interface OverlayMargin { + top?: number; + right?: number; + bottom?: number; + left?: number; +} + +/** Value that can be absolute (number) or percentage (string like "50%") */ +export type SizeValue = number | `${number}%`; + +/** Parse a SizeValue into absolute value given a reference size */ +function parseSizeValue(value: SizeValue | undefined, referenceSize: number): number | undefined { + if (value === undefined) return undefined; + if (typeof value === "number") return value; + // Parse percentage string like "50%" + const match = value.match(/^(\d+(?:\.\d+)?)%$/); + if (match) { + return Math.floor((referenceSize * parseFloat(match[1])) / 100); + } + return undefined; +} + +/** + * Options for overlay positioning and sizing. + * Values can be absolute numbers or percentage strings (e.g., "50%"). 
+ */ +export interface OverlayOptions { + // === Sizing === + /** Width in columns, or percentage of terminal width (e.g., "50%") */ + width?: SizeValue; + /** Minimum width in columns */ + minWidth?: number; + /** Maximum height in rows, or percentage of terminal height (e.g., "50%") */ + maxHeight?: SizeValue; + + // === Positioning - anchor-based === + /** Anchor point for positioning (default: 'center') */ + anchor?: OverlayAnchor; + /** Horizontal offset from anchor position (positive = right) */ + offsetX?: number; + /** Vertical offset from anchor position (positive = down) */ + offsetY?: number; + + // === Positioning - percentage or absolute === + /** Row position: absolute number, or percentage (e.g., "25%" = 25% from top) */ + row?: SizeValue; + /** Column position: absolute number, or percentage (e.g., "50%" = centered horizontally) */ + col?: SizeValue; + + // === Margin from terminal edges === + /** Margin from terminal edges. Number applies to all sides. */ + margin?: OverlayMargin | number; + + // === Visibility === + /** + * Control overlay visibility based on terminal dimensions. + * If provided, overlay is only rendered when this returns true. + * Called each render cycle with current terminal dimensions. 
+ */ + visible?: (termWidth: number, termHeight: number) => boolean; + /** If true, don't capture keyboard focus when shown */ + nonCapturing?: boolean; +} + +/** + * Handle returned by showOverlay for controlling the overlay + */ +export interface OverlayHandle { + /** Permanently remove the overlay (cannot be shown again) */ + hide(): void; + /** Temporarily hide or show the overlay */ + setHidden(hidden: boolean): void; + /** Check if overlay is temporarily hidden */ + isHidden(): boolean; + /** Focus this overlay and bring it to the visual front */ + focus(): void; + /** Release focus to the previous target */ + unfocus(): void; + /** Check if this overlay currently has focus */ + isFocused(): boolean; +} + +/** + * Container - a component that contains other components + */ +export class Container implements Component { + children: Component[] = []; + + addChild(component: Component): void { + this.children.push(component); + } + + removeChild(component: Component): void { + const index = this.children.indexOf(component); + if (index !== -1) { + this.children.splice(index, 1); + } + } + + clear(): void { + this.children = []; + } + + invalidate(): void { + for (const child of this.children) { + child.invalidate?.(); + } + } + + render(width: number): string[] { + const lines: string[] = []; + for (const child of this.children) { + lines.push(...child.render(width)); + } + return lines; + } +} + +/** + * TUI - Main class for managing terminal UI with differential rendering + */ +export class TUI extends Container { + public terminal: Terminal; + private previousLines: string[] = []; + private previousWidth = 0; + private previousHeight = 0; + private focusedComponent: Component | null = null; + private inputListeners = new Set(); + + /** Global callback for debug key (Shift+Ctrl+D). Called before input is forwarded to focused component. 
*/ + public onDebug?: () => void; + private renderRequested = false; + private cursorRow = 0; // Logical cursor row (end of rendered content) + private hardwareCursorRow = 0; // Actual terminal cursor row (may differ due to IME positioning) + private inputBuffer = ""; // Buffer for parsing terminal responses + private cellSizeQueryPending = false; + private showHardwareCursor = process.env.PI_HARDWARE_CURSOR === "1"; + private clearOnShrink = process.env.PI_CLEAR_ON_SHRINK === "1"; // Clear empty rows when content shrinks (default: off) + private maxLinesRendered = 0; // Track terminal's working area (max lines ever rendered) + private previousViewportTop = 0; // Track previous viewport top for resize-aware cursor moves + private fullRedrawCount = 0; + private stopped = false; + + // Overlay stack for modal components rendered on top of base content + private focusOrderCounter = 0; + private overlayStack: { + component: Component; + options?: OverlayOptions; + preFocus: Component | null; + hidden: boolean; + focusOrder: number; + }[] = []; + + constructor(terminal: Terminal, showHardwareCursor?: boolean) { + super(); + this.terminal = terminal; + if (showHardwareCursor !== undefined) { + this.showHardwareCursor = showHardwareCursor; + } + } + + get fullRedraws(): number { + return this.fullRedrawCount; + } + + getShowHardwareCursor(): boolean { + return this.showHardwareCursor; + } + + setShowHardwareCursor(enabled: boolean): void { + if (this.showHardwareCursor === enabled) return; + this.showHardwareCursor = enabled; + if (!enabled) { + this.terminal.hideCursor(); + } + this.requestRender(); + } + + getClearOnShrink(): boolean { + return this.clearOnShrink; + } + + /** + * Set whether to trigger full re-render when content shrinks. + * When true (default), empty rows are cleared when content shrinks. + * When false, empty rows remain (reduces redraws on slower terminals). 
+ */ + setClearOnShrink(enabled: boolean): void { + this.clearOnShrink = enabled; + } + + setFocus(component: Component | null): void { + // Clear focused flag on old component + if (isFocusable(this.focusedComponent)) { + this.focusedComponent.focused = false; + } + + this.focusedComponent = component; + + // Set focused flag on new component + if (isFocusable(component)) { + component.focused = true; + } + } + + /** + * Show an overlay component with configurable positioning and sizing. + * Returns a handle to control the overlay's visibility. + */ + showOverlay(component: Component, options?: OverlayOptions): OverlayHandle { + const entry = { + component, + options, + preFocus: this.focusedComponent, + hidden: false, + focusOrder: ++this.focusOrderCounter, + }; + this.overlayStack.push(entry); + // Only focus if overlay is actually visible + if (!options?.nonCapturing && this.isOverlayVisible(entry)) { + this.setFocus(component); + } + this.terminal.hideCursor(); + this.requestRender(); + + // Return handle for controlling this overlay + return { + hide: () => { + const index = this.overlayStack.indexOf(entry); + if (index !== -1) { + this.overlayStack.splice(index, 1); + // Restore focus if this overlay had focus + if (this.focusedComponent === component) { + const topVisible = this.getTopmostVisibleOverlay(); + this.setFocus(topVisible?.component ?? entry.preFocus); + } + if (this.overlayStack.length === 0) this.terminal.hideCursor(); + this.requestRender(); + } + }, + setHidden: (hidden: boolean) => { + if (entry.hidden === hidden) return; + entry.hidden = hidden; + // Update focus when hiding/showing + if (hidden) { + // If this overlay had focus, move focus to next visible or preFocus + if (this.focusedComponent === component) { + const topVisible = this.getTopmostVisibleOverlay(); + this.setFocus(topVisible?.component ?? 
entry.preFocus); + } + } else { + // Restore focus to this overlay when showing (if it's actually visible) + if (!options?.nonCapturing && this.isOverlayVisible(entry)) { + entry.focusOrder = ++this.focusOrderCounter; + this.setFocus(component); + } + } + this.requestRender(); + }, + isHidden: () => entry.hidden, + focus: () => { + if (!this.overlayStack.includes(entry) || !this.isOverlayVisible(entry)) return; + if (this.focusedComponent !== component) { + this.setFocus(component); + } + entry.focusOrder = ++this.focusOrderCounter; + this.requestRender(); + }, + unfocus: () => { + if (this.focusedComponent !== component) return; + const topVisible = this.getTopmostVisibleOverlay(); + this.setFocus(topVisible && topVisible !== entry ? topVisible.component : entry.preFocus); + this.requestRender(); + }, + isFocused: () => this.focusedComponent === component, + }; + } + + /** Hide the topmost overlay and restore previous focus. */ + hideOverlay(): void { + const overlay = this.overlayStack.pop(); + if (!overlay) return; + if (this.focusedComponent === overlay.component) { + // Find topmost visible overlay, or fall back to preFocus + const topVisible = this.getTopmostVisibleOverlay(); + this.setFocus(topVisible?.component ?? 
overlay.preFocus); + } + if (this.overlayStack.length === 0) this.terminal.hideCursor(); + this.requestRender(); + } + + /** Check if there are any visible overlays */ + hasOverlay(): boolean { + return this.overlayStack.some((o) => this.isOverlayVisible(o)); + } + + /** Check if an overlay entry is currently visible */ + private isOverlayVisible(entry: (typeof this.overlayStack)[number]): boolean { + if (entry.hidden) return false; + if (entry.options?.visible) { + return entry.options.visible(this.terminal.columns, this.terminal.rows); + } + return true; + } + + /** Find the topmost visible capturing overlay, if any */ + private getTopmostVisibleOverlay(): (typeof this.overlayStack)[number] | undefined { + for (let i = this.overlayStack.length - 1; i >= 0; i--) { + if (this.overlayStack[i].options?.nonCapturing) continue; + if (this.isOverlayVisible(this.overlayStack[i])) { + return this.overlayStack[i]; + } + } + return undefined; + } + + override invalidate(): void { + super.invalidate(); + for (const overlay of this.overlayStack) overlay.component.invalidate?.(); + } + + start(): void { + this.stopped = false; + this.terminal.start( + (data) => this.handleInput(data), + () => this.requestRender(), + ); + this.terminal.hideCursor(); + this.queryCellSize(); + this.requestRender(); + } + + addInputListener(listener: InputListener): () => void { + this.inputListeners.add(listener); + return () => { + this.inputListeners.delete(listener); + }; + } + + removeInputListener(listener: InputListener): void { + this.inputListeners.delete(listener); + } + + private queryCellSize(): void { + // Only query if terminal supports images (cell size is only used for image rendering) + if (!getCapabilities().images) { + return; + } + // Query terminal for cell size in pixels: CSI 16 t + // Response format: CSI 6 ; height ; width t + this.cellSizeQueryPending = true; + this.terminal.write("\x1b[16t"); + } + + stop(): void { + this.stopped = true; + // Move cursor to the end of the 
content to prevent overwriting/artifacts on exit + if (this.previousLines.length > 0) { + const targetRow = this.previousLines.length; // Line after the last content + const lineDiff = targetRow - this.hardwareCursorRow; + if (lineDiff > 0) { + this.terminal.write(`\x1b[${lineDiff}B`); + } else if (lineDiff < 0) { + this.terminal.write(`\x1b[${-lineDiff}A`); + } + this.terminal.write("\r\n"); + } + + this.terminal.showCursor(); + this.terminal.stop(); + } + + requestRender(force = false): void { + if (force) { + this.previousLines = []; + this.previousWidth = -1; // -1 triggers widthChanged, forcing a full clear + this.previousHeight = -1; // -1 triggers heightChanged, forcing a full clear + this.cursorRow = 0; + this.hardwareCursorRow = 0; + this.maxLinesRendered = 0; + this.previousViewportTop = 0; + } + if (this.renderRequested) return; + this.renderRequested = true; + process.nextTick(() => { + this.renderRequested = false; + this.doRender(); + }); + } + + private handleInput(data: string): void { + if (this.inputListeners.size > 0) { + let current = data; + for (const listener of this.inputListeners) { + const result = listener(current); + if (result?.consume) { + return; + } + if (result?.data !== undefined) { + current = result.data; + } + } + if (current.length === 0) { + return; + } + data = current; + } + + // If we're waiting for cell size response, buffer input and parse + if (this.cellSizeQueryPending) { + this.inputBuffer += data; + const filtered = this.parseCellSizeResponse(); + if (filtered.length === 0) return; + data = filtered; + } + + // Global debug key handler (Shift+Ctrl+D) + if (matchesKey(data, "shift+ctrl+d") && this.onDebug) { + this.onDebug(); + return; + } + + // If focused component is an overlay, verify it's still visible + // (visibility can change due to terminal resize or visible() callback) + const focusedOverlay = this.overlayStack.find((o) => o.component === this.focusedComponent); + if (focusedOverlay && 
!this.isOverlayVisible(focusedOverlay)) { + // Focused overlay is no longer visible, redirect to topmost visible overlay + const topVisible = this.getTopmostVisibleOverlay(); + if (topVisible) { + this.setFocus(topVisible.component); + } else { + // No visible overlays, restore to preFocus + this.setFocus(focusedOverlay.preFocus); + } + } + + // Pass input to focused component (including Ctrl+C) + // The focused component can decide how to handle Ctrl+C + if (this.focusedComponent?.handleInput) { + // Filter out key release events unless component opts in + if (isKeyRelease(data) && !this.focusedComponent.wantsKeyRelease) { + return; + } + this.focusedComponent.handleInput(data); + this.requestRender(); + } + } + + private parseCellSizeResponse(): string { + // Response format: ESC [ 6 ; height ; width t + // Match the response pattern + const responsePattern = /\x1b\[6;(\d+);(\d+)t/; + const match = this.inputBuffer.match(responsePattern); + + if (match) { + const heightPx = parseInt(match[1], 10); + const widthPx = parseInt(match[2], 10); + + if (heightPx > 0 && widthPx > 0) { + setCellDimensions({ widthPx, heightPx }); + // Invalidate all components so images re-render with correct dimensions + this.invalidate(); + this.requestRender(); + } + + // Remove the response from buffer + this.inputBuffer = this.inputBuffer.replace(responsePattern, ""); + this.cellSizeQueryPending = false; + } + + // Check if we have a partial cell size response starting (wait for more data) + // Patterns that could be incomplete cell size response: \x1b, \x1b[, \x1b[6, \x1b[6;...(no t yet) + const partialCellSizePattern = /\x1b(\[6?;?[\d;]*)?$/; + if (partialCellSizePattern.test(this.inputBuffer)) { + // Check if it's actually a complete different escape sequence (ends with a letter) + // Cell size response ends with 't', Kitty keyboard ends with 'u', arrows end with A-D, etc. 
+ const lastChar = this.inputBuffer[this.inputBuffer.length - 1]; + if (!/[a-zA-Z~]/.test(lastChar)) { + // Doesn't end with a terminator, might be incomplete - wait for more + return ""; + } + } + + // No cell size response found, return buffered data as user input + const result = this.inputBuffer; + this.inputBuffer = ""; + this.cellSizeQueryPending = false; // Give up waiting + return result; + } + + /** + * Resolve overlay layout from options. + * Returns { width, row, col, maxHeight } for rendering. + */ + private resolveOverlayLayout( + options: OverlayOptions | undefined, + overlayHeight: number, + termWidth: number, + termHeight: number, + ): { width: number; row: number; col: number; maxHeight: number | undefined } { + const opt = options ?? {}; + + // Parse margin (clamp to non-negative) + const margin = + typeof opt.margin === "number" + ? { top: opt.margin, right: opt.margin, bottom: opt.margin, left: opt.margin } + : (opt.margin ?? {}); + const marginTop = Math.max(0, margin.top ?? 0); + const marginRight = Math.max(0, margin.right ?? 0); + const marginBottom = Math.max(0, margin.bottom ?? 0); + const marginLeft = Math.max(0, margin.left ?? 0); + + // Available space after margins + const availWidth = Math.max(1, termWidth - marginLeft - marginRight); + const availHeight = Math.max(1, termHeight - marginTop - marginBottom); + + // === Resolve width === + let width = parseSizeValue(opt.width, termWidth) ?? Math.min(80, availWidth); + // Apply minWidth + if (opt.minWidth !== undefined) { + width = Math.max(width, opt.minWidth); + } + // Clamp to available space + width = Math.max(1, Math.min(width, availWidth)); + + // === Resolve maxHeight === + let maxHeight = parseSizeValue(opt.maxHeight, termHeight); + // Clamp to available space + if (maxHeight !== undefined) { + maxHeight = Math.max(1, Math.min(maxHeight, availHeight)); + } + + // Effective overlay height (may be clamped by maxHeight) + const effectiveHeight = maxHeight !== undefined ? 
Math.min(overlayHeight, maxHeight) : overlayHeight; + + // === Resolve position === + let row: number; + let col: number; + + if (opt.row !== undefined) { + if (typeof opt.row === "string") { + // Percentage: 0% = top, 100% = bottom (overlay stays within bounds) + const match = opt.row.match(/^(\d+(?:\.\d+)?)%$/); + if (match) { + const maxRow = Math.max(0, availHeight - effectiveHeight); + const percent = parseFloat(match[1]) / 100; + row = marginTop + Math.floor(maxRow * percent); + } else { + // Invalid format, fall back to center + row = this.resolveAnchorRow("center", effectiveHeight, availHeight, marginTop); + } + } else { + // Absolute row position + row = opt.row; + } + } else { + // Anchor-based (default: center) + const anchor = opt.anchor ?? "center"; + row = this.resolveAnchorRow(anchor, effectiveHeight, availHeight, marginTop); + } + + if (opt.col !== undefined) { + if (typeof opt.col === "string") { + // Percentage: 0% = left, 100% = right (overlay stays within bounds) + const match = opt.col.match(/^(\d+(?:\.\d+)?)%$/); + if (match) { + const maxCol = Math.max(0, availWidth - width); + const percent = parseFloat(match[1]) / 100; + col = marginLeft + Math.floor(maxCol * percent); + } else { + // Invalid format, fall back to center + col = this.resolveAnchorCol("center", width, availWidth, marginLeft); + } + } else { + // Absolute column position + col = opt.col; + } + } else { + // Anchor-based (default: center) + const anchor = opt.anchor ?? 
"center"; + col = this.resolveAnchorCol(anchor, width, availWidth, marginLeft); + } + + // Apply offsets + if (opt.offsetY !== undefined) row += opt.offsetY; + if (opt.offsetX !== undefined) col += opt.offsetX; + + // Clamp to terminal bounds (respecting margins) + row = Math.max(marginTop, Math.min(row, termHeight - marginBottom - effectiveHeight)); + col = Math.max(marginLeft, Math.min(col, termWidth - marginRight - width)); + + return { width, row, col, maxHeight }; + } + + private resolveAnchorRow(anchor: OverlayAnchor, height: number, availHeight: number, marginTop: number): number { + switch (anchor) { + case "top-left": + case "top-center": + case "top-right": + return marginTop; + case "bottom-left": + case "bottom-center": + case "bottom-right": + return marginTop + availHeight - height; + case "left-center": + case "center": + case "right-center": + return marginTop + Math.floor((availHeight - height) / 2); + } + } + + private resolveAnchorCol(anchor: OverlayAnchor, width: number, availWidth: number, marginLeft: number): number { + switch (anchor) { + case "top-left": + case "left-center": + case "bottom-left": + return marginLeft; + case "top-right": + case "right-center": + case "bottom-right": + return marginLeft + availWidth - width; + case "top-center": + case "center": + case "bottom-center": + return marginLeft + Math.floor((availWidth - width) / 2); + } + } + + /** Composite all overlays into content lines (sorted by focusOrder, higher = on top). 
*/ + private compositeOverlays(lines: string[], termWidth: number, termHeight: number): string[] { + if (this.overlayStack.length === 0) return lines; + const result = [...lines]; + + // Pre-render all visible overlays and calculate positions + const rendered: { overlayLines: string[]; row: number; col: number; w: number }[] = []; + let minLinesNeeded = result.length; + + const visibleEntries = this.overlayStack.filter((e) => this.isOverlayVisible(e)); + visibleEntries.sort((a, b) => a.focusOrder - b.focusOrder); + for (const entry of visibleEntries) { + const { component, options } = entry; + + // Get layout with height=0 first to determine width and maxHeight + // (width and maxHeight don't depend on overlay height) + const { width, maxHeight } = this.resolveOverlayLayout(options, 0, termWidth, termHeight); + + // Render component at calculated width + let overlayLines = component.render(width); + + // Apply maxHeight if specified + if (maxHeight !== undefined && overlayLines.length > maxHeight) { + overlayLines = overlayLines.slice(0, maxHeight); + } + + // Get final row/col with actual overlay height + const { row, col } = this.resolveOverlayLayout(options, overlayLines.length, termWidth, termHeight); + + rendered.push({ overlayLines, row, col, w: width }); + minLinesNeeded = Math.max(minLinesNeeded, row + overlayLines.length); + } + + // Ensure result covers the terminal working area to keep overlay positioning stable across resizes. + // maxLinesRendered can exceed current content length after a shrink; pad to keep viewportStart consistent. 
+ const workingHeight = Math.max(this.maxLinesRendered, minLinesNeeded); + + // Extend result with empty lines if content is too short for overlay placement or working area + while (result.length < workingHeight) { + result.push(""); + } + + const viewportStart = Math.max(0, workingHeight - termHeight); + + // Composite each overlay + for (const { overlayLines, row, col, w } of rendered) { + for (let i = 0; i < overlayLines.length; i++) { + const idx = viewportStart + row + i; + if (idx >= 0 && idx < result.length) { + // Defensive: truncate overlay line to declared width before compositing + // (components should already respect width, but this ensures it) + const truncatedOverlayLine = + visibleWidth(overlayLines[i]) > w ? sliceByColumn(overlayLines[i], 0, w, true) : overlayLines[i]; + result[idx] = this.compositeLineAt(result[idx], truncatedOverlayLine, col, w, termWidth); + } + } + } + + return result; + } + + private static readonly SEGMENT_RESET = "\x1b[0m\x1b]8;;\x07"; + + private applyLineResets(lines: string[]): string[] { + const reset = TUI.SEGMENT_RESET; + for (let i = 0; i < lines.length; i++) { + const line = lines[i]; + if (!isImageLine(line)) { + lines[i] = line + reset; + } + } + return lines; + } + + /** Splice overlay content into a base line at a specific column. Single-pass optimized. 
*/ + private compositeLineAt( + baseLine: string, + overlayLine: string, + startCol: number, + overlayWidth: number, + totalWidth: number, + ): string { + if (isImageLine(baseLine)) return baseLine; + + // Single pass through baseLine extracts both before and after segments + const afterStart = startCol + overlayWidth; + const base = extractSegments(baseLine, startCol, afterStart, totalWidth - afterStart, true); + + // Extract overlay with width tracking (strict=true to exclude wide chars at boundary) + const overlay = sliceWithWidth(overlayLine, 0, overlayWidth, true); + + // Pad segments to target widths + const beforePad = Math.max(0, startCol - base.beforeWidth); + const overlayPad = Math.max(0, overlayWidth - overlay.width); + const actualBeforeWidth = Math.max(startCol, base.beforeWidth); + const actualOverlayWidth = Math.max(overlayWidth, overlay.width); + const afterTarget = Math.max(0, totalWidth - actualBeforeWidth - actualOverlayWidth); + const afterPad = Math.max(0, afterTarget - base.afterWidth); + + // Compose result + const r = TUI.SEGMENT_RESET; + const result = + base.before + + " ".repeat(beforePad) + + r + + overlay.text + + " ".repeat(overlayPad) + + r + + base.after + + " ".repeat(afterPad); + + // CRITICAL: Always verify and truncate to terminal width. + // This is the final safeguard against width overflow which would crash the TUI. + // Width tracking can drift from actual visible width due to: + // - Complex ANSI/OSC sequences (hyperlinks, colors) + // - Wide characters at segment boundaries + // - Edge cases in segment extraction + const resultWidth = visibleWidth(result); + if (resultWidth <= totalWidth) { + return result; + } + // Truncate with strict=true to ensure we don't exceed totalWidth + return sliceByColumn(result, 0, totalWidth, true); + } + + /** + * Find and extract cursor position from rendered lines. + * Searches for CURSOR_MARKER, calculates its position, and strips it from the output. 
+ * Only scans the bottom terminal height lines (visible viewport). + * @param lines - Rendered lines to search + * @param height - Terminal height (visible viewport size) + * @returns Cursor position { row, col } or null if no marker found + */ + private extractCursorPosition(lines: string[], height: number): { row: number; col: number } | null { + // Only scan the bottom `height` lines (visible viewport) + const viewportTop = Math.max(0, lines.length - height); + for (let row = lines.length - 1; row >= viewportTop; row--) { + const line = lines[row]; + const markerIndex = line.indexOf(CURSOR_MARKER); + if (markerIndex !== -1) { + // Calculate visual column (width of text before marker) + const beforeMarker = line.slice(0, markerIndex); + const col = visibleWidth(beforeMarker); + + // Strip marker from the line + lines[row] = line.slice(0, markerIndex) + line.slice(markerIndex + CURSOR_MARKER.length); + + return { row, col }; + } + } + return null; + } + + private doRender(): void { + if (this.stopped) return; + const width = this.terminal.columns; + const height = this.terminal.rows; + let viewportTop = Math.max(0, this.maxLinesRendered - height); + let prevViewportTop = this.previousViewportTop; + let hardwareCursorRow = this.hardwareCursorRow; + const computeLineDiff = (targetRow: number): number => { + const currentScreenRow = hardwareCursorRow - prevViewportTop; + const targetScreenRow = targetRow - viewportTop; + return targetScreenRow - currentScreenRow; + }; + + // Render all components to get new lines + let newLines = this.render(width); + + // Composite overlays into the rendered lines (before differential compare) + if (this.overlayStack.length > 0) { + newLines = this.compositeOverlays(newLines, width, height); + } + + // Extract cursor position before applying line resets (marker must be found first) + const cursorPos = this.extractCursorPosition(newLines, height); + + newLines = this.applyLineResets(newLines); + + // Width or height changed - need 
full re-render + const widthChanged = this.previousWidth !== 0 && this.previousWidth !== width; + const heightChanged = this.previousHeight !== 0 && this.previousHeight !== height; + + // Helper to clear scrollback and viewport and render all new lines + const fullRender = (clear: boolean): void => { + this.fullRedrawCount += 1; + let buffer = "\x1b[?2026h"; // Begin synchronized output + if (clear) buffer += "\x1b[3J\x1b[2J\x1b[H"; // Clear scrollback, screen, and home + for (let i = 0; i < newLines.length; i++) { + if (i > 0) buffer += "\r\n"; + buffer += newLines[i]; + } + buffer += "\x1b[?2026l"; // End synchronized output + this.terminal.write(buffer); + this.cursorRow = Math.max(0, newLines.length - 1); + this.hardwareCursorRow = this.cursorRow; + // Reset max lines when clearing, otherwise track growth + if (clear) { + this.maxLinesRendered = newLines.length; + } else { + this.maxLinesRendered = Math.max(this.maxLinesRendered, newLines.length); + } + this.previousViewportTop = Math.max(0, this.maxLinesRendered - height); + this.positionHardwareCursor(cursorPos, newLines.length); + this.previousLines = newLines; + this.previousWidth = width; + this.previousHeight = height; + }; + + const debugRedraw = process.env.PI_DEBUG_REDRAW === "1"; + const logRedraw = (reason: string): void => { + if (!debugRedraw) return; + const logPath = path.join(os.homedir(), ".pi", "agent", "pi-debug.log"); + const msg = `[${new Date().toISOString()}] fullRender: ${reason} (prev=${this.previousLines.length}, new=${newLines.length}, height=${height})\n`; + fs.appendFileSync(logPath, msg); + }; + + // First render - just output everything without clearing (assumes clean screen) + if (this.previousLines.length === 0 && !widthChanged && !heightChanged) { + logRedraw("first render"); + fullRender(false); + return; + } + + // Width or height changed - full re-render + if (widthChanged || heightChanged) { + logRedraw(`terminal size changed (${this.previousWidth}x${this.previousHeight} -> 
${width}x${height})`); + fullRender(true); + return; + } + + // Content shrunk below the working area and no overlays - re-render to clear empty rows + // (overlays need the padding, so only do this when no overlays are active) + // Configurable via setClearOnShrink() or PI_CLEAR_ON_SHRINK=0 env var + if (this.clearOnShrink && newLines.length < this.maxLinesRendered && this.overlayStack.length === 0) { + logRedraw(`clearOnShrink (maxLinesRendered=${this.maxLinesRendered})`); + fullRender(true); + return; + } + + // Find first and last changed lines + let firstChanged = -1; + let lastChanged = -1; + const maxLines = Math.max(newLines.length, this.previousLines.length); + for (let i = 0; i < maxLines; i++) { + const oldLine = i < this.previousLines.length ? this.previousLines[i] : ""; + const newLine = i < newLines.length ? newLines[i] : ""; + + if (oldLine !== newLine) { + if (firstChanged === -1) { + firstChanged = i; + } + lastChanged = i; + } + } + const appendedLines = newLines.length > this.previousLines.length; + if (appendedLines) { + if (firstChanged === -1) { + firstChanged = this.previousLines.length; + } + lastChanged = newLines.length - 1; + } + const appendStart = appendedLines && firstChanged === this.previousLines.length && firstChanged > 0; + + // No changes - but still need to update hardware cursor position if it moved + if (firstChanged === -1) { + this.positionHardwareCursor(cursorPos, newLines.length); + this.previousViewportTop = Math.max(0, this.maxLinesRendered - height); + this.previousHeight = height; + return; + } + + // All changes are in deleted lines (nothing to render, just clear) + if (firstChanged >= newLines.length) { + if (this.previousLines.length > newLines.length) { + let buffer = "\x1b[?2026h"; + // Move to end of new content (clamp to 0 for empty content) + const targetRow = Math.max(0, newLines.length - 1); + const lineDiff = computeLineDiff(targetRow); + if (lineDiff > 0) buffer += `\x1b[${lineDiff}B`; + else if (lineDiff < 
0) buffer += `\x1b[${-lineDiff}A`; + buffer += "\r"; + // Clear extra lines without scrolling + const extraLines = this.previousLines.length - newLines.length; + if (extraLines > height) { + logRedraw(`extraLines > height (${extraLines} > ${height})`); + fullRender(true); + return; + } + if (extraLines > 0) { + buffer += "\x1b[1B"; + } + for (let i = 0; i < extraLines; i++) { + buffer += "\r\x1b[2K"; + if (i < extraLines - 1) buffer += "\x1b[1B"; + } + if (extraLines > 0) { + buffer += `\x1b[${extraLines}A`; + } + buffer += "\x1b[?2026l"; + this.terminal.write(buffer); + this.cursorRow = targetRow; + this.hardwareCursorRow = targetRow; + } + this.positionHardwareCursor(cursorPos, newLines.length); + this.previousLines = newLines; + this.previousWidth = width; + this.previousHeight = height; + this.previousViewportTop = Math.max(0, this.maxLinesRendered - height); + return; + } + + // Check if firstChanged is above what was previously visible + // Use previousLines.length (not maxLinesRendered) to avoid false positives after content shrinks + const previousContentViewportTop = Math.max(0, this.previousLines.length - height); + if (firstChanged < previousContentViewportTop) { + // First change is above previous viewport - need full re-render + logRedraw(`firstChanged < viewportTop (${firstChanged} < ${previousContentViewportTop})`); + fullRender(true); + return; + } + + // Render from first changed line to end + // Build buffer with all updates wrapped in synchronized output + let buffer = "\x1b[?2026h"; // Begin synchronized output + const prevViewportBottom = prevViewportTop + height - 1; + const moveTargetRow = appendStart ? 
firstChanged - 1 : firstChanged; + if (moveTargetRow > prevViewportBottom) { + const currentScreenRow = Math.max(0, Math.min(height - 1, hardwareCursorRow - prevViewportTop)); + const moveToBottom = height - 1 - currentScreenRow; + if (moveToBottom > 0) { + buffer += `\x1b[${moveToBottom}B`; + } + const scroll = moveTargetRow - prevViewportBottom; + buffer += "\r\n".repeat(scroll); + prevViewportTop += scroll; + viewportTop += scroll; + hardwareCursorRow = moveTargetRow; + } + + // Move cursor to first changed line (use hardwareCursorRow for actual position) + const lineDiff = computeLineDiff(moveTargetRow); + if (lineDiff > 0) { + buffer += `\x1b[${lineDiff}B`; // Move down + } else if (lineDiff < 0) { + buffer += `\x1b[${-lineDiff}A`; // Move up + } + + buffer += appendStart ? "\r\n" : "\r"; // Move to column 0 + + // Only render changed lines (firstChanged to lastChanged), not all lines to end + // This reduces flicker when only a single line changes (e.g., spinner animation) + const renderEnd = Math.min(lastChanged, newLines.length - 1); + for (let i = firstChanged; i <= renderEnd; i++) { + if (i > firstChanged) buffer += "\r\n"; + buffer += "\x1b[2K"; // Clear current line + const line = newLines[i]; + const isImage = isImageLine(line); + if (!isImage && visibleWidth(line) > width) { + // Log all lines to crash file for debugging + const crashLogPath = path.join(os.homedir(), ".pi", "agent", "pi-crash.log"); + const crashData = [ + `Crash at ${new Date().toISOString()}`, + `Terminal width: ${width}`, + `Line ${i} visible width: ${visibleWidth(line)}`, + "", + "=== All rendered lines ===", + ...newLines.map((l, idx) => `[${idx}] (w=${visibleWidth(l)}) ${l}`), + "", + ].join("\n"); + fs.mkdirSync(path.dirname(crashLogPath), { recursive: true }); + fs.writeFileSync(crashLogPath, crashData); + + // Clean up terminal state before throwing + this.stop(); + + const errorMsg = [ + `Rendered line ${i} exceeds terminal width (${visibleWidth(line)} > ${width}).`, + "", + 
"This is likely caused by a custom TUI component not truncating its output.", + "Use visibleWidth() to measure and truncateToWidth() to truncate lines.", + "", + `Debug log written to: ${crashLogPath}`, + ].join("\n"); + throw new Error(errorMsg); + } + buffer += line; + } + + // Track where cursor ended up after rendering + let finalCursorRow = renderEnd; + + // If we had more lines before, clear them and move cursor back + if (this.previousLines.length > newLines.length) { + // Move to end of new content first if we stopped before it + if (renderEnd < newLines.length - 1) { + const moveDown = newLines.length - 1 - renderEnd; + buffer += `\x1b[${moveDown}B`; + finalCursorRow = newLines.length - 1; + } + const extraLines = this.previousLines.length - newLines.length; + for (let i = newLines.length; i < this.previousLines.length; i++) { + buffer += "\r\n\x1b[2K"; + } + // Move cursor back to end of new content + buffer += `\x1b[${extraLines}A`; + } + + buffer += "\x1b[?2026l"; // End synchronized output + + if (process.env.PI_TUI_DEBUG === "1") { + const debugDir = "/tmp/tui"; + fs.mkdirSync(debugDir, { recursive: true }); + const debugPath = path.join(debugDir, `render-${Date.now()}-${Math.random().toString(36).slice(2)}.log`); + const debugData = [ + `firstChanged: ${firstChanged}`, + `viewportTop: ${viewportTop}`, + `cursorRow: ${this.cursorRow}`, + `height: ${height}`, + `lineDiff: ${lineDiff}`, + `hardwareCursorRow: ${hardwareCursorRow}`, + `renderEnd: ${renderEnd}`, + `finalCursorRow: ${finalCursorRow}`, + `cursorPos: ${JSON.stringify(cursorPos)}`, + `newLines.length: ${newLines.length}`, + `previousLines.length: ${this.previousLines.length}`, + "", + "=== newLines ===", + JSON.stringify(newLines, null, 2), + "", + "=== previousLines ===", + JSON.stringify(this.previousLines, null, 2), + "", + "=== buffer ===", + JSON.stringify(buffer), + ].join("\n"); + fs.writeFileSync(debugPath, debugData); + } + + // Write entire buffer at once + 
this.terminal.write(buffer); + + // Track cursor position for next render + // cursorRow tracks end of content (for viewport calculation) + // hardwareCursorRow tracks actual terminal cursor position (for movement) + this.cursorRow = Math.max(0, newLines.length - 1); + this.hardwareCursorRow = finalCursorRow; + // Track terminal's working area (grows but doesn't shrink unless cleared) + this.maxLinesRendered = Math.max(this.maxLinesRendered, newLines.length); + this.previousViewportTop = Math.max(0, this.maxLinesRendered - height); + + // Position hardware cursor for IME + this.positionHardwareCursor(cursorPos, newLines.length); + + this.previousLines = newLines; + this.previousWidth = width; + this.previousHeight = height; + } + + /** + * Position the hardware cursor for IME candidate window. + * @param cursorPos The cursor position extracted from rendered output, or null + * @param totalLines Total number of rendered lines + */ + private positionHardwareCursor(cursorPos: { row: number; col: number } | null, totalLines: number): void { + if (!cursorPos || totalLines <= 0) { + this.terminal.hideCursor(); + return; + } + + // Clamp cursor position to valid range + const targetRow = Math.max(0, Math.min(cursorPos.row, totalLines - 1)); + const targetCol = Math.max(0, cursorPos.col); + + // Move cursor from current position to target + const rowDelta = targetRow - this.hardwareCursorRow; + let buffer = ""; + if (rowDelta > 0) { + buffer += `\x1b[${rowDelta}B`; // Move down + } else if (rowDelta < 0) { + buffer += `\x1b[${-rowDelta}A`; // Move up + } + // Move to absolute column (1-indexed) + buffer += `\x1b[${targetCol + 1}G`; + + if (buffer) { + this.terminal.write(buffer); + } + + this.hardwareCursorRow = targetRow; + if (this.showHardwareCursor) { + this.terminal.showCursor(); + } else { + this.terminal.hideCursor(); + } + } +} diff --git a/packages/pi-tui/src/undo-stack.ts b/packages/pi-tui/src/undo-stack.ts new file mode 100644 index 000000000..5b9a7e9ce --- 
/dev/null +++ b/packages/pi-tui/src/undo-stack.ts @@ -0,0 +1,28 @@ +/** + * Generic undo stack with clone-on-push semantics. + * + * Stores deep clones of state snapshots. Popped snapshots are returned + * directly (no re-cloning) since they are already detached. + */ +export class UndoStack<S> { + private stack: S[] = []; + + /** Push a deep clone of the given state onto the stack. */ + push(state: S): void { + this.stack.push(structuredClone(state)); + } + + /** Pop and return the most recent snapshot, or undefined if empty. */ + pop(): S | undefined { + return this.stack.pop(); + } + + /** Remove all snapshots. */ + clear(): void { + this.stack.length = 0; + } + + get length(): number { + return this.stack.length; + } +} diff --git a/packages/pi-tui/src/utils.ts b/packages/pi-tui/src/utils.ts new file mode 100644 index 000000000..228b2420c --- /dev/null +++ b/packages/pi-tui/src/utils.ts @@ -0,0 +1,905 @@ +import { eastAsianWidth } from "get-east-asian-width"; + +// Grapheme segmenter (shared instance) +const segmenter = new Intl.Segmenter(undefined, { granularity: "grapheme" }); + +/** + * Get the shared grapheme segmenter instance. + */ +export function getSegmenter(): Intl.Segmenter { + return segmenter; +} + +/** + * Check if a grapheme cluster (after segmentation) could possibly be an RGI emoji. + * This is a fast heuristic to avoid the expensive rgiEmojiRegex test. + * The tested Unicode blocks are deliberately broad to account for future + * Unicode additions. + */ +function couldBeEmoji(segment: string): boolean { + const cp = segment.codePointAt(0)!; + return ( + (cp >= 0x1f000 && cp <= 0x1fbff) || // Emoji and Pictograph + (cp >= 0x2300 && cp <= 0x23ff) || // Misc technical + (cp >= 0x2600 && cp <= 0x27bf) || // Misc symbols, dingbats + (cp >= 0x2b50 && cp <= 0x2b55) || // Specific stars/circles + segment.includes("\uFE0F") || // Contains VS16 (emoji presentation selector) + segment.length > 2 // Multi-codepoint sequences (ZWJ, skin tones, etc.) 
+ ); +} + +// Regexes for character classification (same as string-width library) +const zeroWidthRegex = /^(?:\p{Default_Ignorable_Code_Point}|\p{Control}|\p{Mark}|\p{Surrogate})+$/v; +const leadingNonPrintingRegex = /^[\p{Default_Ignorable_Code_Point}\p{Control}\p{Format}\p{Mark}\p{Surrogate}]+/v; +const rgiEmojiRegex = /^\p{RGI_Emoji}$/v; + +// Cache for non-ASCII strings +const WIDTH_CACHE_SIZE = 512; +const widthCache = new Map<string, number>(); + +/** + * Calculate the terminal width of a single grapheme cluster. + * Based on code from the string-width library, but includes a possible-emoji + * check to avoid running the RGI_Emoji regex unnecessarily. + */ +function graphemeWidth(segment: string): number { + // Zero-width clusters + if (zeroWidthRegex.test(segment)) { + return 0; + } + + // Emoji check with pre-filter + if (couldBeEmoji(segment) && rgiEmojiRegex.test(segment)) { + return 2; + } + + // Get base visible codepoint + const base = segment.replace(leadingNonPrintingRegex, ""); + const cp = base.codePointAt(0); + if (cp === undefined) { + return 0; + } + + // Regional indicator symbols (U+1F1E6..U+1F1FF) are often rendered as + // full-width emoji in terminals, even when isolated during streaming. + // Keep width conservative (2) to avoid terminal auto-wrap drift artifacts. + if (cp >= 0x1f1e6 && cp <= 0x1f1ff) { + return 2; + } + + let width = eastAsianWidth(cp); + + // Trailing halfwidth/fullwidth forms + if (segment.length > 1) { + for (const char of segment.slice(1)) { + const c = char.codePointAt(0)!; + if (c >= 0xff00 && c <= 0xffef) { + width += eastAsianWidth(c); + } + } + } + + return width; +} + +/** + * Calculate the visible width of a string in terminal columns. 
+ */ +export function visibleWidth(str: string): number { + if (str.length === 0) { + return 0; + } + + // Fast path: pure ASCII printable + let isPureAscii = true; + for (let i = 0; i < str.length; i++) { + const code = str.charCodeAt(i); + if (code < 0x20 || code > 0x7e) { + isPureAscii = false; + break; + } + } + if (isPureAscii) { + return str.length; + } + + // Check cache + const cached = widthCache.get(str); + if (cached !== undefined) { + return cached; + } + + // Normalize: tabs to 3 spaces, strip ANSI escape codes + let clean = str; + if (str.includes("\t")) { + clean = clean.replace(/\t/g, "   "); + } + if (clean.includes("\x1b")) { + // Strip supported ANSI/OSC/APC escape sequences in one pass. + // This covers CSI styling/cursor codes, OSC hyperlinks and prompt markers, + // and APC sequences like CURSOR_MARKER. + let stripped = ""; + let i = 0; + while (i < clean.length) { + const ansi = extractAnsiCode(clean, i); + if (ansi) { + i += ansi.length; + continue; + } + stripped += clean[i]; + i++; + } + clean = stripped; + } + + // Calculate width + let width = 0; + for (const { segment } of segmenter.segment(clean)) { + width += graphemeWidth(segment); + } + + // Cache result + if (widthCache.size >= WIDTH_CACHE_SIZE) { + const firstKey = widthCache.keys().next().value; + if (firstKey !== undefined) { + widthCache.delete(firstKey); + } + } + widthCache.set(str, width); + + return width; +} + +/** + * Extract ANSI escape sequences from a string at the given position. + */ +export function extractAnsiCode(str: string, pos: number): { code: string; length: number } | null { + if (pos >= str.length || str[pos] !== "\x1b") return null; + + const next = str[pos + 1]; + + // CSI sequence: ESC [ ... m/G/K/H/J + if (next === "[") { + let j = pos + 2; + while (j < str.length && !/[mGKHJ]/.test(str[j]!)) j++; + if (j < str.length) return { code: str.substring(pos, j + 1), length: j + 1 - pos }; + return null; + } + + // OSC sequence: ESC ] ... BEL or ESC ] ... 
ST (ESC \) + // Used for hyperlinks (OSC 8), window titles, etc. + if (next === "]") { + let j = pos + 2; + while (j < str.length) { + if (str[j] === "\x07") return { code: str.substring(pos, j + 1), length: j + 1 - pos }; + if (str[j] === "\x1b" && str[j + 1] === "\\") return { code: str.substring(pos, j + 2), length: j + 2 - pos }; + j++; + } + return null; + } + + // APC sequence: ESC _ ... BEL or ESC _ ... ST (ESC \) + // Used for cursor marker and application-specific commands + if (next === "_") { + let j = pos + 2; + while (j < str.length) { + if (str[j] === "\x07") return { code: str.substring(pos, j + 1), length: j + 1 - pos }; + if (str[j] === "\x1b" && str[j + 1] === "\\") return { code: str.substring(pos, j + 2), length: j + 2 - pos }; + j++; + } + return null; + } + + return null; +} + +/** + * Track active ANSI SGR codes to preserve styling across line breaks. + */ +class AnsiCodeTracker { + // Track individual attributes separately so we can reset them specifically + private bold = false; + private dim = false; + private italic = false; + private underline = false; + private blink = false; + private inverse = false; + private hidden = false; + private strikethrough = false; + private fgColor: string | null = null; // Stores the full code like "31" or "38;5;240" + private bgColor: string | null = null; // Stores the full code like "41" or "48;5;240" + + process(ansiCode: string): void { + if (!ansiCode.endsWith("m")) { + return; + } + + // Extract the parameters between \x1b[ and m + const match = ansiCode.match(/\x1b\[([\d;]*)m/); + if (!match) return; + + const params = match[1]; + if (params === "" || params === "0") { + // Full reset + this.reset(); + return; + } + + // Parse parameters (can be semicolon-separated) + const parts = params.split(";"); + let i = 0; + while (i < parts.length) { + const code = Number.parseInt(parts[i], 10); + + // Handle 256-color and RGB codes which consume multiple parameters + if (code === 38 || code === 48) { + // 
38;5;N (256 color fg) or 38;2;R;G;B (RGB fg) + // 48;5;N (256 color bg) or 48;2;R;G;B (RGB bg) + if (parts[i + 1] === "5" && parts[i + 2] !== undefined) { + // 256 color: 38;5;N or 48;5;N + const colorCode = `${parts[i]};${parts[i + 1]};${parts[i + 2]}`; + if (code === 38) { + this.fgColor = colorCode; + } else { + this.bgColor = colorCode; + } + i += 3; + continue; + } else if (parts[i + 1] === "2" && parts[i + 4] !== undefined) { + // RGB color: 38;2;R;G;B or 48;2;R;G;B + const colorCode = `${parts[i]};${parts[i + 1]};${parts[i + 2]};${parts[i + 3]};${parts[i + 4]}`; + if (code === 38) { + this.fgColor = colorCode; + } else { + this.bgColor = colorCode; + } + i += 5; + continue; + } + } + + // Standard SGR codes + switch (code) { + case 0: + this.reset(); + break; + case 1: + this.bold = true; + break; + case 2: + this.dim = true; + break; + case 3: + this.italic = true; + break; + case 4: + this.underline = true; + break; + case 5: + this.blink = true; + break; + case 7: + this.inverse = true; + break; + case 8: + this.hidden = true; + break; + case 9: + this.strikethrough = true; + break; + case 21: + this.bold = false; + break; // Some terminals + case 22: + this.bold = false; + this.dim = false; + break; + case 23: + this.italic = false; + break; + case 24: + this.underline = false; + break; + case 25: + this.blink = false; + break; + case 27: + this.inverse = false; + break; + case 28: + this.hidden = false; + break; + case 29: + this.strikethrough = false; + break; + case 39: + this.fgColor = null; + break; // Default fg + case 49: + this.bgColor = null; + break; // Default bg + default: + // Standard foreground colors 30-37, 90-97 + if ((code >= 30 && code <= 37) || (code >= 90 && code <= 97)) { + this.fgColor = String(code); + } + // Standard background colors 40-47, 100-107 + else if ((code >= 40 && code <= 47) || (code >= 100 && code <= 107)) { + this.bgColor = String(code); + } + break; + } + i++; + } + } + + private reset(): void { + this.bold = 
false; + this.dim = false; + this.italic = false; + this.underline = false; + this.blink = false; + this.inverse = false; + this.hidden = false; + this.strikethrough = false; + this.fgColor = null; + this.bgColor = null; + } + + /** Clear all state for reuse. */ + clear(): void { + this.reset(); + } + + getActiveCodes(): string { + const codes: string[] = []; + if (this.bold) codes.push("1"); + if (this.dim) codes.push("2"); + if (this.italic) codes.push("3"); + if (this.underline) codes.push("4"); + if (this.blink) codes.push("5"); + if (this.inverse) codes.push("7"); + if (this.hidden) codes.push("8"); + if (this.strikethrough) codes.push("9"); + if (this.fgColor) codes.push(this.fgColor); + if (this.bgColor) codes.push(this.bgColor); + + if (codes.length === 0) return ""; + return `\x1b[${codes.join(";")}m`; + } + + hasActiveCodes(): boolean { + return ( + this.bold || + this.dim || + this.italic || + this.underline || + this.blink || + this.inverse || + this.hidden || + this.strikethrough || + this.fgColor !== null || + this.bgColor !== null + ); + } + + /** + * Get reset codes for attributes that need to be turned off at line end, + * specifically underline which bleeds into padding. + * Returns empty string if no problematic attributes are active. + */ + getLineEndReset(): string { + // Only underline causes visual bleeding into padding + // Other attributes like colors don't visually bleed to padding + if (this.underline) { + return "\x1b[24m"; // Underline off only + } + return ""; + } +} + +function updateTrackerFromText(text: string, tracker: AnsiCodeTracker): void { + let i = 0; + while (i < text.length) { + const ansiResult = extractAnsiCode(text, i); + if (ansiResult) { + tracker.process(ansiResult.code); + i += ansiResult.length; + } else { + i++; + } + } +} + +/** + * Split text into words while keeping ANSI codes attached. 
+ */ +function splitIntoTokensWithAnsi(text: string): string[] { + const tokens: string[] = []; + let current = ""; + let pendingAnsi = ""; // ANSI codes waiting to be attached to next visible content + let inWhitespace = false; + let i = 0; + + while (i < text.length) { + const ansiResult = extractAnsiCode(text, i); + if (ansiResult) { + // Hold ANSI codes separately - they'll be attached to the next visible char + pendingAnsi += ansiResult.code; + i += ansiResult.length; + continue; + } + + const char = text[i]; + const charIsSpace = char === " "; + + if (charIsSpace !== inWhitespace && current) { + // Switching between whitespace and non-whitespace, push current token + tokens.push(current); + current = ""; + } + + // Attach any pending ANSI codes to this visible character + if (pendingAnsi) { + current += pendingAnsi; + pendingAnsi = ""; + } + + inWhitespace = charIsSpace; + current += char; + i++; + } + + // Handle any remaining pending ANSI codes (attach to last token) + if (pendingAnsi) { + current += pendingAnsi; + } + + if (current) { + tokens.push(current); + } + + return tokens; +} + +/** + * Wrap text with ANSI codes preserved. + * + * ONLY does word wrapping - NO padding, NO background colors. + * Returns lines where each line is <= width visible chars. + * Active ANSI codes are preserved across line breaks. 
+ * + * @param text - Text to wrap (may contain ANSI codes and newlines) + * @param width - Maximum visible width per line + * @returns Array of wrapped lines (NOT padded to width) + */ +export function wrapTextWithAnsi(text: string, width: number): string[] { + if (!text) { + return [""]; + } + + // Handle newlines by processing each line separately + // Track ANSI state across lines so styles carry over after literal newlines + const inputLines = text.split("\n"); + const result: string[] = []; + const tracker = new AnsiCodeTracker(); + + for (const inputLine of inputLines) { + // Prepend active ANSI codes from previous lines (except for first line) + const prefix = result.length > 0 ? tracker.getActiveCodes() : ""; + result.push(...wrapSingleLine(prefix + inputLine, width)); + // Update tracker with codes from this line for next iteration + updateTrackerFromText(inputLine, tracker); + } + + return result.length > 0 ? result : [""]; +} + +function wrapSingleLine(line: string, width: number): string[] { + if (!line) { + return [""]; + } + + const visibleLength = visibleWidth(line); + if (visibleLength <= width) { + return [line]; + } + + const wrapped: string[] = []; + const tracker = new AnsiCodeTracker(); + const tokens = splitIntoTokensWithAnsi(line); + + let currentLine = ""; + let currentVisibleLength = 0; + + for (const token of tokens) { + const tokenVisibleLength = visibleWidth(token); + const isWhitespace = token.trim() === ""; + + // Token itself is too long - break it character by character + if (tokenVisibleLength > width && !isWhitespace) { + if (currentLine) { + // Add specific reset for underline only (preserves background) + const lineEndReset = tracker.getLineEndReset(); + if (lineEndReset) { + currentLine += lineEndReset; + } + wrapped.push(currentLine); + currentLine = ""; + currentVisibleLength = 0; + } + + // Break long token - breakLongWord handles its own resets + const broken = breakLongWord(token, width, tracker); + 
wrapped.push(...broken.slice(0, -1)); + currentLine = broken[broken.length - 1]; + currentVisibleLength = visibleWidth(currentLine); + continue; + } + + // Check if adding this token would exceed width + const totalNeeded = currentVisibleLength + tokenVisibleLength; + + if (totalNeeded > width && currentVisibleLength > 0) { + // Trim trailing whitespace, then add underline reset (not full reset, to preserve background) + let lineToWrap = currentLine.trimEnd(); + const lineEndReset = tracker.getLineEndReset(); + if (lineEndReset) { + lineToWrap += lineEndReset; + } + wrapped.push(lineToWrap); + if (isWhitespace) { + // Don't start new line with whitespace + currentLine = tracker.getActiveCodes(); + currentVisibleLength = 0; + } else { + currentLine = tracker.getActiveCodes() + token; + currentVisibleLength = tokenVisibleLength; + } + } else { + // Add to current line + currentLine += token; + currentVisibleLength += tokenVisibleLength; + } + + updateTrackerFromText(token, tracker); + } + + if (currentLine) { + // No reset at end of final line - let caller handle it + wrapped.push(currentLine); + } + + // Trailing whitespace can cause lines to exceed the requested width + return wrapped.length > 0 ? wrapped.map((line) => line.trimEnd()) : [""]; +} + +const PUNCTUATION_REGEX = /[(){}[\]<>.,;:'"!?+\-=*/\\|&%^$#@~`]/; + +/** + * Check if a character is whitespace. + */ +export function isWhitespaceChar(char: string): boolean { + return /\s/.test(char); +} + +/** + * Check if a character is punctuation. 
+ */ +export function isPunctuationChar(char: string): boolean { + return PUNCTUATION_REGEX.test(char); +} + +function breakLongWord(word: string, width: number, tracker: AnsiCodeTracker): string[] { + const lines: string[] = []; + let currentLine = tracker.getActiveCodes(); + let currentWidth = 0; + + // First, separate ANSI codes from visible content + // We need to handle ANSI codes specially since they're not graphemes + let i = 0; + const segments: Array<{ type: "ansi" | "grapheme"; value: string }> = []; + + while (i < word.length) { + const ansiResult = extractAnsiCode(word, i); + if (ansiResult) { + segments.push({ type: "ansi", value: ansiResult.code }); + i += ansiResult.length; + } else { + // Find the next ANSI code or end of string + let end = i; + while (end < word.length) { + const nextAnsi = extractAnsiCode(word, end); + if (nextAnsi) break; + end++; + } + // Segment this non-ANSI portion into graphemes + const textPortion = word.slice(i, end); + for (const seg of segmenter.segment(textPortion)) { + segments.push({ type: "grapheme", value: seg.segment }); + } + i = end; + } + } + + // Now process segments + for (const seg of segments) { + if (seg.type === "ansi") { + currentLine += seg.value; + tracker.process(seg.value); + continue; + } + + const grapheme = seg.value; + // Skip empty graphemes to avoid issues with string-width calculation + if (!grapheme) continue; + + const graphemeWidth = visibleWidth(grapheme); + + if (currentWidth + graphemeWidth > width) { + // Add specific reset for underline only (preserves background) + const lineEndReset = tracker.getLineEndReset(); + if (lineEndReset) { + currentLine += lineEndReset; + } + lines.push(currentLine); + currentLine = tracker.getActiveCodes(); + currentWidth = 0; + } + + currentLine += grapheme; + currentWidth += graphemeWidth; + } + + if (currentLine) { + // No reset at end of final segment - caller handles continuation + lines.push(currentLine); + } + + return lines.length > 0 ? 
lines : [""]; +} + +/** + * Apply background color to a line, padding to full width. + * + * @param line - Line of text (may contain ANSI codes) + * @param width - Total width to pad to + * @param bgFn - Background color function + * @returns Line with background applied and padded to width + */ +export function applyBackgroundToLine(line: string, width: number, bgFn: (text: string) => string): string { + // Calculate padding needed + const visibleLen = visibleWidth(line); + const paddingNeeded = Math.max(0, width - visibleLen); + const padding = " ".repeat(paddingNeeded); + + // Apply background to content + padding + const withPadding = line + padding; + return bgFn(withPadding); +} + +/** + * Truncate text to fit within a maximum visible width, adding ellipsis if needed. + * Optionally pad with spaces to reach exactly maxWidth. + * Properly handles ANSI escape codes (they don't count toward width). + * + * @param text - Text to truncate (may contain ANSI codes) + * @param maxWidth - Maximum visible width + * @param ellipsis - Ellipsis string to append when truncating (default: "...") + * @param pad - If true, pad result with spaces to exactly maxWidth (default: false) + * @returns Truncated text, optionally padded to exactly maxWidth + */ +export function truncateToWidth( + text: string, + maxWidth: number, + ellipsis: string = "...", + pad: boolean = false, +): string { + const textVisibleWidth = visibleWidth(text); + + if (textVisibleWidth <= maxWidth) { + return pad ? 
text + " ".repeat(maxWidth - textVisibleWidth) : text; + } + + const ellipsisWidth = visibleWidth(ellipsis); + const targetWidth = maxWidth - ellipsisWidth; + + if (targetWidth <= 0) { + return ellipsis.substring(0, maxWidth); + } + + // Separate ANSI codes from visible content using grapheme segmentation + let i = 0; + const segments: Array<{ type: "ansi" | "grapheme"; value: string }> = []; + + while (i < text.length) { + const ansiResult = extractAnsiCode(text, i); + if (ansiResult) { + segments.push({ type: "ansi", value: ansiResult.code }); + i += ansiResult.length; + } else { + // Find the next ANSI code or end of string + let end = i; + while (end < text.length) { + const nextAnsi = extractAnsiCode(text, end); + if (nextAnsi) break; + end++; + } + // Segment this non-ANSI portion into graphemes + const textPortion = text.slice(i, end); + for (const seg of segmenter.segment(textPortion)) { + segments.push({ type: "grapheme", value: seg.segment }); + } + i = end; + } + } + + // Build truncated string from segments + let result = ""; + let currentWidth = 0; + + for (const seg of segments) { + if (seg.type === "ansi") { + result += seg.value; + continue; + } + + const grapheme = seg.value; + // Skip empty graphemes to avoid issues with string-width calculation + if (!grapheme) continue; + + const graphemeWidth = visibleWidth(grapheme); + + if (currentWidth + graphemeWidth > targetWidth) { + break; + } + + result += grapheme; + currentWidth += graphemeWidth; + } + + // Add reset code before ellipsis to prevent styling leaking into it + const truncated = `${result}\x1b[0m${ellipsis}`; + if (pad) { + const truncatedWidth = visibleWidth(truncated); + return truncated + " ".repeat(Math.max(0, maxWidth - truncatedWidth)); + } + return truncated; +} + +/** + * Extract a range of visible columns from a line. Handles ANSI codes and wide chars. 
+ * @param strict - If true, exclude wide chars at boundary that would extend past the range + */ +export function sliceByColumn(line: string, startCol: number, length: number, strict = false): string { + return sliceWithWidth(line, startCol, length, strict).text; +} + +/** Like sliceByColumn but also returns the actual visible width of the result. */ +export function sliceWithWidth( + line: string, + startCol: number, + length: number, + strict = false, +): { text: string; width: number } { + if (length <= 0) return { text: "", width: 0 }; + const endCol = startCol + length; + let result = "", + resultWidth = 0, + currentCol = 0, + i = 0, + pendingAnsi = ""; + + while (i < line.length) { + const ansi = extractAnsiCode(line, i); + if (ansi) { + if (currentCol >= startCol && currentCol < endCol) result += ansi.code; + else if (currentCol < startCol) pendingAnsi += ansi.code; + i += ansi.length; + continue; + } + + let textEnd = i; + while (textEnd < line.length && !extractAnsiCode(line, textEnd)) textEnd++; + + for (const { segment } of segmenter.segment(line.slice(i, textEnd))) { + const w = graphemeWidth(segment); + const inRange = currentCol >= startCol && currentCol < endCol; + const fits = !strict || currentCol + w <= endCol; + if (inRange && fits) { + if (pendingAnsi) { + result += pendingAnsi; + pendingAnsi = ""; + } + result += segment; + resultWidth += w; + } + currentCol += w; + if (currentCol >= endCol) break; + } + i = textEnd; + if (currentCol >= endCol) break; + } + return { text: result, width: resultWidth }; +} + +// Pooled tracker instance for extractSegments (avoids allocation per call) +const pooledStyleTracker = new AnsiCodeTracker(); + +/** + * Extract "before" and "after" segments from a line in a single pass. + * Used for overlay compositing where we need content before and after the overlay region. + * Preserves styling from before the overlay that should affect content after it. 
+ */ +export function extractSegments( + line: string, + beforeEnd: number, + afterStart: number, + afterLen: number, + strictAfter = false, +): { before: string; beforeWidth: number; after: string; afterWidth: number } { + let before = "", + beforeWidth = 0, + after = "", + afterWidth = 0; + let currentCol = 0, + i = 0; + let pendingAnsiBefore = ""; + let afterStarted = false; + const afterEnd = afterStart + afterLen; + + // Track styling state so "after" inherits styling from before the overlay + pooledStyleTracker.clear(); + + while (i < line.length) { + const ansi = extractAnsiCode(line, i); + if (ansi) { + // Track all SGR codes to know styling state at afterStart + pooledStyleTracker.process(ansi.code); + // Include ANSI codes in their respective segments + if (currentCol < beforeEnd) { + pendingAnsiBefore += ansi.code; + } else if (currentCol >= afterStart && currentCol < afterEnd && afterStarted) { + // Only include after we've started "after" (styling already prepended) + after += ansi.code; + } + i += ansi.length; + continue; + } + + let textEnd = i; + while (textEnd < line.length && !extractAnsiCode(line, textEnd)) textEnd++; + + for (const { segment } of segmenter.segment(line.slice(i, textEnd))) { + const w = graphemeWidth(segment); + + if (currentCol < beforeEnd) { + if (pendingAnsiBefore) { + before += pendingAnsiBefore; + pendingAnsiBefore = ""; + } + before += segment; + beforeWidth += w; + } else if (currentCol >= afterStart && currentCol < afterEnd) { + const fits = !strictAfter || currentCol + w <= afterEnd; + if (fits) { + // On first "after" grapheme, prepend inherited styling from before overlay + if (!afterStarted) { + after += pooledStyleTracker.getActiveCodes(); + afterStarted = true; + } + after += segment; + afterWidth += w; + } + } + + currentCol += w; + // Early exit: done with "before" only, or done with both segments + if (afterLen <= 0 ? 
currentCol >= beforeEnd : currentCol >= afterEnd) break; + } + i = textEnd; + if (afterLen <= 0 ? currentCol >= beforeEnd : currentCol >= afterEnd) break; + } + + return { before, beforeWidth, after, afterWidth }; +} diff --git a/packages/pi-tui/tsconfig.json b/packages/pi-tui/tsconfig.json new file mode 100644 index 000000000..c156e06aa --- /dev/null +++ b/packages/pi-tui/tsconfig.json @@ -0,0 +1,27 @@ +{ + "compilerOptions": { + "target": "ES2024", + "module": "Node16", + "lib": ["ES2024"], + "strict": true, + "esModuleInterop": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "inlineSources": true, + "inlineSourceMap": false, + "moduleResolution": "Node16", + "resolveJsonModule": true, + "allowImportingTsExtensions": false, + "experimentalDecorators": true, + "emitDecoratorMetadata": true, + "useDefineForClassFields": false, + "types": ["node"], + "outDir": "./dist", + "rootDir": "./src" + }, + "include": ["src/**/*"], + "exclude": ["node_modules", "dist"] +} diff --git a/patches/@mariozechner+pi-coding-agent+0.57.1.patch b/patches/@mariozechner+pi-coding-agent+0.57.1.patch deleted file mode 100644 index 0b2a0a8c8..000000000 --- a/patches/@mariozechner+pi-coding-agent+0.57.1.patch +++ /dev/null @@ -1,108 +0,0 @@ -diff --git a/node_modules/@mariozechner/pi-coding-agent/dist/core/agent-session.js b/node_modules/@mariozechner/pi-coding-agent/dist/core/agent-session.js -index 90622c2..cff094b 100644 ---- a/node_modules/@mariozechner/pi-coding-agent/dist/core/agent-session.js -+++ b/node_modules/@mariozechner/pi-coding-agent/dist/core/agent-session.js -@@ -1007,7 +1007,7 @@ export class AgentSession { - * Validates API key, saves to session and settings. 
- * @throws Error if no API key available for the model - */ -- async setModel(model) { -+ async setModel(model, options) { - const apiKey = await this._modelRegistry.getApiKey(model); - if (!apiKey) { - throw new Error(`No API key for ${model.provider}/${model.id}`); -@@ -1016,7 +1016,9 @@ export class AgentSession { - const thinkingLevel = this._getThinkingLevelForModelSwitch(); - this.agent.setModel(model); - this.sessionManager.appendModelChange(model.provider, model.id); -- this.settingsManager.setDefaultModelAndProvider(model.provider, model.id); -+ if (options?.persist !== false) { -+ this.settingsManager.setDefaultModelAndProvider(model.provider, model.id); -+ } - // Re-clamp thinking level for new model's capabilities - this.setThinkingLevel(thinkingLevel); - await this._emitModelSelect(model, previousModel, "set"); -@@ -1067,7 +1069,9 @@ export class AgentSession { - // Apply model - this.agent.setModel(next.model); - this.sessionManager.appendModelChange(next.model.provider, next.model.id); -- this.settingsManager.setDefaultModelAndProvider(next.model.provider, next.model.id); -+ if (options?.persist !== false) { -+ this.settingsManager.setDefaultModelAndProvider(next.model.provider, next.model.id); -+ } - // Apply thinking level. 
- // - Explicit scoped model thinking level overrides current session level - // - Undefined scoped model thinking level inherits the current session preference -@@ -1094,7 +1098,9 @@ export class AgentSession { - const thinkingLevel = this._getThinkingLevelForModelSwitch(); - this.agent.setModel(nextModel); - this.sessionManager.appendModelChange(nextModel.provider, nextModel.id); -- this.settingsManager.setDefaultModelAndProvider(nextModel.provider, nextModel.id); -+ if (options?.persist !== false) { -+ this.settingsManager.setDefaultModelAndProvider(nextModel.provider, nextModel.id); -+ } - // Re-clamp thinking level for new model's capabilities - this.setThinkingLevel(thinkingLevel); - await this._emitModelSelect(nextModel, currentModel, "cycle"); -@@ -1659,11 +1665,11 @@ export class AgentSession { - setActiveTools: (toolNames) => this.setActiveToolsByName(toolNames), - refreshTools: () => this._refreshToolRegistry(), - getCommands, -- setModel: async (model) => { -+ setModel: async (model, options) => { - const key = await this.modelRegistry.getApiKey(model); - if (!key) - return false; -- await this.setModel(model); -+ await this.setModel(model, options); - return true; - }, - getThinkingLevel: () => this.thinkingLevel, -diff --git a/node_modules/@mariozechner/pi-coding-agent/dist/core/tools/bash.js b/node_modules/@mariozechner/pi-coding-agent/dist/core/tools/bash.js -index 27fe820..68f277f 100644 ---- a/node_modules/@mariozechner/pi-coding-agent/dist/core/tools/bash.js -+++ b/node_modules/@mariozechner/pi-coding-agent/dist/core/tools/bash.js -@@ -1,11 +1,35 @@ - import { randomBytes } from "node:crypto"; - import { createWriteStream, existsSync } from "node:fs"; -+import { createRequire } from "node:module"; - import { tmpdir } from "node:os"; - import { join } from "node:path"; - import { Type } from "@sinclair/typebox"; - import { spawn } from "child_process"; - import { getShellConfig, getShellEnv, killProcessTree } from "../../utils/shell.js"; - import 
{ DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize, truncateTail } from "./truncate.js"; -+// Cached Win32 FFI handles for restoring VT input after child processes -+let _vtHandles = null; -+function restoreWindowsVTInput() { -+ if (process.platform !== "win32") return; -+ try { -+ if (!_vtHandles) { -+ const cjsRequire = createRequire(import.meta.url); -+ const koffi = cjsRequire("koffi"); -+ const k32 = koffi.load("kernel32.dll"); -+ const GetStdHandle = k32.func("void* __stdcall GetStdHandle(int)"); -+ const GetConsoleMode = k32.func("bool __stdcall GetConsoleMode(void*, _Out_ uint32_t*)"); -+ const SetConsoleMode = k32.func("bool __stdcall SetConsoleMode(void*, uint32_t)"); -+ const handle = GetStdHandle(-10); -+ _vtHandles = { GetConsoleMode, SetConsoleMode, handle }; -+ } -+ const ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200; -+ const mode = new Uint32Array(1); -+ _vtHandles.GetConsoleMode(_vtHandles.handle, mode); -+ if (!(mode[0] & ENABLE_VIRTUAL_TERMINAL_INPUT)) { -+ _vtHandles.SetConsoleMode(_vtHandles.handle, mode[0] | ENABLE_VIRTUAL_TERMINAL_INPUT); -+ } -+ } catch { } -+} - /** - * Generate a unique temp file path for bash output - */ -@@ -76,6 +100,7 @@ const defaultBashOperations = { - } - // Handle process exit - child.on("close", (code) => { -+ restoreWindowsVTInput(); - if (timeoutHandle) - clearTimeout(timeoutHandle); - if (signal) diff --git a/patches/@mariozechner+pi-tui+0.57.1.patch b/patches/@mariozechner+pi-tui+0.57.1.patch deleted file mode 100644 index 64da0cb55..000000000 --- a/patches/@mariozechner+pi-tui+0.57.1.patch +++ /dev/null @@ -1,47 +0,0 @@ -diff --git a/node_modules/@mariozechner/pi-tui/dist/terminal.js b/node_modules/@mariozechner/pi-tui/dist/terminal.js -index cd20330..e836fcd 100644 ---- a/node_modules/@mariozechner/pi-tui/dist/terminal.js -+++ b/node_modules/@mariozechner/pi-tui/dist/terminal.js -@@ -7,6 +7,7 @@ const cjsRequire = createRequire(import.meta.url); - * Real terminal using process.stdin/stdout - */ - export class 
ProcessTerminal { -+ static _vtHandles = null; - wasRaw = false; - inputHandler; - resizeHandler; -@@ -126,20 +127,23 @@ export class ProcessTerminal { - if (process.platform !== "win32") - return; - try { -- // Dynamic require to avoid bundling koffi's 74MB of cross-platform -- // native binaries into every compiled binary. Koffi is only needed -- // on Windows for VT input support. -- const koffi = cjsRequire("koffi"); -- const k32 = koffi.load("kernel32.dll"); -- const GetStdHandle = k32.func("void* __stdcall GetStdHandle(int)"); -- const GetConsoleMode = k32.func("bool __stdcall GetConsoleMode(void*, _Out_ uint32_t*)"); -- const SetConsoleMode = k32.func("bool __stdcall SetConsoleMode(void*, uint32_t)"); -- const STD_INPUT_HANDLE = -10; -+ if (!ProcessTerminal._vtHandles) { -+ const koffi = cjsRequire("koffi"); -+ const k32 = koffi.load("kernel32.dll"); -+ const GetStdHandle = k32.func("void* __stdcall GetStdHandle(int)"); -+ const GetConsoleMode = k32.func("bool __stdcall GetConsoleMode(void*, _Out_ uint32_t*)"); -+ const SetConsoleMode = k32.func("bool __stdcall SetConsoleMode(void*, uint32_t)"); -+ const STD_INPUT_HANDLE = -10; -+ const handle = GetStdHandle(STD_INPUT_HANDLE); -+ ProcessTerminal._vtHandles = { GetConsoleMode, SetConsoleMode, handle }; -+ } - const ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200; -- const handle = GetStdHandle(STD_INPUT_HANDLE); -+ const { GetConsoleMode, SetConsoleMode, handle } = ProcessTerminal._vtHandles; - const mode = new Uint32Array(1); - GetConsoleMode(handle, mode); -- SetConsoleMode(handle, mode[0] | ENABLE_VIRTUAL_TERMINAL_INPUT); -+ if (!(mode[0] & ENABLE_VIRTUAL_TERMINAL_INPUT)) { -+ SetConsoleMode(handle, mode[0] | ENABLE_VIRTUAL_TERMINAL_INPUT); -+ } - } - catch { - // koffi not available — Shift+Tab won't be distinguishable from Tab diff --git a/scripts/postinstall.js b/scripts/postinstall.js index a9cb33f6e..a477dd912 100644 --- a/scripts/postinstall.js +++ b/scripts/postinstall.js @@ -63,7 +63,6 @@ const banner = } 
catch { // Clack or picocolors unavailable — fall back to minimal output process.stderr.write(` Run gsd to get started.\n\n`) - await run('npx patch-package') await run('npx playwright install chromium') return } @@ -74,21 +73,7 @@ const banner = const results = [] const s = p.spinner() - // --- Step 1: Apply patches ----------------------------------------------- - s.start('Applying patches…') - const patchResult = await run('npx patch-package') - if (patchResult.ok) { - s.stop('Patches applied') - results.push({ label: 'Patches applied', ok: true }) - } else { - s.stop(pc.yellow('Patches — skipped (non-fatal)')) - results.push({ - label: 'Patches skipped — run ' + pc.cyan('npx patch-package') + ' manually', - ok: false, - }) - } - - // --- Step 2: Playwright browser ------------------------------------------ + // --- Playwright browser -------------------------------------------------- // Avoid --with-deps: install scripts should not block on interactive sudo // prompts. If Linux libs are missing, suggest the explicit follow-up. 
s.start('Setting up browser tools…') diff --git a/scripts/sync-pkg-version.cjs b/scripts/sync-pkg-version.cjs index 9424040a2..3e05bacc6 100644 --- a/scripts/sync-pkg-version.cjs +++ b/scripts/sync-pkg-version.cjs @@ -15,7 +15,7 @@ const { readFileSync, writeFileSync } = require('fs') const { resolve, join } = require('path') const root = resolve(__dirname, '..') -const piPkgPath = join(root, 'node_modules', '@mariozechner', 'pi-coding-agent', 'package.json') +const piPkgPath = join(root, 'packages', 'pi-coding-agent', 'package.json') const gsdPkgPath = join(root, 'pkg', 'package.json') const piPkg = JSON.parse(readFileSync(piPkgPath, 'utf-8')) diff --git a/src/cli.ts b/src/cli.ts index 6d1737c9a..48e21a142 100644 --- a/src/cli.ts +++ b/src/cli.ts @@ -8,7 +8,7 @@ import { InteractiveMode, runPrintMode, runRpcMode, -} from '@mariozechner/pi-coding-agent' +} from '@gsd/pi-coding-agent' import { existsSync, readdirSync, renameSync, readFileSync } from 'node:fs' import { join } from 'node:path' import { agentDir, sessionsDir, authFilePath } from './app-paths.js' diff --git a/src/onboarding.ts b/src/onboarding.ts index 48345ff33..04ce4c2ac 100644 --- a/src/onboarding.ts +++ b/src/onboarding.ts @@ -11,7 +11,7 @@ */ import { exec } from 'node:child_process' -import type { AuthStorage } from '@mariozechner/pi-coding-agent' +import type { AuthStorage } from '@gsd/pi-coding-agent' import { renderLogo } from './logo.js' // ─── Types ──────────────────────────────────────────────────────────────────── diff --git a/src/pi-migration.ts b/src/pi-migration.ts index 3fa15902c..e880d3ad1 100644 --- a/src/pi-migration.ts +++ b/src/pi-migration.ts @@ -7,7 +7,7 @@ import { existsSync, readFileSync } from 'node:fs' import { homedir } from 'node:os' import { join } from 'node:path' -import type { AuthStorage, AuthCredential } from '@mariozechner/pi-coding-agent' +import type { AuthStorage, AuthCredential } from '@gsd/pi-coding-agent' const PI_AUTH_PATH = join(homedir(), '.pi', 'agent', 
'auth.json') diff --git a/src/resource-loader.ts b/src/resource-loader.ts index d7595dd4d..d95311db4 100644 --- a/src/resource-loader.ts +++ b/src/resource-loader.ts @@ -1,4 +1,4 @@ -import { DefaultResourceLoader } from '@mariozechner/pi-coding-agent' +import { DefaultResourceLoader } from '@gsd/pi-coding-agent' import { homedir } from 'node:os' import { cpSync, existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs' import { dirname, join, resolve } from 'node:path' diff --git a/src/resources/extensions/ask-user-questions.ts b/src/resources/extensions/ask-user-questions.ts index d33efea4a..3b218a90d 100644 --- a/src/resources/extensions/ask-user-questions.ts +++ b/src/resources/extensions/ask-user-questions.ts @@ -9,8 +9,8 @@ * Based on: https://github.com/openai/codex (codex-rs/core/src/tools/handlers/ask_user_questions.rs) */ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; -import { Text } from "@mariozechner/pi-tui"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; +import { Text } from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; import { showInterviewRound, diff --git a/src/resources/extensions/bg-shell/index.ts b/src/resources/extensions/bg-shell/index.ts index ee4662b4b..a98022104 100644 --- a/src/resources/extensions/bg-shell/index.ts +++ b/src/resources/extensions/bg-shell/index.ts @@ -23,25 +23,25 @@ * /bg — interactive process manager overlay */ -import { StringEnum } from "@mariozechner/pi-ai"; +import { StringEnum } from "@gsd/pi-ai"; import type { ExtensionAPI, ExtensionContext, Theme, -} from "@mariozechner/pi-coding-agent"; +} from "@gsd/pi-coding-agent"; import { truncateHead, DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, getShellConfig, -} from "@mariozechner/pi-coding-agent"; +} from "@gsd/pi-coding-agent"; import { Text, truncateToWidth, visibleWidth, matchesKey, Key, -} from "@mariozechner/pi-tui"; +} from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; import { spawn, spawnSync, type 
ChildProcess } from "node:child_process"; import { createConnection } from "node:net"; diff --git a/src/resources/extensions/browser-tools/index.ts b/src/resources/extensions/browser-tools/index.ts index 88ef292ef..f611fa488 100644 --- a/src/resources/extensions/browser-tools/index.ts +++ b/src/resources/extensions/browser-tools/index.ts @@ -20,14 +20,14 @@ * - Cleaned up on session_shutdown */ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, truncateHead, -} from "@mariozechner/pi-coding-agent"; +} from "@gsd/pi-coding-agent"; import { Type } from "@sinclair/typebox"; -import { StringEnum } from "@mariozechner/pi-ai"; +import { StringEnum } from "@gsd/pi-ai"; import type { Browser, BrowserContext, Frame, Page } from "playwright"; import { mkdir, stat, writeFile, copyFile } from "node:fs/promises"; import path from "node:path"; diff --git a/src/resources/extensions/context7/index.ts b/src/resources/extensions/context7/index.ts index 0c7b88245..cf21ac140 100644 --- a/src/resources/extensions/context7/index.ts +++ b/src/resources/extensions/context7/index.ts @@ -22,14 +22,14 @@ * export CONTEXT7_API_KEY=your_key (get one at context7.com/dashboard) */ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize, truncateHead, -} from "@mariozechner/pi-coding-agent"; -import { Text } from "@mariozechner/pi-tui"; +} from "@gsd/pi-coding-agent"; +import { Text } from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; // ─── API types ──────────────────────────────────────────────────────────────── diff --git a/src/resources/extensions/get-secrets-from-user.ts b/src/resources/extensions/get-secrets-from-user.ts index e026b9386..ac693488e 100644 --- a/src/resources/extensions/get-secrets-from-user.ts +++ 
b/src/resources/extensions/get-secrets-from-user.ts @@ -10,8 +10,8 @@ import { readFile, writeFile } from "node:fs/promises"; import { existsSync, statSync } from "node:fs"; import { resolve } from "node:path"; -import type { ExtensionAPI, Theme } from "@mariozechner/pi-coding-agent"; -import { CURSOR_MARKER, Editor, type EditorTheme, Key, matchesKey, Text, truncateToWidth, wrapTextWithAnsi } from "@mariozechner/pi-tui"; +import type { ExtensionAPI, Theme } from "@gsd/pi-coding-agent"; +import { CURSOR_MARKER, Editor, type EditorTheme, Key, matchesKey, Text, truncateToWidth, wrapTextWithAnsi } from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; import { makeUI, type ProgressStatus } from "./shared/ui.js"; import { parseSecretsManifest, formatSecretsManifest } from "./gsd/files.js"; diff --git a/src/resources/extensions/google-search/index.ts b/src/resources/extensions/google-search/index.ts index 409ae5b5f..d47fe0617 100644 --- a/src/resources/extensions/google-search/index.ts +++ b/src/resources/extensions/google-search/index.ts @@ -10,14 +10,14 @@ * returns it with source URLs from grounding metadata. 
*/ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize, truncateHead, -} from "@mariozechner/pi-coding-agent"; -import { Text } from "@mariozechner/pi-tui"; +} from "@gsd/pi-coding-agent"; +import { Text } from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; import { GoogleGenAI } from "@google/genai"; diff --git a/src/resources/extensions/gsd/activity-log.ts b/src/resources/extensions/gsd/activity-log.ts index dbbfc80f7..43f3ee81a 100644 --- a/src/resources/extensions/gsd/activity-log.ts +++ b/src/resources/extensions/gsd/activity-log.ts @@ -10,7 +10,7 @@ import { writeFileSync, mkdirSync, readdirSync, unlinkSync, statSync } from "node:fs"; import { join } from "node:path"; -import type { ExtensionContext } from "@mariozechner/pi-coding-agent"; +import type { ExtensionContext } from "@gsd/pi-coding-agent"; import { gsdRoot } from "./paths.js"; export function saveActivityLog( diff --git a/src/resources/extensions/gsd/auto.ts b/src/resources/extensions/gsd/auto.ts index 5251fa3d8..2ee324068 100644 --- a/src/resources/extensions/gsd/auto.ts +++ b/src/resources/extensions/gsd/auto.ts @@ -14,7 +14,7 @@ import type { ExtensionAPI, ExtensionContext, ExtensionCommandContext, -} from "@mariozechner/pi-coding-agent"; +} from "@gsd/pi-coding-agent"; import { deriveState } from "./state.js"; import type { GSDState } from "./types.js"; @@ -69,7 +69,7 @@ import { } from "./worktree.ts"; import { GitServiceImpl } from "./git-service.ts"; import type { GitPreferences } from "./git-service.ts"; -import { truncateToWidth, visibleWidth } from "@mariozechner/pi-tui"; +import { truncateToWidth, visibleWidth } from "@gsd/pi-tui"; import { makeUI, GLYPH, INDENT } from "../shared/ui.js"; import { showNextAction } from "../shared/next-action-ui.js"; diff --git a/src/resources/extensions/gsd/commands.ts b/src/resources/extensions/gsd/commands.ts 
index c1f3442f8..ddb7b09f9 100644 --- a/src/resources/extensions/gsd/commands.ts +++ b/src/resources/extensions/gsd/commands.ts @@ -4,7 +4,7 @@ * One command, one wizard. Routes to smart entry or status. */ -import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI, ExtensionCommandContext } from "@gsd/pi-coding-agent"; import { existsSync, readFileSync } from "node:fs"; import { join, dirname } from "node:path"; import { fileURLToPath } from "node:url"; @@ -201,7 +201,7 @@ async function handleStatus(ctx: ExtensionCommandContext): Promise { } export async function fireStatusViaCommand( - ctx: import("@mariozechner/pi-coding-agent").ExtensionContext, + ctx: import("@gsd/pi-coding-agent").ExtensionContext, ): Promise { await handleStatus(ctx as ExtensionCommandContext); } diff --git a/src/resources/extensions/gsd/dashboard-overlay.ts b/src/resources/extensions/gsd/dashboard-overlay.ts index ad30dc0da..427b6f120 100644 --- a/src/resources/extensions/gsd/dashboard-overlay.ts +++ b/src/resources/extensions/gsd/dashboard-overlay.ts @@ -6,8 +6,8 @@ * Toggled with Ctrl+Alt+G or opened from /gsd status. */ -import type { Theme } from "@mariozechner/pi-coding-agent"; -import { truncateToWidth, visibleWidth, matchesKey, Key } from "@mariozechner/pi-tui"; +import type { Theme } from "@gsd/pi-coding-agent"; +import { truncateToWidth, visibleWidth, matchesKey, Key } from "@gsd/pi-tui"; import { deriveState } from "./state.js"; import { loadFile, parseRoadmap, parsePlan } from "./files.js"; import { resolveMilestoneFile, resolveSliceFile } from "./paths.js"; diff --git a/src/resources/extensions/gsd/guided-flow.ts b/src/resources/extensions/gsd/guided-flow.ts index 3319fe050..d0b31734f 100644 --- a/src/resources/extensions/gsd/guided-flow.ts +++ b/src/resources/extensions/gsd/guided-flow.ts @@ -6,7 +6,7 @@ * No execution state, no hooks, no tools — the LLM does the rest. 
*/ -import type { ExtensionAPI, ExtensionContext, ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI, ExtensionContext, ExtensionCommandContext } from "@gsd/pi-coding-agent"; import { showNextAction } from "../shared/next-action-ui.js"; import { loadFile, parseRoadmap } from "./files.js"; import { loadPrompt } from "./prompt-loader.js"; diff --git a/src/resources/extensions/gsd/index.ts b/src/resources/extensions/gsd/index.ts index 066944c2b..7f0f3a45a 100644 --- a/src/resources/extensions/gsd/index.ts +++ b/src/resources/extensions/gsd/index.ts @@ -21,8 +21,8 @@ import type { ExtensionAPI, ExtensionContext, -} from "@mariozechner/pi-coding-agent"; -import { createBashTool, createWriteTool, createReadTool, createEditTool } from "@mariozechner/pi-coding-agent"; +} from "@gsd/pi-coding-agent"; +import { createBashTool, createWriteTool, createReadTool, createEditTool } from "@gsd/pi-coding-agent"; import { registerGSDCommand } from "./commands.js"; import { registerWorktreeCommand, getWorktreeOriginalCwd, getActiveWorktreeName } from "./worktree-command.js"; @@ -44,11 +44,11 @@ import { relSliceFile, relSlicePath, relTaskFile, buildSliceFileName, gsdRoot, } from "./paths.js"; -import { Key } from "@mariozechner/pi-tui"; +import { Key } from "@gsd/pi-tui"; import { join } from "node:path"; import { existsSync } from "node:fs"; import { shortcutDesc } from "../shared/terminal.js"; -import { Text } from "@mariozechner/pi-tui"; +import { Text } from "@gsd/pi-tui"; // ── ASCII logo ──────────────────────────────────────────────────────────── const GSD_LOGO_LINES = [ diff --git a/src/resources/extensions/gsd/metrics.ts b/src/resources/extensions/gsd/metrics.ts index a9e875f61..84c5e72d0 100644 --- a/src/resources/extensions/gsd/metrics.ts +++ b/src/resources/extensions/gsd/metrics.ts @@ -15,7 +15,7 @@ import { readFileSync, writeFileSync, mkdirSync } from "node:fs"; import { join } from "node:path"; -import type { ExtensionContext } 
from "@mariozechner/pi-coding-agent"; +import type { ExtensionContext } from "@gsd/pi-coding-agent"; import { gsdRoot } from "./paths.js"; // ─── Types ──────────────────────────────────────────────────────────────────── diff --git a/src/resources/extensions/gsd/migrate/command.ts b/src/resources/extensions/gsd/migrate/command.ts index e80731c14..ef4e1f409 100644 --- a/src/resources/extensions/gsd/migrate/command.ts +++ b/src/resources/extensions/gsd/migrate/command.ts @@ -9,7 +9,7 @@ * output for GSD-2 standards compliance. */ -import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI, ExtensionCommandContext } from "@gsd/pi-coding-agent"; import { existsSync, readFileSync } from "node:fs"; import { resolve, join, dirname } from "node:path"; import { fileURLToPath } from "node:url"; diff --git a/src/resources/extensions/gsd/preferences.ts b/src/resources/extensions/gsd/preferences.ts index 2857478c3..f4b17102c 100644 --- a/src/resources/extensions/gsd/preferences.ts +++ b/src/resources/extensions/gsd/preferences.ts @@ -1,7 +1,7 @@ import { existsSync, readdirSync, readFileSync, statSync } from "node:fs"; import { homedir } from "node:os"; import { isAbsolute, join } from "node:path"; -import { getAgentDir } from "@mariozechner/pi-coding-agent"; +import { getAgentDir } from "@gsd/pi-coding-agent"; import type { GitPreferences } from "./git-service.ts"; import { VALID_BRANCH_NAME } from "./git-service.ts"; diff --git a/src/resources/extensions/gsd/skill-discovery.ts b/src/resources/extensions/gsd/skill-discovery.ts index d33fc0206..8d4c2b76d 100644 --- a/src/resources/extensions/gsd/skill-discovery.ts +++ b/src/resources/extensions/gsd/skill-discovery.ts @@ -10,7 +10,7 @@ import { existsSync, readdirSync, readFileSync } from "node:fs"; import { join } from "node:path"; -import { getAgentDir } from "@mariozechner/pi-coding-agent"; +import { getAgentDir } from "@gsd/pi-coding-agent"; const SKILLS_DIR = 
join(getAgentDir(), "skills"); diff --git a/src/resources/extensions/gsd/worktree-command.ts b/src/resources/extensions/gsd/worktree-command.ts index 5f5e99c3d..0ded7dd0b 100644 --- a/src/resources/extensions/gsd/worktree-command.ts +++ b/src/resources/extensions/gsd/worktree-command.ts @@ -10,7 +10,7 @@ * /worktree remove — remove a worktree and its branch */ -import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI, ExtensionCommandContext } from "@gsd/pi-coding-agent"; import { loadPrompt } from "./prompt-loader.js"; import { autoCommitCurrentBranch } from "./worktree.js"; import { showConfirm } from "../shared/confirm-ui.js"; diff --git a/src/resources/extensions/mac-tools/index.ts b/src/resources/extensions/mac-tools/index.ts index be9ff1ed6..ac20c1d6e 100644 --- a/src/resources/extensions/mac-tools/index.ts +++ b/src/resources/extensions/mac-tools/index.ts @@ -12,8 +12,8 @@ * - All Swift debug output goes to stderr; only JSON on stdout */ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; -import { StringEnum } from "@mariozechner/pi-ai"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; +import { StringEnum } from "@gsd/pi-ai"; import { Type } from "@sinclair/typebox"; import { execFileSync } from "node:child_process"; import { statSync, readdirSync } from "node:fs"; diff --git a/src/resources/extensions/mcporter/index.ts b/src/resources/extensions/mcporter/index.ts index f32b5af2c..abd8d82e9 100644 --- a/src/resources/extensions/mcporter/index.ts +++ b/src/resources/extensions/mcporter/index.ts @@ -15,14 +15,14 @@ * - mcporter installed globally: npm i -g mcporter */ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; import { truncateHead, DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize, -} from "@mariozechner/pi-coding-agent"; -import { Text } from "@mariozechner/pi-tui"; +} from 
"@gsd/pi-coding-agent"; +import { Text } from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; import { execFile, exec } from "node:child_process"; import { promisify } from "node:util"; diff --git a/src/resources/extensions/remote-questions/remote-command.ts b/src/resources/extensions/remote-questions/remote-command.ts index 356cab9ab..fcfa53703 100644 --- a/src/resources/extensions/remote-questions/remote-command.ts +++ b/src/resources/extensions/remote-questions/remote-command.ts @@ -2,9 +2,9 @@ * Remote Questions — /gsd remote command */ -import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; -import { AuthStorage } from "@mariozechner/pi-coding-agent"; -import { CURSOR_MARKER, Editor, type EditorTheme, Key, matchesKey, truncateToWidth } from "@mariozechner/pi-tui"; +import type { ExtensionAPI, ExtensionCommandContext } from "@gsd/pi-coding-agent"; +import { AuthStorage } from "@gsd/pi-coding-agent"; +import { CURSOR_MARKER, Editor, type EditorTheme, Key, matchesKey, truncateToWidth } from "@gsd/pi-tui"; import { existsSync, readFileSync, writeFileSync, mkdirSync } from "node:fs"; import { dirname, join } from "node:path"; import { getGlobalGSDPreferencesPath, loadEffectiveGSDPreferences } from "../gsd/preferences.js"; diff --git a/src/resources/extensions/search-the-web/command-search-provider.ts b/src/resources/extensions/search-the-web/command-search-provider.ts index 880910733..0f3ebb46d 100644 --- a/src/resources/extensions/search-the-web/command-search-provider.ts +++ b/src/resources/extensions/search-the-web/command-search-provider.ts @@ -8,8 +8,8 @@ * All provider logic lives in provider.ts (S01) — this is pure UI wiring. 
*/ -import type { ExtensionAPI } from '@mariozechner/pi-coding-agent' -import type { AutocompleteItem } from '@mariozechner/pi-tui' +import type { ExtensionAPI } from '@gsd/pi-coding-agent' +import type { AutocompleteItem } from '@gsd/pi-tui' import { getTavilyApiKey, getBraveApiKey, diff --git a/src/resources/extensions/search-the-web/index.ts b/src/resources/extensions/search-the-web/index.ts index a3c71b661..a2c38cf69 100644 --- a/src/resources/extensions/search-the-web/index.ts +++ b/src/resources/extensions/search-the-web/index.ts @@ -44,7 +44,7 @@ * JINA_API_KEY — Optional. Higher rate limits for page extraction. */ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; import { registerSearchTool } from "./tool-search"; import { registerFetchPageTool } from "./tool-fetch-page"; import { registerLLMContextTool } from "./tool-llm-context"; diff --git a/src/resources/extensions/search-the-web/provider.ts b/src/resources/extensions/search-the-web/provider.ts index 018bcd22d..c4f52cc96 100644 --- a/src/resources/extensions/search-the-web/provider.ts +++ b/src/resources/extensions/search-the-web/provider.ts @@ -9,7 +9,7 @@ * @see S01-RESEARCH.md for the storage decision rationale (D002). 
*/ -import { AuthStorage } from '@mariozechner/pi-coding-agent' +import { AuthStorage } from '@gsd/pi-coding-agent' import { homedir } from 'os' import { join } from 'path' diff --git a/src/resources/extensions/search-the-web/tool-fetch-page.ts b/src/resources/extensions/search-the-web/tool-fetch-page.ts index 42fd8b55e..98b21515e 100644 --- a/src/resources/extensions/search-the-web/tool-fetch-page.ts +++ b/src/resources/extensions/search-the-web/tool-fetch-page.ts @@ -8,9 +8,9 @@ * - Content-type awareness (JSON passthrough, PDF detection) */ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; -import { truncateHead, DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES } from "@mariozechner/pi-coding-agent"; -import { Text } from "@mariozechner/pi-tui"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; +import { truncateHead, DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES } from "@gsd/pi-coding-agent"; +import { Text } from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; import { LRUTTLCache } from "./cache"; diff --git a/src/resources/extensions/search-the-web/tool-llm-context.ts b/src/resources/extensions/search-the-web/tool-llm-context.ts index ceb2fcacd..5c0f5d3aa 100644 --- a/src/resources/extensions/search-the-web/tool-llm-context.ts +++ b/src/resources/extensions/search-the-web/tool-llm-context.ts @@ -15,11 +15,11 @@ * Use search-the-web when you want links/URLs to browse selectively. 
*/ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; -import { truncateHead, DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES } from "@mariozechner/pi-coding-agent"; -import { Text } from "@mariozechner/pi-tui"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; +import { truncateHead, DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES } from "@gsd/pi-coding-agent"; +import { Text } from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; -import { StringEnum } from "@mariozechner/pi-ai"; +import { StringEnum } from "@gsd/pi-ai"; import { LRUTTLCache } from "./cache"; import { fetchWithRetryTimed, HttpError, classifyError, type RateLimitInfo } from "./http"; diff --git a/src/resources/extensions/search-the-web/tool-search.ts b/src/resources/extensions/search-the-web/tool-search.ts index f0b031c26..c7328e1eb 100644 --- a/src/resources/extensions/search-the-web/tool-search.ts +++ b/src/resources/extensions/search-the-web/tool-search.ts @@ -10,11 +10,11 @@ * - Rate limit info in details */ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; -import { truncateHead, formatSize, DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES } from "@mariozechner/pi-coding-agent"; -import { Text } from "@mariozechner/pi-tui"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; +import { truncateHead, formatSize, DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES } from "@gsd/pi-coding-agent"; +import { Text } from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; -import { StringEnum } from "@mariozechner/pi-ai"; +import { StringEnum } from "@gsd/pi-ai"; import { LRUTTLCache } from "./cache"; import { fetchWithRetryTimed, fetchWithRetry, classifyError, type RateLimitInfo } from "./http"; diff --git a/src/resources/extensions/shared/confirm-ui.ts b/src/resources/extensions/shared/confirm-ui.ts index 479812e6d..da5fc1675 100644 --- a/src/resources/extensions/shared/confirm-ui.ts +++ b/src/resources/extensions/shared/confirm-ui.ts @@ -15,9 +15,9 @@ * if (!confirmed) return 
textResult("Cancelled."); */ -import type { ExtensionContext } from "@mariozechner/pi-coding-agent"; -import { type Theme } from "@mariozechner/pi-coding-agent"; -import { Key, matchesKey, truncateToWidth, type TUI } from "@mariozechner/pi-tui"; +import type { ExtensionContext } from "@gsd/pi-coding-agent"; +import { type Theme } from "@gsd/pi-coding-agent"; +import { Key, matchesKey, truncateToWidth, type TUI } from "@gsd/pi-tui"; import { makeUI, GLYPH } from "./ui.js"; export interface ConfirmOptions { diff --git a/src/resources/extensions/shared/interview-ui.ts b/src/resources/extensions/shared/interview-ui.ts index e7649f181..3cb84c243 100644 --- a/src/resources/extensions/shared/interview-ui.ts +++ b/src/resources/extensions/shared/interview-ui.ts @@ -25,15 +25,15 @@ * Esc exit confirmation */ -import type { ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; -import { type Theme } from "@mariozechner/pi-coding-agent"; +import type { ExtensionCommandContext } from "@gsd/pi-coding-agent"; +import { type Theme } from "@gsd/pi-coding-agent"; import { Editor, Key, matchesKey, truncateToWidth, type TUI, -} from "@mariozechner/pi-tui"; +} from "@gsd/pi-tui"; import { makeUI, INDENT } from "./ui.js"; // ─── Exported types ─────────────────────────────────────────────────────────── diff --git a/src/resources/extensions/shared/next-action-ui.ts b/src/resources/extensions/shared/next-action-ui.ts index 1741333a2..6d5690356 100644 --- a/src/resources/extensions/shared/next-action-ui.ts +++ b/src/resources/extensions/shared/next-action-ui.ts @@ -41,9 +41,9 @@ * Pressing Escape also resolves as "not_yet". 
*/ -import type { ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; -import { type Theme } from "@mariozechner/pi-coding-agent"; -import { Key, matchesKey, type TUI } from "@mariozechner/pi-tui"; +import type { ExtensionCommandContext } from "@gsd/pi-coding-agent"; +import { type Theme } from "@gsd/pi-coding-agent"; +import { Key, matchesKey, type TUI } from "@gsd/pi-tui"; import { makeUI } from "./ui.js"; // ─── Public API ─────────────────────────────────────────────────────────────── diff --git a/src/resources/extensions/shared/progress-widget.ts b/src/resources/extensions/shared/progress-widget.ts index de29fb8dc..97af216bf 100644 --- a/src/resources/extensions/shared/progress-widget.ts +++ b/src/resources/extensions/shared/progress-widget.ts @@ -21,8 +21,8 @@ * panel.dispose(); // remove widget and status */ -import type { ExtensionUIContext, Theme } from "@mariozechner/pi-coding-agent"; -import type { TUI } from "@mariozechner/pi-tui"; +import type { ExtensionUIContext, Theme } from "@gsd/pi-coding-agent"; +import type { TUI } from "@gsd/pi-tui"; import { makeUI, type ProgressStatus } from "./ui.js"; // ─── Exported types ─────────────────────────────────────────────────────────── diff --git a/src/resources/extensions/shared/thinking-widget.ts b/src/resources/extensions/shared/thinking-widget.ts index 1f37fde4a..d1f3bc1e2 100644 --- a/src/resources/extensions/shared/thinking-widget.ts +++ b/src/resources/extensions/shared/thinking-widget.ts @@ -20,9 +20,9 @@ * multiple widgets can safely coexist without key collisions. 
*/ -import type { ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; -import { type Theme } from "@mariozechner/pi-coding-agent"; -import { truncateToWidth, type TUI } from "@mariozechner/pi-tui"; +import type { ExtensionCommandContext } from "@gsd/pi-coding-agent"; +import { type Theme } from "@gsd/pi-coding-agent"; +import { truncateToWidth, type TUI } from "@gsd/pi-tui"; // ─── Public API ─────────────────────────────────────────────────────────────── diff --git a/src/resources/extensions/shared/ui.ts b/src/resources/extensions/shared/ui.ts index e350467a9..7c2e13239 100644 --- a/src/resources/extensions/shared/ui.ts +++ b/src/resources/extensions/shared/ui.ts @@ -28,8 +28,8 @@ * individual methods don't need it. */ -import { type Theme } from "@mariozechner/pi-coding-agent"; -import { truncateToWidth, visibleWidth, wrapTextWithAnsi } from "@mariozechner/pi-tui"; +import { type Theme } from "@gsd/pi-coding-agent"; +import { truncateToWidth, visibleWidth, wrapTextWithAnsi } from "@gsd/pi-tui"; // ─── Glyphs ─────────────────────────────────────────────────────────────────── // Change these to restyle every cursor, checkbox, and indicator at once. 
@@ -191,7 +191,7 @@ export interface UI { // ── Editor theme ────────────────────────────────────────────────────────── /** Standard EditorTheme object for use with the Editor component */ - editorTheme: import("@mariozechner/pi-tui").EditorTheme; + editorTheme: import("@gsd/pi-tui").EditorTheme; } /** @@ -215,7 +215,7 @@ export function makeUI(theme: Theme, width: number): UI { // ── EditorTheme ──────────────────────────────────────────────────────────── - const editorTheme: import("@mariozechner/pi-tui").EditorTheme = { + const editorTheme: import("@gsd/pi-tui").EditorTheme = { borderColor: (s) => theme.fg("accent", s), selectList: { selectedPrefix: (t) => theme.fg("accent", t), diff --git a/src/resources/extensions/shared/wizard-ui.ts b/src/resources/extensions/shared/wizard-ui.ts index ac3224131..ed0c95163 100644 --- a/src/resources/extensions/shared/wizard-ui.ts +++ b/src/resources/extensions/shared/wizard-ui.ts @@ -53,15 +53,15 @@ * const filePath = result["file_path"]?.["path"]; */ -import type { ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; -import { type Theme } from "@mariozechner/pi-coding-agent"; +import type { ExtensionCommandContext } from "@gsd/pi-coding-agent"; +import { type Theme } from "@gsd/pi-coding-agent"; import { Editor, Key, matchesKey, truncateToWidth, type TUI, -} from "@mariozechner/pi-tui"; +} from "@gsd/pi-tui"; import { makeUI } from "./ui.js"; // ─── Public types ───────────────────────────────────────────────────────────── @@ -158,7 +158,7 @@ export async function showWizard( // Editors keyed by fieldId — one per text field // editorTheme is derived from the design system at first render const editors = new Map(); - let resolvedEditorTheme: import("@mariozechner/pi-tui").EditorTheme | null = null; + let resolvedEditorTheme: import("@gsd/pi-tui").EditorTheme | null = null; function getEditor(fieldId: string): Editor { if (!resolvedEditorTheme) resolvedEditorTheme = makeUI(theme, 80).editorTheme; diff --git 
a/src/resources/extensions/slash-commands/audit.ts b/src/resources/extensions/slash-commands/audit.ts index 25c3495c1..b5f3bf85c 100644 --- a/src/resources/extensions/slash-commands/audit.ts +++ b/src/resources/extensions/slash-commands/audit.ts @@ -1,4 +1,4 @@ -import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI, ExtensionCommandContext } from "@gsd/pi-coding-agent"; export default function auditCommand(pi: ExtensionAPI) { pi.registerCommand("audit", { diff --git a/src/resources/extensions/slash-commands/clear.ts b/src/resources/extensions/slash-commands/clear.ts index 75e21f878..9f6bd5188 100644 --- a/src/resources/extensions/slash-commands/clear.ts +++ b/src/resources/extensions/slash-commands/clear.ts @@ -1,4 +1,4 @@ -import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI, ExtensionCommandContext } from "@gsd/pi-coding-agent"; export default function clearCommand(pi: ExtensionAPI) { pi.registerCommand("clear", { diff --git a/src/resources/extensions/slash-commands/create-extension.ts b/src/resources/extensions/slash-commands/create-extension.ts index 002529e4c..cbdb81c79 100644 --- a/src/resources/extensions/slash-commands/create-extension.ts +++ b/src/resources/extensions/slash-commands/create-extension.ts @@ -1,4 +1,4 @@ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; import { showInterviewRound, type Question, type RoundResult } from "../shared/interview-ui.js"; export default function createExtension(pi: ExtensionAPI) { @@ -281,7 +281,7 @@ Then register it in the main extensions index: ## Rules you must follow exactly - Extension entry point: \`export default function (pi: ExtensionAPI): void { ... 
}\` -- Import type: \`import type { ExtensionAPI, ExtensionContext, ExtensionCommandContext } from "@mariozechner/pi-coding-agent";\` +- Import type: \`import type { ExtensionAPI, ExtensionContext, ExtensionCommandContext } from "@gsd/pi-coding-agent";\` - \`pi\` is the registration surface — call \`pi.registerCommand\`, \`pi.registerTool\`, \`pi.on\`, \`pi.registerShortcut\` inside the default export - \`ctx\` (ExtensionCommandContext or ExtensionContext) is passed to handlers and event callbacks — never stored, never assumed available globally - To send a message to the agent: \`pi.sendUserMessage("...")\` or \`pi.sendMessage({ content, display }, { triggerTurn })\` diff --git a/src/resources/extensions/slash-commands/create-slash-command.ts b/src/resources/extensions/slash-commands/create-slash-command.ts index 5c6f77c1d..54bcb34d3 100644 --- a/src/resources/extensions/slash-commands/create-slash-command.ts +++ b/src/resources/extensions/slash-commands/create-slash-command.ts @@ -1,4 +1,4 @@ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; import { showInterviewRound, type Question, type RoundResult } from "../shared/interview-ui.js"; export default function createSlashCommand(pi: ExtensionAPI) { @@ -225,7 +225,7 @@ Rules you must follow exactly: - To show a text input dialog: \`await ctx.ui.input("prompt", "placeholder")\` — returns the string or null - \`pi\` is captured in closure from the outer \`export default function(pi: ExtensionAPI)\` — use it freely inside the handler - No \`ctx.session\`, no \`ctx.sendMessage\`, no \`args[]\` array — these do not exist -- Import type: \`import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent";\` +- Import type: \`import type { ExtensionAPI, ExtensionCommandContext } from "@gsd/pi-coding-agent";\` - Export default: \`export default function (pi: ExtensionAPI) { ... 
}\` After writing the files, run \`/reload\` to load the new command.`; diff --git a/src/resources/extensions/slash-commands/index.ts b/src/resources/extensions/slash-commands/index.ts index 52ab77bf4..5ea4db77c 100644 --- a/src/resources/extensions/slash-commands/index.ts +++ b/src/resources/extensions/slash-commands/index.ts @@ -1,4 +1,4 @@ -import type { ExtensionAPI } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI } from "@gsd/pi-coding-agent"; import createSlashCommand from "./create-slash-command.js"; import createExtension from "./create-extension.js"; import auditCommand from "./audit.js"; diff --git a/src/resources/extensions/subagent/agents.ts b/src/resources/extensions/subagent/agents.ts index 2ae320342..498ec31cc 100644 --- a/src/resources/extensions/subagent/agents.ts +++ b/src/resources/extensions/subagent/agents.ts @@ -4,7 +4,7 @@ import * as fs from "node:fs"; import * as path from "node:path"; -import { getAgentDir, parseFrontmatter } from "@mariozechner/pi-coding-agent"; +import { getAgentDir, parseFrontmatter } from "@gsd/pi-coding-agent"; export type AgentScope = "user" | "project" | "both"; diff --git a/src/resources/extensions/subagent/index.ts b/src/resources/extensions/subagent/index.ts index 6e9ca115f..c849744f5 100644 --- a/src/resources/extensions/subagent/index.ts +++ b/src/resources/extensions/subagent/index.ts @@ -16,11 +16,11 @@ import { spawn } from "node:child_process"; import * as fs from "node:fs"; import * as os from "node:os"; import * as path from "node:path"; -import type { AgentToolResult } from "@mariozechner/pi-agent-core"; -import type { Message } from "@mariozechner/pi-ai"; -import { StringEnum } from "@mariozechner/pi-ai"; -import { type ExtensionAPI, getMarkdownTheme } from "@mariozechner/pi-coding-agent"; -import { Container, Markdown, Spacer, Text } from "@mariozechner/pi-tui"; +import type { AgentToolResult } from "@gsd/pi-agent-core"; +import type { Message } from "@gsd/pi-ai"; +import { 
StringEnum } from "@gsd/pi-ai"; +import { type ExtensionAPI, getMarkdownTheme } from "@gsd/pi-coding-agent"; +import { Container, Markdown, Spacer, Text } from "@gsd/pi-tui"; import { Type } from "@sinclair/typebox"; import { type AgentConfig, type AgentScope, discoverAgents } from "./agents.js"; diff --git a/src/resources/extensions/voice/index.ts b/src/resources/extensions/voice/index.ts index 3184efb7c..4f997ffb9 100644 --- a/src/resources/extensions/voice/index.ts +++ b/src/resources/extensions/voice/index.ts @@ -1,7 +1,7 @@ -import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent"; +import type { ExtensionAPI, ExtensionContext } from "@gsd/pi-coding-agent"; import { shortcutDesc } from "../shared/terminal.js"; -import type { AssistantMessage } from "@mariozechner/pi-ai"; -import { isKeyRelease, Key, matchesKey, truncateToWidth, visibleWidth } from "@mariozechner/pi-tui"; +import type { AssistantMessage } from "@gsd/pi-ai"; +import { isKeyRelease, Key, matchesKey, truncateToWidth, visibleWidth } from "@gsd/pi-tui"; import { spawn, execSync, type ChildProcess } from "node:child_process"; import * as fs from "node:fs"; import * as path from "node:path"; diff --git a/src/tests/app-smoke.test.ts b/src/tests/app-smoke.test.ts index c71df182e..bb157aad7 100644 --- a/src/tests/app-smoke.test.ts +++ b/src/tests/app-smoke.test.ts @@ -154,7 +154,7 @@ test("initResources syncs extensions, agents, and AGENTS.md to target dir", asyn test("loadStoredEnvKeys hydrates process.env from auth.json", async () => { const { loadStoredEnvKeys } = await import("../wizard.ts"); - const { AuthStorage } = await import("@mariozechner/pi-coding-agent"); + const { AuthStorage } = await import("@gsd/pi-coding-agent"); const tmp = mkdtempSync(join(tmpdir(), "gsd-wizard-test-")); const authPath = join(tmp, "auth.json"); @@ -203,7 +203,7 @@ test("loadStoredEnvKeys hydrates process.env from auth.json", async () => { test("loadStoredEnvKeys does not overwrite existing 
env vars", async () => { const { loadStoredEnvKeys } = await import("../wizard.ts"); - const { AuthStorage } = await import("@mariozechner/pi-coding-agent"); + const { AuthStorage } = await import("@gsd/pi-coding-agent"); const tmp = mkdtempSync(join(tmpdir(), "gsd-wizard-nooverwrite-")); const authPath = join(tmp, "auth.json"); diff --git a/src/wizard.ts b/src/wizard.ts index a70138fd4..786d21ef8 100644 --- a/src/wizard.ts +++ b/src/wizard.ts @@ -1,4 +1,4 @@ -import type { AuthStorage } from '@mariozechner/pi-coding-agent' +import type { AuthStorage } from '@gsd/pi-coding-agent' // ─── Env hydration ────────────────────────────────────────────────────────────