From 7377a08d8ee3c909da2de9b83f4cad5078b499fe Mon Sep 17 00:00:00 2001 From: Tom Boucher Date: Tue, 17 Mar 2026 20:49:51 -0400 Subject: [PATCH 1/2] docs: add Node LTS pinning guide for macOS Homebrew users New doc (docs/node-lts-macos.md) explains how to pin Node 24 LTS via Homebrew to avoid running on odd-numbered development releases. Covers brew install/link/pin, version managers as alternatives, and verification steps. Added notice banner in README linking to the guide. --- README.md | 2 ++ docs/node-lts-macos.md | 75 ++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 77 insertions(+) create mode 100644 docs/node-lts-macos.md diff --git a/README.md b/README.md index 9063c68cb..869acfadb 100644 --- a/README.md +++ b/README.md @@ -18,6 +18,8 @@ One command. Walk away. Come back to a built project with clean git history.
npm install -g gsd-pi
+> **📋 NOTICE: New to Node on Mac?** If you installed Node.js via Homebrew, you may be running a development release instead of LTS. **[Read this guide](./docs/node-lts-macos.md)** to pin Node 24 LTS and avoid compatibility issues. + --- diff --git a/docs/node-lts-macos.md b/docs/node-lts-macos.md new file mode 100644 index 000000000..67582aec1 --- /dev/null +++ b/docs/node-lts-macos.md @@ -0,0 +1,75 @@ +# Pinning Node.js LTS on macOS with Homebrew + +If you installed Node.js via Homebrew (`brew install node`), you're tracking the **latest current release** — which can include odd-numbered development versions (e.g. 23.x, 25.x). These aren't LTS and may have breaking changes or instability. + +GSD requires Node.js **v22 or later** and works best on an **LTS (even-numbered) release**. This guide shows how to pin Node 24 LTS using Homebrew. + +## Check your current version + +```bash +node --version +``` + +If this shows an odd number (e.g. `v23.x`, `v25.x`), you're on a development release. + +## Install Node 24 LTS + +Homebrew provides versioned formulas for LTS releases: + +```bash +# Unlink the current (possibly non-LTS) version +brew unlink node + +# Link it as the default (versioned Node formulas are keg-only, so --force is needed) +brew install node@24 + +# Link it as the default +brew link --overwrite --force node@24 +``` + +Verify: + +```bash +node --version +# Should show v24.x.x +``` + +## Why pin to LTS? + +- **Stability** — LTS releases receive bug fixes and security patches for 30 months +- **Compatibility** — npm packages (including GSD) test against LTS versions +- **No surprises** — `brew upgrade` won't jump you to an unstable development release + +## Prevent accidental upgrades + +By default, `brew upgrade` upgrades all installed formulas, including `node@24`. To hold it at its current version, pin the formula: + +```bash +brew pin node@24 +``` + +To unpin later: + +```bash +brew unpin node@24 +``` + +## Switching between versions + +If you need multiple Node versions (e.g. 
22 and 24), consider using a version manager instead: + +- **[nvm](https://github.com/nvm-sh/nvm)** — `nvm install 24 && nvm use 24` +- **[fnm](https://github.com/Schniz/fnm)** — `fnm install 24 && fnm use 24` (faster, Rust-based) +- **[mise](https://mise.jdx.dev/)** — `mise use node@24` (polyglot version manager) + +These let you set per-project Node versions via `.node-version` or `.nvmrc` files. + +## Verify GSD works + +After pinning: + +```bash +node --version # v24.x.x +npm install -g gsd-pi +gsd --version +``` From d252168de5f2defc366bd003c3dd60b2bca63290 Mon Sep 17 00:00:00 2001 From: Tom Boucher Date: Tue, 17 Mar 2026 21:11:18 -0400 Subject: [PATCH 2/2] fix: switch alibaba-coding-plan to OpenAI-compat endpoint with proper compat flags (#1003) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The alibaba-coding-plan provider was using the Anthropic-compatible endpoint (/apps/anthropic) with anthropic-messages API, which caused issues with thinking mode on several models (MiniMax-M2.5 thinking loop, missing thinkingFormat for Qwen/GLM models). 
Changes for all 8 models: - API: anthropic-messages → openai-completions - Endpoint: /apps/anthropic → /v1 (OpenAI-compatible) - Added per-model compat flags: - Qwen models: thinkingFormat: 'qwen', supportsDeveloperRole: false - GLM models: thinkingFormat: 'qwen', supportsDeveloperRole: false - MiniMax-M2.5: supportsReasoningEffort: true, maxTokensField: 'max_tokens' - Kimi K2.5: thinkingFormat: 'zai', supportsDeveloperRole: false - Enabled reasoning for qwen3-max (was incorrectly false) - Fixed context windows to match tested values - Fixed MiniMax-M2.5 maxTokens: 24576 → 65536 --- packages/pi-ai/src/models.generated.ts | 76 ++++++++++++++------------ 1 file changed, 42 insertions(+), 34 deletions(-) diff --git a/packages/pi-ai/src/models.generated.ts b/packages/pi-ai/src/models.generated.ts index 85eb1fa85..fe3112ede 100644 --- a/packages/pi-ai/src/models.generated.ts +++ b/packages/pi-ai/src/models.generated.ts @@ -13388,27 +13388,28 @@ export const MODELS = { "qwen3.5-plus": { id: "qwen3.5-plus", name: "Qwen3.5 Plus", - api: "anthropic-messages", + api: "openai-completions", provider: "alibaba-coding-plan", - baseUrl: "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic", + baseUrl: "https://coding-intl.dashscope.aliyuncs.com/v1", reasoning: true, - input: ["text", "image"], + input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, }, - contextWindow: 1000000, + contextWindow: 983616, maxTokens: 65536, - } satisfies Model<"anthropic-messages">, + compat: { thinkingFormat: "qwen", supportsDeveloperRole: false }, + } satisfies Model<"openai-completions">, "qwen3-max-2026-01-23": { id: "qwen3-max-2026-01-23", name: "Qwen3 Max 2026-01-23", - api: "anthropic-messages", + api: "openai-completions", provider: "alibaba-coding-plan", - baseUrl: "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic", - reasoning: false, + baseUrl: "https://coding-intl.dashscope.aliyuncs.com/v1", + reasoning: true, input: ["text"], cost: { input: 0, @@ 
-13416,15 +13417,16 @@ export const MODELS = { cacheRead: 0, cacheWrite: 0, }, - contextWindow: 262144, + contextWindow: 258048, maxTokens: 32768, - } satisfies Model<"anthropic-messages">, + compat: { thinkingFormat: "qwen", supportsDeveloperRole: false }, + } satisfies Model<"openai-completions">, "qwen3-coder-next": { id: "qwen3-coder-next", name: "Qwen3 Coder Next", - api: "anthropic-messages", + api: "openai-completions", provider: "alibaba-coding-plan", - baseUrl: "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic", + baseUrl: "https://coding-intl.dashscope.aliyuncs.com/v1", reasoning: false, input: ["text"], cost: { @@ -13433,15 +13435,16 @@ export const MODELS = { cacheRead: 0, cacheWrite: 0, }, - contextWindow: 262144, + contextWindow: 204800, maxTokens: 65536, - } satisfies Model<"anthropic-messages">, + compat: { supportsDeveloperRole: false }, + } satisfies Model<"openai-completions">, "qwen3-coder-plus": { id: "qwen3-coder-plus", name: "Qwen3 Coder Plus", - api: "anthropic-messages", + api: "openai-completions", provider: "alibaba-coding-plan", - baseUrl: "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic", + baseUrl: "https://coding-intl.dashscope.aliyuncs.com/v1", reasoning: false, input: ["text"], cost: { @@ -13450,15 +13453,16 @@ export const MODELS = { cacheRead: 0, cacheWrite: 0, }, - contextWindow: 1000000, + contextWindow: 997952, maxTokens: 65536, - } satisfies Model<"anthropic-messages">, + compat: { supportsDeveloperRole: false }, + } satisfies Model<"openai-completions">, "MiniMax-M2.5": { id: "MiniMax-M2.5", name: "MiniMax M2.5", - api: "anthropic-messages", + api: "openai-completions", provider: "alibaba-coding-plan", - baseUrl: "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic", + baseUrl: "https://coding-intl.dashscope.aliyuncs.com/v1", reasoning: true, input: ["text"], cost: { @@ -13468,14 +13472,15 @@ export const MODELS = { cacheWrite: 0, }, contextWindow: 196608, - maxTokens: 24576, - } satisfies 
Model<"anthropic-messages">, + maxTokens: 65536, + compat: { supportsStore: false, supportsDeveloperRole: false, supportsReasoningEffort: true, maxTokensField: "max_tokens" }, + } satisfies Model<"openai-completions">, "glm-5": { id: "glm-5", name: "GLM-5", - api: "anthropic-messages", + api: "openai-completions", provider: "alibaba-coding-plan", - baseUrl: "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic", + baseUrl: "https://coding-intl.dashscope.aliyuncs.com/v1", reasoning: true, input: ["text"], cost: { @@ -13486,13 +13491,14 @@ export const MODELS = { }, contextWindow: 202752, maxTokens: 16384, - } satisfies Model<"anthropic-messages">, + compat: { thinkingFormat: "qwen", supportsDeveloperRole: false }, + } satisfies Model<"openai-completions">, "glm-4.7": { id: "glm-4.7", name: "GLM-4.7", - api: "anthropic-messages", + api: "openai-completions", provider: "alibaba-coding-plan", - baseUrl: "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic", + baseUrl: "https://coding-intl.dashscope.aliyuncs.com/v1", reasoning: true, input: ["text"], cost: { @@ -13501,26 +13507,28 @@ export const MODELS = { cacheRead: 0, cacheWrite: 0, }, - contextWindow: 202752, + contextWindow: 169984, maxTokens: 16384, - } satisfies Model<"anthropic-messages">, + compat: { thinkingFormat: "qwen", supportsDeveloperRole: false }, + } satisfies Model<"openai-completions">, "kimi-k2.5": { id: "kimi-k2.5", name: "Kimi K2.5", - api: "anthropic-messages", + api: "openai-completions", provider: "alibaba-coding-plan", - baseUrl: "https://coding-intl.dashscope.aliyuncs.com/apps/anthropic", + baseUrl: "https://coding-intl.dashscope.aliyuncs.com/v1", reasoning: true, - input: ["text", "image"], + input: ["text"], cost: { input: 0, output: 0, cacheRead: 0, cacheWrite: 0, }, - contextWindow: 262144, + contextWindow: 258048, maxTokens: 32768, - } satisfies Model<"anthropic-messages">, + compat: { thinkingFormat: "zai", supportsDeveloperRole: false }, + } satisfies 
Model<"openai-completions">, }, "ollama-cloud": { "cogito-2.1:671b": {