merge: resolve conflict with upstream/main — combine next + remote subcommands
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
commit
f5c7dd1664
45 changed files with 3928 additions and 216 deletions
223
.claude/commands/publish-version.md
Normal file
223
.claude/commands/publish-version.md
Normal file
|
|
@ -0,0 +1,223 @@
|
|||
---
|
||||
description: Publish GSD updates to npm and GitHub
|
||||
---
|
||||
|
||||
Publish GSD updates with automatic changelog generation.
|
||||
|
||||
<process>
|
||||
|
||||
<step name="check_uncommitted">
|
||||
## 1. Check for Uncommitted Changes
|
||||
|
||||
```bash
|
||||
git status --short
|
||||
```
|
||||
|
||||
If uncommitted changes exist:
|
||||
- Ask: "Uncommitted changes detected. What commit message should I use?"
|
||||
- Commit with provided message
|
||||
- Continue to next step
|
||||
</step>
|
||||
|
||||
<step name="get_commits_since_tag">
|
||||
## 2. Get Commits Since Last Version
|
||||
|
||||
```bash
|
||||
LAST_TAG=$(git describe --tags --abbrev=0 2>/dev/null || echo "")
|
||||
if [ -n "$LAST_TAG" ]; then
|
||||
git log ${LAST_TAG}..HEAD --oneline --no-merges
|
||||
else
|
||||
echo "No previous tags found"
|
||||
fi
|
||||
```
|
||||
|
||||
Capture the commit list for changelog generation.
|
||||
</step>
|
||||
|
||||
<step name="check_docs">
|
||||
## 3. Check Documentation Currency
|
||||
|
||||
Review the commits captured above and check if README.md needs updates.
|
||||
|
||||
**Check for commits that require README updates:**
|
||||
- New commands or features
|
||||
- Changed command behavior or flags
|
||||
- New configuration options
|
||||
- New workflows or processes
|
||||
- Deprecations or removals
|
||||
|
||||
**Review README.md against commits:**
|
||||
1. Read README.md
|
||||
2. For each significant commit, verify the feature/change is documented
|
||||
3. Check command tables match actual commands
|
||||
4. Check configuration tables match actual options
|
||||
|
||||
**If updates needed:**
|
||||
1. Draft specific README changes
|
||||
2. Present changes for approval
|
||||
3. Apply approved changes
|
||||
4. Commit: `git add README.md && git commit -m "docs: update README for vX.Y.Z"`
|
||||
|
||||
**If no updates needed:**
|
||||
- State: "README is current with all changes"
|
||||
- Continue to next step
|
||||
</step>
|
||||
|
||||
<step name="generate_changelog_draft">
|
||||
## 4. Generate Changelog Entry Draft
|
||||
|
||||
Analyze the commits and draft a curated changelog entry.
|
||||
|
||||
**Grouping rules:**
|
||||
- **Added** — New features, commands, capabilities
|
||||
- **Changed** — Modifications to existing behavior
|
||||
- **Fixed** — Bug fixes
|
||||
- **Removed** — Deprecated/removed features
|
||||
- **BREAKING:** prefix for breaking changes
|
||||
|
||||
**Writing rules:**
|
||||
- Write human-readable descriptions, not raw commit messages
|
||||
- Focus on user impact, not implementation details
|
||||
- Group related commits into single entries
|
||||
- Flag breaking changes prominently with **BREAKING:** prefix
|
||||
|
||||
**Example draft:**
|
||||
```markdown
|
||||
## [X.Y.Z] - YYYY-MM-DD
|
||||
|
||||
### Added
|
||||
- New `/gsd:whats-new` command for version awareness
|
||||
|
||||
### Changed
|
||||
- Improved parallel execution performance
|
||||
|
||||
### Fixed
|
||||
- STATE.md progress bar calculation
|
||||
|
||||
### Removed
|
||||
- **BREAKING:** Removed deprecated ISSUES.md system
|
||||
```
|
||||
|
||||
Present the draft for review.
|
||||
</step>
|
||||
|
||||
<step name="checkpoint_review" type="checkpoint:human-verify">
|
||||
## 5. Review Changelog Draft
|
||||
|
||||
**Drafted changelog entry:**
|
||||
[Show the generated draft]
|
||||
|
||||
**Verify:**
|
||||
1. Categories are correct (Added/Changed/Fixed/Removed)
|
||||
2. Descriptions are clear and user-focused
|
||||
3. Breaking changes are marked with **BREAKING:** prefix
|
||||
4. Nothing important is missing from commits
|
||||
|
||||
**Resume signal:** Type "approved" or provide edits
|
||||
</step>
|
||||
|
||||
<step name="update_changelog">
|
||||
## 6. Update CHANGELOG.md
|
||||
|
||||
After approval:
|
||||
|
||||
1. **Read current CHANGELOG.md**
|
||||
2. **Insert new version section** after [Unreleased] header
|
||||
3. **Update version links** at bottom:
|
||||
- Add new version link: `[X.Y.Z]: https://github.com/gsd-build/gsd-2/releases/tag/vX.Y.Z`
|
||||
- Update [Unreleased] comparison: `[Unreleased]: https://github.com/gsd-build/gsd-2/compare/vX.Y.Z...HEAD`
|
||||
|
||||
```bash
|
||||
# Stage changelog
|
||||
git add CHANGELOG.md
|
||||
git commit -m "docs: update changelog for vX.Y.Z"
|
||||
```
|
||||
</step>
|
||||
|
||||
<step name="version_bump">
|
||||
## 7. Bump Version
|
||||
|
||||
Ask which version bump type:
|
||||
- `npm version patch` — Bug fixes (default)
|
||||
- `npm version minor` — New features
|
||||
- `npm version major` — Breaking changes
|
||||
- `npm version prerelease --preid=alpha` — Experimental features
|
||||
|
||||
```bash
|
||||
npm version patch # or minor/major/prerelease
|
||||
```
|
||||
|
||||
This creates a version commit and tag.
|
||||
</step>
|
||||
|
||||
<step name="push_and_publish">
|
||||
## 8. Push and Publish
|
||||
|
||||
```bash
|
||||
git push && git push --tags
|
||||
```
|
||||
|
||||
Then publish to npm:
|
||||
|
||||
```bash
|
||||
npm publish --access public
|
||||
```
|
||||
|
||||
Verify the publish succeeded by checking the output for the package URL.
|
||||
</step>
|
||||
|
||||
<step name="create_github_release">
|
||||
## 9. Create GitHub Release
|
||||
|
||||
Create a GitHub Release from the tag.
|
||||
|
||||
```bash
|
||||
gh release create vX.Y.Z --title "vX.Y.Z" --notes "[changelog content]" --latest
|
||||
```
|
||||
|
||||
Use the approved changelog content as the release notes.
|
||||
</step>
|
||||
|
||||
<step name="post_discord">
|
||||
## 10. Post to Discord Changelog
|
||||
|
||||
Post the changelog entry to the GSD Discord community.
|
||||
|
||||
Use the Discord MCP server:
|
||||
```
|
||||
discord_execute("messages.send", {
|
||||
"channel_id": "1464128246290579469",
|
||||
"content": "**vX.Y.Z Released** \n\n[changelog content here]\n\nInstall/upgrade: `npx gsd-pi@latest`"
|
||||
})
|
||||
```
|
||||
|
||||
Format the message with:
|
||||
- Version number as header
|
||||
- The approved changelog content (Added/Changed/Fixed/Removed sections)
|
||||
- Install command at the bottom
|
||||
</step>
|
||||
|
||||
<step name="report">
|
||||
## 11. Report Success
|
||||
|
||||
```
|
||||
Published vX.Y.Z
|
||||
|
||||
- npm: https://www.npmjs.com/package/gsd-pi
|
||||
- GitHub: https://github.com/gsd-build/gsd-2/releases/tag/vX.Y.Z
|
||||
```
|
||||
</step>
|
||||
|
||||
</process>
|
||||
|
||||
<success_criteria>
|
||||
- README.md checked against commits and updated if needed
|
||||
- Changelog entry drafted from commits
|
||||
- User reviewed and approved entry
|
||||
- CHANGELOG.md updated and committed
|
||||
- Version bumped via npm version
|
||||
- Pushed to GitHub with tags
|
||||
- Published to npm via `npm publish`
|
||||
- GitHub Release created with `gh release create`
|
||||
- Changelog posted to Discord #changelog channel
|
||||
</success_criteria>
|
||||
1
.github/FUNDING.yml
vendored
Normal file
1
.github/FUNDING.yml
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
github: glittercowboy
|
||||
12
.gitignore
vendored
12
.gitignore
vendored
|
|
@ -33,4 +33,14 @@ dist/
|
|||
.bg_shell
|
||||
.gsd*.tgz
|
||||
.artifacts/
|
||||
AGENTS.md
|
||||
AGENTS.md
|
||||
.bg-shell/
|
||||
TODOS.md
|
||||
|
||||
# ── GSD baseline (auto-generated) ──
|
||||
.gsd/activity/
|
||||
.gsd/runtime/
|
||||
.gsd/worktrees/
|
||||
.gsd/auto.lock
|
||||
.gsd/metrics.json
|
||||
.gsd/STATE.md
|
||||
|
|
|
|||
33
.gsd/DECISIONS.md
Normal file
33
.gsd/DECISIONS.md
Normal file
|
|
@ -0,0 +1,33 @@
|
|||
# Decisions Register
|
||||
|
||||
<!-- Append-only. Never edit or remove existing rows.
|
||||
To reverse a decision, add a new row that supersedes it.
|
||||
Read this file at the start of any planning or research phase. -->
|
||||
|
||||
| # | When | Scope | Decision | Choice | Rationale | Revisable? |
|
||||
|---|------|-------|----------|--------|-----------|------------|
|
||||
| D001 | M001 | arch | Embedding strategy | SDK (`createAgentSession` + `InteractiveMode`) | Type-safe, no subprocess management, full control over storage/resources, cleanest branded app path per pi docs | No |
|
||||
| D002 | M001 | arch | State storage location | `~/.gsd/` (agent: `~/.gsd/agent/`, sessions: `~/.gsd/sessions/`) | Complete isolation from `~/.pi/`, clear brand identity, follows pi doc recommendation for branded apps | No |
|
||||
| D003 | M001 | arch | Branding mechanism | `PI_PACKAGE_DIR` env var set before pi internals load, pointing to gsd package root; gsd `package.json` declares `piConfig: { name: "gsd", configDir: ".gsd" }` | `config.js` reads `APP_NAME` from `piConfig.name` in the package.json found at `PI_PACKAGE_DIR`. Only mechanism that renames the TUI header without patching pi source. | Yes — if pi adds a dedicated `createAgentSession` appName option |
|
||||
| D004 | M001 | arch | Extension delivery | Copy extension `.ts` source into `src/resources/extensions/` at dev time; load via `DefaultResourceLoader.additionalExtensionPaths`; pi's jiti handles JIT compilation at runtime | Preserves pi's JIT compilation model, no separate build step for extensions, extensions stay readable source | Yes — if extension count grows large enough to warrant pre-compilation |
|
||||
| D005 | M001 | scope | Skills in M001 | Excluded — extensions only | User decision during discussion | Yes — M002 candidate |
|
||||
| D006 | M001 | scope | Plugin/install system | Deferred | Not MVP; bundled-only product for M001 | Yes — M002 candidate |
|
||||
| D007 | M001 | arch | pi interop | None — GSD never reads or writes `~/.pi/` | GSD is a product, not a pi config. Interop would blur the brand boundary. | No |
|
||||
| D008 | M001/S01 | verification | S01 verification strategy | Shell commands + real TTY launch (no test framework) | S01 is a pure binary launch / TUI branding check. The only meaningful assertion is whether the binary launches with "gsd" in the header — no unit-testable logic to isolate. Shell verification commands cover all must-haves. Test framework deferred to S02+ if needed. | Yes — add test framework in S02 if extension loading logic warrants it |
|
||||
| D009 | M001/S01 | arch | `files` array in package.json | Set in T03 during S01 (`["dist", "package.json", "README.md"]`) | Correct npm publish manifest must be in place before S04 pack/publish. Setting it early avoids a late-stage surprise. | No |
|
||||
| D010 | M001/S01/T02 | impl | ModelRegistry instantiation | Constructor `new ModelRegistry(authStorage)` — not a static factory | SDK types show no `.create()` on ModelRegistry; authStorage is passed directly to constructor. All other managers (AuthStorage, SettingsManager, SessionManager) use static `.create()` but synchronously. | No |
|
||||
| D011 | M001/S01/T02 | impl | InteractiveMode.run() | Instance method: `new InteractiveMode(session); mode.run()` — not static | SDK type declarations confirm `run()` is an instance method; static call would fail at runtime. | No |
|
||||
| D012 | M001/S01/T02 | impl | skipLibCheck in tsconfig | `skipLibCheck: true` added | `@google/genai` published types reference `@modelcontextprotocol/sdk` which is not installed as a type dep — causes transitive TS2307 error unrelated to gsd code. skipLibCheck is the standard fix for third-party type declaration issues. | Yes — remove if MCP types are added as a dep in the future |
|
||||
| D013 | M001/S01/T03 | arch | `PI_PACKAGE_DIR` shim directory (`pkg/`) | Added `pkg/` dir with `package.json` (piConfig) + `dist/modes/interactive/theme/` (pi theme JSONs) as the `PI_PACKAGE_DIR` target | `config.js::getThemesDir()` uses `getPackageDir()` (= PI_PACKAGE_DIR) and checks if `<dir>/src` exists; if yes, uses `src/modes/interactive/theme/` instead of `dist/`. Our project has a real `src/` dir, causing themes to resolve to the wrong path. Pointing PI_PACKAGE_DIR at `pkg/` (which has no `src/`) avoids the collision while still providing `piConfig` for branding. `pkg/dist/modes/interactive/theme/` is populated by `npm run copy-themes` (build script). | Yes — if pi adds a dedicated `appName` option to createAgentSession making PI_PACKAGE_DIR unnecessary |
|
||||
| D014 | M001/S02 | verification | S02 verification strategy | Shell commands + real TTY launch with stderr capture, no test framework | Extension loading is a runtime integration concern — no unit-testable logic to isolate. The meaningful assertions are: zero extension errors in stderr on launch, correct env vars in compiled loader.js, absence of `~/.pi/` refs in patched files. Shell commands cover all must-haves. Test framework deferred per D008. | Yes — add test framework if extension loading logic grows complex |
|
||||
| D015 | M001/S02 | arch | subagent spawn approach | `spawn(process.execPath, [GSD_BIN_PATH, ...extensionArgs, ...args])` — no `pi` binary in PATH | Patched subagent spawns node directly with the gsd dist/loader.js entrypoint. This ensures spawned subagents always use the bundled gsd extensions, regardless of what `pi` is in PATH. `GSD_BIN_PATH` = `process.argv[1]` from loader.ts. | Yes — if pi adds a native subagent spawn API |
|
||||
| D016 | M001/S02 | arch | shared/ is a library, not an extension entry point | `shared/` is NOT added to `additionalExtensionPaths` | `shared/ui.ts`, `shared/next-action-ui.ts` etc. are cross-extension imports, not independently registered extensions. They are discovered by jiti when gsd and ask-user-questions imports them via `../shared/*.js`. Adding shared/ as an extension entry point would attempt to register it as an extension (which it isn't). | No |
|
||||
| D017 | M001/S02 | arch | AGENTS.md first-run write | `initResources()` writes bundled AGENTS.md to `~/.gsd/agent/AGENTS.md` on first launch | pi's `loadProjectContextFiles` discovers AGENTS.md from `agentDir` (`~/.gsd/agent/`). On fresh install this file doesn't exist. One-time write on launch (behind existsSync check) ensures spawned subagents always pick up GSD's hard rules and execution heuristics. | No |
|
||||
| D018 | M001/S03 | arch | Wizard injection point | Pre-session: before `createAgentSession()`, not via `session_start` event hook | Running wizard before `createAgentSession()` ensures Anthropic key is in `authStorage` before `modelRegistry.getAvailable()` runs — avoids "No models available" fallback warning. S01 forward intelligence mentioned session_start hook; pre-session approach is strictly better because the session starts clean with a valid model. | Yes — if pi adds a native `beforeStart` or `authMissing` hook to `createAgentSession` |
|
||||
| D019 | M001/S03 | verification | S03 verification strategy | Shell script (`scripts/verify-s03.sh`) for automated non-TTY/skip checks + interactive UAT for masked input and TUI launch | Wizard involves TTY interaction that cannot be meaningfully automated (masked stdin, TUI launch). Automated shell script covers all non-interactive assertions (exit codes, error text, env hydration). Interactive UAT covers the remaining visual/interactive behaviors. No test framework added — consistent with D008/D014. | Yes — add test framework if wizard logic grows complex |
|
||||
| D020 | M001/S03 | arch | Wizard scope | Optional tool keys only (Brave/Context7/Jina) — Anthropic auth is pi's responsibility via OAuth | Wizard collecting Anthropic key was redundant (pi already handles it) and interfered with verify script automation. Optional-key scope satisfies R006. | Yes — if pi adds a native "no Anthropic key" callback hook |
|
||||
| D021 | M001/S04 | arch | GSD_BUNDLED_EXTENSION_PATHS target | agentDir-based paths, not src/resources paths | When subagent spawns a child gsd process via --extension flags, the child also runs initResources + buildResourceLoader from agentDir. src/resources paths ≠ agentDir paths → pi deduplication fails → duplicate tool registration errors. Pointing to agentDir paths means both the --extension args and agentDir scan resolve identically → deduplication works. Safe because subagent spawning only happens after initResources has synced on first launch. | No |
|
||||
| D022 | M001/S04 | verification | S04 verification strategy | 10-check `scripts/verify-s04.sh` for tarball install path; registry publish check automated; interactive UAT for wizard fire from clean install | Tarball install + launch is automatable (env isolation, background kill). Registry install check is automatable (prefix install + stderr check). Wizard TTY interaction is UAT-only. Consistent with D008/D014/D019 — shell scripts, no test framework. | Yes — add test framework if automated E2E is needed later |
|
||||
| D023 | M003 | arch | Test flow execution model | Intent-based YAML specs, not deterministic scripts — agent interprets verify blocks with full adaptive intelligence | Evaluated Maestro (JVM dep, deterministic scripting, mobile-first) and decided against embedding or cloning it. GSD's advantage is AI-in-the-loop. Flows describe what to verify; the agent decides how. Faster iteration, better flakiness handling, plays to GSD's strength. | Yes — could add deterministic fast-path for simple assertions later |
|
||||
| D024 | M003 | arch | Test browser isolation | test-flows runs its own Playwright instance, separate from browser-tools | Test execution must not be polluted by development browser state (cookies, auth, DOM mutations). Two Playwright instances in one process is supported. Keeps test-flows extension fully decoupled from browser-tools. | No |
|
||||
| D025 | M003 | arch | Maestro integration | Not embedded — optional external tool if user installs it | Maestro requires JVM, adds ~200MB+ footprint, its YAML format is deterministic scripts not intent specs. GSD builds its own testing arm. Maestro MCP could be wired in later as an optional extension for users who want it. | Yes — could add maestro MCP wrapper extension later |
|
||||
36
.gsd/PROJECT.md
Normal file
36
.gsd/PROJECT.md
Normal file
|
|
@ -0,0 +1,36 @@
|
|||
# Project
|
||||
|
||||
## What This Is
|
||||
|
||||
GSD 2.0 is a branded npm CLI (`npm install -g gsd-pi`) that ships the full GSD coding agent experience as a standalone product. It embeds `@mariozechner/pi-coding-agent` via SDK, stores state in `~/.gsd/`, bundles the GSD extension, all supporting extensions, agents, and AGENTS.md context, and runs pi's `InteractiveMode` under the `gsd` brand. Users run `gsd` — not `pi`.
|
||||
|
||||
## Core Value
|
||||
|
||||
A single `npm install -g gsd-pi` gives any developer a fully configured, GSD-branded coding agent with the GSD extension, all supporting tools (browser, search, context7, subagent, bg-shell, etc.), and a first-run setup wizard that collects API keys — ready to use in under two minutes.
|
||||
|
||||
## Current State
|
||||
|
||||
M001/S01, S02, and S03 complete. `gsd` binary compiles and launches with "gsd" TUI branding. All 11 bundled extensions load without errors. State goes to `~/.gsd/`. `~/.pi/` is untouched. AGENTS.md auto-deployed to `~/.gsd/agent/` on first launch. First-run wizard fires for missing optional keys (Brave/Context7/Jina), stores them with masked input, and skips on subsequent launches. Only S04 (npm publish and install smoke test) remains.
|
||||
|
||||
Key structural artifact: `pkg/` shim directory — `PI_PACKAGE_DIR` points here (not project root) to avoid pi's `getThemesDir()` collision with our real `src/` dir. Committed; `pkg/dist/modes/interactive/theme/` populated by `npm run copy-themes` at build time.
|
||||
|
||||
## Architecture / Key Patterns
|
||||
|
||||
- **SDK embedding**: `@mariozechner/pi-coding-agent` imported as a library via `createAgentSession` + `InteractiveMode`
|
||||
- **Branded app directories**: state lives in `~/.gsd/agent/`, sessions in `~/.gsd/sessions/` (constants in `src/app-paths.ts`)
|
||||
- **Branding via `PI_PACKAGE_DIR`**: env var set in `src/loader.ts` before any pi SDK loads; points to `pkg/` shim; `pkg/package.json` declares `piConfig: { name: "gsd", configDir: ".gsd" }`
|
||||
- **Two-file loader pattern**: `loader.ts` (sets env vars, zero SDK imports, dynamic-imports `cli.js`) → `cli.ts` (static SDK imports, wires all managers)
|
||||
- **pkg/ shim**: lean subdirectory — only `package.json` (piConfig) and `dist/modes/interactive/theme/` (pi theme assets). No `src/`. Avoids `getThemesDir()` src-check collision.
|
||||
- **Bundled extensions**: GSD extension + 10 supporting extensions in `src/resources/extensions/`; loaded via `buildResourceLoader()` → `DefaultResourceLoader.additionalExtensionPaths`; all 11 load clean on launch
|
||||
- **Bundled agents + AGENTS.md**: scout, researcher, worker in `src/resources/agents/`; `initResources()` writes bundled AGENTS.md to `~/.gsd/agent/` on first launch (existsSync guard)
|
||||
- **4 GSD_ env vars**: set in loader.ts before cli.js loads — `GSD_CODING_AGENT_DIR`, `GSD_BIN_PATH`, `GSD_WORKFLOW_PATH`, `GSD_BUNDLED_EXTENSION_PATHS`
|
||||
- **First-run wizard**: `src/wizard.ts` — detects missing optional keys (Brave/Context7/Jina), prompts with masked TTY input, writes to `~/.gsd/agent/auth.json`; `loadStoredEnvKeys` hydrates env on every launch before extensions load
|
||||
|
||||
## Capability Contract
|
||||
|
||||
See `.gsd/REQUIREMENTS.md` for the explicit capability contract, requirement status, and coverage mapping.
|
||||
|
||||
## Milestone Sequence
|
||||
|
||||
- [ ] M001: MVP CLI — `npm install -g gsd-pi` installs, launches, and runs with all bundled extensions and first-run setup
|
||||
- [ ] M003: AI-Driven Test Flows — intent-based YAML test specs the agent writes during development and executes autonomously at UAT time (browser, mac, api targets)
|
||||
7
.gsd/QUEUE.md
Normal file
7
.gsd/QUEUE.md
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
# Queue
|
||||
|
||||
<!-- Append-only log of queued milestones. -->
|
||||
|
||||
| # | Queued | Milestone | Title | Depends On | Notes |
|
||||
|---|--------|-----------|-------|------------|-------|
|
||||
| 1 | 2026-03-11 | M003 | AI-Driven Test Flows | M001 (bundled extension infrastructure) | Intent-based YAML test specs — browser, mac, api targets — with flow-driven UAT type for autonomous execution at slice completion |
|
||||
205
.gsd/REQUIREMENTS.md
Normal file
205
.gsd/REQUIREMENTS.md
Normal file
|
|
@ -0,0 +1,205 @@
|
|||
# Requirements
|
||||
|
||||
This file is the explicit capability and coverage contract for GSD 2.0.
|
||||
|
||||
## Active
|
||||
|
||||
### R001 — Single-command install
|
||||
|
||||
- Class: primary-user-loop
|
||||
- Status: validated
|
||||
- Description: `npm install -g gsd-pi` installs the gsd CLI and all bundled resources in a single command with no additional manual steps required
|
||||
- Why it matters: The whole product promise is zero-friction install. If install requires manual steps, the product fails its core pitch.
|
||||
- Source: user
|
||||
- Primary owning slice: M001/S01
|
||||
- Supporting slices: M001/S04
|
||||
- Validation: S04 — npm install -g gsd-pi from registry installs working binary; zero extension load errors; R001 fully validated
|
||||
|
||||
### R002 — Branded identity
|
||||
|
||||
- Class: differentiator
|
||||
- Status: validated
|
||||
- Description: The CLI is named `gsd`, state lives in `~/.gsd/`, the TUI header shows "gsd", and no pi branding is visible to the user in normal operation
|
||||
- Why it matters: GSD 2.0 is a product, not a pi config. Users should experience a coherent branded tool.
|
||||
- Source: user
|
||||
- Primary owning slice: M001/S01
|
||||
- Supporting slices: none
|
||||
- Validation: S01 — TUI header confirmed "gsd" via live runtime launch; piConfig.name=gsd, piConfig.configDir=.gsd verified; ~/.gsd/ confirmed created
|
||||
|
||||
### R003 — Bundled GSD extension
|
||||
|
||||
- Class: core-capability
|
||||
- Status: validated
|
||||
- Description: The `/gsd` command, auto-mode, GSD dashboard (Ctrl+Alt+G), and all GSD workflow commands work out of the box with no additional configuration
|
||||
- Why it matters: The GSD extension is the primary reason users install this tool.
|
||||
- Source: user
|
||||
- Primary owning slice: M001/S02
|
||||
- Supporting slices: none
|
||||
- Validation: S02 — gsd extension loads without errors on launch (zero stderr extension errors confirmed); interactive /gsd command use deferred to S04 UAT
|
||||
|
||||
### R004 — Bundled supporting extensions
|
||||
|
||||
- Class: core-capability
|
||||
- Status: validated
|
||||
- Description: All extensions from `~/.pi/agent/extensions/` ship bundled: browser-tools, search-the-web, context7, subagent, bg-shell, worktree, plan-mode, slash-commands, ask-user-questions, get-secrets-from-user
|
||||
- Why it matters: These extensions are what make the agent useful as a coding agent. GSD without browser tools, web search, and subagent is significantly less capable.
|
||||
- Source: user
|
||||
- Primary owning slice: M001/S02
|
||||
- Supporting slices: none
|
||||
- Validation: S02 — all 10 supporting extensions load without errors (zero stderr extension errors on launch); functional tool use (browser launch, web search) deferred to S04 UAT
|
||||
|
||||
### R005 — Bundled agents and AGENTS.md
|
||||
|
||||
- Class: core-capability
|
||||
- Status: validated
|
||||
- Description: The scout, researcher, and worker agents are bundled and available. The AGENTS.md hard rules and execution heuristics are loaded as the default agent context.
|
||||
- Why it matters: Agents and AGENTS.md define how the model behaves. Without them, subagent delegation and model discipline don't work.
|
||||
- Source: user
|
||||
- Primary owning slice: M001/S02
|
||||
- Supporting slices: none
|
||||
- Validation: S02 — scout.md, researcher.md, worker.md present in src/resources/agents/; AGENTS.md (15,070 bytes) written to ~/.gsd/agent/ on first launch via initResources()
|
||||
|
||||
### R006 — First-run setup wizard
|
||||
|
||||
- Class: launchability
|
||||
- Status: validated
|
||||
- Description: On first run, if optional tool API keys (Brave, Context7, Jina) are missing, a wizard prompts for them with masked input. Keys are stored in `~/.gsd/agent/auth.json` and hydrated into process.env on every launch. Wizard does not run on subsequent starts if keys are already configured. Anthropic auth is handled by pi's OAuth/API key flow — not the wizard.
|
||||
- Why it matters: Without API keys, nothing works. A wizard that detects and collects missing keys turns a broken first run into a successful one.
|
||||
- Source: user
|
||||
- Primary owning slice: M001/S03
|
||||
- Supporting slices: none
|
||||
- Validation: S03 — automated verify script (6/6 pass) + interactive UAT; wizard fires for missing optional keys, stores them, TUI launches, rerun skips wizard
|
||||
|
||||
### R007 — Isolated state in ~/.gsd/
|
||||
|
||||
- Class: quality-attribute
|
||||
- Status: validated
|
||||
- Description: All GSD state (auth, sessions, settings, logs) lives in `~/.gsd/`, completely separate from `~/.pi/`. Installing gsd must not modify or read a user's existing pi configuration.
|
||||
- Why it matters: Users may have an existing pi installation. GSD must not corrupt or interfere with it.
|
||||
- Source: inferred
|
||||
- Primary owning slice: M001/S01
|
||||
- Supporting slices: none
|
||||
- Validation: S01 — ~/.gsd/agent/ and ~/.gsd/sessions/ created after launch; ~/.pi/agent/sessions/ count unchanged (28/28) before and after gsd run
|
||||
|
||||
### R008 — npm update workflow
|
||||
|
||||
- Class: continuity
|
||||
- Status: validated
|
||||
- Description: `npm update -g gsd-pi` installs a new version with updated bundled resources. The update is clean — no stale extension files from old versions.
|
||||
- Why it matters: Software that can't update cleanly accumulates technical debt and breaks silently.
|
||||
- Source: user
|
||||
- Primary owning slice: M001/S04
|
||||
- Supporting slices: none
|
||||
- Validation: S04 — cpSync force:true in initResources ensures npm update -g replaces bundled resources; tarball smoke test confirms clean install path
|
||||
|
||||
### R009 — Observable failure state
|
||||
|
||||
- Class: failure-visibility
|
||||
- Status: validated
|
||||
- Description: If optional tool API keys are missing in a non-interactive run, the warning is actionable: it names the missing providers. Extension load failures are surfaced, not silently swallowed.
|
||||
- Why it matters: Silent failures are debugging nightmares. A future agent or user must be able to localize what broke without guessing.
|
||||
- Source: inferred
|
||||
- Primary owning slice: M001/S03
|
||||
- Supporting slices: M001/S02
|
||||
- Validation: S03 — non-TTY warning names all three missing providers (Brave Search, Context7, Jina); cat ~/.gsd/agent/auth.json shows stored state; extension load failure surface from S02 confirmed intact
|
||||
|
||||
### R010 — Test flow execution
|
||||
|
||||
- Class: core-capability
|
||||
- Status: active
|
||||
- Description: The agent can write YAML test specifications during development and execute them against browser, mac, and api targets via `run_test_flow` and `run_test_suite` tools. Flows use intent-based verification blocks (verify/given/expect) that the agent interprets adaptively. Browser tests run in a fresh isolated Playwright session.
|
||||
- Why it matters: Closes the gap between "agent builds a feature" and "agent proves it works" — durable, re-runnable test artifacts that survive context wipes.
|
||||
- Source: user
|
||||
- Primary owning slice: M003 (TBD)
|
||||
- Supporting slices: none
|
||||
- Validation: unmapped
|
||||
|
||||
### R011 — Flow-driven UAT
|
||||
|
||||
- Class: core-capability
|
||||
- Status: active
|
||||
- Description: GSD auto-mode recognizes `flow-driven` as a UAT type. At slice completion, the UAT pipeline automatically executes all flow files in the slice's `flows/` directory and writes structured pass/fail results to the UAT result file.
|
||||
- Why it matters: Makes UAT fully autonomous for slices with test flows — no human intervention needed for UI/API verification.
|
||||
- Source: user
|
||||
- Primary owning slice: M003 (TBD)
|
||||
- Supporting slices: none
|
||||
- Validation: unmapped
|
||||
|
||||
## Deferred
|
||||
|
||||
### R020 — Plugin system
|
||||
|
||||
- Class: differentiator
|
||||
- Status: deferred
|
||||
- Description: Allow users to install additional pi packages on top of GSD via `gsd install npm:pkg`
|
||||
- Why it matters: Makes GSD extensible beyond what ships in the box
|
||||
- Source: inferred
|
||||
- Primary owning slice: none
|
||||
- Supporting slices: none
|
||||
- Validation: unmapped
|
||||
- Notes: Deferred — M001 ships bundled-only. Plugin support is explicitly post-MVP.
|
||||
|
||||
### R021 — Skills bundle
|
||||
|
||||
- Class: core-capability
|
||||
- Status: deferred
|
||||
- Description: Ship the skills from `~/.pi/agent/skills/` as bundled GSD skills
|
||||
- Why it matters: Skills provide specialized workflows
|
||||
- Source: user
|
||||
- Primary owning slice: none
|
||||
- Supporting slices: none
|
||||
- Validation: unmapped
|
||||
- Notes: User explicitly excluded skills from M001. Can add in M002.
|
||||
|
||||
## Out of Scope
|
||||
|
||||
### R030 — pi compatibility / interoperability
|
||||
|
||||
- Class: anti-feature
|
||||
- Status: out-of-scope
|
||||
- Description: GSD does not read from or write to `~/.pi/`. There is no migration from pi to gsd. No `pi install npm:gsd` target.
|
||||
- Why it matters: Prevents scope confusion. GSD is a product, not a pi extension.
|
||||
- Source: user
|
||||
- Primary owning slice: none
|
||||
- Supporting slices: none
|
||||
- Validation: n/a
|
||||
- Notes: Explicitly out of scope by architecture decision.
|
||||
|
||||
### R031 — Web/desktop UI
|
||||
|
||||
- Class: constraint
|
||||
- Status: out-of-scope
|
||||
- Description: GSD 2.0 is terminal-only. No web UI, no Electron wrapper, no RPC mode.
|
||||
- Why it matters: Keeps scope focused on the CLI product.
|
||||
- Source: inferred
|
||||
- Primary owning slice: none
|
||||
- Supporting slices: none
|
||||
- Validation: n/a
|
||||
- Notes: `pi-web-ui` and RPC mode explicitly not used.
|
||||
|
||||
## Traceability
|
||||
|
||||
| ID | Class | Status | Primary owner | Supporting | Proof |
|
||||
| ---- | ------------------ | ------------ | ------------- | ---------- | -------- |
|
||||
| R001 | primary-user-loop | validated | M001/S01 | M001/S04 | S04 — npm install -g gsd-pi from registry; zero extension errors; binary confirmed |
|
||||
| R002 | differentiator | validated | M001/S01 | none | S01 — TUI shows "gsd", piConfig confirmed, ~/.gsd/ confirmed |
|
||||
| R003 | core-capability | validated | M001/S02 | none | S02 — gsd extension loads clean; interactive /gsd use deferred to S04 |
|
||||
| R004 | core-capability | validated | M001/S02 | none | S02 — all 10 supporting extensions load without errors; functional use deferred to S04 |
|
||||
| R005 | core-capability | validated | M001/S02 | none | S02 — agents present; AGENTS.md (15,070 bytes) written to ~/.gsd/agent/ on first launch |
|
||||
| R006 | launchability | validated | M001/S03 | none | S03 — optional-key wizard fires, stores, skips on rerun |
|
||||
| R007 | quality-attribute | validated | M001/S01 | none | S01 — ~/.gsd/ created; ~/.pi/ sessions unchanged (28/28) |
|
||||
| R008 | continuity | validated | M001/S04 | none | S04 — cpSync force:true; tarball smoke confirms clean install path |
|
||||
| R009 | failure-visibility | validated | M001/S03 | M001/S02 | S03 — non-TTY warning names missing providers; extension errors surface confirmed |
|
||||
| R020 | differentiator | deferred | none | none | unmapped |
|
||||
| R021 | core-capability | deferred | none | none | unmapped |
|
||||
| R010 | core-capability | active | M003 (TBD) | none | unmapped |
|
||||
| R011 | core-capability | active | M003 (TBD) | none | unmapped |
|
||||
| R030 | anti-feature | out-of-scope | none | none | n/a |
|
||||
| R031 | constraint | out-of-scope | none | none | n/a |
|
||||
|
||||
## Coverage Summary
|
||||
|
||||
- Active requirements: 11
|
||||
- Mapped to slices: 9
|
||||
- Validated: 9 (R001, R002, R003, R004, R005, R006, R007, R008, R009)
|
||||
- Unmapped active requirements: 2 (R010, R011 — pending M003 planning)
|
||||
133
.gsd/milestones/M003/M003-CONTEXT.md
Normal file
133
.gsd/milestones/M003/M003-CONTEXT.md
Normal file
|
|
@ -0,0 +1,133 @@
|
|||
# M003: AI-Driven Test Flows — Context
|
||||
|
||||
**Gathered:** 2026-03-11
|
||||
**Status:** Queued — pending auto-mode execution
|
||||
|
||||
## Project Description
|
||||
|
||||
A new GSD extension (`test-flows`) that introduces intent-based YAML test specifications the agent writes during development and executes autonomously at UAT time. Flows describe **what to verify** (not mechanical step-by-step scripts), and the agent interprets each verification block using its full adaptive intelligence — choosing selectors, handling flakiness, retrying intelligently, and diagnosing failures.
|
||||
|
||||
Supports three target surfaces: **browser** (web apps via Playwright), **mac** (native macOS apps via Accessibility APIs), and **api** (HTTP request/response verification).
|
||||
|
||||
This is GSD's testing arm — the thing that closes the loop between "agent builds a feature" and "agent proves it works."
|
||||
|
||||
## Why This Milestone
|
||||
|
||||
GSD's current UAT pipeline has a gap: `artifact-driven` UAT runs shell commands and file checks, while `live-runtime` and `human-experience` UAT punt to the human. There is no way for the agent to write durable, re-runnable UI/API tests during development that execute automatically at UAT time.
|
||||
|
||||
The agent already has the tools (`browser_*`, `mac_*`, `bash` for HTTP) — what's missing is a structured format for persisting test intent and a runner that orchestrates execution against fresh isolated sessions. This milestone fills that gap.
|
||||
|
||||
The insight from Maestro evaluation: don't compete with Maestro as a standalone deterministic test runner. Instead, leverage what GSD is uniquely good at — AI-driven adaptive execution of test specifications. The YAML files are intent specs, not scripts. The AI handles the "how."
|
||||
|
||||
## User-Visible Outcome
|
||||
|
||||
### When this milestone is complete, the user can:
|
||||
|
||||
- See the agent write `.yaml` test flow files during slice development that describe what to verify
|
||||
- Have UAT run automatically at slice completion — the agent executes all flow files and writes a structured pass/fail report
|
||||
- Read `S01-UAT-RESULT.md` with per-flow, per-verification pass/fail results, timing, screenshots on failure, and diagnostic context
|
||||
- Manually trigger test flows via the agent calling `run_test_flow` or `run_test_suite` tools at any time
|
||||
- Test web apps (browser target), macOS apps (mac target), and APIs (api target) from the same flow format
|
||||
|
||||
### Entry point / environment
|
||||
|
||||
- Entry point: LLM tool calls (`run_test_flow`, `run_test_suite`) + GSD auto-mode UAT pipeline
|
||||
- Environment: local dev (macOS terminal running `gsd`)
|
||||
- Live dependencies involved: Playwright (bundled), mac-tools Swift CLI (bundled), HTTP via Node fetch (built-in)
|
||||
|
||||
## Completion Class
|
||||
|
||||
- Contract complete means: flow YAML parser validates correctly, runner executes all three targets (browser/mac/api) and returns structured results, `flow-driven` UAT type is recognized by the auto-mode pipeline
|
||||
- Integration complete means: agent writes flows during development, auto-mode UAT dispatches `run_test_suite`, results appear in `S01-UAT-RESULT.md`, failures include screenshots and diagnostics
|
||||
- Operational complete means: the full loop works end-to-end in a real GSD auto-mode session — agent builds a web feature, writes test flows, completes the slice, UAT runs the flows, report is written
|
||||
|
||||
## Final Integrated Acceptance
|
||||
|
||||
To call this milestone complete, we must prove:
|
||||
|
||||
- Agent can write a browser-target flow YAML during development, and `run_test_flow` executes it against a running local web app with correct pass/fail results
|
||||
- Agent can write a mac-target flow YAML, and it executes against a real macOS app (e.g., TextEdit) with correct pass/fail results
|
||||
- Agent can write an api-target flow YAML with HTTP request/response checks, and it executes correctly
|
||||
- `flow-driven` UAT type triggers automatic test suite execution at slice completion in auto-mode, with results written to the UAT result file
|
||||
- Test execution uses a fresh isolated browser session, not the agent's development browser
|
||||
- Failures include actionable diagnostics: screenshots, console logs (browser), element state (mac), response bodies (api)
|
||||
|
||||
## Risks and Unknowns
|
||||
|
||||
- **Inter-extension isolation** — The test-flows extension must run its own Playwright browser instance, separate from browser-tools' instance. Two Playwright instances in the same process should work (Playwright supports it), but needs verification. If they conflict, the runner may need to use a subprocess.
|
||||
- **Mac-tools CLI access** — The test-flows extension needs to call the mac-tools Swift CLI binary directly. The binary is compiled on first use by the mac-tools extension. test-flows must either wait for mac-tools to compile it first, or handle compilation itself. Need to determine the right approach.
|
||||
- **Agent flow authoring quality** — The value depends on Claude writing good test specifications during development. If the generated flows are too vague or too brittle, the system fails in practice. This is a prompt engineering challenge, not a code challenge. The system prompt guidelines for the tool must be excellent.
|
||||
- **Adaptive execution reliability** — Each `verify` block is interpreted by the LLM. Non-determinism means a flow might pass one run and fail the next. Need to design the execution model to minimize this (clear verify/expect structure, retries, good diagnostics on failure).
|
||||
- **Execution model for verify blocks** — The runner tool receives a YAML flow and must execute each verify block. Since extensions can't call other extensions' tools, the runner must use Playwright/mac-tools/fetch directly (not via `browser_*` tools). This means reimplementing some of the smart waiting/settling logic from browser-tools. Alternatively, each verify block could be dispatched as an LLM sub-turn — but that's expensive and slow. The right balance needs to be found.
|
||||
|
||||
## Existing Codebase / Prior Art
|
||||
|
||||
- `src/resources/extensions/browser-tools/index.ts` — Full Playwright browser automation extension (~4990 lines). Reference for Playwright patterns, adaptive settling, assertion evaluation, screenshot capture. The test-flows runner will import Playwright directly rather than calling these tools.
|
||||
- `src/resources/extensions/browser-tools/core.js` — Runtime-neutral helpers: action timeline, assertion evaluation (`evaluateAssertionChecks`), compact state diffing. May be importable by test-flows.
|
||||
- `src/resources/extensions/mac-tools/index.ts` — macOS Accessibility API automation via Swift CLI. Reference for how to invoke the Swift CLI binary (`execFileSync` with JSON protocol).
|
||||
- `src/resources/extensions/gsd/auto.ts` — GSD auto-mode engine. Contains `checkNeedsRunUat()`, `buildRunUatPrompt()`, UAT dispatch logic. Must be modified to support `flow-driven` UAT type.
|
||||
- `src/resources/extensions/gsd/files.ts` — Contains `extractUatType()` which classifies UAT types from markdown content. Must be extended with `flow-driven`.
|
||||
- `src/resources/extensions/gsd/prompts/run-uat.md` — UAT execution prompt template. Must be extended with `flow-driven` instructions.
|
||||
- `src/resources/extensions/gsd/templates/uat.md` — UAT file template. Must include `flow-driven` as a valid UAT mode.
|
||||
- Maestro (external, not embedded) — Inspiration for YAML flow format and "arm's length" testing philosophy. Not a dependency. Key takeaways: declarative YAML syntax, smart waiting, accessibility-layer interaction, cross-platform unified format.
|
||||
|
||||
> See `.gsd/DECISIONS.md` for all architectural and pattern decisions — it is an append-only register; read it during planning, append to it during execution.
|
||||
|
||||
## Relevant Requirements
|
||||
|
||||
- R003 (Bundled GSD extension) — This extends the GSD extension's UAT pipeline with a new type
|
||||
- R004 (Bundled supporting extensions) — This adds a new bundled extension (`test-flows`)
|
||||
- New requirement candidates:
|
||||
- R010 — Test flow execution: agent can write and execute YAML test specifications against browser, mac, and api targets
|
||||
- R011 — Flow-driven UAT: auto-mode recognizes `flow-driven` UAT type and executes test suites automatically at slice completion
|
||||
|
||||
## Scope
|
||||
|
||||
### In Scope
|
||||
|
||||
- New `test-flows` extension in `src/resources/extensions/test-flows/`
|
||||
- YAML flow format: header (name, target, url/app/endpoint) + verification blocks (verify/given/expect)
|
||||
- Flow parser with validation and clear error messages
|
||||
- Browser target runner: own Playwright instance, fresh context per flow, smart waiting, screenshot capture
|
||||
- Mac target runner: direct Swift CLI invocation, element resolution, screenshot capture
|
||||
- API target runner: HTTP requests via Node fetch, status/header/body assertions
|
||||
- Two LLM tools: `run_test_flow` (single flow) and `run_test_suite` (directory of flows)
|
||||
- Structured result output: per-flow, per-verification pass/fail, timing, screenshots, diagnostics
|
||||
- New `flow-driven` UAT type in GSD extension (`files.ts`, `auto.ts`, `run-uat.md`, `uat.md`)
|
||||
- System prompt guidelines that teach the agent when and how to write good test flows
|
||||
- Flow files stored alongside slices: `.gsd/milestones/M00X/slices/S0X/flows/*.yaml`
|
||||
|
||||
### Out of Scope / Non-Goals
|
||||
|
||||
- Maestro compatibility (not a goal — different format, different execution model)
|
||||
- Visual regression testing / image diffing (future enhancement)
|
||||
- Parallel flow execution / sharding (future enhancement)
|
||||
- CI/CD integration or headless-only mode (future enhancement)
|
||||
- Flow recording / interactive flow authoring UI (future enhancement — Maestro Studio equivalent)
|
||||
- Mobile device/simulator testing (would require Maestro or Appium — out of scope)
|
||||
|
||||
## Technical Constraints
|
||||
|
||||
- Must be a pi extension following existing patterns (`export default function(pi: ExtensionAPI)`)
|
||||
- Must use TypeBox for tool parameter schemas, StringEnum for enums
|
||||
- Must truncate tool output to stay within context limits
|
||||
- Browser runner must use a separate Playwright instance from browser-tools (test isolation)
|
||||
- Mac runner must invoke the Swift CLI binary at the known path (`src/resources/extensions/mac-tools/swift-cli/.build/release/mac-agent`)
|
||||
- No new npm dependencies beyond what's already bundled (Playwright, yaml parsing via existing means)
|
||||
- Extension loads via `additionalExtensionPaths` — same mechanism as all other bundled extensions
|
||||
|
||||
## Integration Points
|
||||
|
||||
- `browser-tools` extension — Shares Playwright dependency but NOT browser state. test-flows runs its own Playwright instance.
|
||||
- `mac-tools` extension — test-flows calls the same Swift CLI binary but independently. Must handle the case where the binary hasn't been compiled yet.
|
||||
- `gsd` extension — UAT pipeline integration: `files.ts` (extractUatType), `auto.ts` (checkNeedsRunUat, buildRunUatPrompt), `prompts/run-uat.md`, `templates/uat.md`
|
||||
- `src/loader.ts` / `src/cli.ts` — test-flows must be added to `GSD_BUNDLED_EXTENSION_PATHS` and `initResources()` file sync
|
||||
- Playwright — Direct import for browser automation (already a dependency of the project)
|
||||
- Node.js `fetch` — For API target HTTP requests (built into Node 18+)
|
||||
|
||||
## Open Questions
|
||||
|
||||
- **Verify block execution model** — Should each `verify` block be executed by deterministic code (parse expect clauses, run Playwright assertions) or by sending the block to the LLM as a sub-task? Deterministic is faster and cheaper but less adaptive. LLM sub-task is more flexible but slower and non-deterministic. Hybrid approach (deterministic for simple assertions, LLM for complex "verify this looks right" blocks) may be the sweet spot. Needs design decision in planning.
|
||||
- **YAML parsing** — Use `js-yaml` (which would need to be added as a dependency) or parse the simple format manually? The format is simple enough that a hand-rolled parser might suffice and avoids a new dep.
|
||||
- **Mac binary compilation timing** — If test-flows needs the mac-tools binary and it hasn't been compiled yet, should test-flows trigger compilation or just fail with a clear message? Triggering compilation would duplicate logic from the mac-tools extension.
|
||||
- **Flow file discovery for UAT** — When `run_test_suite` is called for a slice's flows, should it discover files by convention (all `.yaml` in the `flows/` dir) or should the UAT file explicitly list which flows to run?
|
||||
128
CHANGELOG.md
Normal file
128
CHANGELOG.md
Normal file
|
|
@ -0,0 +1,128 @@
|
|||
# Changelog
|
||||
|
||||
All notable changes to GSD are documented in this file.
|
||||
|
||||
Format based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).
|
||||
|
||||
## [Unreleased]
|
||||
|
||||
## [2.3.4] - 2026-03-11
|
||||
|
||||
### Added
|
||||
- CHANGELOG.md with curated history from v0.1.6 onwards
|
||||
- Project-local `/publish-version` command for npm releases
|
||||
- GitHub Sponsors funding configuration
|
||||
- npm publish and install smoke test
|
||||
|
||||
## [0.3.3] - 2026-03-11
|
||||
|
||||
### Added
|
||||
- `/gsd next` step mode — walk through units one at a time with a wizard between each
|
||||
- `/gsd` bare command defaults to step mode
|
||||
- `/exit` command to kill the GSD process immediately
|
||||
- `/clear` as alias for `/new` (new session)
|
||||
- MCPorter extension for lazy on-demand MCP server integration
|
||||
- `/voice` extension for real-time speech-to-text
|
||||
- Pi global install scripts
|
||||
- Post-hook bookkeeping: auto-run doctor + rebuild STATE.md after each unit
|
||||
|
||||
### Changed
|
||||
- Improved worktree merge, create, remove, and reload resilience
|
||||
- Discuss prompt rewritten with reflection step and depth enforcement
|
||||
|
||||
### Fixed
|
||||
- Idle watchdog false-firing on active agents — tasks >10min no longer get incorrectly skipped (#52)
|
||||
- Browser screenshots constrained to 1568px max dimension (#56)
|
||||
- Pi extensions loaded from `~/.pi/agent/extensions/` (#51)
|
||||
|
||||
### Removed
|
||||
- `/gsd-run` command (replaced by `/gsd` and `/gsd next`)
|
||||
|
||||
## [0.3.1] - 2026-03-11
|
||||
|
||||
### Fixed
|
||||
- Windows VT input restored after child processes exit (#41)
|
||||
- Print/JSON mode in cli.js so subagents don't hang
|
||||
- Discuss prompt loop prevention
|
||||
- Managed tools bootstrap and gh auth
|
||||
- Session list scoped to current working directory
|
||||
- Bash/bg_shell hang and kill issues on Windows (#40)
|
||||
- `/gsd-run` hardcoded `~/.pi/` path (#38)
|
||||
- Windows backspace in masked input + custom browser path support (#36, #34)
|
||||
|
||||
### Changed
|
||||
- Renamed "Get Stuff Done" to "Get Shit Done"
|
||||
|
||||
## [0.3.0] - 2026-03-11
|
||||
|
||||
### Added
|
||||
- `/worktree` (`/wt`) — git worktree lifecycle management (#31)
|
||||
- `/gsd migrate` — `.planning` to `.gsd` migration tool (#28)
|
||||
|
||||
### Fixed
|
||||
- Skipped API keys now persist so wizard doesn't repeat on every launch (#27)
|
||||
- Scoped models restored from settings on new session startup (#22)
|
||||
- Startup fallback no longer overwrites user's default model with Sonnet (#29)
|
||||
|
||||
## [0.2.9] - 2026-03-11
|
||||
|
||||
### Fixed
|
||||
- Idle recovery skips stuck units instead of silently stalling (#19)
|
||||
- `pkg/package.json` version synced with pi-coding-agent to prevent false update banner
|
||||
- Milestones with summary but no roadmap treated as complete (#13)
|
||||
|
||||
## [0.2.8] - 2026-03-11
|
||||
|
||||
### Added
|
||||
- Mac-tools extension (macOS native automation)
|
||||
|
||||
## [0.2.6] - 2026-03-11
|
||||
|
||||
### Fixed
|
||||
- Default model validated against full registry on every startup
|
||||
|
||||
## [0.2.5] - 2026-03-11
|
||||
|
||||
### Fixed
|
||||
- Circular self-dependency removed, default model set to anthropic/claude-sonnet-4-6 with thinking off
|
||||
|
||||
## [0.2.4] - 2026-03-11
|
||||
|
||||
### Added
|
||||
- Branded setup wizard UI with visual hierarchy, descriptions, and status feedback
|
||||
- Branded banner on first launch
|
||||
- Postinstall banner with version and next-step hint
|
||||
|
||||
### Fixed
|
||||
- All `.pi/` paths updated to `.gsd/`
|
||||
- Default model matching by `id.includes('sonnet')` for dated API IDs
|
||||
- Circular gsd-pi self-dependency removed
|
||||
- Pi SDK version check suppressed
|
||||
- Selected options stay lit when notes field is focused
|
||||
|
||||
## [0.1.6] - 2026-03-11
|
||||
|
||||
### Added
|
||||
- GitHub extension tool suite with confirmation gate
|
||||
- Bundled skills: frontend-design, swiftui, debug-like-expert
|
||||
- Skills trigger table in system prompt
|
||||
- Resource loader syncs bundled skills to `~/.gsd/agent/skills/`
|
||||
|
||||
### Fixed
|
||||
- `~/.gsd/agent/` paths in prompt templates instead of `~/.pi/agent/` (#10)
|
||||
- Guard against re-injecting discuss prompt when session already in flight
|
||||
|
||||
### Changed
|
||||
- License updated to MIT
|
||||
|
||||
[Unreleased]: https://github.com/gsd-build/gsd-2/compare/v2.3.4...HEAD
|
||||
[2.3.4]: https://github.com/gsd-build/gsd-2/compare/v0.3.3...v2.3.4
|
||||
[0.3.3]: https://github.com/gsd-build/gsd-2/compare/v0.3.1...v0.3.3
|
||||
[0.3.1]: https://github.com/gsd-build/gsd-2/compare/v0.3.0...v0.3.1
|
||||
[0.3.0]: https://github.com/gsd-build/gsd-2/compare/v0.2.9...v0.3.0
|
||||
[0.2.9]: https://github.com/gsd-build/gsd-2/compare/v0.2.8...v0.2.9
|
||||
[0.2.8]: https://github.com/gsd-build/gsd-2/compare/v0.2.6...v0.2.8
|
||||
[0.2.6]: https://github.com/gsd-build/gsd-2/compare/v0.2.5...v0.2.6
|
||||
[0.2.5]: https://github.com/gsd-build/gsd-2/compare/v0.2.4...v0.2.5
|
||||
[0.2.4]: https://github.com/gsd-build/gsd-2/compare/v0.1.6...v0.2.4
|
||||
[0.1.6]: https://github.com/gsd-build/gsd-2/releases/tag/v0.1.6
|
||||
|
|
@ -412,7 +412,9 @@ Use expensive models where quality matters (planning, complex execution) and che
|
|||
|
||||
## Star History
|
||||
|
||||
[](https://star-history.com/#gsd-build/GSD-2&Date)
|
||||
<a href="https://star-history.com/#gsd-build/gsd-2&Date">
|
||||
<img alt="Star History Chart" src="https://api.star-history.com/svg?repos=gsd-build/gsd-2&type=Date" />
|
||||
</a>
|
||||
|
||||
---
|
||||
|
||||
|
|
|
|||
661
package-lock.json
generated
661
package-lock.json
generated
|
|
@ -1,12 +1,12 @@
|
|||
{
|
||||
"name": "gsd-pi",
|
||||
"version": "0.3.0",
|
||||
"version": "2.3.4",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "gsd-pi",
|
||||
"version": "0.3.0",
|
||||
"version": "2.3.4",
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
|
|
@ -19,6 +19,7 @@
|
|||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^22.0.0",
|
||||
"patch-package": "^8.0.1",
|
||||
"typescript": "^5.4.0"
|
||||
},
|
||||
"engines": {
|
||||
|
|
@ -1881,6 +1882,13 @@
|
|||
"@types/node": "*"
|
||||
}
|
||||
},
|
||||
"node_modules/@yarnpkg/lockfile": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/@yarnpkg/lockfile/-/lockfile-1.1.0.tgz",
|
||||
"integrity": "sha512-GpSwvyXOcOOlV70vbnzjj4fW5xW/FdUF6nQEt1ENy7m4ZCczi1+/buVUPAqmGfqznsORNFzUMjctTIp8a9tuCQ==",
|
||||
"dev": true,
|
||||
"license": "BSD-2-Clause"
|
||||
},
|
||||
"node_modules/agent-base": {
|
||||
"version": "7.1.4",
|
||||
"resolved": "https://registry.npmjs.org/agent-base/-/agent-base-7.1.4.tgz",
|
||||
|
|
@ -2033,6 +2041,19 @@
|
|||
"node": "18 || 20 || >=22"
|
||||
}
|
||||
},
|
||||
"node_modules/braces": {
|
||||
"version": "3.0.3",
|
||||
"resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz",
|
||||
"integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"fill-range": "^7.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/buffer-crc32": {
|
||||
"version": "0.2.13",
|
||||
"resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz",
|
||||
|
|
@ -2048,6 +2069,56 @@
|
|||
"integrity": "sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==",
|
||||
"license": "BSD-3-Clause"
|
||||
},
|
||||
"node_modules/call-bind": {
|
||||
"version": "1.0.8",
|
||||
"resolved": "https://registry.npmjs.org/call-bind/-/call-bind-1.0.8.tgz",
|
||||
"integrity": "sha512-oKlSFMcMwpUg2ednkhQ454wfWiU/ul3CkJe/PEHcTKuiX6RpbehUiFMXu13HalGZxfUwCQzZG747YXBn1im9ww==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"call-bind-apply-helpers": "^1.0.0",
|
||||
"es-define-property": "^1.0.0",
|
||||
"get-intrinsic": "^1.2.4",
|
||||
"set-function-length": "^1.2.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/call-bind-apply-helpers": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/call-bind-apply-helpers/-/call-bind-apply-helpers-1.0.2.tgz",
|
||||
"integrity": "sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/call-bound": {
|
||||
"version": "1.0.4",
|
||||
"resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz",
|
||||
"integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"call-bind-apply-helpers": "^1.0.2",
|
||||
"get-intrinsic": "^1.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/chalk": {
|
||||
"version": "5.6.2",
|
||||
"resolved": "https://registry.npmjs.org/chalk/-/chalk-5.6.2.tgz",
|
||||
|
|
@ -2060,6 +2131,22 @@
|
|||
"url": "https://github.com/chalk/chalk?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/ci-info": {
|
||||
"version": "3.9.0",
|
||||
"resolved": "https://registry.npmjs.org/ci-info/-/ci-info-3.9.0.tgz",
|
||||
"integrity": "sha512-NIxF55hv4nSqQswkAeiOi1r83xy8JldOFDTWiug55KBu9Jnblncd2U6ViHmYgHf01TPZS77NJBhBMKdWj9HQMQ==",
|
||||
"dev": true,
|
||||
"funding": [
|
||||
{
|
||||
"type": "github",
|
||||
"url": "https://github.com/sponsors/sibiraj-s"
|
||||
}
|
||||
],
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/cli-highlight": {
|
||||
"version": "2.1.11",
|
||||
"resolved": "https://registry.npmjs.org/cli-highlight/-/cli-highlight-2.1.11.tgz",
|
||||
|
|
@ -2147,6 +2234,21 @@
|
|||
"integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/cross-spawn": {
|
||||
"version": "7.0.6",
|
||||
"resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.6.tgz",
|
||||
"integrity": "sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"path-key": "^3.1.0",
|
||||
"shebang-command": "^2.0.0",
|
||||
"which": "^2.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 8"
|
||||
}
|
||||
},
|
||||
"node_modules/data-uri-to-buffer": {
|
||||
"version": "4.0.1",
|
||||
"resolved": "https://registry.npmjs.org/data-uri-to-buffer/-/data-uri-to-buffer-4.0.1.tgz",
|
||||
|
|
@ -2173,6 +2275,24 @@
|
|||
}
|
||||
}
|
||||
},
|
||||
"node_modules/define-data-property": {
|
||||
"version": "1.1.4",
|
||||
"resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
|
||||
"integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"es-define-property": "^1.0.0",
|
||||
"es-errors": "^1.3.0",
|
||||
"gopd": "^1.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/degenerator": {
|
||||
"version": "5.0.1",
|
||||
"resolved": "https://registry.npmjs.org/degenerator/-/degenerator-5.0.1.tgz",
|
||||
|
|
@ -2196,6 +2316,21 @@
|
|||
"node": ">=0.3.1"
|
||||
}
|
||||
},
|
||||
"node_modules/dunder-proto": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/dunder-proto/-/dunder-proto-1.0.1.tgz",
|
||||
"integrity": "sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"call-bind-apply-helpers": "^1.0.1",
|
||||
"es-errors": "^1.3.0",
|
||||
"gopd": "^1.2.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/ecdsa-sig-formatter": {
|
||||
"version": "1.0.11",
|
||||
"resolved": "https://registry.npmjs.org/ecdsa-sig-formatter/-/ecdsa-sig-formatter-1.0.11.tgz",
|
||||
|
|
@ -2220,6 +2355,39 @@
|
|||
"once": "^1.4.0"
|
||||
}
|
||||
},
|
||||
"node_modules/es-define-property": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
|
||||
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/es-errors": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
|
||||
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/es-object-atoms": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/es-object-atoms/-/es-object-atoms-1.1.1.tgz",
|
||||
"integrity": "sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"es-errors": "^1.3.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/escalade": {
|
||||
"version": "3.2.0",
|
||||
"resolved": "https://registry.npmjs.org/escalade/-/escalade-3.2.0.tgz",
|
||||
|
|
@ -2413,6 +2581,29 @@
|
|||
"url": "https://github.com/sindresorhus/file-type?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/fill-range": {
|
||||
"version": "7.1.1",
|
||||
"resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz",
|
||||
"integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"to-regex-range": "^5.0.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/find-yarn-workspace-root": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/find-yarn-workspace-root/-/find-yarn-workspace-root-2.0.0.tgz",
|
||||
"integrity": "sha512-1IMnbjt4KzsQfnhnzNd8wUEgXZ44IzZaZmnLYx7D5FZlaHt2gW20Cri8Q+E/t5tIj4+epTBub+2Zxu/vNILzqQ==",
|
||||
"dev": true,
|
||||
"license": "Apache-2.0",
|
||||
"dependencies": {
|
||||
"micromatch": "^4.0.2"
|
||||
}
|
||||
},
|
||||
"node_modules/formdata-polyfill": {
|
||||
"version": "4.0.10",
|
||||
"resolved": "https://registry.npmjs.org/formdata-polyfill/-/formdata-polyfill-4.0.10.tgz",
|
||||
|
|
@ -2425,6 +2616,21 @@
|
|||
"node": ">=12.20.0"
|
||||
}
|
||||
},
|
||||
"node_modules/fs-extra": {
|
||||
"version": "10.1.0",
|
||||
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-10.1.0.tgz",
|
||||
"integrity": "sha512-oRXApq54ETRj4eMiFzGnHWGy+zo5raudjuxN0b8H7s/RU2oW0Wvsx9O0ACRN/kRq9E8Vu/ReskGB5o3ji+FzHQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"graceful-fs": "^4.2.0",
|
||||
"jsonfile": "^6.0.1",
|
||||
"universalify": "^2.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=12"
|
||||
}
|
||||
},
|
||||
"node_modules/fsevents": {
|
||||
"version": "2.3.2",
|
||||
"resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.2.tgz",
|
||||
|
|
@ -2439,6 +2645,16 @@
|
|||
"node": "^8.16.0 || ^10.6.0 || >=11.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/function-bind": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz",
|
||||
"integrity": "sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/gaxios": {
|
||||
"version": "7.1.4",
|
||||
"resolved": "https://registry.npmjs.org/gaxios/-/gaxios-7.1.4.tgz",
|
||||
|
|
@ -2488,6 +2704,45 @@
|
|||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/get-intrinsic": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/get-intrinsic/-/get-intrinsic-1.3.0.tgz",
|
||||
"integrity": "sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"call-bind-apply-helpers": "^1.0.2",
|
||||
"es-define-property": "^1.0.1",
|
||||
"es-errors": "^1.3.0",
|
||||
"es-object-atoms": "^1.1.1",
|
||||
"function-bind": "^1.1.2",
|
||||
"get-proto": "^1.0.1",
|
||||
"gopd": "^1.2.0",
|
||||
"has-symbols": "^1.1.0",
|
||||
"hasown": "^2.0.2",
|
||||
"math-intrinsics": "^1.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/get-proto": {
|
||||
"version": "1.0.1",
|
||||
"resolved": "https://registry.npmjs.org/get-proto/-/get-proto-1.0.1.tgz",
|
||||
"integrity": "sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"dunder-proto": "^1.0.1",
|
||||
"es-object-atoms": "^1.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/get-stream": {
|
||||
"version": "5.2.0",
|
||||
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz",
|
||||
|
|
@ -2569,6 +2824,19 @@
|
|||
"node": ">=14"
|
||||
}
|
||||
},
|
||||
"node_modules/gopd": {
|
||||
"version": "1.2.0",
|
||||
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
|
||||
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/graceful-fs": {
|
||||
"version": "4.2.11",
|
||||
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
|
||||
|
|
@ -2584,6 +2852,45 @@
|
|||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/has-property-descriptors": {
|
||||
"version": "1.0.2",
|
||||
"resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
|
||||
"integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"es-define-property": "^1.0.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/has-symbols": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz",
|
||||
"integrity": "sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/hasown": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/hasown/-/hasown-2.0.2.tgz",
|
||||
"integrity": "sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"function-bind": "^1.1.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/highlight.js": {
|
||||
"version": "10.7.3",
|
||||
"resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-10.7.3.tgz",
|
||||
|
|
@ -2669,6 +2976,22 @@
|
|||
"node": ">= 12"
|
||||
}
|
||||
},
|
||||
"node_modules/is-docker": {
|
||||
"version": "2.2.1",
|
||||
"resolved": "https://registry.npmjs.org/is-docker/-/is-docker-2.2.1.tgz",
|
||||
"integrity": "sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"bin": {
|
||||
"is-docker": "cli.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/is-fullwidth-code-point": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-3.0.0.tgz",
|
||||
|
|
@ -2678,6 +3001,43 @@
|
|||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/is-number": {
|
||||
"version": "7.0.0",
|
||||
"resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz",
|
||||
"integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=0.12.0"
|
||||
}
|
||||
},
|
||||
"node_modules/is-wsl": {
|
||||
"version": "2.2.0",
|
||||
"resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-2.2.0.tgz",
|
||||
"integrity": "sha512-fKzAra0rGJUUBwGBgNkHZuToZcn+TtXHpeCgmkMJMMYx1sQDYaCSyjJBSCa2nH1DGm7s3n1oBnohoVTBaN7Lww==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"is-docker": "^2.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/isarray": {
|
||||
"version": "2.0.5",
|
||||
"resolved": "https://registry.npmjs.org/isarray/-/isarray-2.0.5.tgz",
|
||||
"integrity": "sha512-xHjhDr3cNBK0BzdUJSPXZntQUx/mwMS5Rw4A7lPJ90XGAO6ISP/ePDNuo0vhqOZU+UD5JoodwCAAoZQd3FeAKw==",
|
||||
"dev": true,
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/isexe": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz",
|
||||
"integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==",
|
||||
"dev": true,
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/json-bigint": {
|
||||
"version": "1.0.0",
|
||||
"resolved": "https://registry.npmjs.org/json-bigint/-/json-bigint-1.0.0.tgz",
|
||||
|
|
@ -2706,6 +3066,49 @@
|
|||
"integrity": "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/json-stable-stringify": {
|
||||
"version": "1.3.0",
|
||||
"resolved": "https://registry.npmjs.org/json-stable-stringify/-/json-stable-stringify-1.3.0.tgz",
|
||||
"integrity": "sha512-qtYiSSFlwot9XHtF9bD9c7rwKjr+RecWT//ZnPvSmEjpV5mmPOCN4j8UjY5hbjNkOwZ/jQv3J6R1/pL7RwgMsg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"call-bind": "^1.0.8",
|
||||
"call-bound": "^1.0.4",
|
||||
"isarray": "^2.0.5",
|
||||
"jsonify": "^0.0.1",
|
||||
"object-keys": "^1.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/jsonfile": {
|
||||
"version": "6.2.0",
|
||||
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-6.2.0.tgz",
|
||||
"integrity": "sha512-FGuPw30AdOIUTRMC2OMRtQV+jkVj2cfPqSeWXv1NEAJ1qZ5zb1X6z1mFhbfOB/iy3ssJCD+3KuZ8r8C3uVFlAg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"universalify": "^2.0.0"
|
||||
},
|
||||
"optionalDependencies": {
|
||||
"graceful-fs": "^4.1.6"
|
||||
}
|
||||
},
|
||||
"node_modules/jsonify": {
|
||||
"version": "0.0.1",
|
||||
"resolved": "https://registry.npmjs.org/jsonify/-/jsonify-0.0.1.tgz",
|
||||
"integrity": "sha512-2/Ki0GcmuqSrgFyelQq9M05y7PS0mEwuIzrf3f1fPqkVDVRvZrPZtVSMHxdgo8Aq0sxAOb/cr2aqqA3LeWHVPg==",
|
||||
"dev": true,
|
||||
"license": "Public Domain",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/jwa": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/jwa/-/jwa-2.0.1.tgz",
|
||||
|
|
@ -2727,6 +3130,16 @@
|
|||
"safe-buffer": "^5.0.1"
|
||||
}
|
||||
},
|
||||
"node_modules/klaw-sync": {
|
||||
"version": "6.0.0",
|
||||
"resolved": "https://registry.npmjs.org/klaw-sync/-/klaw-sync-6.0.0.tgz",
|
||||
"integrity": "sha512-nIeuVSzdCCs6TDPTqI8w1Yre34sSq7AkZ4B3sfOBbI2CgVSB4Du4aLQijFU2+lhAFCwt9+42Hel6lQNIv6AntQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"graceful-fs": "^4.1.11"
|
||||
}
|
||||
},
|
||||
"node_modules/koffi": {
|
||||
"version": "2.15.1",
|
||||
"resolved": "https://registry.npmjs.org/koffi/-/koffi-2.15.1.tgz",
|
||||
|
|
@ -2765,6 +3178,30 @@
|
|||
"node": ">= 18"
|
||||
}
|
||||
},
|
||||
"node_modules/math-intrinsics": {
|
||||
"version": "1.1.0",
|
||||
"resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz",
|
||||
"integrity": "sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/micromatch": {
|
||||
"version": "4.0.8",
|
||||
"resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz",
|
||||
"integrity": "sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"braces": "^3.0.3",
|
||||
"picomatch": "^2.3.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8.6"
|
||||
}
|
||||
},
|
||||
"node_modules/mime-db": {
|
||||
"version": "1.54.0",
|
||||
"resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.54.0.tgz",
|
||||
|
|
@ -2805,6 +3242,16 @@
|
|||
"url": "https://github.com/sponsors/isaacs"
|
||||
}
|
||||
},
|
||||
"node_modules/minimist": {
|
||||
"version": "1.2.8",
|
||||
"resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz",
|
||||
"integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/ljharb"
|
||||
}
|
||||
},
|
||||
"node_modules/minipass": {
|
||||
"version": "7.1.3",
|
||||
"resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.3.tgz",
|
||||
|
|
@ -2887,6 +3334,16 @@
|
|||
"node": ">=0.10.0"
|
||||
}
|
||||
},
|
||||
"node_modules/object-keys": {
|
||||
"version": "1.1.1",
|
||||
"resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
|
||||
"integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/once": {
|
||||
"version": "1.4.0",
|
||||
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
|
||||
|
|
@ -2896,6 +3353,23 @@
|
|||
"wrappy": "1"
|
||||
}
|
||||
},
|
||||
"node_modules/open": {
|
||||
"version": "7.4.2",
|
||||
"resolved": "https://registry.npmjs.org/open/-/open-7.4.2.tgz",
|
||||
"integrity": "sha512-MVHddDVweXZF3awtlAS+6pgKLlm/JgxZ90+/NBurBoQctVOOB/zDdVjcyPzQ+0laDGbsWgrRkflI65sQeOgT9Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"is-docker": "^2.0.0",
|
||||
"is-wsl": "^2.1.1"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/sindresorhus"
|
||||
}
|
||||
},
|
||||
"node_modules/openai": {
|
||||
"version": "6.26.0",
|
||||
"resolved": "https://registry.npmjs.org/openai/-/openai-6.26.0.tgz",
|
||||
|
|
@ -2989,6 +3463,53 @@
|
|||
"integrity": "sha512-Njv/59hHaokb/hRUjce3Hdv12wd60MtM9Z5Olmn+nehe0QDAsRtRbJPvJ0Z91TusF0SuZRIvnM+S4l6EIP8leA==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/patch-package": {
|
||||
"version": "8.0.1",
|
||||
"resolved": "https://registry.npmjs.org/patch-package/-/patch-package-8.0.1.tgz",
|
||||
"integrity": "sha512-VsKRIA8f5uqHQ7NGhwIna6Bx6D9s/1iXlA1hthBVBEbkq+t4kXD0HHt+rJhf/Z+Ci0F/HCB2hvn0qLdLG+Qxlw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@yarnpkg/lockfile": "^1.1.0",
|
||||
"chalk": "^4.1.2",
|
||||
"ci-info": "^3.7.0",
|
||||
"cross-spawn": "^7.0.3",
|
||||
"find-yarn-workspace-root": "^2.0.0",
|
||||
"fs-extra": "^10.0.0",
|
||||
"json-stable-stringify": "^1.0.2",
|
||||
"klaw-sync": "^6.0.0",
|
||||
"minimist": "^1.2.6",
|
||||
"open": "^7.4.2",
|
||||
"semver": "^7.5.3",
|
||||
"slash": "^2.0.0",
|
||||
"tmp": "^0.2.4",
|
||||
"yaml": "^2.2.2"
|
||||
},
|
||||
"bin": {
|
||||
"patch-package": "index.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=14",
|
||||
"npm": ">5"
|
||||
}
|
||||
},
|
||||
"node_modules/patch-package/node_modules/chalk": {
|
||||
"version": "4.1.2",
|
||||
"resolved": "https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz",
|
||||
"integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"ansi-styles": "^4.1.0",
|
||||
"supports-color": "^7.1.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/chalk/chalk?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/path-expression-matcher": {
|
||||
"version": "1.1.2",
|
||||
"resolved": "https://registry.npmjs.org/path-expression-matcher/-/path-expression-matcher-1.1.2.tgz",
|
||||
|
|
@ -3004,6 +3525,16 @@
|
|||
"node": ">=14.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/path-key": {
|
||||
"version": "3.1.1",
|
||||
"resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz",
|
||||
"integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/path-scurry": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.2.tgz",
|
||||
|
|
@ -3026,6 +3557,19 @@
|
|||
"integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/picomatch": {
|
||||
"version": "2.3.1",
|
||||
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz",
|
||||
"integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=8.6"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://github.com/sponsors/jonschlinkert"
|
||||
}
|
||||
},
|
||||
"node_modules/playwright": {
|
||||
"version": "1.58.2",
|
||||
"resolved": "https://registry.npmjs.org/playwright/-/playwright-1.58.2.tgz",
|
||||
|
|
@ -3191,12 +3735,76 @@
|
|||
],
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/semver": {
|
||||
"version": "7.7.4",
|
||||
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.4.tgz",
|
||||
"integrity": "sha512-vFKC2IEtQnVhpT78h1Yp8wzwrf8CM+MzKMHGJZfBtzhZNycRFnXsHk6E5TxIkkMsgNS7mdX3AGB7x2QM2di4lA==",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
"bin": {
|
||||
"semver": "bin/semver.js"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=10"
|
||||
}
|
||||
},
|
||||
"node_modules/set-function-length": {
|
||||
"version": "1.2.2",
|
||||
"resolved": "https://registry.npmjs.org/set-function-length/-/set-function-length-1.2.2.tgz",
|
||||
"integrity": "sha512-pgRc4hJ4/sNjWCSS9AmnS40x3bNMDTknHgL5UaMBTMyJnU90EgWh1Rz+MC9eFu4BuN/UwZjKQuY/1v3rM7HMfg==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"define-data-property": "^1.1.4",
|
||||
"es-errors": "^1.3.0",
|
||||
"function-bind": "^1.1.2",
|
||||
"get-intrinsic": "^1.2.4",
|
||||
"gopd": "^1.0.1",
|
||||
"has-property-descriptors": "^1.0.2"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 0.4"
|
||||
}
|
||||
},
|
||||
"node_modules/shebang-command": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz",
|
||||
"integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"shebang-regex": "^3.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/shebang-regex": {
|
||||
"version": "3.0.0",
|
||||
"resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz",
|
||||
"integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=8"
|
||||
}
|
||||
},
|
||||
"node_modules/signal-exit": {
|
||||
"version": "3.0.7",
|
||||
"resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.7.tgz",
|
||||
"integrity": "sha512-wnD2ZE+l+SPC/uoS0vXeE9L1+0wuaMqKlfz9AMUo38JsyLSBWSFcHR1Rri62LZc12vLr1gb3jl7iwQhgwpAbGQ==",
|
||||
"license": "ISC"
|
||||
},
|
||||
"node_modules/slash": {
|
||||
"version": "2.0.0",
|
||||
"resolved": "https://registry.npmjs.org/slash/-/slash-2.0.0.tgz",
|
||||
"integrity": "sha512-ZYKh3Wh2z1PpEXWr0MpSBZ0V6mZHAQfYevttO11c51CaWjGTaadiKZ+wVt1PbMlDV5qhMFslpZCemhwOK7C89A==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=6"
|
||||
}
|
||||
},
|
||||
"node_modules/smart-buffer": {
|
||||
"version": "4.2.0",
|
||||
"resolved": "https://registry.npmjs.org/smart-buffer/-/smart-buffer-4.2.0.tgz",
|
||||
|
|
@ -3362,6 +3970,29 @@
|
|||
"node": ">=0.8"
|
||||
}
|
||||
},
|
||||
"node_modules/tmp": {
|
||||
"version": "0.2.5",
|
||||
"resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.5.tgz",
|
||||
"integrity": "sha512-voyz6MApa1rQGUxT3E+BK7/ROe8itEx7vD8/HEvt4xwXucvQ5G5oeEiHkmHZJuBO21RpOf+YYm9MOivj709jow==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">=14.14"
|
||||
}
|
||||
},
|
||||
"node_modules/to-regex-range": {
|
||||
"version": "5.0.1",
|
||||
"resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz",
|
||||
"integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"is-number": "^7.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">=8.0"
|
||||
}
|
||||
},
|
||||
"node_modules/token-types": {
|
||||
"version": "6.1.2",
|
||||
"resolved": "https://registry.npmjs.org/token-types/-/token-types-6.1.2.tgz",
|
||||
|
|
@ -3433,6 +4064,16 @@
|
|||
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/universalify": {
|
||||
"version": "2.0.1",
|
||||
"resolved": "https://registry.npmjs.org/universalify/-/universalify-2.0.1.tgz",
|
||||
"integrity": "sha512-gptHNQghINnc/vTGIk0SOFGFNXw7JVrlRUtConJRlvaw6DuX0wO5Jeko9sWrMBhh+PsYAZ7oXAiOnf/UKogyiw==",
|
||||
"dev": true,
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 10.0.0"
|
||||
}
|
||||
},
|
||||
"node_modules/web-streams-polyfill": {
|
||||
"version": "3.3.3",
|
||||
"resolved": "https://registry.npmjs.org/web-streams-polyfill/-/web-streams-polyfill-3.3.3.tgz",
|
||||
|
|
@ -3442,6 +4083,22 @@
|
|||
"node": ">= 8"
|
||||
}
|
||||
},
|
||||
"node_modules/which": {
|
||||
"version": "2.0.2",
|
||||
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",
|
||||
"integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==",
|
||||
"dev": true,
|
||||
"license": "ISC",
|
||||
"dependencies": {
|
||||
"isexe": "^2.0.0"
|
||||
},
|
||||
"bin": {
|
||||
"node-which": "bin/node-which"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 8"
|
||||
}
|
||||
},
|
||||
"node_modules/wrap-ansi": {
|
||||
"version": "7.0.0",
|
||||
"resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-7.0.0.tgz",
|
||||
|
|
|
|||
|
|
@ -1,6 +1,6 @@
|
|||
{
|
||||
"name": "gsd-pi",
|
||||
"version": "0.3.0",
|
||||
"version": "2.3.4",
|
||||
"description": "GSD — Get Shit Done coding agent",
|
||||
"license": "MIT",
|
||||
"repository": {
|
||||
|
|
@ -18,6 +18,7 @@
|
|||
},
|
||||
"files": [
|
||||
"dist",
|
||||
"patches",
|
||||
"pkg",
|
||||
"src/resources",
|
||||
"scripts/postinstall.js",
|
||||
|
|
@ -37,6 +38,8 @@
|
|||
"test": "node --import ./src/resources/extensions/gsd/tests/resolve-ts.mjs --experimental-strip-types --test 'src/resources/extensions/gsd/tests/*.test.ts' 'src/resources/extensions/gsd/tests/*.test.mjs' 'src/tests/*.test.ts'",
|
||||
"dev": "tsc --watch",
|
||||
"postinstall": "node scripts/postinstall.js",
|
||||
"pi:install-global": "node scripts/install-pi-global.js",
|
||||
"pi:uninstall-global": "node scripts/uninstall-pi-global.js",
|
||||
"sync-pkg-version": "node scripts/sync-pkg-version.cjs",
|
||||
"prepublishOnly": "npm run sync-pkg-version && npm run build"
|
||||
},
|
||||
|
|
@ -46,6 +49,7 @@
|
|||
},
|
||||
"devDependencies": {
|
||||
"@types/node": "^22.0.0",
|
||||
"patch-package": "^8.0.1",
|
||||
"typescript": "^5.4.0"
|
||||
},
|
||||
"overrides": {
|
||||
|
|
|
|||
48
patches/@mariozechner+pi-coding-agent+0.57.1.patch
Normal file
48
patches/@mariozechner+pi-coding-agent+0.57.1.patch
Normal file
|
|
@ -0,0 +1,48 @@
|
|||
diff --git a/node_modules/@mariozechner/pi-coding-agent/dist/core/tools/bash.js b/node_modules/@mariozechner/pi-coding-agent/dist/core/tools/bash.js
|
||||
index 27fe820..68f277f 100644
|
||||
--- a/node_modules/@mariozechner/pi-coding-agent/dist/core/tools/bash.js
|
||||
+++ b/node_modules/@mariozechner/pi-coding-agent/dist/core/tools/bash.js
|
||||
@@ -1,11 +1,35 @@
|
||||
import { randomBytes } from "node:crypto";
|
||||
import { createWriteStream, existsSync } from "node:fs";
|
||||
+import { createRequire } from "node:module";
|
||||
import { tmpdir } from "node:os";
|
||||
import { join } from "node:path";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { spawn } from "child_process";
|
||||
import { getShellConfig, getShellEnv, killProcessTree } from "../../utils/shell.js";
|
||||
import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize, truncateTail } from "./truncate.js";
|
||||
+// Cached Win32 FFI handles for restoring VT input after child processes
|
||||
+let _vtHandles = null;
|
||||
+function restoreWindowsVTInput() {
|
||||
+ if (process.platform !== "win32") return;
|
||||
+ try {
|
||||
+ if (!_vtHandles) {
|
||||
+ const cjsRequire = createRequire(import.meta.url);
|
||||
+ const koffi = cjsRequire("koffi");
|
||||
+ const k32 = koffi.load("kernel32.dll");
|
||||
+ const GetStdHandle = k32.func("void* __stdcall GetStdHandle(int)");
|
||||
+ const GetConsoleMode = k32.func("bool __stdcall GetConsoleMode(void*, _Out_ uint32_t*)");
|
||||
+ const SetConsoleMode = k32.func("bool __stdcall SetConsoleMode(void*, uint32_t)");
|
||||
+ const handle = GetStdHandle(-10);
|
||||
+ _vtHandles = { GetConsoleMode, SetConsoleMode, handle };
|
||||
+ }
|
||||
+ const ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200;
|
||||
+ const mode = new Uint32Array(1);
|
||||
+ _vtHandles.GetConsoleMode(_vtHandles.handle, mode);
|
||||
+ if (!(mode[0] & ENABLE_VIRTUAL_TERMINAL_INPUT)) {
|
||||
+ _vtHandles.SetConsoleMode(_vtHandles.handle, mode[0] | ENABLE_VIRTUAL_TERMINAL_INPUT);
|
||||
+ }
|
||||
+ } catch { }
|
||||
+}
|
||||
/**
|
||||
* Generate a unique temp file path for bash output
|
||||
*/
|
||||
@@ -76,6 +100,7 @@ const defaultBashOperations = {
|
||||
}
|
||||
// Handle process exit
|
||||
child.on("close", (code) => {
|
||||
+ restoreWindowsVTInput();
|
||||
if (timeoutHandle)
|
||||
clearTimeout(timeoutHandle);
|
||||
if (signal)
|
||||
47
patches/@mariozechner+pi-tui+0.57.1.patch
Normal file
47
patches/@mariozechner+pi-tui+0.57.1.patch
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
diff --git a/node_modules/@mariozechner/pi-tui/dist/terminal.js b/node_modules/@mariozechner/pi-tui/dist/terminal.js
|
||||
index cd20330..e836fcd 100644
|
||||
--- a/node_modules/@mariozechner/pi-tui/dist/terminal.js
|
||||
+++ b/node_modules/@mariozechner/pi-tui/dist/terminal.js
|
||||
@@ -7,6 +7,7 @@ const cjsRequire = createRequire(import.meta.url);
|
||||
* Real terminal using process.stdin/stdout
|
||||
*/
|
||||
export class ProcessTerminal {
|
||||
+ static _vtHandles = null;
|
||||
wasRaw = false;
|
||||
inputHandler;
|
||||
resizeHandler;
|
||||
@@ -126,20 +127,23 @@ export class ProcessTerminal {
|
||||
if (process.platform !== "win32")
|
||||
return;
|
||||
try {
|
||||
- // Dynamic require to avoid bundling koffi's 74MB of cross-platform
|
||||
- // native binaries into every compiled binary. Koffi is only needed
|
||||
- // on Windows for VT input support.
|
||||
- const koffi = cjsRequire("koffi");
|
||||
- const k32 = koffi.load("kernel32.dll");
|
||||
- const GetStdHandle = k32.func("void* __stdcall GetStdHandle(int)");
|
||||
- const GetConsoleMode = k32.func("bool __stdcall GetConsoleMode(void*, _Out_ uint32_t*)");
|
||||
- const SetConsoleMode = k32.func("bool __stdcall SetConsoleMode(void*, uint32_t)");
|
||||
- const STD_INPUT_HANDLE = -10;
|
||||
+ if (!ProcessTerminal._vtHandles) {
|
||||
+ const koffi = cjsRequire("koffi");
|
||||
+ const k32 = koffi.load("kernel32.dll");
|
||||
+ const GetStdHandle = k32.func("void* __stdcall GetStdHandle(int)");
|
||||
+ const GetConsoleMode = k32.func("bool __stdcall GetConsoleMode(void*, _Out_ uint32_t*)");
|
||||
+ const SetConsoleMode = k32.func("bool __stdcall SetConsoleMode(void*, uint32_t)");
|
||||
+ const STD_INPUT_HANDLE = -10;
|
||||
+ const handle = GetStdHandle(STD_INPUT_HANDLE);
|
||||
+ ProcessTerminal._vtHandles = { GetConsoleMode, SetConsoleMode, handle };
|
||||
+ }
|
||||
const ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200;
|
||||
- const handle = GetStdHandle(STD_INPUT_HANDLE);
|
||||
+ const { GetConsoleMode, SetConsoleMode, handle } = ProcessTerminal._vtHandles;
|
||||
const mode = new Uint32Array(1);
|
||||
GetConsoleMode(handle, mode);
|
||||
- SetConsoleMode(handle, mode[0] | ENABLE_VIRTUAL_TERMINAL_INPUT);
|
||||
+ if (!(mode[0] & ENABLE_VIRTUAL_TERMINAL_INPUT)) {
|
||||
+ SetConsoleMode(handle, mode[0] | ENABLE_VIRTUAL_TERMINAL_INPUT);
|
||||
+ }
|
||||
}
|
||||
catch {
|
||||
// koffi not available — Shift+Tab won't be distinguishable from Tab
|
||||
44
scripts/install-pi-global.js
Normal file
44
scripts/install-pi-global.js
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
#!/usr/bin/env node
|
||||
import { cpSync, existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs'
|
||||
import os from 'node:os'
|
||||
import { dirname, join, resolve } from 'node:path'
|
||||
import { fileURLToPath } from 'node:url'
|
||||
|
||||
const __dirname = dirname(fileURLToPath(import.meta.url))
|
||||
const resourcesDir = resolve(__dirname, '..', 'src', 'resources')
|
||||
const piRoot = join(os.homedir(), '.pi')
|
||||
const piAgentDir = join(piRoot, 'agent')
|
||||
|
||||
const copyDir = (name) => {
|
||||
const src = join(resourcesDir, name)
|
||||
const dest = join(piAgentDir, name)
|
||||
if (!existsSync(src)) return false
|
||||
mkdirSync(dest, { recursive: true })
|
||||
cpSync(src, dest, { recursive: true, force: true })
|
||||
return true
|
||||
}
|
||||
|
||||
mkdirSync(piAgentDir, { recursive: true })
|
||||
|
||||
const copied = []
|
||||
if (copyDir('extensions')) copied.push('extensions')
|
||||
if (copyDir('skills')) copied.push('skills')
|
||||
if (copyDir('agents')) copied.push('agents')
|
||||
|
||||
const agentsMdSrc = join(resourcesDir, 'AGENTS.md')
|
||||
if (existsSync(agentsMdSrc)) {
|
||||
writeFileSync(join(piAgentDir, 'AGENTS.md'), readFileSync(agentsMdSrc))
|
||||
copied.push('AGENTS.md')
|
||||
}
|
||||
|
||||
const workflowSrc = join(resourcesDir, 'GSD-WORKFLOW.md')
|
||||
if (existsSync(workflowSrc)) {
|
||||
writeFileSync(join(piRoot, 'GSD-WORKFLOW.md'), readFileSync(workflowSrc))
|
||||
copied.push('GSD-WORKFLOW.md')
|
||||
}
|
||||
|
||||
process.stdout.write(
|
||||
`Installed GSD resources for pi in ${piRoot}\n` +
|
||||
`Copied: ${copied.join(', ')}\n` +
|
||||
`Extensions are now available under ${join(piAgentDir, 'extensions')}\n`
|
||||
)
|
||||
|
|
@ -35,6 +35,14 @@ const banner =
|
|||
|
||||
process.stderr.write(banner)
|
||||
|
||||
// Apply patches to upstream dependencies (non-fatal)
|
||||
try {
|
||||
execSync('npx patch-package', { stdio: 'inherit', cwd: resolve(__dirname, '..') })
|
||||
process.stderr.write(`\n ${green}✓${reset} Patches applied\n`)
|
||||
} catch {
|
||||
process.stderr.write(`\n ${yellow}⚠${reset} Failed to apply patches — run ${cyan}npx patch-package${reset} manually\n`)
|
||||
}
|
||||
|
||||
// Install Playwright chromium for browser tools (non-fatal)
|
||||
const args = os.platform() === 'linux' ? '--with-deps' : ''
|
||||
try {
|
||||
|
|
|
|||
66
scripts/uninstall-pi-global.js
Normal file
66
scripts/uninstall-pi-global.js
Normal file
|
|
@ -0,0 +1,66 @@
|
|||
#!/usr/bin/env node
|
||||
import { existsSync, readFileSync, readdirSync, rmSync, rmdirSync } from 'node:fs'
|
||||
import os from 'node:os'
|
||||
import { dirname, join, resolve } from 'node:path'
|
||||
import { fileURLToPath } from 'node:url'
|
||||
|
||||
const __dirname = dirname(fileURLToPath(import.meta.url))
|
||||
const resourcesDir = resolve(__dirname, '..', 'src', 'resources')
|
||||
const piRoot = join(os.homedir(), '.pi')
|
||||
const piAgentDir = join(piRoot, 'agent')
|
||||
|
||||
const removed = []
|
||||
const skipped = []
|
||||
|
||||
function safeRemove(path, label) {
|
||||
if (!existsSync(path)) return
|
||||
rmSync(path, { recursive: true, force: true })
|
||||
removed.push(label)
|
||||
}
|
||||
|
||||
function removeResourceEntries(containerName) {
|
||||
const srcDir = join(resourcesDir, containerName)
|
||||
const destDir = join(piAgentDir, containerName)
|
||||
if (!existsSync(srcDir) || !existsSync(destDir)) return
|
||||
|
||||
for (const entry of readdirSync(srcDir)) {
|
||||
safeRemove(join(destDir, entry), `${containerName}/${entry}`)
|
||||
}
|
||||
|
||||
try {
|
||||
if (readdirSync(destDir).length === 0) {
|
||||
rmdirSync(destDir)
|
||||
removed.push(`${containerName}/`)
|
||||
}
|
||||
} catch {
|
||||
// ignore non-empty or missing dirs
|
||||
}
|
||||
}
|
||||
|
||||
function removeIfContentMatches(targetPath, sourcePath, label) {
|
||||
if (!existsSync(targetPath) || !existsSync(sourcePath)) return
|
||||
try {
|
||||
const target = readFileSync(targetPath, 'utf8')
|
||||
const source = readFileSync(sourcePath, 'utf8')
|
||||
if (target === source) {
|
||||
rmSync(targetPath, { force: true })
|
||||
removed.push(label)
|
||||
} else {
|
||||
skipped.push(`${label} (modified, left in place)`)
|
||||
}
|
||||
} catch {
|
||||
skipped.push(`${label} (could not verify, left in place)`)
|
||||
}
|
||||
}
|
||||
|
||||
removeResourceEntries('extensions')
|
||||
removeResourceEntries('skills')
|
||||
removeResourceEntries('agents')
|
||||
removeIfContentMatches(join(piAgentDir, 'AGENTS.md'), join(resourcesDir, 'AGENTS.md'), 'agent/AGENTS.md')
|
||||
removeIfContentMatches(join(piRoot, 'GSD-WORKFLOW.md'), join(resourcesDir, 'GSD-WORKFLOW.md'), 'GSD-WORKFLOW.md')
|
||||
|
||||
process.stdout.write(
|
||||
`Removed GSD resources from ${piRoot}\n` +
|
||||
`Removed: ${removed.length ? removed.join(', ') : '(nothing)'}\n` +
|
||||
(skipped.length ? `Skipped: ${skipped.join(', ')}\n` : '')
|
||||
)
|
||||
135
src/cli.ts
135
src/cli.ts
|
|
@ -1,18 +1,76 @@
|
|||
import {
|
||||
AuthStorage,
|
||||
DefaultResourceLoader,
|
||||
ModelRegistry,
|
||||
SettingsManager,
|
||||
SessionManager,
|
||||
createAgentSession,
|
||||
InteractiveMode,
|
||||
runPrintMode,
|
||||
} from '@mariozechner/pi-coding-agent'
|
||||
import { readFileSync } from 'node:fs'
|
||||
import { join } from 'node:path'
|
||||
import { agentDir, sessionsDir, authFilePath } from './app-paths.js'
|
||||
import { buildResourceLoader, initResources } from './resource-loader.js'
|
||||
import { initResources } from './resource-loader.js'
|
||||
import { ensureManagedTools } from './tool-bootstrap.js'
|
||||
import { loadStoredEnvKeys, runWizardIfNeeded } from './wizard.js'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Minimal CLI arg parser — detects print/subagent mode flags
|
||||
// ---------------------------------------------------------------------------
|
||||
interface CliFlags {
|
||||
mode?: 'text' | 'json' | 'rpc'
|
||||
print?: boolean
|
||||
noSession?: boolean
|
||||
model?: string
|
||||
extensions: string[]
|
||||
appendSystemPrompt?: string
|
||||
tools?: string[]
|
||||
messages: string[]
|
||||
}
|
||||
|
||||
function parseCliArgs(argv: string[]): CliFlags {
|
||||
const flags: CliFlags = { extensions: [], messages: [] }
|
||||
const args = argv.slice(2) // skip node + script
|
||||
for (let i = 0; i < args.length; i++) {
|
||||
const arg = args[i]
|
||||
if (arg === '--mode' && i + 1 < args.length) {
|
||||
const m = args[++i]
|
||||
if (m === 'text' || m === 'json' || m === 'rpc') flags.mode = m
|
||||
} else if (arg === '--print' || arg === '-p') {
|
||||
flags.print = true
|
||||
} else if (arg === '--no-session') {
|
||||
flags.noSession = true
|
||||
} else if (arg === '--model' && i + 1 < args.length) {
|
||||
flags.model = args[++i]
|
||||
} else if (arg === '--extension' && i + 1 < args.length) {
|
||||
flags.extensions.push(args[++i])
|
||||
} else if (arg === '--append-system-prompt' && i + 1 < args.length) {
|
||||
flags.appendSystemPrompt = args[++i]
|
||||
} else if (arg === '--tools' && i + 1 < args.length) {
|
||||
flags.tools = args[++i].split(',')
|
||||
} else if (!arg.startsWith('--') && !arg.startsWith('-')) {
|
||||
flags.messages.push(arg)
|
||||
}
|
||||
}
|
||||
return flags
|
||||
}
|
||||
|
||||
const cliFlags = parseCliArgs(process.argv)
|
||||
const isPrintMode = cliFlags.print || cliFlags.mode !== undefined
|
||||
|
||||
// Pi's tool bootstrap can mis-detect already-installed fd/rg on some systems
|
||||
// because spawnSync(..., ["--version"]) returns EPERM despite a zero exit code.
|
||||
// Provision local managed binaries first so Pi sees them without probing PATH.
|
||||
ensureManagedTools(join(agentDir, 'bin'))
|
||||
|
||||
const authStorage = AuthStorage.create(authFilePath)
|
||||
loadStoredEnvKeys(authStorage)
|
||||
await runWizardIfNeeded(authStorage)
|
||||
|
||||
// Skip the setup wizard in print mode — it requires TTY interaction
|
||||
if (!isPrintMode) {
|
||||
await runWizardIfNeeded(authStorage)
|
||||
}
|
||||
|
||||
const modelRegistry = new ModelRegistry(authStorage)
|
||||
const settingsManager = SettingsManager.create(agentDir)
|
||||
|
|
@ -53,10 +111,79 @@ if (!settingsManager.getCollapseChangelog()) {
|
|||
settingsManager.setCollapseChangelog(true)
|
||||
}
|
||||
|
||||
const sessionManager = SessionManager.create(process.cwd(), sessionsDir)
|
||||
// ---------------------------------------------------------------------------
|
||||
// Print / subagent mode — single-shot execution, no TTY required
|
||||
// ---------------------------------------------------------------------------
|
||||
if (isPrintMode) {
|
||||
const sessionManager = cliFlags.noSession
|
||||
? SessionManager.inMemory()
|
||||
: SessionManager.create(process.cwd())
|
||||
|
||||
// Read --append-system-prompt file content (subagent writes agent system prompts to temp files)
|
||||
let appendSystemPrompt: string | undefined
|
||||
if (cliFlags.appendSystemPrompt) {
|
||||
try {
|
||||
appendSystemPrompt = readFileSync(cliFlags.appendSystemPrompt, 'utf-8')
|
||||
} catch {
|
||||
// If it's not a file path, treat it as literal text
|
||||
appendSystemPrompt = cliFlags.appendSystemPrompt
|
||||
}
|
||||
}
|
||||
|
||||
initResources(agentDir)
|
||||
const resourceLoader = new DefaultResourceLoader({
|
||||
agentDir,
|
||||
additionalExtensionPaths: cliFlags.extensions.length > 0 ? cliFlags.extensions : undefined,
|
||||
appendSystemPrompt,
|
||||
})
|
||||
await resourceLoader.reload()
|
||||
|
||||
const { session, extensionsResult } = await createAgentSession({
|
||||
authStorage,
|
||||
modelRegistry,
|
||||
settingsManager,
|
||||
sessionManager,
|
||||
resourceLoader,
|
||||
})
|
||||
|
||||
if (extensionsResult.errors.length > 0) {
|
||||
for (const err of extensionsResult.errors) {
|
||||
process.stderr.write(`[gsd] Extension load error: ${err.error}\n`)
|
||||
}
|
||||
}
|
||||
|
||||
// Apply --model override if specified
|
||||
if (cliFlags.model) {
|
||||
const available = modelRegistry.getAvailable()
|
||||
const match =
|
||||
available.find((m) => m.id === cliFlags.model) ||
|
||||
available.find((m) => `${m.provider}/${m.id}` === cliFlags.model)
|
||||
if (match) {
|
||||
session.setModel(match)
|
||||
}
|
||||
}
|
||||
|
||||
const mode = cliFlags.mode || 'text'
|
||||
await runPrintMode(session, {
|
||||
mode: mode === 'rpc' ? 'json' : mode,
|
||||
messages: cliFlags.messages,
|
||||
})
|
||||
process.exit(0)
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Interactive mode — normal TTY session
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Per-directory session storage — same encoding as the upstream SDK so that
|
||||
// /resume only shows sessions from the current working directory.
|
||||
const cwd = process.cwd()
|
||||
const safePath = `--${cwd.replace(/^[/\\]/, '').replace(/[/\\:]/g, '-')}--`
|
||||
const projectSessionsDir = join(sessionsDir, safePath)
|
||||
const sessionManager = SessionManager.create(cwd, projectSessionsDir)
|
||||
|
||||
initResources(agentDir)
|
||||
const resourceLoader = buildResourceLoader(agentDir)
|
||||
const resourceLoader = new DefaultResourceLoader({ agentDir })
|
||||
await resourceLoader.reload()
|
||||
|
||||
const { session, extensionsResult } = await createAgentSession({
|
||||
|
|
|
|||
|
|
@ -1,4 +1,5 @@
|
|||
import { DefaultResourceLoader } from '@mariozechner/pi-coding-agent'
|
||||
import { homedir } from 'node:os'
|
||||
import { cpSync, existsSync, mkdirSync, readFileSync, writeFileSync } from 'node:fs'
|
||||
import { dirname, join, resolve } from 'node:path'
|
||||
import { fileURLToPath } from 'node:url'
|
||||
|
|
@ -52,10 +53,16 @@ export function initResources(agentDir: string): void {
|
|||
}
|
||||
|
||||
/**
|
||||
* Constructs a DefaultResourceLoader with no additionalExtensionPaths.
|
||||
* Extensions are synced to agentDir by initResources() and pi auto-discovers
|
||||
* them from ~/.gsd/agent/extensions/ via its normal agentDir scan.
|
||||
* Constructs a DefaultResourceLoader that loads extensions from both
|
||||
* ~/.gsd/agent/extensions/ (GSD's default) and ~/.pi/agent/extensions/ (pi's default).
|
||||
* This allows users to use extensions from either location.
|
||||
*/
|
||||
export function buildResourceLoader(agentDir: string): DefaultResourceLoader {
|
||||
return new DefaultResourceLoader({ agentDir })
|
||||
const piAgentDir = join(homedir(), '.pi', 'agent')
|
||||
const piExtensionsDir = join(piAgentDir, 'extensions')
|
||||
|
||||
return new DefaultResourceLoader({
|
||||
agentDir,
|
||||
additionalExtensionPaths: [piExtensionsDir],
|
||||
})
|
||||
}
|
||||
|
|
|
|||
|
|
@ -48,6 +48,34 @@ import { createConnection } from "node:net";
|
|||
import { randomUUID } from "node:crypto";
|
||||
import { writeFileSync, readFileSync, existsSync, mkdirSync } from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import { createRequire } from "node:module";
|
||||
|
||||
// ── Windows VT Input Restoration ────────────────────────────────────────────
|
||||
// Child processes (esp. Git Bash / MSYS2) can strip the ENABLE_VIRTUAL_TERMINAL_INPUT
|
||||
// flag from the shared stdin console handle. Re-enable it after each child exits.
|
||||
|
||||
let _vtHandles: { GetConsoleMode: Function; SetConsoleMode: Function; handle: unknown } | null = null;
|
||||
function restoreWindowsVTInput(): void {
|
||||
if (process.platform !== "win32") return;
|
||||
try {
|
||||
if (!_vtHandles) {
|
||||
const cjsRequire = createRequire(import.meta.url);
|
||||
const koffi = cjsRequire("koffi");
|
||||
const k32 = koffi.load("kernel32.dll");
|
||||
const GetStdHandle = k32.func("void* __stdcall GetStdHandle(int)");
|
||||
const GetConsoleMode = k32.func("bool __stdcall GetConsoleMode(void*, _Out_ uint32_t*)");
|
||||
const SetConsoleMode = k32.func("bool __stdcall SetConsoleMode(void*, uint32_t)");
|
||||
const handle = GetStdHandle(-10);
|
||||
_vtHandles = { GetConsoleMode, SetConsoleMode, handle };
|
||||
}
|
||||
const ENABLE_VIRTUAL_TERMINAL_INPUT = 0x0200;
|
||||
const mode = new Uint32Array(1);
|
||||
_vtHandles.GetConsoleMode(_vtHandles.handle, mode);
|
||||
if (!(mode[0] & ENABLE_VIRTUAL_TERMINAL_INPUT)) {
|
||||
_vtHandles.SetConsoleMode(_vtHandles.handle, mode[0] | ENABLE_VIRTUAL_TERMINAL_INPUT);
|
||||
}
|
||||
} catch { /* koffi not available on non-Windows */ }
|
||||
}
|
||||
|
||||
// ── Types ──────────────────────────────────────────────────────────────────
|
||||
|
||||
|
|
@ -623,6 +651,7 @@ function startProcess(opts: StartOptions): BgProcess {
|
|||
});
|
||||
|
||||
proc.on("exit", (code, sig) => {
|
||||
restoreWindowsVTInput();
|
||||
bg.alive = false;
|
||||
bg.exitCode = code;
|
||||
bg.signal = sig ?? null;
|
||||
|
|
|
|||
|
|
@ -8,7 +8,7 @@
|
|||
* - Every action returns feedback (accessibility snapshot, screenshots on navigate)
|
||||
* - Errors include visual debugging (screenshots on failure, surfaced JS errors)
|
||||
* - Smart waits (domcontentloaded + best-effort settle, not blocking networkidle)
|
||||
* - 2x DPI screenshots for readable text
|
||||
* - Screenshots capped at 1568px max dimension (Anthropic API limit safety)
|
||||
* - JPEG for viewport screenshots (smaller), PNG for element crops (transparency)
|
||||
* - Auto-handles JS dialogs (alert/confirm/prompt) to prevent page freezes
|
||||
* - Auto-switches to new tabs (popups, target="_blank")
|
||||
|
|
@ -731,11 +731,75 @@ async function postActionSummary(p: Page, target?: Page | Frame): Promise<string
|
|||
}
|
||||
}
|
||||
|
||||
// Anthropic API rejects images > 2000px in multi-image requests.
|
||||
// Cap at 1568px (recommended optimal size) to stay well within limits.
|
||||
const MAX_SCREENSHOT_DIM = 1568;
|
||||
|
||||
/**
|
||||
* If either dimension of the image buffer exceeds MAX_SCREENSHOT_DIM,
|
||||
* downscale proportionally using the browser's canvas (zero dependencies).
|
||||
* Returns the original buffer unchanged if already within limits.
|
||||
*/
|
||||
async function constrainScreenshot(
|
||||
page: Page,
|
||||
buffer: Buffer,
|
||||
mimeType: string,
|
||||
quality: number,
|
||||
): Promise<Buffer> {
|
||||
let width: number;
|
||||
let height: number;
|
||||
|
||||
if (mimeType === "image/png") {
|
||||
width = buffer.readUInt32BE(16);
|
||||
height = buffer.readUInt32BE(20);
|
||||
} else {
|
||||
width = 0;
|
||||
height = 0;
|
||||
for (let i = 0; i < buffer.length - 8; i++) {
|
||||
if (buffer[i] === 0xff && (buffer[i + 1] === 0xc0 || buffer[i + 1] === 0xc2)) {
|
||||
height = buffer.readUInt16BE(i + 5);
|
||||
width = buffer.readUInt16BE(i + 7);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (width <= MAX_SCREENSHOT_DIM && height <= MAX_SCREENSHOT_DIM) {
|
||||
return buffer;
|
||||
}
|
||||
|
||||
const b64 = buffer.toString("base64");
|
||||
const result = await page.evaluate(
|
||||
async ({ b64, mime, maxDim, q }) => {
|
||||
const img = new Image();
|
||||
await new Promise<void>((resolve, reject) => {
|
||||
img.onload = () => resolve();
|
||||
img.onerror = reject;
|
||||
img.src = `data:${mime};base64,${b64}`;
|
||||
});
|
||||
const scale = Math.min(maxDim / img.width, maxDim / img.height);
|
||||
const w = Math.round(img.width * scale);
|
||||
const h = Math.round(img.height * scale);
|
||||
const canvas = document.createElement("canvas");
|
||||
canvas.width = w;
|
||||
canvas.height = h;
|
||||
const ctx = canvas.getContext("2d")!;
|
||||
ctx.drawImage(img, 0, 0, w, h);
|
||||
return canvas.toDataURL(mime, q / 100);
|
||||
},
|
||||
{ b64, mime: mimeType, maxDim: MAX_SCREENSHOT_DIM, q: quality },
|
||||
);
|
||||
|
||||
const resizedB64 = result.split(",")[1];
|
||||
return Buffer.from(resizedB64, "base64");
|
||||
}
|
||||
|
||||
/** Capture a JPEG screenshot for error debugging. Returns base64 or null. */
|
||||
async function captureErrorScreenshot(p: Page | null): Promise<{ data: string; mimeType: string } | null> {
|
||||
if (!p) return null;
|
||||
try {
|
||||
const buf = await p.screenshot({ type: "jpeg", quality: 60 });
|
||||
let buf = await p.screenshot({ type: "jpeg", quality: 60, scale: "css" });
|
||||
buf = await constrainScreenshot(p, buf, "image/jpeg", 60);
|
||||
return { data: buf.toString("base64"), mimeType: "image/jpeg" };
|
||||
} catch {
|
||||
return null;
|
||||
|
|
@ -1602,7 +1666,8 @@ export default function (pi: ExtensionAPI) {
|
|||
|
||||
let screenshotContent: any[] = [];
|
||||
try {
|
||||
const buf = await p.screenshot({ type: "jpeg", quality: 80 });
|
||||
let buf = await p.screenshot({ type: "jpeg", quality: 80, scale: "css" });
|
||||
buf = await constrainScreenshot(p, buf, "image/jpeg", 80);
|
||||
screenshotContent = [{ type: "image", data: buf.toString("base64"), mimeType: "image/jpeg" }];
|
||||
} catch {}
|
||||
|
||||
|
|
@ -1744,7 +1809,8 @@ export default function (pi: ExtensionAPI) {
|
|||
// Include screenshot like navigate does
|
||||
let screenshotContent: any[] = [];
|
||||
try {
|
||||
const buf = await p.screenshot({ type: "jpeg", quality: 80 });
|
||||
let buf = await p.screenshot({ type: "jpeg", quality: 80, scale: "css" });
|
||||
buf = await constrainScreenshot(p, buf, "image/jpeg", 80);
|
||||
screenshotContent = [{
|
||||
type: "image",
|
||||
data: buf.toString("base64"),
|
||||
|
|
@ -1805,23 +1871,27 @@ export default function (pi: ExtensionAPI) {
|
|||
|
||||
let screenshotBuffer: Buffer;
|
||||
let mimeType: string;
|
||||
const quality = params.quality ?? 80;
|
||||
|
||||
if (params.selector) {
|
||||
// Element screenshots: keep PNG (may have transparency)
|
||||
const locator = p.locator(params.selector).first();
|
||||
screenshotBuffer = await locator.screenshot({ type: "png" });
|
||||
screenshotBuffer = await locator.screenshot({ type: "png", scale: "css" });
|
||||
mimeType = "image/png";
|
||||
} else {
|
||||
// Viewport/fullpage: use JPEG (3-5x smaller, fine for AI analysis)
|
||||
const quality = params.quality ?? 80;
|
||||
screenshotBuffer = await p.screenshot({
|
||||
fullPage: params.fullPage ?? false,
|
||||
type: "jpeg",
|
||||
quality,
|
||||
scale: "css",
|
||||
});
|
||||
mimeType = "image/jpeg";
|
||||
}
|
||||
|
||||
// Downscale if dimensions exceed API limit (1568px max)
|
||||
screenshotBuffer = await constrainScreenshot(p, screenshotBuffer, mimeType, quality);
|
||||
|
||||
const base64Data = screenshotBuffer.toString("base64");
|
||||
const title = await p.title();
|
||||
const url = p.url();
|
||||
|
|
|
|||
|
|
@ -6,20 +6,44 @@
|
|||
* Falls back to raw REST API with GITHUB_TOKEN env var.
|
||||
*/
|
||||
|
||||
import { execSync } from "node:child_process";
|
||||
import { execSync, spawnSync, type SpawnSyncReturns } from "node:child_process";
|
||||
|
||||
// ─── Auth detection ───────────────────────────────────────────────────────────
|
||||
|
||||
let _useGhCli: boolean | null = null;
|
||||
|
||||
function hasGhCli(): boolean {
|
||||
let ghSpawnImpl = (args: string[], input?: string, cwd?: string): SpawnSyncReturns<string> =>
|
||||
spawnSync("gh", args, {
|
||||
cwd,
|
||||
encoding: "utf8",
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
input,
|
||||
});
|
||||
|
||||
function ghSpawn(args: string[], input?: string, cwd?: string): SpawnSyncReturns<string> {
|
||||
return ghSpawnImpl(args, input, cwd);
|
||||
}
|
||||
|
||||
export function resetGhCliDetectionForTests(): void {
|
||||
_useGhCli = null;
|
||||
ghSpawnImpl = (args: string[], input?: string, cwd?: string): SpawnSyncReturns<string> =>
|
||||
spawnSync("gh", args, {
|
||||
cwd,
|
||||
encoding: "utf8",
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
input,
|
||||
});
|
||||
}
|
||||
|
||||
export function setGhSpawnForTests(fn: (args: string[], input?: string, cwd?: string) => SpawnSyncReturns<string>): void {
|
||||
ghSpawnImpl = fn;
|
||||
_useGhCli = null;
|
||||
}
|
||||
|
||||
export function hasGhCli(): boolean {
|
||||
if (_useGhCli !== null) return _useGhCli;
|
||||
try {
|
||||
execSync("gh auth status", { encoding: "utf8", stdio: ["pipe", "pipe", "pipe"] });
|
||||
_useGhCli = true;
|
||||
} catch {
|
||||
_useGhCli = false;
|
||||
}
|
||||
const result = ghSpawn(["auth", "token"]);
|
||||
_useGhCli = result.status === 0 && !result.error && !!result.stdout?.trim();
|
||||
return _useGhCli;
|
||||
}
|
||||
|
||||
|
|
@ -120,11 +144,6 @@ export async function ghApi<T = unknown>(
|
|||
return fetchApi<T>(endpoint, method, options.params, options.body, token);
|
||||
}
|
||||
|
||||
function shellEscape(s: string): string {
|
||||
// Single-quote wrapping, escaping any existing single quotes
|
||||
return "'" + s.replace(/'/g, "'\\''") + "'";
|
||||
}
|
||||
|
||||
function ghCliApi<T>(
|
||||
endpoint: string,
|
||||
method: string,
|
||||
|
|
@ -132,39 +151,36 @@ function ghCliApi<T>(
|
|||
body?: Record<string, unknown>,
|
||||
cwd?: string,
|
||||
): T {
|
||||
const parts = ["gh", "api", shellEscape(endpoint), "--method", method];
|
||||
const args = ["api", endpoint, "--method", method];
|
||||
|
||||
if (params) {
|
||||
for (const [key, val] of Object.entries(params)) {
|
||||
if (val === undefined) continue;
|
||||
if (Array.isArray(val)) {
|
||||
for (const v of val) {
|
||||
parts.push("-f", shellEscape(`${key}[]=${v}`));
|
||||
args.push("-f", `${key}[]=${v}`);
|
||||
}
|
||||
} else {
|
||||
parts.push("-f", shellEscape(`${key}=${String(val)}`));
|
||||
args.push("-f", `${key}=${String(val)}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (body) {
|
||||
parts.push("--input", "-");
|
||||
args.push("--input", "-");
|
||||
}
|
||||
|
||||
try {
|
||||
const result = execSync(parts.join(" "), {
|
||||
cwd: cwd ?? process.cwd(),
|
||||
encoding: "utf8",
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
input: body ? JSON.stringify(body) : undefined,
|
||||
});
|
||||
if (!result.trim()) return {} as T;
|
||||
return JSON.parse(result) as T;
|
||||
} catch (e: unknown) {
|
||||
const err = e as { stderr?: string; stdout?: string; message?: string };
|
||||
const msg = err.stderr?.trim() || err.stdout?.trim() || err.message || String(e);
|
||||
throw new Error(`gh api error: ${msg}`);
|
||||
const result = ghSpawn(args, body ? JSON.stringify(body) : undefined, cwd ?? process.cwd());
|
||||
|
||||
const stdout = result.stdout?.trim() ?? "";
|
||||
const stderr = result.stderr?.trim() ?? "";
|
||||
|
||||
if (result.status !== 0) {
|
||||
throw new Error(`gh api error: ${stderr || stdout || result.error?.message || `exit code ${result.status}`}`);
|
||||
}
|
||||
|
||||
if (!stdout) return {} as T;
|
||||
return JSON.parse(stdout) as T;
|
||||
}
|
||||
|
||||
async function fetchApi<T>(
|
||||
|
|
|
|||
323
src/resources/extensions/google-search/index.ts
Normal file
323
src/resources/extensions/google-search/index.ts
Normal file
|
|
@ -0,0 +1,323 @@
|
|||
/**
|
||||
* Google Search Extension
|
||||
*
|
||||
* Provides a `google_search` tool that performs web searches via Gemini's
|
||||
* Google Search grounding feature. Uses the user's existing GEMINI_API_KEY
|
||||
* and Google Cloud GenAI credits.
|
||||
*
|
||||
* The tool sends queries to Gemini Flash with `googleSearch: {}` enabled.
|
||||
* Gemini internally performs Google searches, synthesizes an answer, and
|
||||
* returns it with source URLs from grounding metadata.
|
||||
*/
|
||||
|
||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||
import {
|
||||
DEFAULT_MAX_BYTES,
|
||||
DEFAULT_MAX_LINES,
|
||||
formatSize,
|
||||
truncateHead,
|
||||
} from "@mariozechner/pi-coding-agent";
|
||||
import { Text } from "@mariozechner/pi-tui";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { GoogleGenAI } from "@google/genai";
|
||||
|
||||
// ── Types ────────────────────────────────────────────────────────────────────
|
||||
|
||||
interface SearchSource {
|
||||
title: string;
|
||||
uri: string;
|
||||
domain: string;
|
||||
}
|
||||
|
||||
interface SearchResult {
|
||||
answer: string;
|
||||
sources: SearchSource[];
|
||||
searchQueries: string[];
|
||||
cached: boolean;
|
||||
}
|
||||
|
||||
interface SearchDetails {
|
||||
query: string;
|
||||
sourceCount: number;
|
||||
cached: boolean;
|
||||
durationMs: number;
|
||||
error?: string;
|
||||
}
|
||||
|
||||
// ── Lazy singleton client ────────────────────────────────────────────────────
|
||||
|
||||
let client: GoogleGenAI | null = null;
|
||||
|
||||
function getClient(): GoogleGenAI {
|
||||
if (!client) {
|
||||
client = new GoogleGenAI({ apiKey: process.env.GEMINI_API_KEY! });
|
||||
}
|
||||
return client;
|
||||
}
|
||||
|
||||
// ── In-session cache ─────────────────────────────────────────────────────────
|
||||
|
||||
const resultCache = new Map<string, SearchResult>();
|
||||
|
||||
function cacheKey(query: string): string {
|
||||
return query.toLowerCase().trim();
|
||||
}
|
||||
|
||||
// ── Extension ────────────────────────────────────────────────────────────────
|
||||
|
||||
export default function (pi: ExtensionAPI) {
|
||||
pi.registerTool({
|
||||
name: "google_search",
|
||||
label: "Google Search",
|
||||
description:
|
||||
"Search the web using Google Search via Gemini. " +
|
||||
"Returns an AI-synthesized answer grounded in Google Search results, plus source URLs. " +
|
||||
"Use this when you need current information from the web: recent events, documentation, " +
|
||||
"product details, technical references, news, etc. " +
|
||||
"Requires GEMINI_API_KEY. Alternative to Brave-based search tools for users with Google Cloud credits.",
|
||||
promptSnippet: "Search the web via Google Search to get current information with sources",
|
||||
promptGuidelines: [
|
||||
"Use google_search when you need up-to-date web information that isn't in your training data.",
|
||||
"Be specific with queries for better results, e.g. 'Next.js 15 app router migration guide' not just 'Next.js'.",
|
||||
"The tool returns both an answer and source URLs. Cite sources when sharing results with the user.",
|
||||
"Results are cached per-session, so repeated identical queries are free.",
|
||||
"You can still use fetch_page to read a specific URL if needed after getting results from google_search.",
|
||||
],
|
||||
parameters: Type.Object({
|
||||
query: Type.String({
|
||||
description: "The search query, e.g. 'latest Node.js LTS version' or 'how to configure Tailwind v4'",
|
||||
}),
|
||||
maxSources: Type.Optional(
|
||||
Type.Number({
|
||||
description: "Maximum number of source URLs to include (default 5, max 10).",
|
||||
minimum: 1,
|
||||
maximum: 10,
|
||||
}),
|
||||
),
|
||||
}),
|
||||
|
||||
async execute(_toolCallId, params, signal, _onUpdate, _ctx) {
|
||||
const startTime = Date.now();
|
||||
const maxSources = Math.min(Math.max(params.maxSources ?? 5, 1), 10);
|
||||
|
||||
// Check for API key
|
||||
if (!process.env.GEMINI_API_KEY) {
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: "text",
|
||||
text: "Error: GEMINI_API_KEY is not set. Please set this environment variable to use Google Search.\n\nExample: export GEMINI_API_KEY=your_key",
|
||||
},
|
||||
],
|
||||
isError: true,
|
||||
details: {
|
||||
query: params.query,
|
||||
sourceCount: 0,
|
||||
cached: false,
|
||||
durationMs: Date.now() - startTime,
|
||||
error: "auth_error: GEMINI_API_KEY not set",
|
||||
} as SearchDetails,
|
||||
};
|
||||
}
|
||||
|
||||
// Check cache
|
||||
const key = cacheKey(params.query);
|
||||
if (resultCache.has(key)) {
|
||||
const cached = resultCache.get(key)!;
|
||||
const output = formatOutput(cached, maxSources);
|
||||
return {
|
||||
content: [{ type: "text", text: output }],
|
||||
details: {
|
||||
query: params.query,
|
||||
sourceCount: cached.sources.length,
|
||||
cached: true,
|
||||
durationMs: Date.now() - startTime,
|
||||
} as SearchDetails,
|
||||
};
|
||||
}
|
||||
|
||||
// Call Gemini with Google Search grounding
|
||||
let result: SearchResult;
|
||||
try {
|
||||
const ai = getClient();
|
||||
const response = await ai.models.generateContent({
|
||||
model: "gemini-3-flash-preview",
|
||||
contents: params.query,
|
||||
config: {
|
||||
tools: [{ googleSearch: {} }],
|
||||
abortSignal: signal,
|
||||
},
|
||||
});
|
||||
|
||||
// Extract answer text
|
||||
const answer = response.text ?? "";
|
||||
|
||||
// Extract grounding metadata
|
||||
const candidate = response.candidates?.[0];
|
||||
const grounding = candidate?.groundingMetadata;
|
||||
|
||||
// Parse sources from grounding chunks
|
||||
const sources: SearchSource[] = [];
|
||||
const seenTitles = new Set<string>();
|
||||
if (grounding?.groundingChunks) {
|
||||
for (const chunk of grounding.groundingChunks) {
|
||||
if (chunk.web) {
|
||||
const title = chunk.web.title ?? "Untitled";
|
||||
// Dedupe by title since URIs are redirect URLs that differ per call
|
||||
if (seenTitles.has(title)) continue;
|
||||
seenTitles.add(title);
|
||||
// domain field is not available via Gemini API, use title as fallback
|
||||
// (title is typically the domain name, e.g. "wikipedia.org")
|
||||
const domain = chunk.web.domain ?? title;
|
||||
sources.push({
|
||||
title,
|
||||
uri: chunk.web.uri ?? "",
|
||||
domain,
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Extract search queries Gemini actually performed
|
||||
const searchQueries = grounding?.webSearchQueries ?? [];
|
||||
|
||||
result = { answer, sources, searchQueries, cached: false };
|
||||
} catch (err: unknown) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
|
||||
let errorType = "api_error";
|
||||
if (msg.includes("401") || msg.includes("UNAUTHENTICATED")) {
|
||||
errorType = "auth_error";
|
||||
} else if (msg.includes("429") || msg.includes("RESOURCE_EXHAUSTED") || msg.includes("quota")) {
|
||||
errorType = "rate_limit";
|
||||
}
|
||||
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
type: "text",
|
||||
text: `Google Search failed (${errorType}): ${msg}`,
|
||||
},
|
||||
],
|
||||
isError: true,
|
||||
details: {
|
||||
query: params.query,
|
||||
sourceCount: 0,
|
||||
cached: false,
|
||||
durationMs: Date.now() - startTime,
|
||||
error: `${errorType}: ${msg}`,
|
||||
} as SearchDetails,
|
||||
};
|
||||
}
|
||||
|
||||
// Cache the result
|
||||
resultCache.set(key, result);
|
||||
|
||||
// Format and truncate output
|
||||
const rawOutput = formatOutput(result, maxSources);
|
||||
const truncation = truncateHead(rawOutput, {
|
||||
maxLines: DEFAULT_MAX_LINES,
|
||||
maxBytes: DEFAULT_MAX_BYTES,
|
||||
});
|
||||
|
||||
let finalText = truncation.content;
|
||||
if (truncation.truncated) {
|
||||
finalText +=
|
||||
`\n\n[Truncated: showing ${truncation.outputLines}/${truncation.totalLines} lines` +
|
||||
` (${formatSize(truncation.outputBytes)} of ${formatSize(truncation.totalBytes)})]`;
|
||||
}
|
||||
|
||||
return {
|
||||
content: [{ type: "text", text: finalText }],
|
||||
details: {
|
||||
query: params.query,
|
||||
sourceCount: result.sources.length,
|
||||
cached: false,
|
||||
durationMs: Date.now() - startTime,
|
||||
} as SearchDetails,
|
||||
};
|
||||
},
|
||||
|
||||
renderCall(args, theme) {
|
||||
let text = theme.fg("toolTitle", theme.bold("google_search "));
|
||||
text += theme.fg("accent", `"${args.query}"`);
|
||||
return new Text(text, 0, 0);
|
||||
},
|
||||
|
||||
renderResult(result, { isPartial, expanded }, theme) {
|
||||
const d = result.details as SearchDetails | undefined;
|
||||
|
||||
if (isPartial) return new Text(theme.fg("warning", "Searching Google..."), 0, 0);
|
||||
if (result.isError || d?.error) {
|
||||
return new Text(theme.fg("error", `Error: ${d?.error ?? "unknown"}`), 0, 0);
|
||||
}
|
||||
|
||||
let text = theme.fg("success", `${d?.sourceCount ?? 0} sources`);
|
||||
text += theme.fg("dim", ` (${d?.durationMs ?? 0}ms)`);
|
||||
if (d?.cached) text += theme.fg("dim", " · cached");
|
||||
|
||||
if (expanded) {
|
||||
const content = result.content[0];
|
||||
if (content?.type === "text") {
|
||||
const preview = content.text.split("\n").slice(0, 8).join("\n");
|
||||
text += "\n\n" + theme.fg("dim", preview);
|
||||
if (content.text.split("\n").length > 8) {
|
||||
text += "\n" + theme.fg("muted", "...");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return new Text(text, 0, 0);
|
||||
},
|
||||
});
|
||||
|
||||
// ── Startup notification ─────────────────────────────────────────────────
|
||||
|
||||
pi.on("session_start", async (_event, ctx) => {
|
||||
if (!process.env.GEMINI_API_KEY) {
|
||||
ctx.ui.notify(
|
||||
"Google Search: No GEMINI_API_KEY set. The google_search tool will not work until this is configured.",
|
||||
"warning",
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
// ── Output formatting ────────────────────────────────────────────────────────
|
||||
|
||||
function formatOutput(result: SearchResult, maxSources: number): string {
|
||||
const lines: string[] = [];
|
||||
|
||||
// Answer
|
||||
if (result.answer) {
|
||||
lines.push(result.answer);
|
||||
} else {
|
||||
lines.push("(No answer text returned from search)");
|
||||
}
|
||||
|
||||
// Sources
|
||||
if (result.sources.length > 0) {
|
||||
lines.push("");
|
||||
lines.push("Sources:");
|
||||
const sourcesToShow = result.sources.slice(0, maxSources);
|
||||
for (let i = 0; i < sourcesToShow.length; i++) {
|
||||
const s = sourcesToShow[i];
|
||||
lines.push(`[${i + 1}] ${s.title} - ${s.domain}`);
|
||||
lines.push(` ${s.uri}`);
|
||||
}
|
||||
if (result.sources.length > maxSources) {
|
||||
lines.push(`(${result.sources.length - maxSources} more sources omitted)`);
|
||||
}
|
||||
} else {
|
||||
lines.push("");
|
||||
lines.push("(No source URLs found in grounding metadata)");
|
||||
}
|
||||
|
||||
// Search queries
|
||||
if (result.searchQueries.length > 0) {
|
||||
lines.push("");
|
||||
lines.push(`Searches performed: ${result.searchQueries.map((q) => `"${q}"`).join(", ")}`);
|
||||
}
|
||||
|
||||
return lines.join("\n");
|
||||
}
|
||||
9
src/resources/extensions/google-search/package.json
Normal file
9
src/resources/extensions/google-search/package.json
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
{
|
||||
"name": "pi-extension-google-search",
|
||||
"private": true,
|
||||
"version": "1.0.0",
|
||||
"type": "module",
|
||||
"pi": {
|
||||
"extensions": ["./index.ts"]
|
||||
}
|
||||
}
|
||||
|
|
@ -18,7 +18,7 @@ import type {
|
|||
|
||||
import { deriveState } from "./state.js";
|
||||
import type { GSDState } from "./types.js";
|
||||
import { loadFile, parseContinue, parseRoadmap, parseSummary, extractUatType, inlinePriorMilestoneSummary } from "./files.js";
|
||||
import { loadFile, parseContinue, parsePlan, parseRoadmap, parseSummary, extractUatType, inlinePriorMilestoneSummary } from "./files.js";
|
||||
export { inlinePriorMilestoneSummary };
|
||||
import type { UatType } from "./files.js";
|
||||
import { loadPrompt } from "./prompt-loader.js";
|
||||
|
|
@ -36,7 +36,6 @@ import {
|
|||
clearUnitRuntimeRecord,
|
||||
formatExecuteTaskRecoveryStatus,
|
||||
inspectExecuteTaskDurability,
|
||||
recordUnitProgress,
|
||||
readUnitRuntimeRecord,
|
||||
writeUnitRuntimeRecord,
|
||||
} from "./unit-runtime.js";
|
||||
|
|
@ -49,6 +48,7 @@ import {
|
|||
formatValidationIssues,
|
||||
} from "./observability-validator.js";
|
||||
import { ensureGitignore } from "./gitignore.js";
|
||||
import { runGSDDoctor, rebuildState } from "./doctor.js";
|
||||
import { snapshotSkills, clearSkillSnapshot } from "./skill-discovery.js";
|
||||
import {
|
||||
initMetrics, resetMetrics, snapshotUnitMetrics, getLedger,
|
||||
|
|
@ -65,11 +65,13 @@ import {
|
|||
} from "./worktree.ts";
|
||||
import { truncateToWidth, visibleWidth } from "@mariozechner/pi-tui";
|
||||
import { makeUI, GLYPH, INDENT } from "../shared/ui.js";
|
||||
import { showNextAction } from "../shared/next-action-ui.js";
|
||||
|
||||
// ─── State ────────────────────────────────────────────────────────────────────
|
||||
|
||||
let active = false;
|
||||
let paused = false;
|
||||
let stepMode = false;
|
||||
let verbose = false;
|
||||
let cmdCtx: ExtensionCommandContext | null = null;
|
||||
let basePath = "";
|
||||
|
|
@ -102,6 +104,7 @@ let idleWatchdogHandle: ReturnType<typeof setInterval> | null = null;
|
|||
export interface AutoDashboardData {
|
||||
active: boolean;
|
||||
paused: boolean;
|
||||
stepMode: boolean;
|
||||
startTime: number;
|
||||
elapsed: number;
|
||||
currentUnit: { type: string; id: string; startedAt: number } | null;
|
||||
|
|
@ -118,6 +121,7 @@ export function getAutoDashboardData(): AutoDashboardData {
|
|||
return {
|
||||
active,
|
||||
paused,
|
||||
stepMode,
|
||||
startTime: autoStartTime,
|
||||
elapsed: (active || paused) ? Date.now() - autoStartTime : 0,
|
||||
currentUnit: currentUnit ? { ...currentUnit } : null,
|
||||
|
|
@ -138,6 +142,10 @@ export function isAutoPaused(): boolean {
|
|||
return paused;
|
||||
}
|
||||
|
||||
export function isStepMode(): boolean {
|
||||
return stepMode;
|
||||
}
|
||||
|
||||
function clearUnitTimeout(): void {
|
||||
if (unitTimeoutHandle) {
|
||||
clearTimeout(unitTimeoutHandle);
|
||||
|
|
@ -174,6 +182,7 @@ export async function stopAuto(ctx?: ExtensionContext, pi?: ExtensionAPI): Promi
|
|||
resetMetrics();
|
||||
active = false;
|
||||
paused = false;
|
||||
stepMode = false;
|
||||
lastUnit = null;
|
||||
currentUnit = null;
|
||||
currentMilestoneId = null;
|
||||
|
|
@ -208,8 +217,9 @@ export async function pauseAuto(ctx?: ExtensionContext, _pi?: ExtensionAPI): Pro
|
|||
// — all needed for resume and dashboard display
|
||||
ctx?.ui.setStatus("gsd-auto", "paused");
|
||||
ctx?.ui.setWidget("gsd-progress", undefined);
|
||||
const resumeCmd = stepMode ? "/gsd next" : "/gsd auto";
|
||||
ctx?.ui.notify(
|
||||
"Auto-mode paused (Escape). Type to interact, or /gsd auto to resume.",
|
||||
`${stepMode ? "Step" : "Auto"}-mode paused (Escape). Type to interact, or ${resumeCmd} to resume.`,
|
||||
"info",
|
||||
);
|
||||
}
|
||||
|
|
@ -219,19 +229,24 @@ export async function startAuto(
|
|||
pi: ExtensionAPI,
|
||||
base: string,
|
||||
verboseMode: boolean,
|
||||
options?: { step?: boolean },
|
||||
): Promise<void> {
|
||||
const requestedStepMode = options?.step ?? false;
|
||||
|
||||
// If resuming from paused state, just re-activate and dispatch next unit.
|
||||
// The conversation is still intact — no need to reinitialize everything.
|
||||
if (paused) {
|
||||
paused = false;
|
||||
active = true;
|
||||
verbose = verboseMode;
|
||||
// Allow switching between step/auto on resume
|
||||
stepMode = requestedStepMode;
|
||||
cmdCtx = ctx;
|
||||
basePath = base;
|
||||
// Re-initialize metrics in case ledger was lost during pause
|
||||
if (!getLedger()) initMetrics(base);
|
||||
ctx.ui.setStatus("gsd-auto", "auto");
|
||||
ctx.ui.notify("Auto-mode resumed.", "info");
|
||||
ctx.ui.setStatus("gsd-auto", stepMode ? "next" : "auto");
|
||||
ctx.ui.notify(stepMode ? "Step-mode resumed." : "Auto-mode resumed.", "info");
|
||||
await dispatchNextUnit(ctx, pi);
|
||||
return;
|
||||
}
|
||||
|
|
@ -287,7 +302,7 @@ export async function startAuto(
|
|||
// No active work at all — start a new milestone via the discuss flow.
|
||||
if (!state.activeMilestone || state.phase === "complete") {
|
||||
const { showSmartEntry } = await import("./guided-flow.js");
|
||||
await showSmartEntry(ctx, pi, base);
|
||||
await showSmartEntry(ctx, pi, base, { step: requestedStepMode });
|
||||
return;
|
||||
}
|
||||
|
||||
|
|
@ -299,13 +314,14 @@ export async function startAuto(
|
|||
const hasContext = !!(contextFile && await loadFile(contextFile));
|
||||
if (!hasContext) {
|
||||
const { showSmartEntry } = await import("./guided-flow.js");
|
||||
await showSmartEntry(ctx, pi, base);
|
||||
await showSmartEntry(ctx, pi, base, { step: requestedStepMode });
|
||||
return;
|
||||
}
|
||||
// Has context, no roadmap — auto-mode will research + plan it
|
||||
}
|
||||
|
||||
active = true;
|
||||
stepMode = requestedStepMode;
|
||||
verbose = verboseMode;
|
||||
cmdCtx = ctx;
|
||||
basePath = base;
|
||||
|
|
@ -325,12 +341,13 @@ export async function startAuto(
|
|||
snapshotSkills();
|
||||
}
|
||||
|
||||
ctx.ui.setStatus("gsd-auto", "auto");
|
||||
ctx.ui.setStatus("gsd-auto", stepMode ? "next" : "auto");
|
||||
const modeLabel = stepMode ? "Step-mode" : "Auto-mode";
|
||||
const pendingCount = state.registry.filter(m => m.status !== 'complete').length;
|
||||
const scopeMsg = pendingCount > 1
|
||||
? `Will loop through ${pendingCount} milestones.`
|
||||
: "Will loop until milestone complete.";
|
||||
ctx.ui.notify(`Auto-mode started. ${scopeMsg}`, "info");
|
||||
ctx.ui.notify(`${modeLabel} started. ${scopeMsg}`, "info");
|
||||
|
||||
// Dispatch the first unit
|
||||
await dispatchNextUnit(ctx, pi);
|
||||
|
|
@ -360,11 +377,141 @@ export async function handleAgentEnd(
|
|||
} catch {
|
||||
// Non-fatal
|
||||
}
|
||||
|
||||
// Post-hook: fix mechanical bookkeeping the LLM may have skipped.
|
||||
// 1. Doctor handles: checkbox marking, stub summaries/UATs.
|
||||
// 2. STATE.md is always rebuilt from disk state (purely derived, no LLM needed).
|
||||
// This is more reliable than prompt instructions for mechanical tasks.
|
||||
// Scope to slice level (M001/S01) so doctor checks all tasks within the slice.
|
||||
try {
|
||||
const scopeParts = currentUnit.id.split("/").slice(0, 2);
|
||||
const doctorScope = scopeParts.join("/");
|
||||
const report = await runGSDDoctor(basePath, { fix: true, scope: doctorScope });
|
||||
if (report.fixesApplied.length > 0) {
|
||||
ctx.ui.notify(`Post-hook: applied ${report.fixesApplied.length} fix(es).`, "info");
|
||||
}
|
||||
} catch {
|
||||
// Non-fatal — doctor failure should never block dispatch
|
||||
}
|
||||
try {
|
||||
await rebuildState(basePath);
|
||||
autoCommitCurrentBranch(basePath, currentUnit.type, currentUnit.id);
|
||||
} catch {
|
||||
// Non-fatal
|
||||
}
|
||||
}
|
||||
|
||||
// In step mode, pause and show a wizard instead of immediately dispatching
|
||||
if (stepMode) {
|
||||
await showStepWizard(ctx, pi);
|
||||
return;
|
||||
}
|
||||
|
||||
await dispatchNextUnit(ctx, pi);
|
||||
}
|
||||
|
||||
// ─── Step Mode Wizard ─────────────────────────────────────────────────────
|
||||
|
||||
/**
|
||||
* Show the step-mode wizard after a unit completes.
|
||||
* Derives the next unit from disk state and presents it to the user.
|
||||
* If the user confirms, dispatches the next unit. If not, pauses.
|
||||
*/
|
||||
async function showStepWizard(
|
||||
ctx: ExtensionContext,
|
||||
pi: ExtensionAPI,
|
||||
): Promise<void> {
|
||||
if (!cmdCtx) return;
|
||||
|
||||
const state = await deriveState(basePath);
|
||||
const mid = state.activeMilestone?.id;
|
||||
|
||||
// Build summary of what just completed
|
||||
const justFinished = currentUnit
|
||||
? `${unitVerb(currentUnit.type)} ${currentUnit.id}`
|
||||
: "previous unit";
|
||||
|
||||
// If no active milestone or everything is complete, stop
|
||||
if (!mid || state.phase === "complete") {
|
||||
await stopAuto(ctx, pi);
|
||||
return;
|
||||
}
|
||||
|
||||
// Peek at what's next by examining state
|
||||
const nextDesc = describeNextUnit(state);
|
||||
|
||||
const choice = await showNextAction(cmdCtx, {
|
||||
title: `GSD — ${justFinished} complete`,
|
||||
summary: [
|
||||
`${mid}: ${state.activeMilestone?.title ?? mid}`,
|
||||
...(state.activeSlice ? [`${state.activeSlice.id}: ${state.activeSlice.title}`] : []),
|
||||
],
|
||||
actions: [
|
||||
{
|
||||
id: "continue",
|
||||
label: nextDesc.label,
|
||||
description: nextDesc.description,
|
||||
recommended: true,
|
||||
},
|
||||
{
|
||||
id: "auto",
|
||||
label: "Switch to auto",
|
||||
description: "Continue without pausing between steps.",
|
||||
},
|
||||
{
|
||||
id: "status",
|
||||
label: "View status",
|
||||
description: "Open the dashboard.",
|
||||
},
|
||||
],
|
||||
notYetMessage: "Run /gsd next when ready to continue.",
|
||||
});
|
||||
|
||||
if (choice === "continue") {
|
||||
await dispatchNextUnit(ctx, pi);
|
||||
} else if (choice === "auto") {
|
||||
stepMode = false;
|
||||
ctx.ui.setStatus("gsd-auto", "auto");
|
||||
ctx.ui.notify("Switched to auto-mode.", "info");
|
||||
await dispatchNextUnit(ctx, pi);
|
||||
} else if (choice === "status") {
|
||||
// Show status then re-show the wizard
|
||||
const { fireStatusViaCommand } = await import("./commands.js");
|
||||
await fireStatusViaCommand(ctx as ExtensionCommandContext);
|
||||
await showStepWizard(ctx, pi);
|
||||
} else {
|
||||
// "not_yet" — pause
|
||||
await pauseAuto(ctx, pi);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Describe what the next unit will be, based on current state.
|
||||
*/
|
||||
function describeNextUnit(state: GSDState): { label: string; description: string } {
|
||||
const sid = state.activeSlice?.id;
|
||||
const sTitle = state.activeSlice?.title;
|
||||
const tid = state.activeTask?.id;
|
||||
const tTitle = state.activeTask?.title;
|
||||
|
||||
switch (state.phase) {
|
||||
case "pre-planning":
|
||||
return { label: "Research & plan milestone", description: "Scout the landscape and create the roadmap." };
|
||||
case "planning":
|
||||
return { label: `Plan ${sid}: ${sTitle}`, description: "Research and decompose into tasks." };
|
||||
case "executing":
|
||||
return { label: `Execute ${tid}: ${tTitle}`, description: "Run the next task in a fresh session." };
|
||||
case "summarizing":
|
||||
return { label: `Complete ${sid}: ${sTitle}`, description: "Write summary, UAT, and merge to main." };
|
||||
case "replanning-slice":
|
||||
return { label: `Replan ${sid}: ${sTitle}`, description: "Blocker found — replan the slice." };
|
||||
case "completing-milestone":
|
||||
return { label: "Complete milestone", description: "Write milestone summary." };
|
||||
default:
|
||||
return { label: "Continue", description: "Execute the next step." };
|
||||
}
|
||||
}
|
||||
|
||||
// ─── Progress Widget ──────────────────────────────────────────────────────
|
||||
|
||||
function unitVerb(unitType: string): string {
|
||||
|
|
@ -465,7 +612,8 @@ function updateProgressWidget(
|
|||
? theme.fg("accent", GLYPH.statusActive)
|
||||
: theme.fg("dim", GLYPH.statusPending);
|
||||
const elapsed = formatAutoElapsed();
|
||||
const headerLeft = `${pad}${dot} ${theme.fg("accent", theme.bold("GSD"))} ${theme.fg("success", "AUTO")}`;
|
||||
const modeTag = stepMode ? "NEXT" : "AUTO";
|
||||
const headerLeft = `${pad}${dot} ${theme.fg("accent", theme.bold("GSD"))} ${theme.fg("success", modeTag)}`;
|
||||
const headerRight = elapsed ? theme.fg("dim", elapsed) : "";
|
||||
lines.push(rightAlign(headerLeft, headerRight, width));
|
||||
|
||||
|
|
@ -985,6 +1133,17 @@ async function dispatchNextUnit(
|
|||
if (!runtime) return;
|
||||
if (Date.now() - runtime.lastProgressAt < idleTimeoutMs) return;
|
||||
|
||||
// Before triggering recovery, check if the agent is actually producing
|
||||
// work on disk. `git status --porcelain` is cheap and catches any
|
||||
// staged/unstaged/untracked changes the agent made since lastProgressAt.
|
||||
if (detectWorkingTreeActivity(basePath)) {
|
||||
writeUnitRuntimeRecord(basePath, unitType, unitId, currentUnit.startedAt, {
|
||||
lastProgressAt: Date.now(),
|
||||
lastProgressKind: "filesystem-activity",
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (currentUnit) {
|
||||
const modelId = ctx.model?.id ?? "unknown";
|
||||
snapshotUnitMetrics(ctx, currentUnit.type, currentUnit.id, currentUnit.startedAt, modelId);
|
||||
|
|
@ -2136,6 +2295,25 @@ export function skipExecuteTask(
|
|||
return true;
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect whether the agent is producing work on disk by checking git for
|
||||
* any working-tree changes (staged, unstaged, or untracked). Returns true
|
||||
* if there are uncommitted changes — meaning the agent is actively working,
|
||||
* even though it hasn't signaled progress through runtime records.
|
||||
*/
|
||||
function detectWorkingTreeActivity(cwd: string): boolean {
|
||||
try {
|
||||
const out = execSync("git status --porcelain", {
|
||||
cwd,
|
||||
stdio: ["pipe", "pipe", "pipe"],
|
||||
timeout: 5000,
|
||||
});
|
||||
return out.toString().trim().length > 0;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Resolve the expected artifact for a non-execute-task unit to an absolute path.
|
||||
* Returns null for unit types that don't produce a single file (execute-task,
|
||||
|
|
|
|||
|
|
@ -10,8 +10,8 @@ import { join, dirname } from "node:path";
|
|||
import { fileURLToPath } from "node:url";
|
||||
import { deriveState } from "./state.js";
|
||||
import { GSDDashboardOverlay } from "./dashboard-overlay.js";
|
||||
import { showSmartEntry, showQueue, showDiscuss } from "./guided-flow.js";
|
||||
import { startAuto, stopAuto, isAutoActive, isAutoPaused } from "./auto.js";
|
||||
import { showQueue, showDiscuss } from "./guided-flow.js";
|
||||
import { startAuto, stopAuto, isAutoActive, isAutoPaused, isStepMode } from "./auto.js";
|
||||
import {
|
||||
getGlobalGSDPreferencesPath,
|
||||
getLegacyGlobalGSDPreferencesPath,
|
||||
|
|
@ -53,10 +53,10 @@ function dispatchDoctorHeal(pi: ExtensionAPI, scope: string | undefined, reportT
|
|||
|
||||
export function registerGSDCommand(pi: ExtensionAPI): void {
|
||||
pi.registerCommand("gsd", {
|
||||
description: "GSD — Get Shit Done: /gsd auto|stop|status|queue|prefs|doctor|migrate|remote",
|
||||
description: "GSD — Get Shit Done: /gsd next|auto|stop|status|queue|prefs|doctor|migrate|remote",
|
||||
|
||||
getArgumentCompletions: (prefix: string) => {
|
||||
const subcommands = ["auto", "stop", "status", "queue", "discuss", "prefs", "doctor", "migrate", "remote"];
|
||||
const subcommands = ["next", "auto", "stop", "status", "queue", "discuss", "prefs", "doctor", "migrate", "remote"];
|
||||
const parts = prefix.trim().split(/\s+/);
|
||||
|
||||
if (parts.length <= 1) {
|
||||
|
|
@ -120,6 +120,12 @@ export function registerGSDCommand(pi: ExtensionAPI): void {
|
|||
return;
|
||||
}
|
||||
|
||||
if (trimmed === "next" || trimmed.startsWith("next ")) {
|
||||
const verboseMode = trimmed.includes("--verbose");
|
||||
await startAuto(ctx, pi, process.cwd(), verboseMode, { step: true });
|
||||
return;
|
||||
}
|
||||
|
||||
if (trimmed === "auto" || trimmed.startsWith("auto ")) {
|
||||
const verboseMode = trimmed.includes("--verbose");
|
||||
await startAuto(ctx, pi, process.cwd(), verboseMode);
|
||||
|
|
@ -156,12 +162,13 @@ export function registerGSDCommand(pi: ExtensionAPI): void {
|
|||
}
|
||||
|
||||
if (trimmed === "") {
|
||||
await showSmartEntry(ctx, pi, process.cwd());
|
||||
// Bare /gsd defaults to step mode
|
||||
await startAuto(ctx, pi, process.cwd(), false, { step: true });
|
||||
return;
|
||||
}
|
||||
|
||||
ctx.ui.notify(
|
||||
`Unknown: /gsd ${trimmed}. Use /gsd, /gsd auto, /gsd stop, /gsd status, /gsd queue, /gsd discuss, /gsd prefs [global|project|status], /gsd doctor [audit|fix|heal] [M###/S##], /gsd migrate <path>, or /gsd remote [slack|discord|status|disconnect].`,
|
||||
`Unknown: /gsd ${trimmed}. Use /gsd, /gsd next, /gsd auto, /gsd stop, /gsd status, /gsd queue, /gsd discuss, /gsd prefs [global|project|status], /gsd doctor [audit|fix|heal] [M###/S##], /gsd migrate <path>, or /gsd remote [slack|discord|status|disconnect].`,
|
||||
"warning",
|
||||
);
|
||||
},
|
||||
|
|
|
|||
|
|
@ -147,6 +147,13 @@ async function updateStateFile(basePath: string, fixesApplied: string[]): Promis
|
|||
fixesApplied.push(`updated ${path}`);
|
||||
}
|
||||
|
||||
/** Rebuild STATE.md from current disk state. Exported for auto-mode post-hooks. */
|
||||
export async function rebuildState(basePath: string): Promise<void> {
|
||||
const state = await deriveState(basePath);
|
||||
const path = resolveGsdRootFile(basePath, "STATE");
|
||||
await saveFile(path, buildStateMarkdown(state));
|
||||
}
|
||||
|
||||
async function ensureSliceSummaryStub(basePath: string, milestoneId: string, sliceId: string, fixesApplied: string[]): Promise<void> {
|
||||
const path = join(resolveSlicePath(basePath, milestoneId, sliceId) ?? relSlicePath(basePath, milestoneId, sliceId), `${sliceId}-SUMMARY.md`);
|
||||
const absolute = resolveSliceFile(basePath, milestoneId, sliceId, "SUMMARY") ?? join(resolveSlicePath(basePath, milestoneId, sliceId)!, `${sliceId}-SUMMARY.md`);
|
||||
|
|
|
|||
|
|
@ -31,13 +31,14 @@ let pendingAutoStart: {
|
|||
pi: ExtensionAPI;
|
||||
basePath: string;
|
||||
milestoneId: string; // the milestone being discussed
|
||||
step?: boolean; // preserve step mode through discuss → auto transition
|
||||
} | null = null;
|
||||
|
||||
/** Called from agent_end to check if auto-mode should start after discuss */
|
||||
export function checkAutoStartAfterDiscuss(): boolean {
|
||||
if (!pendingAutoStart) return false;
|
||||
|
||||
const { ctx, pi, basePath, milestoneId } = pendingAutoStart;
|
||||
const { ctx, pi, basePath, milestoneId, step } = pendingAutoStart;
|
||||
|
||||
// Don't fire until the discuss phase has actually produced a context file
|
||||
// for the milestone being discussed. agent_end fires after every LLM turn,
|
||||
|
|
@ -47,7 +48,7 @@ export function checkAutoStartAfterDiscuss(): boolean {
|
|||
if (!contextFile) return false; // no context yet — keep waiting
|
||||
|
||||
pendingAutoStart = null;
|
||||
startAuto(ctx, pi, basePath, false).catch(() => {});
|
||||
startAuto(ctx, pi, basePath, false, { step }).catch(() => {});
|
||||
return true;
|
||||
}
|
||||
|
||||
|
|
@ -435,7 +436,9 @@ export async function showSmartEntry(
|
|||
ctx: ExtensionCommandContext,
|
||||
pi: ExtensionAPI,
|
||||
basePath: string,
|
||||
options?: { step?: boolean },
|
||||
): Promise<void> {
|
||||
const stepMode = options?.step;
|
||||
|
||||
// ── Ensure git repo exists — GSD needs it for branch-per-slice ──────
|
||||
try {
|
||||
|
|
@ -501,7 +504,7 @@ export async function showSmartEntry(
|
|||
|
||||
if (isFirst) {
|
||||
// First ever — skip wizard, just ask directly
|
||||
pendingAutoStart = { ctx, pi, basePath, milestoneId: nextId };
|
||||
pendingAutoStart = { ctx, pi, basePath, milestoneId: nextId, step: stepMode };
|
||||
dispatchWorkflow(pi, buildDiscussPrompt(nextId,
|
||||
`New project, milestone ${nextId}. Do NOT read or explore .gsd/ — it's empty scaffolding.`,
|
||||
basePath
|
||||
|
|
@ -522,7 +525,7 @@ export async function showSmartEntry(
|
|||
});
|
||||
|
||||
if (choice === "new_milestone") {
|
||||
pendingAutoStart = { ctx, pi, basePath, milestoneId: nextId };
|
||||
pendingAutoStart = { ctx, pi, basePath, milestoneId: nextId, step: stepMode };
|
||||
dispatchWorkflow(pi, buildDiscussPrompt(nextId,
|
||||
`New milestone ${nextId}.`,
|
||||
basePath
|
||||
|
|
@ -560,7 +563,7 @@ export async function showSmartEntry(
|
|||
const milestoneIds = findMilestoneIds(basePath);
|
||||
const nextId = `M${String(milestoneIds.length + 1).padStart(3, "0")}`;
|
||||
|
||||
pendingAutoStart = { ctx, pi, basePath, milestoneId: nextId };
|
||||
pendingAutoStart = { ctx, pi, basePath, milestoneId: nextId, step: stepMode };
|
||||
dispatchWorkflow(pi, buildDiscussPrompt(nextId,
|
||||
`New milestone ${nextId}.`,
|
||||
basePath
|
||||
|
|
|
|||
|
|
@ -63,6 +63,14 @@ export default function (pi: ExtensionAPI) {
|
|||
registerGSDCommand(pi);
|
||||
registerWorktreeCommand(pi);
|
||||
|
||||
// ── /exit — kill the process immediately ──────────────────────────────
|
||||
pi.registerCommand("exit", {
|
||||
description: "Exit GSD immediately",
|
||||
handler: async (_ctx) => {
|
||||
process.exit(0);
|
||||
},
|
||||
});
|
||||
|
||||
// ── Dynamic-cwd bash tool with default timeout ────────────────────────
|
||||
// The built-in bash tool captures cwd at startup. This replacement uses
|
||||
// a spawnHook to read process.cwd() dynamically so that process.chdir()
|
||||
|
|
|
|||
|
|
@ -1,14 +1,23 @@
|
|||
{{preamble}}
|
||||
|
||||
Say exactly: "What's the vision?" — nothing else. Wait for the user's answer.
|
||||
Ask: "What's the vision?" once, and then use whatever the user replies with as the vision input to continue.
|
||||
|
||||
## Discussion Phase
|
||||
Special handling: even if the user's reply is not a project description (for example, a question about status, branch state, or another clarification), do not repeat "What's the vision?" — treat the reply as the vision input and proceed with the discussion logic.
|
||||
|
||||
After they describe it, your job is to understand the project deeply enough to define the project's capability contract before planning slices.
|
||||
## Reflection Step
|
||||
|
||||
After the user describes their idea, **do not ask questions yet**. First, prove you understood by reflecting back:
|
||||
|
||||
1. Summarize what you understood in your own words — concretely, not abstractly.
|
||||
2. Include a complexity/scale read: "This sounds like [task/project/product] scale — roughly N milestone(s)."
|
||||
3. Include scope honesty — a bullet list of the major capabilities you're hearing: "Here's what I'm hearing: [bullet list of major capabilities]."
|
||||
4. Ask: "Did I get that right, or did I miss something?" — plain text, not `ask_user_questions`. Let them correct freely.
|
||||
|
||||
This prevents runaway questioning by forcing comprehension proof before anything else. Do not skip this step. Do not combine it with the first question round.
|
||||
|
||||
## Vision Mapping
|
||||
|
||||
Before diving into detailed Q&A, read the user's description and classify its scale:
|
||||
After reflection is confirmed, classify the scale:
|
||||
|
||||
- **Task** — a focused piece of work (single milestone, few slices)
|
||||
- **Project** — a coherent product with multiple major capabilities (multi-milestone likely)
|
||||
|
|
@ -19,40 +28,69 @@ Before diving into detailed Q&A, read the user's description and classify its sc
|
|||
2. Present this to the user for confirmation or adjustment
|
||||
3. Only then begin the deep Q&A — and scope the Q&A to the full vision, not just M001
|
||||
|
||||
**For Task scale:** Proceed directly to the discussion flow below (single milestone).
|
||||
**For Task scale:** Proceed directly to questioning.
|
||||
|
||||
**Anti-reduction rule:** If the user describes a big vision, plan the big vision. Do not ask "what's the minimum viable version?" or try to reduce scope unless the user explicitly asks for an MVP or minimal version. When something is complex or risky, phase it into a later milestone — do not cut it. The user's ambition is the target, and your job is to sequence it intelligently, not shrink it.
|
||||
|
||||
---
|
||||
## Mandatory Investigation Before First Question Round
|
||||
|
||||
**If the user provides a file path or pastes a large document** (spec, design doc, product plan, chat export), read it fully before asking questions. Use it as the starting point — don't ask them to re-explain what's already in the document. Your questions should fill gaps and resolve ambiguities the document doesn't cover.
|
||||
Before asking your first question, do a mandatory investigation pass. This is not optional.
|
||||
|
||||
**Investigate between question rounds to make your questions smarter.** Before each round of questions, do enough lightweight research that your questions are grounded in reality — not guesses about what exists or what's possible.
|
||||
1. **Scout the codebase** — `ls`, `find`, `rg`, or `scout` for broad unfamiliar areas. Understand what already exists, what patterns are established, what constraints current code imposes.
|
||||
2. **Check library docs** — `resolve_library` / `get_library_docs` for any tech the user mentioned. Get current facts about capabilities, constraints, API shapes, version-specific behavior.
|
||||
3. **Web search** — `search-the-web` if the domain is unfamiliar, if you need current best practices, or if the user referenced external services/APIs you need facts about. Use `fetch_page` for full content when snippets aren't enough.
|
||||
|
||||
- Check library docs (`resolve_library` / `get_library_docs`) when the user mentions tech you need current facts about — capabilities, constraints, API shapes, version-specific behavior
|
||||
- Do web searches (`search-the-web`) to verify the landscape — what solutions exist, what's changed recently, what's the current best practice. Use `freshness` for recency-sensitive queries, `domain` to target specific sites. Use `fetch_page` to read the full content of promising URLs when snippets aren't enough.
|
||||
- Scout the codebase (`ls`, `find`, `rg`, or `scout` for broad unfamiliar areas) to understand what already exists, what patterns are established, what constraints current code imposes
|
||||
This happens ONCE, before the first round. The goal: your first questions should reflect what's actually true, not what you assume.
|
||||
|
||||
Don't go deep — just enough that your next question reflects what's actually true rather than what you assume.
|
||||
For subsequent rounds, continue investigating between rounds — check docs, search, or scout as needed to make each round's questions smarter. But the first-round investigation is mandatory and explicit.
|
||||
|
||||
**Use this to actively surface:**
|
||||
- The biggest technical unknowns — what could fail, what hasn't been proven, what might invalidate the plan
|
||||
- Integration surfaces — external systems, APIs, libraries, or internal modules this work touches
|
||||
- What needs to be proven before committing — the things that, if they don't work, mean the plan is wrong
|
||||
- Product reality requirements: primary user loop, launchability expectations, continuity expectations, and failure visibility expectations
|
||||
- Items that are complex, risky, or lower priority — phase these into later milestones rather than deferring or cutting them. Only truly unwanted capabilities become anti-features.
|
||||
## Questioning Philosophy
|
||||
|
||||
**Then use ask_user_questions** to dig into gray areas — architecture choices, scope boundaries, tech preferences, what's in vs out. 1-3 questions per round.
|
||||
You are a thinking partner, not an interviewer.
|
||||
|
||||
If a `GSD Skill Preferences` block is present in system context, use it to decide which skills to load and follow during discuss/planning work, but do not let it override the required discuss flow or artifact requirements.
|
||||
**Start open, follow energy.** Let the user's enthusiasm guide where you dig deeper. If they light up about a particular aspect, explore it. If they're vague about something, that's where you probe.
|
||||
|
||||
**Self-regulate depth by scale:**
|
||||
- **Task scale:** After about 5-10 questions total (2-3 rounds), or when you feel you have a solid understanding, offer to proceed.
|
||||
- **Project/Product scale:** After about 15-25 questions total (5-8 rounds), or when you feel you have a solid understanding, offer to proceed.
|
||||
**Challenge vagueness, make abstract concrete.** When the user says something abstract ("it should be smart" / "it needs to handle edge cases" / "good UX"), push for specifics. What does "smart" mean in practice? Which edge cases? What does good UX look like for this specific interaction?
|
||||
|
||||
Include a question like:
|
||||
"I think I have a good picture. Ready to confirm requirements and milestone plan, or are there more things to discuss?"
|
||||
with options: "Ready to confirm requirements and milestone plan (Recommended)", "I have more to discuss"
|
||||
**Questions must be about the experience, not the implementation.** Never ask "what auth provider?" — ask "when someone logs in, what should that feel like?" Never ask "what database?" — ask "when they come back tomorrow, what should they see?" Implementation is your job. Understanding what they want to experience is the discussion's job.
|
||||
|
||||
**Freeform rule:** When the user selects "Other" or clearly wants to explain something freely, stop using `ask_user_questions` and switch to plain text follow-ups. Let them talk. Resume structured questions when appropriate.
|
||||
|
||||
**Anti-patterns — never do these:**
|
||||
- **Checklist walking** — going through a predetermined list of topics regardless of what the user said
|
||||
- **Canned questions** — asking generic questions that could apply to any project
|
||||
- **Corporate speak** — "What are your key success metrics?" / "Who are the stakeholders?"
|
||||
- **Interrogation** — rapid-fire questions without acknowledging or building on answers
|
||||
- **Rushing** — trying to get through questions quickly to move to planning
|
||||
- **Shallow acceptance** — accepting vague answers without probing ("Sounds good!" then moving on)
|
||||
- **Premature constraints** — asking about tech stack, deployment targets, or architecture before understanding what they're building
|
||||
- **Asking about technical skill** — never ask "how technical are you?" or "are you familiar with X?" — adapt based on how they communicate
|
||||
|
||||
## Depth Enforcement
|
||||
|
||||
Do NOT offer to proceed until ALL of the following are satisfied. Track these internally as a background checklist:
|
||||
|
||||
- [ ] **What they're building** — concrete enough that you could explain it to a stranger
|
||||
- [ ] **Why it needs to exist** — the problem it solves or the desire it fulfills
|
||||
- [ ] **Who it's for** — even if just themselves
|
||||
- [ ] **What "done" looks like** — observable outcomes, not abstract goals
|
||||
- [ ] **The biggest technical unknowns / risks** — what could fail, what hasn't been proven
|
||||
- [ ] **What external systems/services this touches** — APIs, databases, third-party services, hardware
|
||||
|
||||
**Minimum round counts before the wrap-up gate is allowed:**
|
||||
- **Task scale:** at least 2 full rounds (6+ questions asked and answered)
|
||||
- **Project/Product scale:** at least 4 full rounds (12+ questions asked and answered)
|
||||
|
||||
Do not count the reflection step as a question round. Rounds start after reflection is confirmed.
|
||||
|
||||
## Wrap-up Gate
|
||||
|
||||
Only after the depth checklist is fully satisfied AND minimum rounds are hit, offer to proceed.
|
||||
|
||||
The wrap-up gate must include a scope reflection:
|
||||
"Here's what I'm planning to build: [list of capabilities with rough complexity]. Does this match your vision, or did I miss something?"
|
||||
|
||||
Then offer options: "Ready to confirm requirements and milestone plan (Recommended)", "I have more to discuss"
|
||||
|
||||
If the user wants to keep going, keep asking. If they're ready, proceed.
|
||||
|
||||
|
|
@ -105,7 +143,9 @@ Rules:
|
|||
|
||||
For multi-milestone projects, requirements should span the full vision. Requirements owned by later milestones get provisional ownership. The full requirement set captures the user's complete vision — milestones are the sequencing strategy, not the scope boundary.
|
||||
|
||||
If the project is new or has no `REQUIREMENTS.md`, confirm candidate requirements with the user before writing the roadmap. Keep the confirmation lightweight: confirm, defer, reject, or add.
|
||||
If the project is new or has no `REQUIREMENTS.md`, confirm candidate requirements with the user before writing the roadmap.
|
||||
|
||||
**Print the requirements in chat before asking for confirmation.** Do not say "here are the requirements" and then only write them to a file. The user must see them in the terminal. Print a markdown table with columns: ID, Title, Status, Owner, Source. Group by status (Active, Deferred, Out of Scope). After the table, ask: "Confirm, adjust, or add?"
|
||||
|
||||
## Scope Assessment
|
||||
|
||||
|
|
@ -115,6 +155,12 @@ If Vision Mapping classified the work as Task but discussion revealed Project-sc
|
|||
|
||||
## Output Phase
|
||||
|
||||
### Roadmap Preview
|
||||
|
||||
Before writing any files, **print the planned roadmap in chat** so the user can see and approve it. Print a markdown table with columns: Slice, Title, Risk, Depends, Demo. One row per slice. Below the table, print the milestone definition of done as a bullet list.
|
||||
|
||||
Ask: "Ready to write the plan, or want to adjust?" Only proceed to writing files after the user confirms.
|
||||
|
||||
### Naming Convention
|
||||
|
||||
Directories use bare IDs. Files use ID-SUFFIX format. Titles live inside file content, not in names.
|
||||
|
|
|
|||
|
|
@ -1,8 +1,16 @@
|
|||
You are merging GSD artifacts from worktree **{{worktreeName}}** (branch `{{worktreeBranch}}`) into target branch `{{mainBranch}}`.
|
||||
You are merging changes from worktree **{{worktreeName}}** (branch `{{worktreeBranch}}`) into target branch `{{mainBranch}}`.
|
||||
|
||||
## Working Directory
|
||||
|
||||
Your current working directory has been set to the **main project tree** at `{{mainTreePath}}`. You are on the `{{mainBranch}}` branch. All git and file commands run from here.
|
||||
|
||||
- **Main tree (CWD):** `{{mainTreePath}}` — this is where you run `git merge`, read main-branch files, and commit
|
||||
- **Worktree directory:** `{{worktreePath}}` — the worktree's working copy; read files here to inspect worktree versions before merging
|
||||
- **Worktree branch:** `{{worktreeBranch}}`
|
||||
|
||||
## Context
|
||||
|
||||
The worktree was created as a parallel workspace. It may contain new milestones, updated roadmaps, new plans, research, decisions, or other GSD artifacts that need to be reconciled with the main branch.
|
||||
The worktree was created as a parallel workspace. It may contain code changes, new milestones, updated roadmaps, new plans, research, decisions, or other artifacts that need to be merged into the target branch.
|
||||
|
||||
### Commit History (worktree)
|
||||
|
||||
|
|
@ -10,7 +18,7 @@ The worktree was created as a parallel workspace. It may contain new milestones,
|
|||
{{commitLog}}
|
||||
```
|
||||
|
||||
### GSD Artifact Changes
|
||||
### Changed Files
|
||||
|
||||
**Added files:**
|
||||
{{addedFiles}}
|
||||
|
|
@ -21,10 +29,16 @@ The worktree was created as a parallel workspace. It may contain new milestones,
|
|||
**Removed files:**
|
||||
{{removedFiles}}
|
||||
|
||||
### Full Diff
|
||||
### Code Diff
|
||||
|
||||
```diff
|
||||
{{fullDiff}}
|
||||
{{codeDiff}}
|
||||
```
|
||||
|
||||
### GSD Artifact Diff
|
||||
|
||||
```diff
|
||||
{{gsdDiff}}
|
||||
```
|
||||
|
||||
## Your Task
|
||||
|
|
@ -33,7 +47,15 @@ Analyze the changes and guide the merge. Follow these steps exactly:
|
|||
|
||||
### Step 1: Categorize Changes
|
||||
|
||||
Classify each changed GSD artifact:
|
||||
Classify each changed file:
|
||||
|
||||
**Code changes:**
|
||||
- **New source files** — new modules, components, utilities, tests
|
||||
- **Modified source files** — changes to existing code
|
||||
- **Config changes** — package.json, tsconfig, build config, etc.
|
||||
- **Deleted files** — removed source or config files
|
||||
|
||||
**GSD artifact changes:**
|
||||
- **New milestones** — entirely new M###/ directories with roadmaps
|
||||
- **New slices/tasks** — new planning artifacts within existing milestones
|
||||
- **Updated roadmaps** — modifications to existing M###-ROADMAP.md files
|
||||
|
|
@ -47,7 +69,12 @@ Classify each changed GSD artifact:
|
|||
|
||||
For each **modified** file, check whether the main branch version has also changed since the worktree branched off. Flag any files where both branches have diverged — these need manual reconciliation.
|
||||
|
||||
Read the current main-branch version of each modified file and compare it against both the worktree version and the common ancestor to identify:
|
||||
To compare versions:
|
||||
- **Main-branch version:** read the file at its normal path (your CWD is the main tree)
|
||||
- **Worktree version:** read the file at `{{worktreePath}}/<relative-path>`
|
||||
- Use `git merge-base {{mainBranch}} {{worktreeBranch}}` to find the common ancestor if needed
|
||||
|
||||
Classify each modified file:
|
||||
- **Clean merges** — main hasn't changed, worktree changes can apply directly
|
||||
- **Conflicts** — both branches changed the same file; needs reconciliation
|
||||
- **Stale changes** — worktree modified a file that main has since replaced or removed
|
||||
|
|
@ -58,28 +85,35 @@ Present a merge plan to the user:
|
|||
|
||||
1. For **clean merges**: list files that will merge without conflict
|
||||
2. For **conflicts**: show both versions side-by-side and propose a reconciled version
|
||||
3. For **new artifacts**: confirm they should be added to the main branch
|
||||
4. For **removed artifacts**: confirm the removals are intentional
|
||||
3. For **new files**: confirm they should be added to the main branch
|
||||
4. For **removed files**: confirm the removals are intentional
|
||||
|
||||
Ask the user to confirm the merge plan before proceeding.
|
||||
|
||||
### Step 4: Execute Merge
|
||||
|
||||
Once confirmed:
|
||||
Once confirmed, run all commands from `{{mainTreePath}}` (your CWD):
|
||||
|
||||
1. If there are conflicts requiring manual reconciliation, apply the reconciled versions to the main branch working tree
|
||||
2. Run `git merge --squash {{worktreeBranch}}` to bring in all changes
|
||||
3. Review the staged changes — if any reconciled files need adjustment, apply them now
|
||||
4. Commit with message: `merge(worktree/{{worktreeName}}): <summary of what was merged>`
|
||||
5. Report what was merged
|
||||
1. Ensure you are on the target branch: `git checkout {{mainBranch}}`
|
||||
2. If there are conflicts requiring manual reconciliation, apply the reconciled versions first
|
||||
3. Run `git merge --squash {{worktreeBranch}}` to bring in all changes
|
||||
4. Review the staged changes — if any reconciled files need adjustment, apply them now
|
||||
5. Commit with message: `merge(worktree/{{worktreeName}}): <summary of what was merged>`
|
||||
6. Report what was merged
|
||||
|
||||
### Step 5: Cleanup Prompt
|
||||
|
||||
After a successful merge, ask the user whether to:
|
||||
- **Remove the worktree** — delete `.gsd/worktrees/{{worktreeName}}/` and the `{{worktreeBranch}}` branch
|
||||
- **Remove the worktree** — delete the worktree directory and the `{{worktreeBranch}}` branch
|
||||
- **Keep the worktree** — leave it for continued parallel work
|
||||
|
||||
If the user chooses to remove it, run `/worktree remove {{worktreeName}}`.
|
||||
If the user chooses to remove it, run these commands from `{{mainTreePath}}`:
|
||||
```
|
||||
git worktree remove {{worktreePath}}
|
||||
git branch -D {{worktreeBranch}}
|
||||
```
|
||||
|
||||
**Do NOT use `/worktree remove` — the command handler may not have the correct state after the merge.** Use the git commands directly.
|
||||
|
||||
## Important
|
||||
|
||||
|
|
|
|||
38
src/resources/extensions/gsd/tests/discuss-prompt.test.ts
Normal file
38
src/resources/extensions/gsd/tests/discuss-prompt.test.ts
Normal file
|
|
@ -0,0 +1,38 @@
|
|||
import { readFileSync } from 'node:fs';
|
||||
import { join } from 'node:path';
|
||||
|
||||
let passed = 0;
|
||||
let failed = 0;
|
||||
|
||||
function assert(condition: boolean, message: string): void {
|
||||
if (condition) passed++;
|
||||
else {
|
||||
failed++;
|
||||
console.error(` FAIL: ${message}`);
|
||||
}
|
||||
}
|
||||
|
||||
const promptPath = join(process.cwd(), 'src/resources/extensions/gsd/prompts/discuss.md');
|
||||
const discussPrompt = readFileSync(promptPath, 'utf-8');
|
||||
|
||||
console.log('\n=== discuss prompt: resilient vision framing ===');
|
||||
{
|
||||
const hardenedPattern = /Say exactly:\s*"What's the vision\?"/;
|
||||
assert(!hardenedPattern.test(discussPrompt), 'prompt no longer uses exact-verbosity lock');
|
||||
assert(
|
||||
discussPrompt.includes('Ask: "What\'s the vision?" once'),
|
||||
'prompt asks for vision exactly once',
|
||||
);
|
||||
assert(
|
||||
discussPrompt.includes('Special handling'),
|
||||
'prompt documents special handling for non-vision user messages',
|
||||
);
|
||||
assert(
|
||||
discussPrompt.includes('instead of repeating "What\'s the vision?"'),
|
||||
'prompt forbids repeating the vision question',
|
||||
);
|
||||
}
|
||||
|
||||
console.log(`\nResults: ${passed} passed, ${failed} failed`);
|
||||
if (failed > 0) process.exit(1);
|
||||
console.log('All tests passed ✓');
|
||||
|
|
@ -6,7 +6,7 @@
|
|||
* Usage:
|
||||
* /worktree <name> — create a new worktree
|
||||
* /worktree list — list existing worktrees
|
||||
* /worktree merge <branch> [target] — start LLM-guided merge (default target: main)
|
||||
* /worktree merge [name] [target] — start LLM-guided merge (auto-detects when inside a worktree)
|
||||
* /worktree remove <name> — remove a worktree and its branch
|
||||
*/
|
||||
|
||||
|
|
@ -18,15 +18,18 @@ import {
|
|||
createWorktree,
|
||||
listWorktrees,
|
||||
removeWorktree,
|
||||
diffWorktreeGSD,
|
||||
diffWorktreeAll,
|
||||
diffWorktreeNumstat,
|
||||
getMainBranch,
|
||||
getWorktreeGSDDiff,
|
||||
getWorktreeCodeDiff,
|
||||
getWorktreeLog,
|
||||
worktreeBranchName,
|
||||
worktreePath,
|
||||
} from "./worktree-manager.js";
|
||||
import type { FileLineStat } from "./worktree-manager.js";
|
||||
import { existsSync, realpathSync, readFileSync, utimesSync } from "node:fs";
|
||||
import { join, resolve } from "node:path";
|
||||
import { join, resolve, sep } from "node:path";
|
||||
|
||||
/**
|
||||
* Tracks the original project root so we can switch back.
|
||||
|
|
@ -100,7 +103,7 @@ export function getActiveWorktreeName(): string | null {
|
|||
|
||||
function worktreeCompletions(prefix: string) {
|
||||
const parts = prefix.trim().split(/\s+/);
|
||||
const subcommands = ["list", "merge", "remove", "switch", "return"];
|
||||
const subcommands = ["list", "merge", "remove", "switch", "create", "return"];
|
||||
|
||||
if (parts.length <= 1) {
|
||||
const partial = parts[0] ?? "";
|
||||
|
|
@ -119,13 +122,21 @@ function worktreeCompletions(prefix: string) {
|
|||
}
|
||||
}
|
||||
|
||||
if ((parts[0] === "merge" || parts[0] === "remove" || parts[0] === "switch") && parts.length <= 2) {
|
||||
if ((parts[0] === "merge" || parts[0] === "remove" || parts[0] === "switch" || parts[0] === "create") && parts.length <= 2) {
|
||||
const namePrefix = parts[1] ?? "";
|
||||
try {
|
||||
const existing = listWorktrees(process.cwd());
|
||||
return existing
|
||||
const mainBase = getWorktreeOriginalCwd() ?? process.cwd();
|
||||
const existing = listWorktrees(mainBase);
|
||||
const nameCompletions = existing
|
||||
.filter(wt => wt.name.startsWith(namePrefix))
|
||||
.map(wt => ({ value: `${parts[0]} ${wt.name}`, label: wt.name }));
|
||||
|
||||
// Add "all" option for remove
|
||||
if (parts[0] === "remove" && "all".startsWith(namePrefix)) {
|
||||
nameCompletions.push({ value: "remove all", label: "all" });
|
||||
}
|
||||
|
||||
return nameCompletions;
|
||||
} catch {
|
||||
return [];
|
||||
}
|
||||
|
|
@ -151,8 +162,8 @@ async function worktreeHandler(
|
|||
` /${alias} switch <name> — switch into an existing worktree`,
|
||||
` /${alias} return — switch back to the main project tree`,
|
||||
` /${alias} list — list all worktrees`,
|
||||
` /${alias} merge <branch> [target] — merge worktree into target branch`,
|
||||
` /${alias} remove <name> — remove a worktree and its branch`,
|
||||
` /${alias} merge [name] [target] — merge worktree into target branch (auto-detects when inside a worktree)`,
|
||||
` /${alias} remove <name|all> — remove a worktree (or all) and its branch`,
|
||||
].join("\n"),
|
||||
"info",
|
||||
);
|
||||
|
|
@ -169,41 +180,76 @@ async function worktreeHandler(
|
|||
return;
|
||||
}
|
||||
|
||||
if (trimmed.startsWith("switch ")) {
|
||||
const name = trimmed.replace(/^switch\s+/, "").trim();
|
||||
if (trimmed.startsWith("switch ") || trimmed.startsWith("create ")) {
|
||||
const name = trimmed.replace(/^(?:switch|create)\s+/, "").trim();
|
||||
if (!name) {
|
||||
ctx.ui.notify(`Usage: /${alias} switch <name>`, "warning");
|
||||
ctx.ui.notify(`Usage: /${alias} ${trimmed.split(" ")[0]} <name>`, "warning");
|
||||
return;
|
||||
}
|
||||
await handleSwitch(basePath, name, ctx);
|
||||
// create and switch both do the same thing: switch if exists, create if not
|
||||
const mainBase = originalCwd ?? basePath;
|
||||
const existing = listWorktrees(mainBase);
|
||||
if (existing.some(wt => wt.name === name)) {
|
||||
await handleSwitch(basePath, name, ctx);
|
||||
} else {
|
||||
await handleCreate(basePath, name, ctx);
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
if (trimmed.startsWith("merge ")) {
|
||||
const mergeArgs = trimmed.replace(/^merge\s+/, "").trim().split(/\s+/);
|
||||
const name = mergeArgs[0] ?? "";
|
||||
if (trimmed === "merge" || trimmed.startsWith("merge ")) {
|
||||
const mergeArgs = trimmed.replace(/^merge\s*/, "").trim().split(/\s+/).filter(Boolean);
|
||||
const mainBase = originalCwd ?? basePath;
|
||||
const activeWt = getActiveWorktreeName();
|
||||
|
||||
if (mergeArgs.length === 0) {
|
||||
// Bare "/worktree merge" — only valid when inside a worktree
|
||||
if (!activeWt) {
|
||||
ctx.ui.notify(`Usage: /${alias} merge <name> [target]`, "warning");
|
||||
return;
|
||||
}
|
||||
await handleMerge(mainBase, activeWt, ctx, pi, undefined);
|
||||
return;
|
||||
}
|
||||
|
||||
const name = mergeArgs[0]!;
|
||||
const targetBranch = mergeArgs[1];
|
||||
if (!name) {
|
||||
ctx.ui.notify(`Usage: /${alias} merge <branch> [target]`, "warning");
|
||||
return;
|
||||
|
||||
// Check if 'name' is an actual worktree
|
||||
const worktrees = listWorktrees(mainBase);
|
||||
const isWorktree = worktrees.some(w => w.name === name);
|
||||
|
||||
if (isWorktree) {
|
||||
await handleMerge(mainBase, name, ctx, pi, targetBranch);
|
||||
} else if (activeWt) {
|
||||
// Not a worktree name — user is in a worktree and gave the target branch
|
||||
// e.g. "/worktree merge main" while inside worktree "new"
|
||||
await handleMerge(mainBase, activeWt, ctx, pi, name);
|
||||
} else {
|
||||
ctx.ui.notify(`Worktree "${name}" not found. Run /${alias} list to see available worktrees.`, "warning");
|
||||
}
|
||||
const mainBase = originalCwd ?? basePath;
|
||||
await handleMerge(mainBase, name, ctx, pi, targetBranch);
|
||||
return;
|
||||
}
|
||||
|
||||
if (trimmed.startsWith("remove ")) {
|
||||
const name = trimmed.replace(/^remove\s+/, "").trim();
|
||||
if (!name) {
|
||||
ctx.ui.notify(`Usage: /${alias} remove <name>`, "warning");
|
||||
if (trimmed === "remove" || trimmed.startsWith("remove ")) {
|
||||
const name = trimmed.replace(/^remove\s*/, "").trim();
|
||||
const mainBase = originalCwd ?? basePath;
|
||||
|
||||
if (name === "all") {
|
||||
await handleRemoveAll(mainBase, ctx);
|
||||
return;
|
||||
}
|
||||
const mainBase = originalCwd ?? basePath;
|
||||
|
||||
if (!name) {
|
||||
ctx.ui.notify(`Usage: /${alias} remove <name|all>`, "warning");
|
||||
return;
|
||||
}
|
||||
|
||||
await handleRemove(mainBase, name, ctx);
|
||||
return;
|
||||
}
|
||||
|
||||
const RESERVED = ["list", "return", "switch", "merge", "remove"];
|
||||
const RESERVED = ["list", "return", "switch", "create", "merge", "remove"];
|
||||
if (RESERVED.includes(trimmed)) {
|
||||
ctx.ui.notify(`Usage: /${alias} ${trimmed}${trimmed === "list" || trimmed === "return" ? "" : " <name>"}`, "warning");
|
||||
return;
|
||||
|
|
@ -225,8 +271,20 @@ async function worktreeHandler(
|
|||
}
|
||||
|
||||
export function registerWorktreeCommand(pi: ExtensionAPI): void {
|
||||
// Restore worktree state after /reload.
|
||||
// The module-level originalCwd resets to null when extensions are re-loaded,
|
||||
// but process.cwd() is still inside the worktree. Detect this and recover.
|
||||
if (!originalCwd) {
|
||||
const cwd = process.cwd();
|
||||
const marker = `${sep}.gsd${sep}worktrees${sep}`;
|
||||
const markerIdx = cwd.indexOf(marker);
|
||||
if (markerIdx !== -1) {
|
||||
originalCwd = cwd.slice(0, markerIdx);
|
||||
}
|
||||
}
|
||||
|
||||
pi.registerCommand("worktree", {
|
||||
description: "Git worktrees: /worktree <name> | list | merge <branch> [target] | remove <name>",
|
||||
description: "Git worktrees (also /wt): /worktree <name> | list | merge | remove",
|
||||
getArgumentCompletions: worktreeCompletions,
|
||||
|
||||
async handler(args: string, ctx: ExtensionCommandContext) {
|
||||
|
|
@ -236,7 +294,7 @@ export function registerWorktreeCommand(pi: ExtensionAPI): void {
|
|||
|
||||
// /wt alias — same handler, same completions
|
||||
pi.registerCommand("wt", {
|
||||
description: "Alias for /worktree — Git worktrees: /wt <name> | list | merge | remove",
|
||||
description: "Alias for /worktree",
|
||||
getArgumentCompletions: worktreeCompletions,
|
||||
async handler(args: string, ctx: ExtensionCommandContext) {
|
||||
await worktreeHandler(args, ctx, pi, "wt");
|
||||
|
|
@ -362,6 +420,7 @@ const DIM = "\x1b[2m";
|
|||
const RESET = "\x1b[0m";
|
||||
const CYAN = "\x1b[36m";
|
||||
const GREEN = "\x1b[32m";
|
||||
const RED = "\x1b[31m";
|
||||
const YELLOW = "\x1b[33m";
|
||||
const WHITE = "\x1b[37m";
|
||||
|
||||
|
|
@ -423,9 +482,11 @@ async function handleMerge(
|
|||
return;
|
||||
}
|
||||
|
||||
// Gather merge context
|
||||
const diffSummary = diffWorktreeGSD(basePath, name);
|
||||
const fullDiff = getWorktreeGSDDiff(basePath, name);
|
||||
// Gather merge context — full repo diff, not just .gsd/
|
||||
const diffSummary = diffWorktreeAll(basePath, name);
|
||||
const numstat = diffWorktreeNumstat(basePath, name);
|
||||
const gsdDiff = getWorktreeGSDDiff(basePath, name);
|
||||
const codeDiff = getWorktreeCodeDiff(basePath, name);
|
||||
const commitLog = getWorktreeLog(basePath, name);
|
||||
|
||||
const totalChanges = diffSummary.added.length + diffSummary.modified.length + diffSummary.removed.length;
|
||||
|
|
@ -434,27 +495,48 @@ async function handleMerge(
|
|||
return;
|
||||
}
|
||||
|
||||
// Build a map of file → line stats for the preview
|
||||
const statMap = new Map<string, FileLineStat>();
|
||||
for (const s of numstat) statMap.set(s.file, s);
|
||||
|
||||
// Compute totals
|
||||
let totalAdded = 0;
|
||||
let totalRemoved = 0;
|
||||
for (const s of numstat) { totalAdded += s.added; totalRemoved += s.removed; }
|
||||
|
||||
// Split files into code vs GSD for the preview
|
||||
const isGSD = (f: string) => f.startsWith(".gsd/");
|
||||
const codeChanges = diffSummary.added.filter(f => !isGSD(f)).length
|
||||
+ diffSummary.modified.filter(f => !isGSD(f)).length
|
||||
+ diffSummary.removed.filter(f => !isGSD(f)).length;
|
||||
const gsdChanges = diffSummary.added.filter(isGSD).length
|
||||
+ diffSummary.modified.filter(isGSD).length
|
||||
+ diffSummary.removed.filter(isGSD).length;
|
||||
|
||||
// Format a file line with +/- stats
|
||||
const formatFileLine = (prefix: string, file: string): string => {
|
||||
const s = statMap.get(file);
|
||||
const stat = s ? ` ${GREEN}+${s.added}${RESET} ${RED}-${s.removed}${RESET}` : "";
|
||||
return ` ${prefix} ${file}${stat}`;
|
||||
};
|
||||
|
||||
// Preview confirmation before merge dispatch
|
||||
const previewLines = [
|
||||
`Merge worktree "${name}" → ${mainBranch}`,
|
||||
"",
|
||||
` ${diffSummary.added.length} added · ${diffSummary.modified.length} modified · ${diffSummary.removed.length} removed`,
|
||||
` ${totalChanges} file${totalChanges === 1 ? "" : "s"} changed, ${GREEN}+${totalAdded}${RESET} ${RED}-${totalRemoved}${RESET} lines (${codeChanges} code, ${gsdChanges} GSD)`,
|
||||
];
|
||||
if (diffSummary.added.length > 0) {
|
||||
previewLines.push("", " Added:");
|
||||
for (const f of diffSummary.added.slice(0, 10)) previewLines.push(` + ${f}`);
|
||||
if (diffSummary.added.length > 10) previewLines.push(` … and ${diffSummary.added.length - 10} more`);
|
||||
}
|
||||
if (diffSummary.modified.length > 0) {
|
||||
previewLines.push("", " Modified:");
|
||||
for (const f of diffSummary.modified.slice(0, 10)) previewLines.push(` ~ ${f}`);
|
||||
if (diffSummary.modified.length > 10) previewLines.push(` … and ${diffSummary.modified.length - 10} more`);
|
||||
}
|
||||
if (diffSummary.removed.length > 0) {
|
||||
previewLines.push("", " Removed:");
|
||||
for (const f of diffSummary.removed.slice(0, 10)) previewLines.push(` - ${f}`);
|
||||
if (diffSummary.removed.length > 10) previewLines.push(` … and ${diffSummary.removed.length - 10} more`);
|
||||
}
|
||||
|
||||
const appendFileList = (label: string, files: string[], prefix: string, limit = 10) => {
|
||||
if (files.length === 0) return;
|
||||
previewLines.push("", ` ${label}:`);
|
||||
for (const f of files.slice(0, limit)) previewLines.push(formatFileLine(prefix, f));
|
||||
if (files.length > limit) previewLines.push(` … and ${files.length - limit} more`);
|
||||
};
|
||||
|
||||
appendFileList("Added", diffSummary.added, "+");
|
||||
appendFileList("Modified", diffSummary.modified, "~");
|
||||
appendFileList("Removed", diffSummary.removed, "-");
|
||||
|
||||
const confirmed = await showConfirm(ctx, {
|
||||
title: "Worktree Merge",
|
||||
|
|
@ -467,20 +549,34 @@ async function handleMerge(
|
|||
return;
|
||||
}
|
||||
|
||||
// Switch to the main tree before dispatching the merge.
|
||||
// The LLM needs to run git merge --squash from the main branch, and if
|
||||
// it later removes the worktree, the agent's CWD must not be inside it.
|
||||
if (originalCwd) {
|
||||
const prevCwd = process.cwd();
|
||||
process.chdir(basePath);
|
||||
nudgeGitBranchCache(prevCwd);
|
||||
originalCwd = null;
|
||||
}
|
||||
|
||||
// Format file lists for the prompt
|
||||
const formatFiles = (files: string[]) =>
|
||||
files.length > 0 ? files.map(f => `- \`${f}\``).join("\n") : "_(none)_";
|
||||
|
||||
// Load and populate the merge prompt
|
||||
const wtPath = worktreePath(basePath, name);
|
||||
const prompt = loadPrompt("worktree-merge", {
|
||||
worktreeName: name,
|
||||
worktreeBranch: branch,
|
||||
mainBranch,
|
||||
mainTreePath: basePath,
|
||||
worktreePath: wtPath,
|
||||
commitLog: commitLog || "(no commits)",
|
||||
addedFiles: formatFiles(diffSummary.added),
|
||||
modifiedFiles: formatFiles(diffSummary.modified),
|
||||
removedFiles: formatFiles(diffSummary.removed),
|
||||
fullDiff: fullDiff || "(no diff)",
|
||||
gsdDiff: gsdDiff || "(no GSD artifact changes)",
|
||||
codeDiff: codeDiff || "(no code changes)",
|
||||
});
|
||||
|
||||
// Dispatch to the LLM
|
||||
|
|
@ -494,7 +590,7 @@ async function handleMerge(
|
|||
);
|
||||
|
||||
ctx.ui.notify(
|
||||
`Merge helper started for worktree "${name}" (${totalChanges} GSD artifact change${totalChanges === 1 ? "" : "s"}).`,
|
||||
`Merge helper started for worktree "${name}" (${codeChanges} code + ${gsdChanges} GSD artifact change${totalChanges === 1 ? "" : "s"}).`,
|
||||
"info",
|
||||
);
|
||||
} catch (error) {
|
||||
|
|
@ -510,6 +606,26 @@ async function handleRemove(
|
|||
): Promise<void> {
|
||||
try {
|
||||
const mainBase = originalCwd ?? basePath;
|
||||
|
||||
// Validate the worktree exists before attempting removal
|
||||
const worktrees = listWorktrees(mainBase);
|
||||
const wt = worktrees.find(w => w.name === name);
|
||||
if (!wt) {
|
||||
ctx.ui.notify(`Worktree "${name}" not found. Run /worktree list to see available worktrees.`, "warning");
|
||||
return;
|
||||
}
|
||||
|
||||
const confirmed = await showConfirm(ctx, {
|
||||
title: "Remove Worktree",
|
||||
message: `Remove worktree "${name}" and delete branch ${wt.branch}?`,
|
||||
confirmLabel: "Remove",
|
||||
declineLabel: "Cancel",
|
||||
});
|
||||
if (!confirmed) {
|
||||
ctx.ui.notify("Cancelled.", "info");
|
||||
return;
|
||||
}
|
||||
|
||||
const prevCwd = process.cwd();
|
||||
removeWorktree(mainBase, name, { deleteBranch: true });
|
||||
|
||||
|
|
@ -525,3 +641,57 @@ async function handleRemove(
|
|||
ctx.ui.notify(`Failed to remove worktree: ${msg}`, "error");
|
||||
}
|
||||
}
|
||||
|
||||
async function handleRemoveAll(
|
||||
basePath: string,
|
||||
ctx: ExtensionCommandContext,
|
||||
): Promise<void> {
|
||||
try {
|
||||
const mainBase = originalCwd ?? basePath;
|
||||
const worktrees = listWorktrees(mainBase);
|
||||
|
||||
if (worktrees.length === 0) {
|
||||
ctx.ui.notify("No worktrees to remove.", "info");
|
||||
return;
|
||||
}
|
||||
|
||||
const names = worktrees.map(w => w.name);
|
||||
const confirmed = await showConfirm(ctx, {
|
||||
title: "Remove All Worktrees",
|
||||
message: `This will remove ${worktrees.length} worktree${worktrees.length === 1 ? "" : "s"} and delete their branches:\n\n${names.map(n => ` • ${n}`).join("\n")}`,
|
||||
confirmLabel: "Remove all",
|
||||
declineLabel: "Cancel",
|
||||
});
|
||||
if (!confirmed) {
|
||||
ctx.ui.notify("Cancelled.", "info");
|
||||
return;
|
||||
}
|
||||
|
||||
const prevCwd = process.cwd();
|
||||
const removed: string[] = [];
|
||||
const failed: string[] = [];
|
||||
|
||||
for (const wt of worktrees) {
|
||||
try {
|
||||
removeWorktree(mainBase, wt.name, { deleteBranch: true });
|
||||
removed.push(wt.name);
|
||||
} catch {
|
||||
failed.push(wt.name);
|
||||
}
|
||||
}
|
||||
|
||||
// If we were in a worktree that got removed, clear tracking
|
||||
if (originalCwd && process.cwd() !== prevCwd) {
|
||||
nudgeGitBranchCache(prevCwd);
|
||||
originalCwd = null;
|
||||
}
|
||||
|
||||
const lines: string[] = [];
|
||||
if (removed.length > 0) lines.push(`Removed: ${removed.join(", ")}`);
|
||||
if (failed.length > 0) lines.push(`Failed: ${failed.join(", ")}`);
|
||||
ctx.ui.notify(lines.join("\n"), failed.length > 0 ? "warning" : "info");
|
||||
} catch (error) {
|
||||
const msg = error instanceof Error ? error.message : String(error);
|
||||
ctx.ui.notify(`Failed to remove worktrees: ${msg}`, "error");
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -28,6 +28,13 @@ export interface WorktreeInfo {
|
|||
exists: boolean;
|
||||
}
|
||||
|
||||
/** Per-file line change stats from git diff --numstat. */
|
||||
export interface FileLineStat {
|
||||
file: string;
|
||||
added: number;
|
||||
removed: number;
|
||||
}
|
||||
|
||||
export interface WorktreeDiffSummary {
|
||||
/** Files only in the worktree .gsd/ (new artifacts) */
|
||||
added: string[];
|
||||
|
|
@ -109,6 +116,18 @@ export function createWorktree(basePath: string, name: string): WorktreeInfo {
|
|||
const mainBranch = getMainBranch(basePath);
|
||||
|
||||
if (branchExists) {
|
||||
// Check if the branch is actively used by an existing worktree.
|
||||
// `git branch -f` will fail if the branch is checked out somewhere.
|
||||
const worktreeUsing = runGit(basePath, ["worktree", "list", "--porcelain"], { allowFailure: true });
|
||||
const branchInUse = worktreeUsing.includes(`branch refs/heads/${branch}`);
|
||||
|
||||
if (branchInUse) {
|
||||
throw new Error(
|
||||
`Branch "${branch}" is already in use by another worktree. ` +
|
||||
`Remove the existing worktree first with /worktree remove ${name}.`,
|
||||
);
|
||||
}
|
||||
|
||||
// Reset the stale branch to current main, then attach worktree to it
|
||||
runGit(basePath, ["branch", "-f", branch, mainBranch]);
|
||||
runGit(basePath, ["worktree", "add", wtPath, branch]);
|
||||
|
|
@ -212,19 +231,17 @@ export function removeWorktree(
|
|||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Diff the .gsd/ directory between the worktree branch and main branch.
|
||||
* Returns a summary of added, modified, and removed GSD artifacts.
|
||||
*/
|
||||
export function diffWorktreeGSD(basePath: string, name: string): WorktreeDiffSummary {
|
||||
const branch = worktreeBranchName(name);
|
||||
const mainBranch = getMainBranch(basePath);
|
||||
/** Paths to skip in all worktree diffs (internal/runtime artifacts). */
|
||||
const SKIP_PATHS = [".gsd/worktrees/", ".gsd/runtime/", ".gsd/activity/"];
|
||||
const SKIP_EXACT = [".gsd/STATE.md", ".gsd/auto.lock", ".gsd/metrics.json"];
|
||||
|
||||
// Use git diff to compare .gsd/ between branches
|
||||
const diffOutput = runGit(basePath, [
|
||||
"diff", "--name-status", `${mainBranch}...${branch}`, "--", ".gsd/",
|
||||
], { allowFailure: true });
|
||||
function shouldSkipPath(filePath: string): boolean {
|
||||
if (SKIP_PATHS.some(p => filePath.startsWith(p))) return true;
|
||||
if (SKIP_EXACT.includes(filePath)) return true;
|
||||
return false;
|
||||
}
|
||||
|
||||
function parseDiffNameStatus(diffOutput: string): WorktreeDiffSummary {
|
||||
const added: string[] = [];
|
||||
const modified: string[] = [];
|
||||
const removed: string[] = [];
|
||||
|
|
@ -235,11 +252,7 @@ export function diffWorktreeGSD(basePath: string, name: string): WorktreeDiffSum
|
|||
const [status, ...pathParts] = line.split("\t");
|
||||
const filePath = pathParts.join("\t");
|
||||
|
||||
// Skip worktree-internal paths (e.g. .gsd/worktrees/, .gsd/runtime/)
|
||||
if (filePath.startsWith(".gsd/worktrees/") || filePath.startsWith(".gsd/runtime/")) continue;
|
||||
// Skip gitignored runtime files
|
||||
if (filePath === ".gsd/STATE.md" || filePath === ".gsd/auto.lock" || filePath === ".gsd/metrics.json") continue;
|
||||
if (filePath.startsWith(".gsd/activity/")) continue;
|
||||
if (shouldSkipPath(filePath)) continue;
|
||||
|
||||
switch (status) {
|
||||
case "A": added.push(filePath); break;
|
||||
|
|
@ -256,6 +269,68 @@ export function diffWorktreeGSD(basePath: string, name: string): WorktreeDiffSum
|
|||
return { added, modified, removed };
|
||||
}
|
||||
|
||||
/**
|
||||
* Diff the .gsd/ directory between the worktree branch and main branch.
|
||||
* Returns a summary of added, modified, and removed GSD artifacts.
|
||||
*/
|
||||
export function diffWorktreeGSD(basePath: string, name: string): WorktreeDiffSummary {
|
||||
const branch = worktreeBranchName(name);
|
||||
const mainBranch = getMainBranch(basePath);
|
||||
|
||||
const diffOutput = runGit(basePath, [
|
||||
"diff", "--name-status", `${mainBranch}...${branch}`, "--", ".gsd/",
|
||||
], { allowFailure: true });
|
||||
|
||||
return parseDiffNameStatus(diffOutput);
|
||||
}
|
||||
|
||||
/**
|
||||
* Diff ALL files between the worktree branch and main branch.
|
||||
* Returns a summary of added, modified, and removed files across the entire repo.
|
||||
*/
|
||||
/**
|
||||
* Diff ALL files between the worktree branch and main branch.
|
||||
* Uses direct diff (no merge-base) to show what will actually change
|
||||
* on main when the merge is applied. If both branches have identical
|
||||
* content, this correctly returns an empty diff.
|
||||
*/
|
||||
export function diffWorktreeAll(basePath: string, name: string): WorktreeDiffSummary {
|
||||
const branch = worktreeBranchName(name);
|
||||
const mainBranch = getMainBranch(basePath);
|
||||
|
||||
const diffOutput = runGit(basePath, [
|
||||
"diff", "--name-status", mainBranch, branch,
|
||||
], { allowFailure: true });
|
||||
|
||||
return parseDiffNameStatus(diffOutput);
|
||||
}
|
||||
|
||||
/**
|
||||
* Get per-file line addition/deletion stats for what will change on main.
|
||||
* Uses direct diff (not merge-base) so the preview matches the actual merge outcome.
|
||||
*/
|
||||
export function diffWorktreeNumstat(basePath: string, name: string): FileLineStat[] {
|
||||
const branch = worktreeBranchName(name);
|
||||
const mainBranch = getMainBranch(basePath);
|
||||
|
||||
const raw = runGit(basePath, [
|
||||
"diff", "--numstat", mainBranch, branch,
|
||||
], { allowFailure: true });
|
||||
|
||||
if (!raw.trim()) return [];
|
||||
|
||||
const stats: FileLineStat[] = [];
|
||||
for (const line of raw.split("\n").filter(Boolean)) {
|
||||
const [a, r, ...pathParts] = line.split("\t");
|
||||
const file = pathParts.join("\t");
|
||||
if (shouldSkipPath(file)) continue;
|
||||
const added = a === "-" ? 0 : parseInt(a ?? "0", 10);
|
||||
const removed = r === "-" ? 0 : parseInt(r ?? "0", 10);
|
||||
stats.push({ file, added, removed });
|
||||
}
|
||||
return stats;
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the full diff content for .gsd/ between the worktree branch and main.
|
||||
* Returns the raw unified diff for LLM consumption.
|
||||
|
|
@ -269,6 +344,21 @@ export function getWorktreeGSDDiff(basePath: string, name: string): string {
|
|||
], { allowFailure: true });
|
||||
}
|
||||
|
||||
/**
|
||||
* Get the full diff content for non-.gsd/ files between the worktree branch and main.
|
||||
* Returns the raw unified diff for LLM consumption.
|
||||
*/
|
||||
export function getWorktreeCodeDiff(basePath: string, name: string): string {
|
||||
const branch = worktreeBranchName(name);
|
||||
const mainBranch = getMainBranch(basePath);
|
||||
|
||||
// Get full diff, then exclude .gsd/ paths
|
||||
// We use pathspec magic to exclude .gsd/
|
||||
return runGit(basePath, [
|
||||
"diff", `${mainBranch}...${branch}`, "--", ".", ":(exclude).gsd/",
|
||||
], { allowFailure: true });
|
||||
}
|
||||
|
||||
/**
|
||||
* Get commit log for the worktree branch since it diverged from main.
|
||||
*/
|
||||
|
|
|
|||
410
src/resources/extensions/mcporter/index.ts
Normal file
410
src/resources/extensions/mcporter/index.ts
Normal file
|
|
@ -0,0 +1,410 @@
|
|||
/**
|
||||
* MCPorter Extension — Lazy MCP server integration for pi
|
||||
*
|
||||
* Provides on-demand access to all MCP servers configured on the system
|
||||
* (via Claude Desktop, Cursor, VS Code, mcporter config, etc.) without
|
||||
* registering every tool upfront. This keeps token usage near-zero until
|
||||
* the agent actually needs an MCP tool.
|
||||
*
|
||||
* Three tools:
|
||||
* mcp_servers — List available MCP servers (cached after first call)
|
||||
* mcp_discover — Get tool signatures for a specific server
|
||||
* mcp_call — Call a tool on an MCP server
|
||||
*
|
||||
* Requirements:
|
||||
* - mcporter installed globally: npm i -g mcporter
|
||||
*/
|
||||
|
||||
import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
||||
import {
|
||||
truncateHead,
|
||||
DEFAULT_MAX_BYTES,
|
||||
DEFAULT_MAX_LINES,
|
||||
formatSize,
|
||||
} from "@mariozechner/pi-coding-agent";
|
||||
import { Text } from "@mariozechner/pi-tui";
|
||||
import { Type } from "@sinclair/typebox";
|
||||
import { execFile, exec } from "node:child_process";
|
||||
import { promisify } from "node:util";
|
||||
|
||||
const execFileAsync = promisify(execFile);
|
||||
const execAsync = promisify(exec);
|
||||
|
||||
// ─── Types ────────────────────────────────────────────────────────────────────
|
||||
|
||||
interface McpServer {
|
||||
name: string;
|
||||
status: string;
|
||||
transport?: string;
|
||||
tools: { name: string; description: string }[];
|
||||
}
|
||||
|
||||
interface McpListResponse {
|
||||
mode: string;
|
||||
counts: { ok: number; auth: number; offline: number; http: number; error: number };
|
||||
servers: McpServer[];
|
||||
}
|
||||
|
||||
interface McpToolSchema {
|
||||
name: string;
|
||||
description: string;
|
||||
inputSchema?: Record<string, unknown>;
|
||||
}
|
||||
|
||||
interface McpServerDetail {
|
||||
name: string;
|
||||
status: string;
|
||||
tools: McpToolSchema[];
|
||||
}
|
||||
|
||||
// ─── Cache ────────────────────────────────────────────────────────────────────
|
||||
|
||||
let serverListCache: McpServer[] | null = null;
|
||||
const serverDetailCache = new Map<string, McpServerDetail>();
|
||||
|
||||
// ─── Helpers ──────────────────────────────────────────────────────────────────
|
||||
|
||||
async function runMcporter(
|
||||
args: string[],
|
||||
signal?: AbortSignal,
|
||||
timeoutMs = 30000,
|
||||
): Promise<string> {
|
||||
// Use shell exec so PATH resolution works in all contexts
|
||||
const escaped = args.map((a) => `'${a.replace(/'/g, "'\\''")}'`).join(" ");
|
||||
const { stdout } = await execAsync(`mcporter ${escaped}`, {
|
||||
timeout: timeoutMs,
|
||||
maxBuffer: 1024 * 1024,
|
||||
signal,
|
||||
env: { ...process.env },
|
||||
});
|
||||
return stdout;
|
||||
}
|
||||
|
||||
async function getServerList(signal?: AbortSignal): Promise<McpServer[]> {
|
||||
if (serverListCache) return serverListCache;
|
||||
|
||||
const raw = await runMcporter(["list", "--json"], signal, 60000);
|
||||
let data: McpListResponse;
|
||||
try {
|
||||
data = JSON.parse(raw) as McpListResponse;
|
||||
} catch (e) {
|
||||
throw new Error(`Failed to parse mcporter output: ${raw.slice(0, 300)}`);
|
||||
}
|
||||
if (!Array.isArray(data.servers)) {
|
||||
throw new Error(`Unexpected mcporter response shape: ${JSON.stringify(Object.keys(data))}`);
|
||||
}
|
||||
serverListCache = data.servers;
|
||||
return serverListCache;
|
||||
}
|
||||
|
||||
async function getServerDetail(
|
||||
serverName: string,
|
||||
signal?: AbortSignal,
|
||||
): Promise<McpServerDetail> {
|
||||
if (serverDetailCache.has(serverName)) return serverDetailCache.get(serverName)!;
|
||||
|
||||
const raw = await runMcporter(["list", serverName, "--schema", "--json"], signal);
|
||||
const data = JSON.parse(raw) as McpServerDetail;
|
||||
serverDetailCache.set(serverName, data);
|
||||
return data;
|
||||
}
|
||||
|
||||
function formatServerList(servers: McpServer[]): string {
|
||||
if (servers.length === 0) return "No MCP servers found.";
|
||||
|
||||
const lines: string[] = [`${servers.length} MCP servers available:\n`];
|
||||
|
||||
for (const s of servers) {
|
||||
const tools = s.tools ?? [];
|
||||
const status = s.status === "ok" ? "✓" : s.status === "auth" ? "🔑" : "✗";
|
||||
lines.push(`${status} ${s.name} — ${tools.length} tools (${s.status})`);
|
||||
for (const t of tools) {
|
||||
lines.push(` ${t.name}: ${t.description?.slice(0, 100) ?? ""}`);
|
||||
}
|
||||
}
|
||||
|
||||
lines.push("\nUse mcp_discover to see full tool schemas for a specific server.");
|
||||
lines.push("Use mcp_call to invoke a tool: mcp_call(server, tool, args).");
|
||||
return lines.join("\n");
|
||||
}
|
||||
|
||||
function formatServerDetail(detail: McpServerDetail): string {
|
||||
const lines: string[] = [`${detail.name} — ${detail.tools.length} tools:\n`];
|
||||
|
||||
for (const tool of detail.tools) {
|
||||
lines.push(`## ${tool.name}`);
|
||||
if (tool.description) lines.push(tool.description);
|
||||
if (tool.inputSchema) {
|
||||
lines.push("```json");
|
||||
lines.push(JSON.stringify(tool.inputSchema, null, 2));
|
||||
lines.push("```");
|
||||
}
|
||||
lines.push("");
|
||||
}
|
||||
|
||||
lines.push(`Call with: mcp_call(server="${detail.name}", tool="<tool_name>", args={...})`);
|
||||
return lines.join("\n");
|
||||
}
|
||||
|
||||
// ─── Extension ────────────────────────────────────────────────────────────────
|
||||
|
||||
export default function (pi: ExtensionAPI) {
|
||||
// ── mcp_servers ──────────────────────────────────────────────────────────
|
||||
|
||||
pi.registerTool({
|
||||
name: "mcp_servers",
|
||||
label: "MCP Servers",
|
||||
description:
|
||||
"List all available MCP servers discovered from your system (Claude Desktop, Cursor, VS Code, mcporter config). " +
|
||||
"Shows server names, status, and tool counts. Use mcp_discover to get full tool schemas for a server.",
|
||||
promptSnippet:
|
||||
"List available MCP servers and their tools (lazy discovery via mcporter)",
|
||||
promptGuidelines: [
|
||||
"Call mcp_servers to see what MCP servers are available before trying to use one.",
|
||||
"MCP servers provide external integrations (Twitter, Linear, Railway, etc.) via the Model Context Protocol.",
|
||||
"After listing, use mcp_discover(server) to get tool schemas, then mcp_call(server, tool, args) to invoke.",
|
||||
],
|
||||
parameters: Type.Object({
|
||||
refresh: Type.Optional(
|
||||
Type.Boolean({ description: "Force refresh the server list (default: use cache)" }),
|
||||
),
|
||||
}),
|
||||
|
||||
async execute(_id, params, signal) {
|
||||
if (params.refresh) serverListCache = null;
|
||||
|
||||
try {
|
||||
const servers = await getServerList(signal);
|
||||
return {
|
||||
content: [{ type: "text", text: formatServerList(servers) }],
|
||||
details: {
|
||||
serverCount: servers.length,
|
||||
cached: !params.refresh && serverListCache !== null,
|
||||
},
|
||||
};
|
||||
} catch (err: unknown) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
throw new Error(
|
||||
`Failed to list MCP servers. Is mcporter installed? (npm i -g mcporter)\n${msg}`,
|
||||
);
|
||||
}
|
||||
},
|
||||
|
||||
renderCall(args, theme) {
|
||||
let text = theme.fg("toolTitle", theme.bold("mcp_servers"));
|
||||
if (args.refresh) text += theme.fg("warning", " (refresh)");
|
||||
return new Text(text, 0, 0);
|
||||
},
|
||||
|
||||
renderResult(result, { isPartial }, theme) {
|
||||
if (isPartial) return new Text(theme.fg("warning", "Discovering MCP servers..."), 0, 0);
|
||||
const d = result.details as { serverCount: number } | undefined;
|
||||
return new Text(
|
||||
theme.fg("success", `${d?.serverCount ?? 0} servers found`),
|
||||
0,
|
||||
0,
|
||||
);
|
||||
},
|
||||
});
|
||||
|
||||
// ── mcp_discover ─────────────────────────────────────────────────────────
|
||||
|
||||
pi.registerTool({
|
||||
name: "mcp_discover",
|
||||
label: "MCP Discover",
|
||||
description:
|
||||
"Get detailed tool signatures and JSON schemas for a specific MCP server. " +
|
||||
"Use this to understand what tools a server provides and what arguments they accept " +
|
||||
"before calling them with mcp_call.",
|
||||
promptSnippet:
|
||||
"Get tool schemas for a specific MCP server before calling its tools",
|
||||
promptGuidelines: [
|
||||
"Call mcp_discover with a server name to see the full tool signatures before calling mcp_call.",
|
||||
"The schemas show required and optional parameters with types and descriptions.",
|
||||
],
|
||||
parameters: Type.Object({
|
||||
server: Type.String({
|
||||
description:
|
||||
"MCP server name (from mcp_servers output), e.g. 'railway', 'twitter-mcp', 'linear'",
|
||||
}),
|
||||
}),
|
||||
|
||||
async execute(_id, params, signal) {
|
||||
try {
|
||||
const detail = await getServerDetail(params.server, signal);
|
||||
const text = formatServerDetail(detail);
|
||||
|
||||
// Truncation guard
|
||||
const truncation = truncateHead(text, {
|
||||
maxLines: DEFAULT_MAX_LINES,
|
||||
maxBytes: DEFAULT_MAX_BYTES,
|
||||
});
|
||||
let finalText = truncation.content;
|
||||
if (truncation.truncated) {
|
||||
finalText +=
|
||||
`\n\n[Truncated: ${truncation.outputLines}/${truncation.totalLines} lines ` +
|
||||
`(${formatSize(truncation.outputBytes)} of ${formatSize(truncation.totalBytes)})]`;
|
||||
}
|
||||
|
||||
return {
|
||||
content: [{ type: "text", text: finalText }],
|
||||
details: {
|
||||
server: params.server,
|
||||
toolCount: detail.tools.length,
|
||||
cached: serverDetailCache.has(params.server),
|
||||
},
|
||||
};
|
||||
} catch (err: unknown) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
throw new Error(`Failed to discover tools for "${params.server}": ${msg}`);
|
||||
}
|
||||
},
|
||||
|
||||
renderCall(args, theme) {
|
||||
let text = theme.fg("toolTitle", theme.bold("mcp_discover "));
|
||||
text += theme.fg("accent", args.server);
|
||||
return new Text(text, 0, 0);
|
||||
},
|
||||
|
||||
renderResult(result, { isPartial }, theme) {
|
||||
if (isPartial)
|
||||
return new Text(theme.fg("warning", "Discovering tools..."), 0, 0);
|
||||
const d = result.details as { server: string; toolCount: number } | undefined;
|
||||
return new Text(
|
||||
theme.fg("success", `${d?.toolCount ?? 0} tools`) +
|
||||
theme.fg("dim", ` · ${d?.server}`),
|
||||
0,
|
||||
0,
|
||||
);
|
||||
},
|
||||
});
|
||||
|
||||
// ── mcp_call ─────────────────────────────────────────────────────────────
|
||||
|
||||
pi.registerTool({
|
||||
name: "mcp_call",
|
||||
label: "MCP Call",
|
||||
description:
|
||||
"Call a tool on an MCP server. Provide the server name, tool name, and arguments. " +
|
||||
"Use mcp_discover first to see available tools and their required arguments.",
|
||||
promptSnippet: "Call a tool on an MCP server via mcporter",
|
||||
promptGuidelines: [
|
||||
"Always use mcp_discover first to understand the tool's parameters before calling mcp_call.",
|
||||
"Arguments are passed as a JSON object matching the tool's input schema.",
|
||||
],
|
||||
parameters: Type.Object({
|
||||
server: Type.String({
|
||||
description: "MCP server name, e.g. 'railway', 'twitter-mcp'",
|
||||
}),
|
||||
tool: Type.String({
|
||||
description: "Tool name on that server, e.g. 'railway_list_projects'",
|
||||
}),
|
||||
args: Type.Optional(
|
||||
Type.Record(Type.String(), Type.Unknown(), {
|
||||
description:
|
||||
"Tool arguments as key-value pairs matching the tool's input schema",
|
||||
}),
|
||||
),
|
||||
}),
|
||||
|
||||
async execute(_id, params, signal) {
|
||||
// Build mcporter call command: mcporter call server.tool key:value ...
|
||||
const callTarget = `${params.server}.${params.tool}`;
|
||||
const cliArgs = ["call", callTarget, "--output", "raw"];
|
||||
|
||||
if (params.args && Object.keys(params.args).length > 0) {
|
||||
for (const [key, value] of Object.entries(params.args)) {
|
||||
const strVal =
|
||||
typeof value === "string" ? value : JSON.stringify(value);
|
||||
cliArgs.push(`${key}:${strVal}`);
|
||||
}
|
||||
}
|
||||
|
||||
try {
|
||||
const raw = await runMcporter(cliArgs, signal, 60000);
|
||||
|
||||
// Truncation guard
|
||||
const truncation = truncateHead(raw, {
|
||||
maxLines: DEFAULT_MAX_LINES,
|
||||
maxBytes: DEFAULT_MAX_BYTES,
|
||||
});
|
||||
let finalText = truncation.content;
|
||||
if (truncation.truncated) {
|
||||
finalText +=
|
||||
`\n\n[Output truncated: ${truncation.outputLines}/${truncation.totalLines} lines ` +
|
||||
`(${formatSize(truncation.outputBytes)} of ${formatSize(truncation.totalBytes)})]`;
|
||||
}
|
||||
|
||||
return {
|
||||
content: [{ type: "text", text: finalText }],
|
||||
details: {
|
||||
server: params.server,
|
||||
tool: params.tool,
|
||||
charCount: finalText.length,
|
||||
truncated: truncation.truncated,
|
||||
},
|
||||
};
|
||||
} catch (err: unknown) {
|
||||
const msg = err instanceof Error ? err.message : String(err);
|
||||
throw new Error(
|
||||
`MCP call failed: ${params.server}.${params.tool}\n${msg}`,
|
||||
);
|
||||
}
|
||||
},
|
||||
|
||||
renderCall(args, theme) {
|
||||
let text = theme.fg("toolTitle", theme.bold("mcp_call "));
|
||||
text += theme.fg("accent", `${args.server}.${args.tool}`);
|
||||
if (args.args && Object.keys(args.args).length > 0) {
|
||||
const preview = Object.entries(args.args)
|
||||
.slice(0, 3)
|
||||
.map(([k, v]) => {
|
||||
const val = typeof v === "string" ? v : JSON.stringify(v);
|
||||
return `${k}:${val.length > 30 ? val.slice(0, 30) + "…" : val}`;
|
||||
})
|
||||
.join(" ");
|
||||
text += " " + theme.fg("muted", preview);
|
||||
}
|
||||
return new Text(text, 0, 0);
|
||||
},
|
||||
|
||||
renderResult(result, { isPartial, expanded }, theme) {
|
||||
if (isPartial) return new Text(theme.fg("warning", "Calling MCP tool..."), 0, 0);
|
||||
|
||||
const d = result.details as {
|
||||
server: string;
|
||||
tool: string;
|
||||
charCount: number;
|
||||
truncated: boolean;
|
||||
} | undefined;
|
||||
|
||||
let text = theme.fg("success", `✓ ${d?.server}.${d?.tool}`);
|
||||
text += theme.fg("dim", ` · ${(d?.charCount ?? 0).toLocaleString()} chars`);
|
||||
if (d?.truncated) text += theme.fg("warning", " · truncated");
|
||||
|
||||
if (expanded) {
|
||||
const content = result.content[0];
|
||||
if (content?.type === "text") {
|
||||
const preview = content.text.split("\n").slice(0, 15).join("\n");
|
||||
text += "\n\n" + theme.fg("dim", preview);
|
||||
}
|
||||
}
|
||||
|
||||
return new Text(text, 0, 0);
|
||||
},
|
||||
});
|
||||
|
||||
// ── Verify mcporter is available ─────────────────────────────────────────
|
||||
|
||||
pi.on("session_start", async (_event, ctx) => {
|
||||
try {
|
||||
const ver = (await runMcporter(["--version"], undefined, 5000)).trim();
|
||||
ctx.ui.notify(`MCPorter ${ver} ready`, "info");
|
||||
} catch {
|
||||
ctx.ui.notify(
|
||||
"MCPorter not found. Install with: npm i -g mcporter",
|
||||
"error",
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
10
src/resources/extensions/slash-commands/clear.ts
Normal file
10
src/resources/extensions/slash-commands/clear.ts
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
export default function clearCommand(pi: ExtensionAPI) {
|
||||
pi.registerCommand("clear", {
|
||||
description: "Alias for /new — start a new session",
|
||||
async handler(_args: string, ctx: ExtensionCommandContext) {
|
||||
await ctx.newSession();
|
||||
},
|
||||
});
|
||||
}
|
||||
|
|
@ -1,34 +0,0 @@
|
|||
import { readFileSync } from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import type { ExtensionAPI, ExtensionCommandContext } from "@mariozechner/pi-coding-agent";
|
||||
|
||||
export default function gsdRun(pi: ExtensionAPI) {
|
||||
pi.registerCommand("gsd-run", {
|
||||
description: "Read GSD-WORKFLOW.md and execute — lightweight protocol-driven GSD",
|
||||
async handler(args: string, ctx: ExtensionCommandContext) {
|
||||
const workflowPath = process.env.GSD_WORKFLOW_PATH ?? join(process.env.HOME ?? "~", ".pi", "GSD-WORKFLOW.md");
|
||||
|
||||
let workflow: string;
|
||||
try {
|
||||
workflow = readFileSync(workflowPath, "utf-8");
|
||||
} catch {
|
||||
ctx.ui.notify(`Cannot read ${workflowPath}`, "error");
|
||||
return;
|
||||
}
|
||||
|
||||
const userNote = (typeof args === "string" ? args : "").trim();
|
||||
const noteSection = userNote
|
||||
? `\n\n## User Note\n\n${userNote}\n`
|
||||
: "";
|
||||
|
||||
pi.sendMessage(
|
||||
{
|
||||
customType: "gsd-run",
|
||||
content: `Read the following GSD workflow protocol and execute exactly.\n\n${workflow}${noteSection}`,
|
||||
display: false,
|
||||
},
|
||||
{ triggerTurn: true },
|
||||
);
|
||||
},
|
||||
});
|
||||
}
|
||||
|
|
@ -2,11 +2,11 @@ import type { ExtensionAPI } from "@mariozechner/pi-coding-agent";
|
|||
import createSlashCommand from "./create-slash-command.js";
|
||||
import createExtension from "./create-extension.js";
|
||||
import auditCommand from "./audit.js";
|
||||
import gsdRun from "./gsd-run.js";
|
||||
import clearCommand from "./clear.js";
|
||||
|
||||
export default function slashCommands(pi: ExtensionAPI) {
|
||||
createSlashCommand(pi);
|
||||
createExtension(pi);
|
||||
auditCommand(pi);
|
||||
gsdRun(pi);
|
||||
clearCommand(pi);
|
||||
}
|
||||
|
|
|
|||
1
src/resources/extensions/voice/.gitignore
vendored
Normal file
1
src/resources/extensions/voice/.gitignore
vendored
Normal file
|
|
@ -0,0 +1 @@
|
|||
speech-recognizer
|
||||
195
src/resources/extensions/voice/index.ts
Normal file
195
src/resources/extensions/voice/index.ts
Normal file
|
|
@ -0,0 +1,195 @@
|
|||
import type { ExtensionAPI, ExtensionContext } from "@mariozechner/pi-coding-agent";
|
||||
import type { AssistantMessage } from "@mariozechner/pi-ai";
|
||||
import { isKeyRelease, Key, matchesKey, truncateToWidth, visibleWidth } from "@mariozechner/pi-tui";
|
||||
import { spawn, execSync, type ChildProcess } from "node:child_process";
|
||||
import * as fs from "node:fs";
|
||||
import * as path from "node:path";
|
||||
import * as readline from "node:readline";
|
||||
|
||||
const SWIFT_SRC = path.join(__dirname, "speech-recognizer.swift");
|
||||
const RECOGNIZER_BIN = path.join(__dirname, "speech-recognizer");
|
||||
|
||||
function ensureBinary(): boolean {
|
||||
if (fs.existsSync(RECOGNIZER_BIN)) return true;
|
||||
try {
|
||||
execSync(`swiftc "${SWIFT_SRC}" -o "${RECOGNIZER_BIN}" -framework Speech -framework AVFoundation`, {
|
||||
timeout: 60000,
|
||||
});
|
||||
return true;
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
export default function (pi: ExtensionAPI) {
|
||||
if (process.platform !== "darwin") return;
|
||||
|
||||
let active = false;
|
||||
let recognizerProcess: ChildProcess | null = null;
|
||||
let finalized = "";
|
||||
let flashOn = true;
|
||||
let flashTimer: ReturnType<typeof setInterval> | null = null;
|
||||
let footerTui: { requestRender: () => void } | null = null;
|
||||
|
||||
function setVoiceFooter(ctx: ExtensionContext, on: boolean) {
|
||||
if (!on) {
|
||||
stopFlash();
|
||||
ctx.ui.setFooter(undefined);
|
||||
return;
|
||||
}
|
||||
|
||||
flashOn = true;
|
||||
flashTimer = setInterval(() => {
|
||||
flashOn = !flashOn;
|
||||
footerTui?.requestRender();
|
||||
}, 500);
|
||||
|
||||
ctx.ui.setFooter((tui, theme, footerData) => {
|
||||
footerTui = tui;
|
||||
const branchUnsub = footerData.onBranchChange(() => tui.requestRender());
|
||||
|
||||
return {
|
||||
dispose: branchUnsub,
|
||||
invalidate() {},
|
||||
render(width: number): string[] {
|
||||
// Row 1: pwd (branch) ... ● transcribing
|
||||
let pwd = process.cwd();
|
||||
const home = process.env.HOME || process.env.USERPROFILE;
|
||||
if (home && pwd.startsWith(home)) pwd = `~${pwd.slice(home.length)}`;
|
||||
const branch = footerData.getGitBranch();
|
||||
if (branch) pwd = `${pwd} (${branch})`;
|
||||
|
||||
const dot = flashOn ? theme.fg("error", "●") : theme.fg("dim", "●");
|
||||
const voiceTag = `${dot} ${theme.fg("error", "transcribing")}`;
|
||||
const voiceTagWidth = visibleWidth(voiceTag);
|
||||
|
||||
const maxPwdWidth = width - voiceTagWidth - 2;
|
||||
const pwdStr = truncateToWidth(theme.fg("dim", pwd), maxPwdWidth, theme.fg("dim", "..."));
|
||||
const pad1 = " ".repeat(Math.max(1, width - visibleWidth(pwdStr) - voiceTagWidth));
|
||||
const row1 = truncateToWidth(pwdStr + pad1 + voiceTag, width);
|
||||
|
||||
// Row 2: stats ... model
|
||||
let totalInput = 0, totalOutput = 0, totalCost = 0;
|
||||
for (const entry of ctx.sessionManager.getEntries()) {
|
||||
if (entry.type === "message" && entry.message.role === "assistant") {
|
||||
const m = entry.message as AssistantMessage;
|
||||
totalInput += m.usage.input;
|
||||
totalOutput += m.usage.output;
|
||||
totalCost += m.usage.cost.total;
|
||||
}
|
||||
}
|
||||
|
||||
const fmt = (n: number) => n < 1000 ? `${n}` : n < 10000 ? `${(n / 1000).toFixed(1)}k` : `${Math.round(n / 1000)}k`;
|
||||
const parts: string[] = [];
|
||||
if (totalInput) parts.push(`↑${fmt(totalInput)}`);
|
||||
if (totalOutput) parts.push(`↓${fmt(totalOutput)}`);
|
||||
if (totalCost) parts.push(`$${totalCost.toFixed(3)}`);
|
||||
|
||||
const usage = ctx.getContextUsage();
|
||||
const ctxPct = usage?.percent !== null && usage?.percent !== undefined ? `${usage.percent.toFixed(1)}%` : "?";
|
||||
const ctxWin = usage?.contextWindow ?? ctx.model?.contextWindow ?? 0;
|
||||
parts.push(`${ctxPct}/${fmt(ctxWin)}`);
|
||||
|
||||
const statsLeft = theme.fg("dim", parts.join(" "));
|
||||
const modelRight = theme.fg("dim", ctx.model?.id || "no-model");
|
||||
const statsLeftW = visibleWidth(statsLeft);
|
||||
const modelRightW = visibleWidth(modelRight);
|
||||
const pad2 = " ".repeat(Math.max(2, width - statsLeftW - modelRightW));
|
||||
const row2 = truncateToWidth(statsLeft + pad2 + modelRight, width);
|
||||
|
||||
return [row1, row2];
|
||||
},
|
||||
};
|
||||
});
|
||||
}
|
||||
|
||||
function stopFlash() {
|
||||
if (flashTimer) { clearInterval(flashTimer); flashTimer = null; }
|
||||
footerTui = null;
|
||||
}
|
||||
|
||||
async function toggleVoice(ctx: ExtensionContext) {
|
||||
if (active) {
|
||||
killRecognizer();
|
||||
active = false;
|
||||
setVoiceFooter(ctx, false);
|
||||
return;
|
||||
}
|
||||
|
||||
if (!ensureBinary()) {
|
||||
ctx.ui.notify("Voice: failed to compile speech recognizer (need Xcode CLI tools)", "error");
|
||||
return;
|
||||
}
|
||||
|
||||
active = true;
|
||||
finalized = "";
|
||||
setVoiceFooter(ctx, true);
|
||||
await runVoiceSession(ctx);
|
||||
}
|
||||
|
||||
pi.registerCommand("voice", {
|
||||
description: "Toggle voice mode",
|
||||
handler: async (_args, ctx) => toggleVoice(ctx),
|
||||
});
|
||||
|
||||
pi.registerShortcut("ctrl+alt+v", {
|
||||
description: "Toggle voice mode",
|
||||
handler: async (ctx) => toggleVoice(ctx),
|
||||
});
|
||||
|
||||
function killRecognizer() {
|
||||
if (recognizerProcess) { recognizerProcess.kill("SIGTERM"); recognizerProcess = null; }
|
||||
}
|
||||
|
||||
function startRecognizer(
|
||||
onPartial: (text: string) => void,
|
||||
onFinal: (text: string) => void,
|
||||
onError: (msg: string) => void,
|
||||
onReady: () => void,
|
||||
) {
|
||||
recognizerProcess = spawn(RECOGNIZER_BIN, [], { stdio: ["pipe", "pipe", "pipe"] });
|
||||
const rl = readline.createInterface({ input: recognizerProcess.stdout! });
|
||||
rl.on("line", (line: string) => {
|
||||
if (line === "READY") { onReady(); return; }
|
||||
if (line.startsWith("PARTIAL:")) onPartial(line.slice(8));
|
||||
else if (line.startsWith("FINAL:")) onFinal(line.slice(6));
|
||||
else if (line.startsWith("ERROR:")) onError(line.slice(6));
|
||||
});
|
||||
recognizerProcess.on("error", (err) => onError(err.message));
|
||||
recognizerProcess.on("exit", () => { recognizerProcess = null; });
|
||||
}
|
||||
|
||||
async function runVoiceSession(ctx: ExtensionContext): Promise<void> {
|
||||
return new Promise<void>((resolve) => {
|
||||
startRecognizer(
|
||||
(text) => {
|
||||
const full = finalized + (finalized && text ? " " : "") + text;
|
||||
ctx.ui.setEditorText(full);
|
||||
},
|
||||
(text) => {
|
||||
finalized = (finalized ? finalized + " " : "") + text;
|
||||
ctx.ui.setEditorText(finalized);
|
||||
},
|
||||
(msg) => ctx.ui.notify(`Voice: ${msg}`, "error"),
|
||||
() => {},
|
||||
);
|
||||
|
||||
ctx.ui.custom<void>(
|
||||
(_tui, _theme, _kb, done) => ({
|
||||
render(): string[] { return []; },
|
||||
handleInput(data: string) {
|
||||
if (isKeyRelease(data)) return;
|
||||
if (matchesKey(data, Key.escape) || matchesKey(data, Key.enter)) {
|
||||
killRecognizer();
|
||||
active = false;
|
||||
setVoiceFooter(ctx, false);
|
||||
done();
|
||||
}
|
||||
},
|
||||
invalidate() {},
|
||||
}),
|
||||
{ overlay: true, overlayOptions: { anchor: "bottom-center", width: "100%" } },
|
||||
).then(() => resolve());
|
||||
});
|
||||
}
|
||||
}
|
||||
76
src/resources/extensions/voice/speech-recognizer.swift
Normal file
76
src/resources/extensions/voice/speech-recognizer.swift
Normal file
|
|
@ -0,0 +1,76 @@
|
|||
import Foundation
|
||||
import Speech
|
||||
import AVFoundation
|
||||
|
||||
// Unbuffered stdout
|
||||
setbuf(stdout, nil)
|
||||
|
||||
guard SFSpeechRecognizer.authorizationStatus() == .authorized ||
|
||||
SFSpeechRecognizer.authorizationStatus() == .notDetermined else {
|
||||
print("ERROR:Speech recognition not authorized")
|
||||
exit(1)
|
||||
}
|
||||
|
||||
SFSpeechRecognizer.requestAuthorization { status in
|
||||
guard status == .authorized else {
|
||||
print("ERROR:Speech recognition denied")
|
||||
exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
let recognizer = SFSpeechRecognizer(locale: Locale(identifier: "en-US"))!
|
||||
guard recognizer.isAvailable else {
|
||||
print("ERROR:Speech recognizer not available")
|
||||
exit(1)
|
||||
}
|
||||
|
||||
let audioEngine = AVAudioEngine()
|
||||
let request = SFSpeechAudioBufferRecognitionRequest()
|
||||
request.shouldReportPartialResults = true
|
||||
request.requiresOnDeviceRecognition = true
|
||||
|
||||
let node = audioEngine.inputNode
|
||||
let format = node.outputFormat(forBus: 0)
|
||||
|
||||
node.installTap(onBus: 0, bufferSize: 1024, format: format) { buffer, _ in
|
||||
request.append(buffer)
|
||||
}
|
||||
|
||||
audioEngine.prepare()
|
||||
do {
|
||||
try audioEngine.start()
|
||||
print("READY")
|
||||
} catch {
|
||||
print("ERROR:Failed to start audio engine: \(error.localizedDescription)")
|
||||
exit(1)
|
||||
}
|
||||
|
||||
var lastText = ""
|
||||
|
||||
recognizer.recognitionTask(with: request) { result, error in
|
||||
if let result = result {
|
||||
let text = result.bestTranscription.formattedString
|
||||
if text != lastText {
|
||||
lastText = text
|
||||
let prefix = result.isFinal ? "FINAL" : "PARTIAL"
|
||||
print("\(prefix):\(text)")
|
||||
}
|
||||
}
|
||||
if let error = error {
|
||||
// Task finished errors are normal on kill
|
||||
let nsError = error as NSError
|
||||
if nsError.code != 216 { // kAFAssistantErrorDomain code for cancelled
|
||||
print("ERROR:\(error.localizedDescription)")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle SIGTERM/SIGINT gracefully
|
||||
signal(SIGTERM) { _ in
|
||||
exit(0)
|
||||
}
|
||||
signal(SIGINT) { _ in
|
||||
exit(0)
|
||||
}
|
||||
|
||||
RunLoop.current.run()
|
||||
|
|
@ -366,3 +366,33 @@ test("gsd launches and loads extensions without errors", async () => {
|
|||
"no ERR_MODULE_NOT_FOUND",
|
||||
);
|
||||
});
|
||||
/**
|
||||
* 9. buildResourceLoader includes ~/.pi/agent/extensions in additionalExtensionPaths
|
||||
*/
|
||||
test("buildResourceLoader source includes ~/.pi/agent/extensions path", async () => {
|
||||
const { join } = await import("node:path");
|
||||
|
||||
// Verify the source code includes the pi extensions path
|
||||
const loaderSrc = readFileSync(join(projectRoot, "src", "resource-loader.ts"), "utf-8");
|
||||
|
||||
// Check that buildResourceLoader references ~/.pi/agent
|
||||
assert.ok(
|
||||
loaderSrc.includes(".pi"),
|
||||
"resource-loader.ts references .pi directory"
|
||||
);
|
||||
assert.ok(
|
||||
loaderSrc.includes("additionalExtensionPaths"),
|
||||
"resource-loader.ts uses additionalExtensionPaths"
|
||||
);
|
||||
assert.ok(
|
||||
loaderSrc.includes("homedir()"),
|
||||
"resource-loader.ts uses homedir() to construct paths"
|
||||
);
|
||||
|
||||
// Verify the function constructs the correct path
|
||||
assert.match(
|
||||
loaderSrc,
|
||||
/join\(homedir\(\),\s*['"]\.pi['"],\s*['"]agent['"]\)/,
|
||||
"buildResourceLoader constructs ~/.pi/agent path"
|
||||
);
|
||||
});
|
||||
|
|
|
|||
52
src/tests/gh-api.test.ts
Normal file
52
src/tests/gh-api.test.ts
Normal file
|
|
@ -0,0 +1,52 @@
|
|||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { spawnSync as realSpawnSync } from "node:child_process";
|
||||
|
||||
import * as ghApiModule from "../resources/extensions/github/gh-api.ts";
|
||||
|
||||
function makeSpawnResult(overrides: Partial<ReturnType<typeof realSpawnSync>>): ReturnType<typeof realSpawnSync> {
|
||||
return {
|
||||
status: 0,
|
||||
stdout: "",
|
||||
stderr: "",
|
||||
output: [null, "", ""],
|
||||
pid: 1,
|
||||
signal: null,
|
||||
...overrides,
|
||||
} as ReturnType<typeof realSpawnSync>;
|
||||
}
|
||||
|
||||
test("hasGhCli treats zero-exit token output as authenticated", () => {
|
||||
ghApiModule.setGhSpawnForTests(() => makeSpawnResult({ stdout: "gho_test\n" }));
|
||||
|
||||
try {
|
||||
assert.equal(ghApiModule.hasGhCli(), true);
|
||||
assert.equal(ghApiModule.authMethod(), "gh CLI");
|
||||
} finally {
|
||||
ghApiModule.resetGhCliDetectionForTests();
|
||||
}
|
||||
});
|
||||
|
||||
test("hasGhCli rejects zero-exit responses with empty stdout", () => {
|
||||
ghApiModule.setGhSpawnForTests(() => makeSpawnResult({ stdout: "" }));
|
||||
|
||||
try {
|
||||
assert.equal(ghApiModule.hasGhCli(), false);
|
||||
} finally {
|
||||
ghApiModule.resetGhCliDetectionForTests();
|
||||
}
|
||||
});
|
||||
|
||||
test("hasGhCli rejects spawnSync error even with zero exit", () => {
|
||||
ghApiModule.setGhSpawnForTests(() => makeSpawnResult({
|
||||
stdout: "gho_test\n",
|
||||
stderr: "EPERM",
|
||||
error: new Error("spawnSync gh EPERM"),
|
||||
}));
|
||||
|
||||
try {
|
||||
assert.equal(ghApiModule.hasGhCli(), false);
|
||||
} finally {
|
||||
ghApiModule.resetGhCliDetectionForTests();
|
||||
}
|
||||
});
|
||||
73
src/tests/tool-bootstrap.test.ts
Normal file
73
src/tests/tool-bootstrap.test.ts
Normal file
|
|
@ -0,0 +1,73 @@
|
|||
import test from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import { chmodSync, existsSync, lstatSync, mkdtempSync, mkdirSync, readFileSync, rmSync, symlinkSync, writeFileSync } from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import { tmpdir } from "node:os";
|
||||
|
||||
import { ensureManagedTools, resolveToolFromPath } from "../tool-bootstrap.js";
|
||||
|
||||
function makeExecutable(dir: string, name: string, content = "#!/bin/sh\nexit 0\n"): string {
|
||||
const file = join(dir, name);
|
||||
writeFileSync(file, content);
|
||||
chmodSync(file, 0o755);
|
||||
return file;
|
||||
}
|
||||
|
||||
test("resolveToolFromPath finds fd via fdfind fallback", () => {
|
||||
const tmp = mkdtempSync(join(tmpdir(), "gsd-tool-bootstrap-resolve-"));
|
||||
try {
|
||||
makeExecutable(tmp, "fdfind");
|
||||
const resolved = resolveToolFromPath("fd", tmp);
|
||||
assert.equal(resolved, join(tmp, "fdfind"));
|
||||
} finally {
|
||||
rmSync(tmp, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("ensureManagedTools provisions fd and rg into managed bin dir", () => {
|
||||
const tmp = mkdtempSync(join(tmpdir(), "gsd-tool-bootstrap-provision-"));
|
||||
const sourceBin = join(tmp, "source-bin");
|
||||
const targetBin = join(tmp, "target-bin");
|
||||
|
||||
mkdirSync(sourceBin, { recursive: true });
|
||||
mkdirSync(targetBin, { recursive: true });
|
||||
|
||||
try {
|
||||
makeExecutable(sourceBin, "fdfind");
|
||||
makeExecutable(sourceBin, "rg");
|
||||
|
||||
const provisioned = ensureManagedTools(targetBin, sourceBin);
|
||||
|
||||
assert.equal(provisioned.length, 2);
|
||||
assert.ok(existsSync(join(targetBin, "fd")));
|
||||
assert.ok(existsSync(join(targetBin, "rg")));
|
||||
assert.ok(lstatSync(join(targetBin, "fd")).isSymbolicLink() || lstatSync(join(targetBin, "fd")).isFile());
|
||||
assert.ok(lstatSync(join(targetBin, "rg")).isSymbolicLink() || lstatSync(join(targetBin, "rg")).isFile());
|
||||
} finally {
|
||||
rmSync(tmp, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
|
||||
test("ensureManagedTools copies executable when symlink target already exists as a broken link", () => {
|
||||
const tmp = mkdtempSync(join(tmpdir(), "gsd-tool-bootstrap-copy-"));
|
||||
const sourceBin = join(tmp, "source-bin");
|
||||
const targetBin = join(tmp, "target-bin");
|
||||
const targetFd = join(targetBin, "fd");
|
||||
|
||||
mkdirSync(sourceBin, { recursive: true });
|
||||
mkdirSync(targetBin, { recursive: true });
|
||||
|
||||
try {
|
||||
makeExecutable(sourceBin, "fdfind", "#!/bin/sh\necho fd\n");
|
||||
makeExecutable(sourceBin, "rg", "#!/bin/sh\necho rg\n");
|
||||
symlinkSync(join(tmp, "missing-target"), targetFd);
|
||||
|
||||
const provisioned = ensureManagedTools(targetBin, sourceBin);
|
||||
|
||||
assert.equal(provisioned.length, 2);
|
||||
assert.ok(lstatSync(targetFd).isFile(), "fd fallback should replace broken symlink with a copied file");
|
||||
assert.match(readFileSync(targetFd, "utf8"), /echo fd/);
|
||||
} finally {
|
||||
rmSync(tmp, { recursive: true, force: true });
|
||||
}
|
||||
});
|
||||
85
src/tool-bootstrap.ts
Normal file
85
src/tool-bootstrap.ts
Normal file
|
|
@ -0,0 +1,85 @@
|
|||
import { chmodSync, copyFileSync, existsSync, lstatSync, mkdirSync, rmSync, symlinkSync } from "node:fs";
|
||||
import { delimiter, join } from "node:path";
|
||||
|
||||
type ManagedTool = "fd" | "rg";
|
||||
|
||||
interface ToolSpec {
|
||||
targetName: string;
|
||||
candidates: string[];
|
||||
}
|
||||
|
||||
const TOOL_SPECS: Record<ManagedTool, ToolSpec> = {
|
||||
fd: {
|
||||
targetName: process.platform === "win32" ? "fd.exe" : "fd",
|
||||
candidates: process.platform === "win32" ? ["fd.exe", "fd", "fdfind.exe", "fdfind"] : ["fd", "fdfind"],
|
||||
},
|
||||
rg: {
|
||||
targetName: process.platform === "win32" ? "rg.exe" : "rg",
|
||||
candidates: process.platform === "win32" ? ["rg.exe", "rg"] : ["rg"],
|
||||
},
|
||||
};
|
||||
|
||||
function splitPath(pathValue: string | undefined): string[] {
|
||||
if (!pathValue) return [];
|
||||
return pathValue.split(delimiter).map((segment) => segment.trim()).filter(Boolean);
|
||||
}
|
||||
|
||||
function getCandidateNames(name: string): string[] {
|
||||
if (process.platform !== "win32") return [name];
|
||||
const lower = name.toLowerCase();
|
||||
if (lower.endsWith(".exe") || lower.endsWith(".cmd") || lower.endsWith(".bat")) return [name];
|
||||
return [name, `${name}.exe`, `${name}.cmd`, `${name}.bat`];
|
||||
}
|
||||
|
||||
function isRegularFile(path: string): boolean {
|
||||
try {
|
||||
return lstatSync(path).isFile() || lstatSync(path).isSymbolicLink();
|
||||
} catch {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
export function resolveToolFromPath(tool: ManagedTool, pathValue: string | undefined = process.env.PATH): string | null {
|
||||
const spec = TOOL_SPECS[tool];
|
||||
for (const dir of splitPath(pathValue)) {
|
||||
for (const candidate of spec.candidates) {
|
||||
for (const name of getCandidateNames(candidate)) {
|
||||
const fullPath = join(dir, name);
|
||||
if (existsSync(fullPath) && isRegularFile(fullPath)) {
|
||||
return fullPath;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return null;
|
||||
}
|
||||
|
||||
function provisionTool(targetDir: string, tool: ManagedTool, sourcePath: string): string {
|
||||
const targetPath = join(targetDir, TOOL_SPECS[tool].targetName);
|
||||
if (existsSync(targetPath)) return targetPath;
|
||||
|
||||
mkdirSync(targetDir, { recursive: true });
|
||||
|
||||
try {
|
||||
symlinkSync(sourcePath, targetPath);
|
||||
} catch {
|
||||
rmSync(targetPath, { force: true });
|
||||
copyFileSync(sourcePath, targetPath);
|
||||
chmodSync(targetPath, 0o755);
|
||||
}
|
||||
|
||||
return targetPath;
|
||||
}
|
||||
|
||||
export function ensureManagedTools(targetDir: string, pathValue: string | undefined = process.env.PATH): string[] {
|
||||
const provisioned: string[] = [];
|
||||
|
||||
for (const tool of Object.keys(TOOL_SPECS) as ManagedTool[]) {
|
||||
if (existsSync(join(targetDir, TOOL_SPECS[tool].targetName))) continue;
|
||||
const sourcePath = resolveToolFromPath(tool, pathValue);
|
||||
if (!sourcePath) continue;
|
||||
provisioned.push(provisionTool(targetDir, tool, sourcePath));
|
||||
}
|
||||
|
||||
return provisioned;
|
||||
}
|
||||
Loading…
Add table
Reference in a new issue