feat: add /review skill, /test skill, chokidar file watcher, subcommand help
- Add /review skill: reviews staged/unstaged/commit changes for security, performance, bugs, and quality with structured findings by severity
- Add /test skill: auto-detects test framework, generates comprehensive tests for source files, or runs suites with failure analysis
- Add chokidar file watcher: watches ~/.gsd/agent/ for config changes (settings.json, auth.json, models.json, extensions/) with debounced events on an EventBus
- Add --help per subcommand: `gsd config --help` and `gsd update --help` show subcommand-specific usage information
- 8 new file-watcher tests (start/stop, event emission, debouncing, unrelated file filtering)
This commit is contained in:
parent
a90aa0c8d6
commit
0b3163d297
8 changed files with 727 additions and 3 deletions
33
package-lock.json
generated
33
package-lock.json
generated
|
|
@ -1,12 +1,12 @@
|
|||
{
|
||||
"name": "gsd-pi",
|
||||
"version": "2.19.0",
|
||||
"version": "2.20.0",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "gsd-pi",
|
||||
"version": "2.19.0",
|
||||
"version": "2.20.0",
|
||||
"hasInstallScript": true,
|
||||
"license": "MIT",
|
||||
"workspaces": [
|
||||
|
|
@ -25,6 +25,7 @@
|
|||
"ajv": "^8.17.1",
|
||||
"ajv-formats": "^3.0.1",
|
||||
"chalk": "^5.6.2",
|
||||
"chokidar": "^5.0.0",
|
||||
"diff": "^8.0.2",
|
||||
"extract-zip": "^2.0.1",
|
||||
"file-type": "^21.1.1",
|
||||
|
|
@ -2530,6 +2531,21 @@
|
|||
"url": "https://github.com/chalk/chalk?sponsor=1"
|
||||
}
|
||||
},
|
||||
"node_modules/chokidar": {
|
||||
"version": "5.0.0",
|
||||
"resolved": "https://registry.npmjs.org/chokidar/-/chokidar-5.0.0.tgz",
|
||||
"integrity": "sha512-TQMmc3w+5AxjpL8iIiwebF73dRDF4fBIieAqGn9RGCWaEVwQ6Fb2cGe31Yns0RRIzii5goJ1Y7xbMwo1TxMplw==",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"readdirp": "^5.0.0"
|
||||
},
|
||||
"engines": {
|
||||
"node": ">= 20.19.0"
|
||||
},
|
||||
"funding": {
|
||||
"url": "https://paulmillr.com/funding/"
|
||||
}
|
||||
},
|
||||
"node_modules/cliui": {
|
||||
"version": "8.0.1",
|
||||
"resolved": "https://registry.npmjs.org/cliui/-/cliui-8.0.1.tgz",
|
||||
|
|
@ -3791,6 +3807,19 @@
|
|||
"once": "^1.3.1"
|
||||
}
|
||||
},
|
||||
"node_modules/readdirp": {
|
||||
"version": "5.0.0",
|
||||
"resolved": "https://registry.npmjs.org/readdirp/-/readdirp-5.0.0.tgz",
|
||||
"integrity": "sha512-9u/XQ1pvrQtYyMpZe7DXKv2p5CNvyVwzUB6uhLAnQwHMSgKMBR62lc7AHljaeteeHXn11XTAaLLUVZYVZyuRBQ==",
|
||||
"license": "MIT",
|
||||
"engines": {
|
||||
"node": ">= 20.19.0"
|
||||
},
|
||||
"funding": {
|
||||
"type": "individual",
|
||||
"url": "https://paulmillr.com/funding/"
|
||||
}
|
||||
},
|
||||
"node_modules/require-directory": {
|
||||
"version": "2.1.1",
|
||||
"resolved": "https://registry.npmjs.org/require-directory/-/require-directory-2.1.1.tgz",
|
||||
|
|
|
|||
|
|
@ -78,6 +78,7 @@
|
|||
"ajv": "^8.17.1",
|
||||
"ajv-formats": "^3.0.1",
|
||||
"chalk": "^5.6.2",
|
||||
"chokidar": "^5.0.0",
|
||||
"diff": "^8.0.2",
|
||||
"extract-zip": "^2.0.1",
|
||||
"file-type": "^21.1.1",
|
||||
|
|
|
|||
10
src/cli.ts
10
src/cli.ts
|
|
@ -19,7 +19,7 @@ import { getPiDefaultModelAndProvider, migratePiCredentials } from './pi-migrati
|
|||
import { shouldRunOnboarding, runOnboarding } from './onboarding.js'
|
||||
import chalk from 'chalk'
|
||||
import { checkForUpdates } from './update-check.js'
|
||||
import { printHelp } from './help-text.js'
|
||||
import { printHelp, printSubcommandHelp } from './help-text.js'
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Minimal CLI arg parser — detects print/subagent mode flags
|
||||
|
|
@ -92,6 +92,14 @@ function parseCliArgs(argv: string[]): CliFlags {
|
|||
const cliFlags = parseCliArgs(process.argv)
|
||||
const isPrintMode = cliFlags.print || cliFlags.mode !== undefined
|
||||
|
||||
// `gsd <subcommand> --help` — show subcommand-specific help
|
||||
const subcommand = cliFlags.messages[0]
|
||||
if (subcommand && process.argv.includes('--help')) {
|
||||
if (printSubcommandHelp(subcommand, process.env.GSD_VERSION || '0.0.0')) {
|
||||
process.exit(0)
|
||||
}
|
||||
}
|
||||
|
||||
// `gsd config` — replay the setup wizard and exit
|
||||
if (cliFlags.messages[0] === 'config') {
|
||||
const authStorage = AuthStorage.create(authFilePath)
|
||||
|
|
|
|||
|
|
@ -1,3 +1,25 @@
|
|||
const SUBCOMMAND_HELP: Record<string, string> = {
|
||||
config: [
|
||||
'Usage: gsd config',
|
||||
'',
|
||||
'Re-run the interactive setup wizard to configure:',
|
||||
' - LLM provider (Anthropic, OpenAI, Google, etc.)',
|
||||
' - Web search provider (Brave, Tavily, built-in)',
|
||||
' - Remote questions (Discord, Slack, Telegram)',
|
||||
' - Tool API keys (Context7, Jina, Groq)',
|
||||
'',
|
||||
'All steps are skippable and can be changed later with /login or /search-provider.',
|
||||
].join('\n'),
|
||||
|
||||
update: [
|
||||
'Usage: gsd update',
|
||||
'',
|
||||
'Update GSD to the latest version.',
|
||||
'',
|
||||
'Equivalent to: npm install -g gsd-pi@latest',
|
||||
].join('\n'),
|
||||
}
|
||||
|
||||
export function printHelp(version: string): void {
|
||||
process.stdout.write(`GSD v${version} — Get Shit Done\n\n`)
|
||||
process.stdout.write('Usage: gsd [options] [message...]\n\n')
|
||||
|
|
@ -15,4 +37,13 @@ export function printHelp(version: string): void {
|
|||
process.stdout.write('\nSubcommands:\n')
|
||||
process.stdout.write(' config Re-run the setup wizard\n')
|
||||
process.stdout.write(' update Update GSD to the latest version\n')
|
||||
process.stdout.write('\nRun gsd <subcommand> --help for subcommand-specific help.\n')
|
||||
}
|
||||
|
||||
export function printSubcommandHelp(subcommand: string, version: string): boolean {
|
||||
const help = SUBCOMMAND_HELP[subcommand]
|
||||
if (!help) return false
|
||||
process.stdout.write(`GSD v${version} — Get Shit Done\n\n`)
|
||||
process.stdout.write(help + '\n')
|
||||
return true
|
||||
}
|
||||
|
|
|
|||
97
src/resources/extensions/gsd/file-watcher.ts
Normal file
97
src/resources/extensions/gsd/file-watcher.ts
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
import type { FSWatcher } from "chokidar";
|
||||
import type { EventBus } from "@gsd/pi-coding-agent";
|
||||
|
||||
let watcher: FSWatcher | null = null;
|
||||
|
||||
const EVENT_MAP: Record<string, string> = {
|
||||
"settings.json": "settings-changed",
|
||||
"auth.json": "auth-changed",
|
||||
"models.json": "models-changed",
|
||||
};
|
||||
|
||||
const EXTENSIONS_DIR = "extensions";
|
||||
|
||||
const IGNORED_PATTERNS = [
|
||||
"**/sessions/**",
|
||||
"**/*.tmp",
|
||||
"**/*.swp",
|
||||
"**/*~",
|
||||
"**/.DS_Store",
|
||||
];
|
||||
|
||||
const DEBOUNCE_MS = 300;
|
||||
|
||||
/**
|
||||
* Start watching `agentDir` (e.g. `~/.gsd/agent/`) for config changes.
|
||||
* Emits events on the supplied EventBus when watched files are modified.
|
||||
*/
|
||||
export async function startFileWatcher(
|
||||
agentDir: string,
|
||||
eventBus: EventBus,
|
||||
): Promise<void> {
|
||||
if (watcher) {
|
||||
await watcher.close();
|
||||
}
|
||||
|
||||
const { watch } = await import("chokidar");
|
||||
|
||||
const pending = new Map<string, ReturnType<typeof setTimeout>>();
|
||||
|
||||
function debounceEmit(event: string): void {
|
||||
const existing = pending.get(event);
|
||||
if (existing) clearTimeout(existing);
|
||||
pending.set(
|
||||
event,
|
||||
setTimeout(() => {
|
||||
pending.delete(event);
|
||||
eventBus.emit(event, { timestamp: Date.now() });
|
||||
}, DEBOUNCE_MS),
|
||||
);
|
||||
}
|
||||
|
||||
function resolveEvent(filePath: string): string | null {
|
||||
const relative = filePath
|
||||
.replace(agentDir, "")
|
||||
.replace(/^[/\\]+/, "");
|
||||
|
||||
// Check direct file matches
|
||||
for (const [file, event] of Object.entries(EVENT_MAP)) {
|
||||
if (relative === file) return event;
|
||||
}
|
||||
|
||||
// Check extensions directory
|
||||
if (relative.startsWith(EXTENSIONS_DIR + "/") || relative === EXTENSIONS_DIR) {
|
||||
return "extensions-changed";
|
||||
}
|
||||
|
||||
return null;
|
||||
}
|
||||
|
||||
watcher = watch(agentDir, {
|
||||
ignoreInitial: true,
|
||||
depth: 2,
|
||||
ignored: IGNORED_PATTERNS,
|
||||
});
|
||||
|
||||
for (const eventType of ["add", "change", "unlink"] as const) {
|
||||
watcher.on(eventType, (filePath: string) => {
|
||||
const event = resolveEvent(filePath);
|
||||
if (event) debounceEmit(event);
|
||||
});
|
||||
}
|
||||
|
||||
// Wait for watcher to be ready
|
||||
await new Promise<void>((resolve) => {
|
||||
watcher!.on("ready", resolve);
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop the file watcher and clean up resources.
|
||||
*/
|
||||
export async function stopFileWatcher(): Promise<void> {
|
||||
if (watcher) {
|
||||
await watcher.close();
|
||||
watcher = null;
|
||||
}
|
||||
}
|
||||
214
src/resources/skills/review/SKILL.md
Normal file
214
src/resources/skills/review/SKILL.md
Normal file
|
|
@ -0,0 +1,214 @@
|
|||
---
|
||||
name: review
|
||||
description: Review code changes for security, performance, bugs, and quality. Reviews staged changes, unstaged changes, specific commits, or PR-ready diffs.
|
||||
---
|
||||
|
||||
<objective>
|
||||
Review code changes and provide structured feedback covering security, performance, bug risks, code quality, and test coverage gaps. This skill analyzes diffs and surrounding context to catch issues before they reach production.
|
||||
</objective>
|
||||
|
||||
<context>
|
||||
This skill reviews code changes at various stages of the development workflow. It can review staged changes before a commit, unstaged work-in-progress, a specific commit, or the full set of changes on a branch that are ready for a pull request.
|
||||
|
||||
The reviewer reads both the diff and the surrounding source files to understand intent and catch issues that only appear in context.
|
||||
</context>
|
||||
|
||||
<core_principle>
|
||||
**FIND REAL ISSUES, NOT STYLE NITS.** Focus on problems that cause bugs, security vulnerabilities, performance degradation, or maintainability pain. Avoid nitpicking formatting or subjective style preferences unless they harm readability.
|
||||
</core_principle>
|
||||
|
||||
<analysis_only_rule>
|
||||
**THIS SKILL IS READ-ONLY. DO NOT MODIFY CODE.**
|
||||
|
||||
The purpose is to review and report findings. Making changes during review conflates the reviewer and author roles. Present findings and let the user decide what to act on.
|
||||
</analysis_only_rule>
|
||||
|
||||
<quick_start>
|
||||
|
||||
<determine_review_scope>
|
||||
|
||||
Parse the user's input to determine what to review:
|
||||
|
||||
1. **No arguments** - Review staged changes first. If nothing is staged, review unstaged changes.
|
||||
- Staged: `git diff --cached`
|
||||
- Unstaged: `git diff`
|
||||
- If both are empty, review the most recent commit: `git show HEAD`
|
||||
|
||||
2. **Commit hash argument** (e.g., `/review abc1234`) - Review that specific commit.
|
||||
- `git show <hash>`
|
||||
|
||||
3. **File path argument** (e.g., `/review src/foo.ts`) - Review unstaged changes in that file.
|
||||
- `git diff -- <path>` then fall back to `git diff --cached -- <path>`
|
||||
|
||||
4. **"pr" argument** (e.g., `/review pr`) - Review all changes since branching from main.
|
||||
- `git diff main...HEAD`
|
||||
- If on main, review `git diff HEAD~1`
|
||||
|
||||
After obtaining the diff, if it is empty, inform the user that there are no changes to review and stop.
|
||||
|
||||
</determine_review_scope>
|
||||
|
||||
<gather_context>
|
||||
|
||||
Before analyzing the diff:
|
||||
|
||||
1. **Read changed files in full** - Do not review a diff in isolation. Read each modified file to understand the surrounding code, imports, types, and control flow.
|
||||
2. **Identify the tech stack** - Note languages, frameworks, and libraries in use. This affects what patterns are risky.
|
||||
3. **Check for related test files** - For each changed source file, look for corresponding test files. Note whether tests were updated alongside the changes.
|
||||
4. **Check for configuration changes** - If config files changed (env, CI, package.json, tsconfig, etc.), pay extra attention to side effects.
|
||||
|
||||
</gather_context>
|
||||
|
||||
<review_categories>
|
||||
|
||||
Analyze the changes against each category below. Only report findings that are actually present. Skip categories with no issues.
|
||||
|
||||
**A. Security Issues** (Severity: CRITICAL or HIGH)
|
||||
- Injection vulnerabilities (SQL injection, command injection, template injection)
|
||||
- Cross-site scripting (XSS) - unsanitized user input rendered in HTML
|
||||
- Authentication and authorization flaws (missing auth checks, privilege escalation)
|
||||
- Secrets or credentials hardcoded or logged
|
||||
- Insecure deserialization or unsafe eval usage
|
||||
- Path traversal or file access vulnerabilities
|
||||
- Missing input validation on external data
|
||||
|
||||
**B. Performance Concerns** (Severity: HIGH or MEDIUM)
|
||||
- N+1 query patterns in database access
|
||||
- Unnecessary memory allocations in hot paths or loops
|
||||
- Blocking operations on the main thread or in async contexts
|
||||
- Missing pagination on unbounded queries
|
||||
- Redundant computation that could be cached or memoized
|
||||
- Large payloads without streaming or chunking
|
||||
|
||||
**C. Bug Risks** (Severity: HIGH or MEDIUM)
|
||||
- Off-by-one errors in loops or array access
|
||||
- Null/undefined dereferences without guards
|
||||
- Race conditions in concurrent or async code
|
||||
- Incorrect error handling (swallowed errors, wrong error types)
|
||||
- Type mismatches or unsafe type assertions
|
||||
- Logic errors in conditionals (inverted checks, missing cases)
|
||||
- Resource leaks (unclosed connections, file handles, listeners)
|
||||
|
||||
**D. Code Quality** (Severity: MEDIUM or LOW)
|
||||
- Unclear or misleading naming
|
||||
- Significant code duplication that should be extracted
|
||||
- Excessive complexity (deeply nested logic, functions doing too many things)
|
||||
- Dead code or unreachable branches
|
||||
- Missing or misleading comments on non-obvious logic
|
||||
- Inconsistency with patterns used elsewhere in the codebase
|
||||
|
||||
**E. Test Coverage Gaps** (Severity: MEDIUM or LOW)
|
||||
- New logic paths without corresponding test cases
|
||||
- Changed behavior without updated tests
|
||||
- Edge cases not covered (empty inputs, boundary values, error paths)
|
||||
- Missing integration tests for new API endpoints or database changes
|
||||
|
||||
</review_categories>
|
||||
|
||||
<format_findings>
|
||||
|
||||
For each finding, use this structure:
|
||||
|
||||
```
|
||||
### [SEVERITY] Category: Brief Title
|
||||
|
||||
**File**: `path/to/file.ext` (lines X-Y)
|
||||
|
||||
**Issue**: Clear description of the problem.
|
||||
|
||||
**Why it matters**: What could go wrong if this is not addressed.
|
||||
|
||||
**Suggestion**: How to fix it, with a code snippet if helpful.
|
||||
```
|
||||
|
||||
Severity levels:
|
||||
- **CRITICAL** - Must fix before merge. Security vulnerability or data loss risk.
|
||||
- **HIGH** - Should fix before merge. Likely bug or significant performance issue.
|
||||
- **MEDIUM** - Should fix soon. Code quality or moderate risk issue.
|
||||
- **LOW** - Consider fixing. Minor improvement opportunity.
|
||||
|
||||
</format_findings>
|
||||
|
||||
</quick_start>
|
||||
|
||||
<critical_rules>
|
||||
|
||||
1. **READ THE FULL FILE**: Never review a diff without reading the complete source file for context
|
||||
2. **NO FALSE ALARMS**: Only report issues you can explain concretely. Do not report vague concerns
|
||||
3. **PRIORITIZE**: Lead with the most severe findings. Do not bury critical issues under style nits
|
||||
4. **BE SPECIFIC**: Include file paths, line numbers, and code references for every finding
|
||||
5. **EXPLAIN THE RISK**: For each finding, explain what could actually go wrong
|
||||
6. **CHECK TESTS**: Always check whether changes have corresponding test updates
|
||||
7. **CONSIDER THE STACK**: Apply language-specific and framework-specific knowledge to your review
|
||||
8. **DO NOT MODIFY CODE**: Present findings only. The user decides what to act on
|
||||
|
||||
</critical_rules>
|
||||
|
||||
<output_format>
|
||||
|
||||
```markdown
|
||||
## Code Review: [brief description of what was reviewed]
|
||||
|
||||
**Scope**: [staged changes | unstaged changes | commit abc1234 | PR changes from main]
|
||||
**Files reviewed**: [count] files changed, [additions] additions, [deletions] deletions
|
||||
|
||||
---
|
||||
|
||||
### Findings
|
||||
|
||||
[Findings grouped by severity, highest first. Use the format from <format_findings>.]
|
||||
|
||||
---
|
||||
|
||||
### Summary
|
||||
|
||||
| Severity | Count |
|
||||
|----------|-------|
|
||||
| CRITICAL | X |
|
||||
| HIGH | X |
|
||||
| MEDIUM | X |
|
||||
| LOW | X |
|
||||
|
||||
### Recommended Actions
|
||||
|
||||
1. [Most important action to take]
|
||||
2. [Next most important action]
|
||||
3. [...]
|
||||
```
|
||||
|
||||
If no issues are found:
|
||||
|
||||
```markdown
|
||||
## Code Review: [brief description]
|
||||
|
||||
**Scope**: [what was reviewed]
|
||||
**Files reviewed**: [count]
|
||||
|
||||
No significant issues found. The changes look good to merge.
|
||||
```
|
||||
|
||||
</output_format>
|
||||
|
||||
<decision_gate>
|
||||
|
||||
**After presenting findings, ALWAYS offer these options:**
|
||||
|
||||
```
|
||||
─────────────────────────────────────────
|
||||
REVIEW COMPLETE
|
||||
|
||||
What would you like to do?
|
||||
|
||||
1. **Fix issues** - I'll address the findings starting with the most critical
|
||||
2. **Save review** - Export findings to a markdown file
|
||||
3. **Review again** - Re-review with different scope or focus
|
||||
4. **Discuss a finding** - Ask questions about a specific issue
|
||||
5. **Other** - Tell me what you need
|
||||
─────────────────────────────────────────
|
||||
```
|
||||
|
||||
**Wait for user response before taking any action.**
|
||||
|
||||
This gate is MANDATORY. Never skip it. Never auto-implement fixes.
|
||||
|
||||
</decision_gate>
|
||||
201
src/resources/skills/test/SKILL.md
Normal file
201
src/resources/skills/test/SKILL.md
Normal file
|
|
@ -0,0 +1,201 @@
|
|||
---
|
||||
name: test
|
||||
description: Generate or run tests. Auto-detects test framework, generates comprehensive tests for source files, or runs existing test suites with failure analysis.
|
||||
---
|
||||
|
||||
<objective>
|
||||
Generate or run tests for the current project. This skill auto-detects the test framework in use, generates comprehensive tests for source files, or runs existing test suites and analyzes failures.
|
||||
|
||||
Accepts optional arguments:
|
||||
- A file path: generate tests for that source file
|
||||
- `run`: run the existing test suite and analyze results
|
||||
- No arguments: suggest what to test based on recent changes
|
||||
</objective>
|
||||
|
||||
<context>
|
||||
This skill handles test generation and execution across multiple languages and frameworks. It adapts to whatever testing conventions the project already uses rather than imposing new ones.
|
||||
</context>
|
||||
|
||||
<quick_start>
|
||||
|
||||
<step_1_detect_framework>
|
||||
|
||||
**Detect the test framework and conventions before doing anything else.**
|
||||
|
||||
Check these sources in order:
|
||||
|
||||
1. **package.json** (Node/JS/TS projects):
|
||||
- `scripts.test` for the test command
|
||||
- `devDependencies` for jest, vitest, mocha, ava, tap, node:test, playwright, cypress
|
||||
- `jest` or `vitest` config keys
|
||||
|
||||
2. **Config files**:
|
||||
- `jest.config.*`, `vitest.config.*`, `.mocharc.*`, `ava.config.*`
|
||||
- `pytest.ini`, `pyproject.toml` (look for `[tool.pytest]`), `setup.cfg`
|
||||
- `go.mod` (Go projects use `go test` by default)
|
||||
- `Cargo.toml` (Rust projects use `cargo test`)
|
||||
|
||||
3. **Existing test files**:
|
||||
- Scan for `*.test.*`, `*.spec.*`, `*_test.*`, `test_*.*` files
|
||||
- Read 1-2 existing test files to understand patterns, imports, assertion style, and structure
|
||||
- Note the directory structure (co-located tests vs `__tests__/` vs `tests/` vs `test/`)
|
||||
|
||||
4. **Record your findings**:
|
||||
- Framework name and version
|
||||
- Test file naming convention
|
||||
- Test file location convention
|
||||
- Import/require style
|
||||
- Assertion style (expect, assert, chai, etc.)
|
||||
- Any custom utilities, fixtures, or helpers used
|
||||
|
||||
</step_1_detect_framework>
|
||||
|
||||
<step_2_handle_arguments>
|
||||
|
||||
**Route based on the argument provided.**
|
||||
|
||||
- **File path given** -> Go to `generate_tests`
|
||||
- **"run" given** -> Go to `run_tests`
|
||||
- **No arguments** -> Go to `suggest_tests`
|
||||
|
||||
</step_2_handle_arguments>
|
||||
|
||||
<generate_tests>
|
||||
|
||||
**Generate tests for the specified source file.**
|
||||
|
||||
**A. Read and analyze the source file:**
|
||||
- Identify all exported/public functions, classes, methods, and types
|
||||
- Understand each function's parameters, return types, and side effects
|
||||
- Note error handling patterns (throws, returns null, returns Result, etc.)
|
||||
- Identify dependencies that will need mocking
|
||||
|
||||
**B. Read existing test files in the project (1-2 files minimum):**
|
||||
- Match their import style exactly
|
||||
- Match their describe/it or test block structure
|
||||
- Match their assertion patterns
|
||||
- Match their mock/stub approach
|
||||
- Use the same test utilities and helpers
|
||||
|
||||
**C. Generate tests covering:**
|
||||
|
||||
1. **Happy paths**: Normal expected inputs produce correct outputs
|
||||
2. **Edge cases**:
|
||||
- Empty inputs (empty string, empty array, null, undefined, zero)
|
||||
- Boundary values (min/max integers, very long strings)
|
||||
- Single element collections
|
||||
3. **Error handling**:
|
||||
- Invalid inputs that should throw or return errors
|
||||
- Missing required parameters
|
||||
- Type mismatches (if applicable)
|
||||
4. **Async behavior** (if the function is async):
|
||||
- Successful resolution
|
||||
- Rejection/error cases
|
||||
- Timeout scenarios (if relevant)
|
||||
5. **Dependencies**:
|
||||
- Mock external dependencies (APIs, databases, file system)
|
||||
- Verify correct interaction with dependencies (called with right args)
|
||||
|
||||
**D. Place the test file correctly:**
|
||||
- Follow the project's existing convention for test file location
|
||||
- Use the project's naming convention (`.test.ts`, `.spec.js`, `_test.go`, `test_*.py`, etc.)
|
||||
|
||||
**E. Run the generated tests immediately to verify they pass.**
|
||||
- If tests fail, read the error output carefully
|
||||
- Fix the test code (not the source code)
|
||||
- Re-run until all tests pass
|
||||
|
||||
</generate_tests>
|
||||
|
||||
<run_tests>
|
||||
|
||||
**Run the existing test suite and analyze results.**
|
||||
|
||||
**A. Determine the test command:**
|
||||
- Check `package.json` `scripts.test` for Node projects
|
||||
- Use `pytest` for Python projects
|
||||
- Use `go test ./...` for Go projects
|
||||
- Use `cargo test` for Rust projects
|
||||
- Fall back to the detected framework's CLI
|
||||
|
||||
**B. Run the tests:**
|
||||
- Execute the test command
|
||||
- Capture full output including failures and errors
|
||||
|
||||
**C. Analyze results:**
|
||||
- Report total passed, failed, skipped counts
|
||||
- For each failure:
|
||||
- Identify the failing test name and file
|
||||
- Show the assertion that failed (expected vs actual)
|
||||
- Read the relevant source code if needed
|
||||
- Provide a specific diagnosis of why it failed
|
||||
- Suggest a concrete fix (is it a test bug or a source bug?)
|
||||
|
||||
**D. Present a summary:**
|
||||
|
||||
```
|
||||
Test Results: X passed, Y failed, Z skipped
|
||||
|
||||
Failures:
|
||||
1. [test name] - [brief diagnosis]
|
||||
Fix: [specific suggestion]
|
||||
|
||||
2. [test name] - [brief diagnosis]
|
||||
Fix: [specific suggestion]
|
||||
```
|
||||
|
||||
</run_tests>
|
||||
|
||||
<suggest_tests>
|
||||
|
||||
**Suggest what to test when no arguments are given.**
|
||||
|
||||
**A. Check recent changes:**
|
||||
- Run `git diff --name-only HEAD~5` to find recently changed files
|
||||
- Run `git diff --name-only --cached` for staged files
|
||||
- Filter to source files (exclude configs, docs, lockfiles)
|
||||
|
||||
**B. Check test coverage gaps:**
|
||||
- Find source files that have no corresponding test file
|
||||
- Prioritize files that were recently modified
|
||||
|
||||
**C. Present suggestions:**
|
||||
|
||||
```
|
||||
Suggested files to test (based on recent changes and coverage gaps):
|
||||
|
||||
1. [file path] - modified recently, no test file exists
|
||||
2. [file path] - modified recently, tests exist but may need updating
|
||||
3. [file path] - no test coverage found
|
||||
|
||||
Run `/test <file path>` to generate tests for any of these.
|
||||
Run `/test run` to run the existing test suite.
|
||||
```
|
||||
|
||||
</suggest_tests>
|
||||
|
||||
</quick_start>
|
||||
|
||||
<critical_rules>
|
||||
|
||||
1. **MATCH EXISTING PATTERNS**: Never impose a new test style. Always mirror what the project already does.
|
||||
2. **READ BEFORE WRITING**: Always read existing test files before generating new ones.
|
||||
3. **VERIFY GENERATED TESTS**: Always run generated tests. Untested test code is unreliable.
|
||||
4. **DON'T MODIFY SOURCE CODE**: If generated tests fail, fix the tests, not the source. If the source has a real bug, report it to the user.
|
||||
5. **MOCK EXTERNAL DEPENDENCIES**: Never let tests hit real APIs, databases, or file systems unless the project explicitly uses integration tests that way.
|
||||
6. **ONE FILE AT A TIME**: Generate tests for one source file per invocation. Keep scope manageable.
|
||||
7. **USE PROJECT DEPENDENCIES**: Only use test libraries already installed in the project. Do not add new dependencies without asking.
|
||||
|
||||
</critical_rules>
|
||||
|
||||
<success_criteria>
|
||||
|
||||
Before completing:
|
||||
- [ ] Test framework and conventions were detected correctly
|
||||
- [ ] Generated tests match the project's existing test style
|
||||
- [ ] All generated tests pass when run
|
||||
- [ ] Tests cover happy paths, edge cases, and error handling
|
||||
- [ ] Test file is placed in the correct location with the correct naming convention
|
||||
- [ ] No source code was modified
|
||||
|
||||
</success_criteria>
|
||||
143
src/tests/file-watcher.test.ts
Normal file
143
src/tests/file-watcher.test.ts
Normal file
|
|
@ -0,0 +1,143 @@
|
|||
import { test, afterEach } from "node:test";
|
||||
import assert from "node:assert";
|
||||
import { mkdtempSync, mkdirSync, writeFileSync } from "node:fs";
|
||||
import { join } from "node:path";
|
||||
import { tmpdir } from "node:os";
|
||||
import { setTimeout as delay } from "node:timers/promises";
|
||||
|
||||
import {
|
||||
startFileWatcher,
|
||||
stopFileWatcher,
|
||||
} from "../resources/extensions/gsd/file-watcher.ts";
|
||||
|
||||
function createTempAgentDir(): string {
|
||||
const tmp = mkdtempSync(join(tmpdir(), "gsd-fw-test-"));
|
||||
mkdirSync(join(tmp, "extensions"), { recursive: true });
|
||||
// Seed watched files so chokidar treats writes as "change" not "add"
|
||||
writeFileSync(join(tmp, "settings.json"), "{}");
|
||||
writeFileSync(join(tmp, "auth.json"), "{}");
|
||||
writeFileSync(join(tmp, "models.json"), "{}");
|
||||
return tmp;
|
||||
}
|
||||
|
||||
function createMockEventBus() {
|
||||
const events: { channel: string; data: unknown }[] = [];
|
||||
return {
|
||||
events,
|
||||
emit(channel: string, data: unknown) {
|
||||
events.push({ channel, data });
|
||||
},
|
||||
on(_channel: string, _handler: (data: unknown) => void) {
|
||||
return () => {};
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
// Tear the watcher down after every test — even a failing one — so a leaked
// watcher cannot keep the process alive or emit into later tests.
afterEach(async () => {
  await stopFileWatcher();
});

// Smoke test: a full start/stop cycle completes without throwing.
test("startFileWatcher and stopFileWatcher run without errors", async () => {
  const dir = createTempAgentDir();
  const bus = createMockEventBus();

  await startFileWatcher(dir, bus);
  await stopFileWatcher();
});

// stop must be idempotent: callable before any start (and after afterEach).
test("stopFileWatcher is safe to call when no watcher is active", async () => {
  await stopFileWatcher();
});

test("settings.json change emits settings-changed event", async () => {
  const dir = createTempAgentDir();
  const bus = createMockEventBus();

  await startFileWatcher(dir, bus);

  writeFileSync(join(dir, "settings.json"), JSON.stringify({ updated: true }));
  // Wait for debounce (300ms) + filesystem propagation
  await delay(600);

  // At least one emit expected; platform watchers may report extra change
  // events, so we assert presence rather than an exact count here.
  const matched = bus.events.filter((e) => e.channel === "settings-changed");
  assert.ok(matched.length > 0, "should emit settings-changed event");
});

test("auth.json change emits auth-changed event", async () => {
  const dir = createTempAgentDir();
  const bus = createMockEventBus();

  await startFileWatcher(dir, bus);

  writeFileSync(join(dir, "auth.json"), JSON.stringify({ token: "new" }));
  // Debounce window (300ms) plus headroom for filesystem propagation.
  await delay(600);

  const matched = bus.events.filter((e) => e.channel === "auth-changed");
  assert.ok(matched.length > 0, "should emit auth-changed event");
});

test("models.json change emits models-changed event", async () => {
  const dir = createTempAgentDir();
  const bus = createMockEventBus();

  await startFileWatcher(dir, bus);

  writeFileSync(join(dir, "models.json"), JSON.stringify({ model: "new" }));
  // Debounce window (300ms) plus headroom for filesystem propagation.
  await delay(600);

  const matched = bus.events.filter((e) => e.channel === "models-changed");
  assert.ok(matched.length > 0, "should emit models-changed event");
});

// Any file under extensions/ (not just the seeded ones) should map to the
// single aggregated "extensions-changed" channel.
test("extensions directory change emits extensions-changed event", async () => {
  const dir = createTempAgentDir();
  const bus = createMockEventBus();

  await startFileWatcher(dir, bus);

  writeFileSync(
    join(dir, "extensions", "my-ext.json"),
    JSON.stringify({ name: "test" }),
  );
  await delay(600);

  const matched = bus.events.filter(
    (e) => e.channel === "extensions-changed",
  );
  assert.ok(matched.length > 0, "should emit extensions-changed event");
});

test("unrelated file changes are ignored", async () => {
  const dir = createTempAgentDir();
  const bus = createMockEventBus();

  await startFileWatcher(dir, bus);
  // Wait for watcher to settle, then clear any residual events from setup
  await delay(400);
  bus.events.length = 0;

  // random.txt matches no EVENT_MAP entry and is outside extensions/,
  // so the watcher must emit nothing for it.
  writeFileSync(join(dir, "random.txt"), "hello");
  await delay(600);

  assert.strictEqual(bus.events.length, 0, "should not emit any events");
});

test("debouncing coalesces rapid changes into one event", async () => {
  const dir = createTempAgentDir();
  const bus = createMockEventBus();

  await startFileWatcher(dir, bus);

  // Rapid-fire writes — all land well inside one 300ms debounce window,
  // so exactly one settings-changed emit is expected after it elapses.
  for (let i = 0; i < 5; i++) {
    writeFileSync(join(dir, "settings.json"), JSON.stringify({ i }));
  }
  await delay(600);

  const matched = bus.events.filter((e) => e.channel === "settings-changed");
  assert.strictEqual(
    matched.length,
    1,
    "rapid changes should be debounced into a single event",
  );
});
||||
Loading…
Add table
Reference in a new issue