feat: add hashline edits — line-hash-anchored file editing
Implement hashline edit mode inspired by Oh My Pi's approach. Each line in a file is identified by a content hash (xxHash32, 2-char nibble alphabet), enabling the model to reference lines by stable LINE#ID tags instead of reproducing full line text. This eliminates the most common edit failure mode (slightly misquoted original text) and reduces output tokens. New files: - hashline.ts: core hash computation, formatting, parsing, validation, and edit application engine (pure JS xxHash32, no native deps) - hashline-edit.ts: AgentTool wrapper for hash-anchored file edits - hashline-read.ts: read tool variant that outputs LINE#ID:CONTENT format - hashline.test.ts: 54 tests covering all core operations Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
parent
c9135da035
commit
83445f4449
5 changed files with 1600 additions and 0 deletions
301
packages/pi-coding-agent/src/core/tools/hashline-edit.ts
Normal file
301
packages/pi-coding-agent/src/core/tools/hashline-edit.ts
Normal file
|
|
@@ -0,0 +1,301 @@
|
|||
/**
|
||||
* Hashline edit tool — applies file edits using line-hash anchors.
|
||||
*
|
||||
* The model references lines by `LINE#ID` tags from read output.
|
||||
* Each tag uniquely identifies a line, so edits remain stable even when lines shift.
|
||||
*/
|
||||
import type { AgentTool } from "@gsd/pi-agent-core";
|
||||
import { type Static, Type } from "@sinclair/typebox";
|
||||
import { constants } from "fs";
|
||||
import { access as fsAccess, readFile as fsReadFile, unlink as fsUnlink, writeFile as fsWriteFile } from "fs/promises";
|
||||
import {
|
||||
detectLineEnding,
|
||||
generateDiffString,
|
||||
normalizeToLF,
|
||||
restoreLineEndings,
|
||||
stripBom,
|
||||
} from "./edit-diff.js";
|
||||
import {
|
||||
type Anchor,
|
||||
applyHashlineEdits,
|
||||
computeLineHash,
|
||||
type HashlineEdit,
|
||||
hashlineParseText,
|
||||
parseTag,
|
||||
} from "./hashline.js";
|
||||
import { resolveToCwd } from "./path-utils.js";
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Schema
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
const hashlineEditItemSchema = Type.Object(
|
||||
{
|
||||
op: Type.Union([Type.Literal("replace"), Type.Literal("append"), Type.Literal("prepend")]),
|
||||
pos: Type.Optional(Type.String({ description: "Anchor tag (e.g. \"5#QQ\")" })),
|
||||
end: Type.Optional(Type.String({ description: "End anchor for range replace" })),
|
||||
lines: Type.Union([
|
||||
Type.Array(Type.String(), { description: "Replacement content lines" }),
|
||||
Type.String(),
|
||||
Type.Null(),
|
||||
]),
|
||||
},
|
||||
{ additionalProperties: false },
|
||||
);
|
||||
|
||||
const hashlineEditSchema = Type.Object(
|
||||
{
|
||||
path: Type.String({ description: "Path to the file to edit" }),
|
||||
edits: Type.Array(hashlineEditItemSchema, { description: "Edits to apply (referenced by LINE#ID tags from read output)" }),
|
||||
delete: Type.Optional(Type.Boolean({ description: "If true, delete the file" })),
|
||||
move: Type.Optional(Type.String({ description: "If set, move/rename the file to this path" })),
|
||||
},
|
||||
{ additionalProperties: false },
|
||||
);
|
||||
|
||||
export type HashlineEditInput = Static<typeof hashlineEditSchema>;
|
||||
export type HashlineEditItem = Static<typeof hashlineEditItemSchema>;
|
||||
|
||||
export interface HashlineEditToolDetails {
|
||||
/** Unified diff of the changes made */
|
||||
diff: string;
|
||||
/** Line number of the first change in the new file */
|
||||
firstChangedLine?: number;
|
||||
}
|
||||
|
||||
/**
|
||||
* Pluggable operations for the hashline edit tool.
|
||||
*/
|
||||
export interface HashlineEditOperations {
|
||||
readFile: (absolutePath: string) => Promise<Buffer>;
|
||||
writeFile: (absolutePath: string, content: string) => Promise<void>;
|
||||
access: (absolutePath: string) => Promise<void>;
|
||||
unlink: (absolutePath: string) => Promise<void>;
|
||||
}
|
||||
|
||||
const defaultHashlineEditOperations: HashlineEditOperations = {
|
||||
readFile: (path) => fsReadFile(path),
|
||||
writeFile: (path, content) => fsWriteFile(path, content, "utf-8"),
|
||||
access: (path) => fsAccess(path, constants.R_OK | constants.W_OK),
|
||||
unlink: (path) => fsUnlink(path),
|
||||
};
|
||||
|
||||
export interface HashlineEditToolOptions {
|
||||
operations?: HashlineEditOperations;
|
||||
}
|
||||
|
||||
/** Parse a tag, returning undefined instead of throwing on garbage. */
|
||||
function tryParseTag(raw: string): Anchor | undefined {
|
||||
try {
|
||||
return parseTag(raw);
|
||||
} catch {
|
||||
return undefined;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Map flat tool-schema edits into typed HashlineEdit objects.
|
||||
*/
|
||||
function resolveEditAnchors(edits: HashlineEditItem[]): HashlineEdit[] {
|
||||
const result: HashlineEdit[] = [];
|
||||
for (const edit of edits) {
|
||||
const lines = hashlineParseText(edit.lines);
|
||||
const tag = edit.pos ? tryParseTag(edit.pos) : undefined;
|
||||
const end = edit.end ? tryParseTag(edit.end) : undefined;
|
||||
|
||||
const op = edit.op === "append" || edit.op === "prepend" ? edit.op : "replace";
|
||||
switch (op) {
|
||||
case "replace": {
|
||||
if (tag && end) {
|
||||
result.push({ op: "replace", pos: tag, end, lines });
|
||||
} else if (tag || end) {
|
||||
result.push({ op: "replace", pos: tag || end!, lines });
|
||||
} else {
|
||||
throw new Error("Replace requires at least one anchor (pos or end).");
|
||||
}
|
||||
break;
|
||||
}
|
||||
case "append": {
|
||||
result.push({ op: "append", pos: tag ?? end, lines });
|
||||
break;
|
||||
}
|
||||
case "prepend": {
|
||||
result.push({ op: "prepend", pos: end ?? tag, lines });
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
return result;
|
||||
}
|
||||
|
||||
const HASHLINE_EDIT_DESCRIPTION = `Edit a file by referencing LINE#ID tags from read output. Each tag uniquely identifies a line via content hash, so edits remain stable even when lines shift.
|
||||
|
||||
Read the file first to get fresh tags. Submit one edit call per file with all operations batched.
|
||||
|
||||
Operations:
|
||||
- replace: Replace line(s) at pos (and optionally through end) with lines content
|
||||
- append: Insert lines after pos (omit pos for end of file)
|
||||
- prepend: Insert lines before pos (omit pos for beginning of file)
|
||||
|
||||
Set lines to null or [] to delete lines. Set delete:true to delete the file.`;
|
||||
|
||||
export function createHashlineEditTool(cwd: string, options?: HashlineEditToolOptions): AgentTool<typeof hashlineEditSchema> {
|
||||
const ops = options?.operations ?? defaultHashlineEditOperations;
|
||||
|
||||
return {
|
||||
name: "hashline_edit",
|
||||
label: "hashline_edit",
|
||||
description: HASHLINE_EDIT_DESCRIPTION,
|
||||
parameters: hashlineEditSchema,
|
||||
execute: async (
|
||||
_toolCallId: string,
|
||||
params: HashlineEditInput,
|
||||
signal?: AbortSignal,
|
||||
) => {
|
||||
const { path, edits, delete: deleteFile, move } = params;
|
||||
const absolutePath = resolveToCwd(path, cwd);
|
||||
|
||||
return new Promise<{
|
||||
content: Array<{ type: "text"; text: string }>;
|
||||
details: HashlineEditToolDetails | undefined;
|
||||
}>((resolve, reject) => {
|
||||
if (signal?.aborted) {
|
||||
reject(new Error("Operation aborted"));
|
||||
return;
|
||||
}
|
||||
|
||||
let aborted = false;
|
||||
const onAbort = () => {
|
||||
aborted = true;
|
||||
reject(new Error("Operation aborted"));
|
||||
};
|
||||
if (signal) {
|
||||
signal.addEventListener("abort", onAbort, { once: true });
|
||||
}
|
||||
|
||||
(async () => {
|
||||
try {
|
||||
// Handle delete
|
||||
if (deleteFile) {
|
||||
try {
|
||||
await ops.access(absolutePath);
|
||||
await ops.unlink(absolutePath);
|
||||
} catch {
|
||||
// File doesn't exist, that's fine for delete
|
||||
}
|
||||
if (signal) signal.removeEventListener("abort", onAbort);
|
||||
resolve({
|
||||
content: [{ type: "text", text: `Deleted ${path}` }],
|
||||
details: { diff: "" },
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
// Handle file creation (no existing file, anchorless appends/prepends)
|
||||
let fileExists = true;
|
||||
try {
|
||||
await ops.access(absolutePath);
|
||||
} catch {
|
||||
fileExists = false;
|
||||
}
|
||||
|
||||
if (!fileExists) {
|
||||
const lines: string[] = [];
|
||||
for (const edit of edits) {
|
||||
if ((edit.op === "append" || edit.op === "prepend") && !edit.pos && !edit.end) {
|
||||
if (edit.op === "prepend") {
|
||||
lines.unshift(...hashlineParseText(edit.lines));
|
||||
} else {
|
||||
lines.push(...hashlineParseText(edit.lines));
|
||||
}
|
||||
} else {
|
||||
throw new Error(`File not found: ${path}`);
|
||||
}
|
||||
}
|
||||
await ops.writeFile(absolutePath, lines.join("\n"));
|
||||
if (signal) signal.removeEventListener("abort", onAbort);
|
||||
resolve({
|
||||
content: [{ type: "text", text: `Created ${path}` }],
|
||||
details: { diff: "" },
|
||||
});
|
||||
return;
|
||||
}
|
||||
|
||||
if (aborted) return;
|
||||
|
||||
// Read file
|
||||
const rawContent = (await ops.readFile(absolutePath)).toString("utf-8");
|
||||
const { bom, text } = stripBom(rawContent);
|
||||
const originalEnding = detectLineEnding(text);
|
||||
const originalNormalized = normalizeToLF(text);
|
||||
|
||||
if (aborted) return;
|
||||
|
||||
// Resolve and apply edits
|
||||
const anchorEdits = resolveEditAnchors(edits);
|
||||
const result = applyHashlineEdits(originalNormalized, anchorEdits);
|
||||
|
||||
if (originalNormalized === result.lines && !move) {
|
||||
let diagnostic = `No changes made to ${path}. The edits produced identical content.`;
|
||||
if (result.noopEdits && result.noopEdits.length > 0) {
|
||||
const details = result.noopEdits
|
||||
.map(
|
||||
e =>
|
||||
`Edit ${e.editIndex}: replacement for ${e.loc} is identical to current content:\n ${e.loc}| ${e.current}`,
|
||||
)
|
||||
.join("\n");
|
||||
diagnostic += `\n${details}`;
|
||||
diagnostic +=
|
||||
"\nYour content must differ from what the file already contains. Re-read the file to see the current state.";
|
||||
}
|
||||
throw new Error(diagnostic);
|
||||
}
|
||||
|
||||
if (aborted) return;
|
||||
|
||||
// Write result
|
||||
const finalContent = bom + restoreLineEndings(result.lines, originalEnding);
|
||||
const writePath = move ? resolveToCwd(move, cwd) : absolutePath;
|
||||
await ops.writeFile(writePath, finalContent);
|
||||
|
||||
// If moved, delete original
|
||||
if (move && writePath !== absolutePath) {
|
||||
await ops.unlink(absolutePath);
|
||||
}
|
||||
|
||||
if (aborted) return;
|
||||
|
||||
if (signal) signal.removeEventListener("abort", onAbort);
|
||||
|
||||
const diffResult = generateDiffString(originalNormalized, result.lines);
|
||||
const resultText = move ? `Moved ${path} to ${move}` : `Updated ${path}`;
|
||||
const warningsBlock = result.warnings?.length
|
||||
? `\nWarnings:\n${result.warnings.join("\n")}`
|
||||
: "";
|
||||
|
||||
resolve({
|
||||
content: [
|
||||
{
|
||||
type: "text",
|
||||
text: `${resultText}${warningsBlock}`,
|
||||
},
|
||||
],
|
||||
details: {
|
||||
diff: diffResult.diff,
|
||||
firstChangedLine: result.firstChangedLine ?? diffResult.firstChangedLine,
|
||||
},
|
||||
});
|
||||
} catch (error: any) {
|
||||
if (signal) signal.removeEventListener("abort", onAbort);
|
||||
if (!aborted) {
|
||||
reject(error);
|
||||
}
|
||||
}
|
||||
})();
|
||||
});
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/** Default hashline edit tool using process.cwd() */
|
||||
export const hashlineEditTool = createHashlineEditTool(process.cwd());
|
||||
196
packages/pi-coding-agent/src/core/tools/hashline-read.ts
Normal file
196
packages/pi-coding-agent/src/core/tools/hashline-read.ts
Normal file
|
|
@@ -0,0 +1,196 @@
|
|||
/**
|
||||
* Hashline read tool — reads files with LINE#ID prefix on each line.
|
||||
*
|
||||
* Produces output like:
|
||||
* 1#QQ:function hello() {
|
||||
* 2#KX: return 42;
|
||||
* 3#NW:}
|
||||
*
|
||||
* These tags are used by the hashline_edit tool to address lines precisely.
|
||||
*/
|
||||
import type { AgentTool } from "@gsd/pi-agent-core";
|
||||
import type { ImageContent, TextContent } from "@gsd/pi-ai";
|
||||
import { type Static, Type } from "@sinclair/typebox";
|
||||
import { constants } from "fs";
|
||||
import { access as fsAccess, readFile as fsReadFile } from "fs/promises";
|
||||
import { formatDimensionNote, resizeImage } from "../../utils/image-resize.js";
|
||||
import { detectSupportedImageMimeTypeFromFile } from "../../utils/mime.js";
|
||||
import { formatHashLines } from "./hashline.js";
|
||||
import { resolveReadPath } from "./path-utils.js";
|
||||
import { DEFAULT_MAX_BYTES, DEFAULT_MAX_LINES, formatSize, type TruncationResult, truncateHead } from "./truncate.js";
|
||||
|
||||
const readSchema = Type.Object({
|
||||
path: Type.String({ description: "Path to the file to read (relative or absolute)" }),
|
||||
offset: Type.Optional(Type.Number({ description: "Line number to start reading from (1-indexed)" })),
|
||||
limit: Type.Optional(Type.Number({ description: "Maximum number of lines to read" })),
|
||||
});
|
||||
|
||||
export type HashlineReadToolInput = Static<typeof readSchema>;
|
||||
|
||||
export interface HashlineReadToolDetails {
|
||||
truncation?: TruncationResult;
|
||||
}
|
||||
|
||||
/**
|
||||
* Pluggable operations for the hashline read tool.
|
||||
*/
|
||||
export interface HashlineReadOperations {
|
||||
readFile: (absolutePath: string) => Promise<Buffer>;
|
||||
access: (absolutePath: string) => Promise<void>;
|
||||
detectImageMimeType?: (absolutePath: string) => Promise<string | null | undefined>;
|
||||
}
|
||||
|
||||
const defaultReadOperations: HashlineReadOperations = {
|
||||
readFile: (path) => fsReadFile(path),
|
||||
access: (path) => fsAccess(path, constants.R_OK),
|
||||
detectImageMimeType: detectSupportedImageMimeTypeFromFile,
|
||||
};
|
||||
|
||||
export interface HashlineReadToolOptions {
|
||||
autoResizeImages?: boolean;
|
||||
operations?: HashlineReadOperations;
|
||||
}
|
||||
|
||||
export function createHashlineReadTool(cwd: string, options?: HashlineReadToolOptions): AgentTool<typeof readSchema> {
|
||||
const autoResizeImages = options?.autoResizeImages ?? true;
|
||||
const ops = options?.operations ?? defaultReadOperations;
|
||||
|
||||
return {
|
||||
name: "read",
|
||||
label: "read",
|
||||
description: `Read a file with LINE#ID hash anchors on each line. These anchors are used by hashline_edit for precise edits. Output format: LINENUM#HASH:CONTENT. Supports text files and images. For text files, output is truncated to ${DEFAULT_MAX_LINES} lines or ${DEFAULT_MAX_BYTES / 1024}KB. Use offset/limit for large files.`,
|
||||
parameters: readSchema,
|
||||
execute: async (
|
||||
_toolCallId: string,
|
||||
{ path, offset, limit }: { path: string; offset?: number; limit?: number },
|
||||
signal?: AbortSignal,
|
||||
) => {
|
||||
const absolutePath = resolveReadPath(path, cwd);
|
||||
|
||||
return new Promise<{ content: (TextContent | ImageContent)[]; details: HashlineReadToolDetails | undefined }>(
|
||||
(resolve, reject) => {
|
||||
if (signal?.aborted) {
|
||||
reject(new Error("Operation aborted"));
|
||||
return;
|
||||
}
|
||||
|
||||
let aborted = false;
|
||||
const onAbort = () => {
|
||||
aborted = true;
|
||||
reject(new Error("Operation aborted"));
|
||||
};
|
||||
if (signal) {
|
||||
signal.addEventListener("abort", onAbort, { once: true });
|
||||
}
|
||||
|
||||
(async () => {
|
||||
try {
|
||||
await ops.access(absolutePath);
|
||||
|
||||
if (aborted) return;
|
||||
|
||||
const mimeType = ops.detectImageMimeType ? await ops.detectImageMimeType(absolutePath) : undefined;
|
||||
|
||||
let content: (TextContent | ImageContent)[];
|
||||
let details: HashlineReadToolDetails | undefined;
|
||||
|
||||
if (mimeType) {
|
||||
// Image handling (identical to standard read tool)
|
||||
const buffer = await ops.readFile(absolutePath);
|
||||
const base64 = buffer.toString("base64");
|
||||
|
||||
if (autoResizeImages) {
|
||||
const resized = await resizeImage({ type: "image", data: base64, mimeType });
|
||||
const dimensionNote = formatDimensionNote(resized);
|
||||
let textNote = `Read image file [${resized.mimeType}]`;
|
||||
if (dimensionNote) {
|
||||
textNote += `\n${dimensionNote}`;
|
||||
}
|
||||
content = [
|
||||
{ type: "text", text: textNote },
|
||||
{ type: "image", data: resized.data, mimeType: resized.mimeType },
|
||||
];
|
||||
} else {
|
||||
content = [
|
||||
{ type: "text", text: `Read image file [${mimeType}]` },
|
||||
{ type: "image", data: base64, mimeType },
|
||||
];
|
||||
}
|
||||
} else {
|
||||
// Text file — format with hashline prefixes
|
||||
const buffer = await ops.readFile(absolutePath);
|
||||
const textContent = buffer.toString("utf-8");
|
||||
const allLines = textContent.split("\n");
|
||||
const totalFileLines = allLines.length;
|
||||
|
||||
const startLine = offset ? Math.max(0, offset - 1) : 0;
|
||||
const startLineDisplay = startLine + 1;
|
||||
|
||||
if (startLine >= allLines.length) {
|
||||
throw new Error(`Offset ${offset} is beyond end of file (${allLines.length} lines total)`);
|
||||
}
|
||||
|
||||
let selectedContent: string;
|
||||
let userLimitedLines: number | undefined;
|
||||
if (limit !== undefined) {
|
||||
const endLine = Math.min(startLine + limit, allLines.length);
|
||||
selectedContent = allLines.slice(startLine, endLine).join("\n");
|
||||
userLimitedLines = endLine - startLine;
|
||||
} else {
|
||||
selectedContent = allLines.slice(startLine).join("\n");
|
||||
}
|
||||
|
||||
// Apply truncation
|
||||
const truncation = truncateHead(selectedContent);
|
||||
|
||||
let outputText: string;
|
||||
|
||||
if (truncation.firstLineExceedsLimit) {
|
||||
const firstLineSize = formatSize(Buffer.byteLength(allLines[startLine], "utf-8"));
|
||||
outputText = `[Line ${startLineDisplay} is ${firstLineSize}, exceeds ${formatSize(DEFAULT_MAX_BYTES)} limit. Use bash: sed -n '${startLineDisplay}p' ${path} | head -c ${DEFAULT_MAX_BYTES}]`;
|
||||
details = { truncation };
|
||||
} else if (truncation.truncated) {
|
||||
const endLineDisplay = startLineDisplay + truncation.outputLines - 1;
|
||||
const nextOffset = endLineDisplay + 1;
|
||||
|
||||
// Format with hashline prefixes
|
||||
outputText = formatHashLines(truncation.content, startLineDisplay);
|
||||
|
||||
if (truncation.truncatedBy === "lines") {
|
||||
outputText += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines}. Use offset=${nextOffset} to continue.]`;
|
||||
} else {
|
||||
outputText += `\n\n[Showing lines ${startLineDisplay}-${endLineDisplay} of ${totalFileLines} (${formatSize(DEFAULT_MAX_BYTES)} limit). Use offset=${nextOffset} to continue.]`;
|
||||
}
|
||||
details = { truncation };
|
||||
} else if (userLimitedLines !== undefined && startLine + userLimitedLines < allLines.length) {
|
||||
const remaining = allLines.length - (startLine + userLimitedLines);
|
||||
const nextOffset = startLine + userLimitedLines + 1;
|
||||
|
||||
outputText = formatHashLines(truncation.content, startLineDisplay);
|
||||
outputText += `\n\n[${remaining} more lines in file. Use offset=${nextOffset} to continue.]`;
|
||||
} else {
|
||||
outputText = formatHashLines(truncation.content, startLineDisplay);
|
||||
}
|
||||
|
||||
content = [{ type: "text", text: outputText }];
|
||||
}
|
||||
|
||||
if (aborted) return;
|
||||
|
||||
if (signal) signal.removeEventListener("abort", onAbort);
|
||||
resolve({ content, details });
|
||||
} catch (error: any) {
|
||||
if (signal) signal.removeEventListener("abort", onAbort);
|
||||
if (!aborted) {
|
||||
reject(error);
|
||||
}
|
||||
}
|
||||
})();
|
||||
},
|
||||
);
|
||||
},
|
||||
};
|
||||
}
|
||||
|
||||
/** Default hashline read tool using process.cwd() */
|
||||
export const hashlineReadTool = createHashlineReadTool(process.cwd());
|
||||
456
packages/pi-coding-agent/src/core/tools/hashline.test.ts
Normal file
456
packages/pi-coding-agent/src/core/tools/hashline.test.ts
Normal file
|
|
@@ -0,0 +1,456 @@
|
|||
import { describe, it } from "node:test";
|
||||
import assert from "node:assert/strict";
|
||||
import {
|
||||
computeLineHash,
|
||||
formatHashLines,
|
||||
formatLineTag,
|
||||
parseTag,
|
||||
validateLineRef,
|
||||
applyHashlineEdits,
|
||||
HashlineMismatchError,
|
||||
hashlineParseText,
|
||||
stripNewLinePrefixes,
|
||||
type HashlineEdit,
|
||||
type Anchor,
|
||||
} from "./hashline.ts";
|
||||
|
||||
function makeTag(line: number, content: string): Anchor {
|
||||
return parseTag(formatLineTag(line, content));
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// computeLineHash
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
describe("computeLineHash", () => {
|
||||
it("returns 2-character hash string from nibble alphabet", () => {
|
||||
const hash = computeLineHash(1, "hello");
|
||||
assert.match(hash, /^[ZPMQVRWSNKTXJBYH]{2}$/);
|
||||
});
|
||||
|
||||
it("same content at same line produces same hash", () => {
|
||||
const a = computeLineHash(1, "hello");
|
||||
const b = computeLineHash(1, "hello");
|
||||
assert.equal(a, b);
|
||||
});
|
||||
|
||||
it("different content produces different hash", () => {
|
||||
const a = computeLineHash(1, "hello");
|
||||
const b = computeLineHash(1, "world");
|
||||
assert.notEqual(a, b);
|
||||
});
|
||||
|
||||
it("empty line produces valid hash", () => {
|
||||
const hash = computeLineHash(1, "");
|
||||
assert.match(hash, /^[ZPMQVRWSNKTXJBYH]{2}$/);
|
||||
});
|
||||
|
||||
it("uses line number for symbol-only lines", () => {
|
||||
const a = computeLineHash(1, "***");
|
||||
const b = computeLineHash(2, "***");
|
||||
assert.notEqual(a, b);
|
||||
});
|
||||
|
||||
it("does not use line number for alphanumeric lines", () => {
|
||||
const a = computeLineHash(1, "hello");
|
||||
const b = computeLineHash(2, "hello");
|
||||
assert.equal(a, b);
|
||||
});
|
||||
|
||||
it("strips trailing whitespace before hashing", () => {
|
||||
const a = computeLineHash(1, "hello");
|
||||
const b = computeLineHash(1, "hello ");
|
||||
assert.equal(a, b);
|
||||
});
|
||||
|
||||
it("strips CR before hashing", () => {
|
||||
const a = computeLineHash(1, "hello");
|
||||
const b = computeLineHash(1, "hello\r");
|
||||
assert.equal(a, b);
|
||||
});
|
||||
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// formatHashLines
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
describe("formatHashLines", () => {
|
||||
it("formats single line", () => {
|
||||
const result = formatHashLines("hello");
|
||||
const hash = computeLineHash(1, "hello");
|
||||
assert.equal(result, `1#${hash}:hello`);
|
||||
});
|
||||
|
||||
it("formats multiple lines with 1-indexed numbers", () => {
|
||||
const result = formatHashLines("foo\nbar\nbaz");
|
||||
const lines = result.split("\n");
|
||||
assert.equal(lines.length, 3);
|
||||
assert.ok(lines[0].startsWith("1#"));
|
||||
assert.ok(lines[1].startsWith("2#"));
|
||||
assert.ok(lines[2].startsWith("3#"));
|
||||
});
|
||||
|
||||
it("respects custom startLine", () => {
|
||||
const result = formatHashLines("foo\nbar", 10);
|
||||
const lines = result.split("\n");
|
||||
assert.ok(lines[0].startsWith("10#"));
|
||||
assert.ok(lines[1].startsWith("11#"));
|
||||
});
|
||||
|
||||
it("handles empty lines in content", () => {
|
||||
const result = formatHashLines("foo\n\nbar");
|
||||
const lines = result.split("\n");
|
||||
assert.equal(lines.length, 3);
|
||||
assert.match(lines[1], /^2#[ZPMQVRWSNKTXJBYH]{2}:$/);
|
||||
});
|
||||
|
||||
it("round-trips with computeLineHash", () => {
|
||||
const content = "function hello() {\n return 42;\n}";
|
||||
const formatted = formatHashLines(content);
|
||||
const lines = formatted.split("\n");
|
||||
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const match = lines[i].match(/^(\d+)#([ZPMQVRWSNKTXJBYH]{2}):(.*)$/);
|
||||
assert.ok(match, `Line ${i} should match hashline format`);
|
||||
const lineNum = Number.parseInt(match![1], 10);
|
||||
const hash = match![2];
|
||||
const lineContent = match![3];
|
||||
assert.equal(computeLineHash(lineNum, lineContent), hash);
|
||||
}
|
||||
});
|
||||
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// parseTag
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
describe("parseTag", () => {
|
||||
it("parses valid reference", () => {
|
||||
const ref = parseTag("5#QQ");
|
||||
assert.deepEqual(ref, { line: 5, hash: "QQ" });
|
||||
});
|
||||
|
||||
it("rejects single-character hash", () => {
|
||||
assert.throws(() => parseTag("1#Q"), /Invalid line reference/);
|
||||
});
|
||||
|
||||
it("parses long hash by taking strict 2-char prefix", () => {
|
||||
const ref = parseTag("100#QQQQ");
|
||||
assert.deepEqual(ref, { line: 100, hash: "QQ" });
|
||||
});
|
||||
|
||||
it("rejects missing separator", () => {
|
||||
assert.throws(() => parseTag("5QQ"), /Invalid line reference/);
|
||||
});
|
||||
|
||||
it("rejects non-numeric line", () => {
|
||||
assert.throws(() => parseTag("abc#Q"), /Invalid line reference/);
|
||||
});
|
||||
|
||||
it("rejects line number 0", () => {
|
||||
assert.throws(() => parseTag("0#QQ"), /Line number must be >= 1/);
|
||||
});
|
||||
|
||||
it("rejects empty string", () => {
|
||||
assert.throws(() => parseTag(""), /Invalid line reference/);
|
||||
});
|
||||
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// validateLineRef
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
describe("validateLineRef", () => {
|
||||
it("accepts valid ref with matching hash", () => {
|
||||
const lines = ["hello", "world"];
|
||||
const hash = computeLineHash(1, "hello");
|
||||
assert.doesNotThrow(() => validateLineRef({ line: 1, hash }, lines));
|
||||
});
|
||||
|
||||
it("rejects line out of range", () => {
|
||||
const lines = ["hello"];
|
||||
const hash = computeLineHash(1, "hello");
|
||||
assert.throws(() => validateLineRef({ line: 2, hash }, lines), /does not exist/);
|
||||
});
|
||||
|
||||
it("rejects mismatched hash", () => {
|
||||
const lines = ["hello", "world"];
|
||||
assert.throws(() => validateLineRef({ line: 1, hash: "ZZ" }, lines), /has changed since last read/);
|
||||
});
|
||||
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// applyHashlineEdits — replace
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
describe("applyHashlineEdits — replace", () => {
|
||||
it("replaces single line", () => {
|
||||
const content = "aaa\nbbb\nccc";
|
||||
const edits: HashlineEdit[] = [{ op: "replace", pos: makeTag(2, "bbb"), lines: ["BBB"] }];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "aaa\nBBB\nccc");
|
||||
assert.equal(result.firstChangedLine, 2);
|
||||
});
|
||||
|
||||
it("range replace (shrink)", () => {
|
||||
const content = "aaa\nbbb\nccc\nddd";
|
||||
const edits: HashlineEdit[] = [{ op: "replace", pos: makeTag(2, "bbb"), end: makeTag(3, "ccc"), lines: ["ONE"] }];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "aaa\nONE\nddd");
|
||||
});
|
||||
|
||||
it("range replace (same count)", () => {
|
||||
const content = "aaa\nbbb\nccc\nddd";
|
||||
const edits: HashlineEdit[] = [
|
||||
{ op: "replace", pos: makeTag(2, "bbb"), end: makeTag(3, "ccc"), lines: ["XXX", "YYY"] },
|
||||
];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "aaa\nXXX\nYYY\nddd");
|
||||
});
|
||||
|
||||
it("replaces first line", () => {
|
||||
const content = "first\nsecond\nthird";
|
||||
const edits: HashlineEdit[] = [{ op: "replace", pos: makeTag(1, "first"), lines: ["FIRST"] }];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "FIRST\nsecond\nthird");
|
||||
});
|
||||
|
||||
it("replaces last line", () => {
|
||||
const content = "first\nsecond\nthird";
|
||||
const edits: HashlineEdit[] = [{ op: "replace", pos: makeTag(3, "third"), lines: ["THIRD"] }];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "first\nsecond\nTHIRD");
|
||||
});
|
||||
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// applyHashlineEdits — delete
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
describe("applyHashlineEdits — delete", () => {
|
||||
it("deletes single line", () => {
|
||||
const content = "aaa\nbbb\nccc";
|
||||
const edits: HashlineEdit[] = [{ op: "replace", pos: makeTag(2, "bbb"), lines: [] }];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "aaa\nccc");
|
||||
});
|
||||
|
||||
it("deletes range of lines", () => {
|
||||
const content = "aaa\nbbb\nccc\nddd";
|
||||
const edits: HashlineEdit[] = [{ op: "replace", pos: makeTag(2, "bbb"), end: makeTag(3, "ccc"), lines: [] }];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "aaa\nddd");
|
||||
});
|
||||
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// applyHashlineEdits — append
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
describe("applyHashlineEdits — append", () => {
|
||||
it("inserts after a line", () => {
|
||||
const content = "aaa\nbbb\nccc";
|
||||
const edits: HashlineEdit[] = [{ op: "append", pos: makeTag(1, "aaa"), lines: ["NEW"] }];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "aaa\nNEW\nbbb\nccc");
|
||||
assert.equal(result.firstChangedLine, 2);
|
||||
});
|
||||
|
||||
it("inserts multiple lines", () => {
|
||||
const content = "aaa\nbbb";
|
||||
const edits: HashlineEdit[] = [{ op: "append", pos: makeTag(1, "aaa"), lines: ["x", "y", "z"] }];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "aaa\nx\ny\nz\nbbb");
|
||||
});
|
||||
|
||||
it("inserts at EOF without anchors", () => {
|
||||
const content = "aaa\nbbb";
|
||||
const edits = [{ op: "append", lines: ["NEW"] }] as unknown as HashlineEdit[];
|
||||
const result = applyHashlineEdits(content, edits);
|
||||
assert.equal(result.lines, "aaa\nbbb\nNEW");
|
||||
});
|
||||
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// applyHashlineEdits — prepend
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
// Covers the `prepend` op: anchored insert-before, anchor-less insert at
// beginning-of-file, and interaction with `append` on the same anchor line.
describe("applyHashlineEdits — prepend", () => {
  it("inserts before a line", () => {
    const content = "aaa\nbbb\nccc";
    const edits: HashlineEdit[] = [{ op: "prepend", pos: makeTag(2, "bbb"), lines: ["NEW"] }];
    const result = applyHashlineEdits(content, edits);
    assert.equal(result.lines, "aaa\nNEW\nbbb\nccc");
  });

  it("prepends at BOF without anchor", () => {
    const content = "aaa\nbbb";
    // `pos` omitted — an anchor-less prepend targets beginning-of-file.
    const edits = [{ op: "prepend", lines: ["NEW"] }] as unknown as HashlineEdit[];
    const result = applyHashlineEdits(content, edits);
    assert.equal(result.lines, "NEW\naaa\nbbb");
  });

  it("insert before and insert after at same line produce correct order", () => {
    const content = "aaa\nbbb\nccc";
    const edits: HashlineEdit[] = [
      { op: "prepend", pos: makeTag(2, "bbb"), lines: ["BEFORE"] },
      { op: "append", pos: makeTag(2, "bbb"), lines: ["AFTER"] },
    ];
    const result = applyHashlineEdits(content, edits);
    // The anchor line itself stays put; both inserted lines surround it.
    assert.equal(result.lines, "aaa\nBEFORE\nbbb\nAFTER\nccc");
  });
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// applyHashlineEdits — multiple edits
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
// Covers batching: independent edits in one call, and the empty-batch no-op.
describe("applyHashlineEdits — multiple edits", () => {
  it("applies two non-overlapping replaces (bottom-up safe)", () => {
    const content = "aaa\nbbb\nccc\nddd\neee";
    const edits: HashlineEdit[] = [
      { op: "replace", pos: makeTag(2, "bbb"), lines: ["BBB"] },
      { op: "replace", pos: makeTag(4, "ddd"), lines: ["DDD"] },
    ];
    const result = applyHashlineEdits(content, edits);
    assert.equal(result.lines, "aaa\nBBB\nccc\nDDD\neee");
  });

  it("empty edits array is a no-op", () => {
    const content = "aaa\nbbb";
    const result = applyHashlineEdits(content, []);
    assert.equal(result.lines, content);
    // No edit applied — there is no "first changed line".
    assert.equal(result.firstChangedLine, undefined);
  });
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// applyHashlineEdits — error cases
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
// Covers failure modes: stale hashes (with the self-correcting >>> report),
// out-of-range line numbers, and inverted ranges.
describe("applyHashlineEdits — errors", () => {
  it("rejects stale hash", () => {
    const content = "aaa\nbbb\nccc";
    // "QQ" is (almost certainly) not the hash of "bbb" at line 2.
    const edits: HashlineEdit[] = [{ op: "replace", pos: parseTag("2#QQ"), lines: ["BBB"] }];
    assert.throws(() => applyHashlineEdits(content, edits), (err: any) => err instanceof HashlineMismatchError);
  });

  it("stale hash error shows >>> markers with correct hashes", () => {
    const content = "aaa\nbbb\nccc\nddd\neee";
    const edits: HashlineEdit[] = [{ op: "replace", pos: parseTag("2#QQ"), lines: ["BBB"] }];

    try {
      applyHashlineEdits(content, edits);
      assert.fail("should have thrown");
    } catch (err: any) {
      assert.ok(err instanceof HashlineMismatchError);
      // The message marks the mismatched line and prints its CURRENT tag,
      // so the caller can retry without re-reading the file.
      assert.ok(err.message.includes(">>>"));
      const correctHash = computeLineHash(2, "bbb");
      assert.ok(err.message.includes(`2#${correctHash}:bbb`));
    }
  });

  it("rejects out-of-range line", () => {
    const content = "aaa\nbbb";
    const edits: HashlineEdit[] = [{ op: "replace", pos: parseTag("10#ZZ"), lines: ["X"] }];
    assert.throws(() => applyHashlineEdits(content, edits), /does not exist/);
  });

  it("rejects range with start > end", () => {
    const content = "aaa\nbbb\nccc\nddd\neee";
    const edits: HashlineEdit[] = [{ op: "replace", pos: makeTag(5, "eee"), end: makeTag(2, "bbb"), lines: ["X"] }];
    assert.throws(() => applyHashlineEdits(content, edits));
  });
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// stripNewLinePrefixes
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
// Covers the prefix-stripping heuristics: diff '+' markers, hashline tags,
// and the guards that protect Markdown lists and shell-style comments.
describe("stripNewLinePrefixes", () => {
  it("strips leading '+' when majority of lines start with '+'", () => {
    const lines = ["+line one", "+line two", "+line three"];
    assert.deepEqual(stripNewLinePrefixes(lines), ["line one", "line two", "line three"]);
  });

  it("does NOT strip leading '-' from Markdown list items", () => {
    const lines = ["- item one", "- item two", "- item three"];
    assert.deepEqual(stripNewLinePrefixes(lines), ["- item one", "- item two", "- item three"]);
  });

  it("strips hashline prefixes when all non-empty lines carry them", () => {
    const lines = ["1#WQ:foo", "2#TZ:bar", "3#HX:baz"];
    assert.deepEqual(stripNewLinePrefixes(lines), ["foo", "bar", "baz"]);
  });

  it("does NOT strip hashline prefixes when any non-empty line is plain content", () => {
    // Mixed input — the heuristic requires ALL non-empty lines to carry a tag.
    const lines = ["1#WQ:foo", "bar", "3#HX:baz"];
    assert.deepEqual(stripNewLinePrefixes(lines), ["1#WQ:foo", "bar", "3#HX:baz"]);
  });

  it("does NOT strip comment lines that look like hashline prefixes", () => {
    assert.deepEqual(stripNewLinePrefixes([" # Note: Using a fixed version"]), [" # Note: Using a fixed version"]);
    assert.deepEqual(stripNewLinePrefixes(["# TODO: remove this"]), ["# TODO: remove this"]);
  });
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// hashlineParseText
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
// Covers input normalization: null, pass-through arrays, string splitting,
// diff-marker stripping, and trailing-newline trimming.
describe("hashlineParseText", () => {
  it("returns empty array for null", () => {
    assert.deepEqual(hashlineParseText(null), []);
  });

  it("returns array input as-is when no strip heuristic applies", () => {
    const input = ["- [x] done", "- [ ] todo"];
    // Identity check (assert.equal): the SAME array comes back untouched.
    assert.equal(hashlineParseText(input), input);
  });

  it("splits string on newline and preserves Markdown list '-' prefix", () => {
    const result = hashlineParseText("- item one\n- item two\n- item three");
    assert.deepEqual(result, ["- item one", "- item two", "- item three"]);
  });

  it("strips '+' diff markers from string input", () => {
    const result = hashlineParseText("+line one\n+line two");
    assert.deepEqual(result, ["line one", "line two"]);
  });

  it("still strips trailing empty from string split", () => {
    assert.deepEqual(hashlineParseText("foo\n"), ["foo"]);
  });
});
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Auto-correction heuristics
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
// Covers the auto-correction heuristics: off-by-one range ends that would
// duplicate a surviving line, and "\t" written as literal backslash-t.
describe("applyHashlineEdits — heuristics", () => {
  it("auto-corrects off-by-one range end that duplicates a closing brace", () => {
    const content = "if (ok) {\n run();\n}\nafter();";
    const edits: HashlineEdit[] = [
      {
        op: "replace",
        pos: makeTag(1, "if (ok) {"),
        // Range ends at line 2, but the replacement re-supplies the "}" that
        // lives on line 3 — the engine should drop the duplicate.
        end: makeTag(2, " run();"),
        lines: ["if (ok) {", " runSafe();", "}"],
      },
    ];
    const result = applyHashlineEdits(content, edits);
    assert.equal(result.lines, "if (ok) {\n runSafe();\n}\nafter();");
    assert.ok(result.warnings);
    assert.equal(result.warnings!.length, 1);
    assert.ok(result.warnings![0].includes("Auto-corrected range replace"));
  });

  it("auto-corrects escaped tab indentation", () => {
    const content = "root\n\tchild\n\t\tvalue\nend";
    // Replacement uses literal backslash-t, not real tab characters.
    const edits: HashlineEdit[] = [{ op: "replace", pos: makeTag(3, "\t\tvalue"), lines: ["\\t\\treplaced"] }];
    const result = applyHashlineEdits(content, edits);
    assert.equal(result.lines, "root\n\tchild\n\t\treplaced\nend");
    assert.ok(result.warnings);
    assert.ok(result.warnings![0].includes("Auto-corrected escaped tab indentation"));
  });
});
|
||||
594
packages/pi-coding-agent/src/core/tools/hashline.ts
Normal file
594
packages/pi-coding-agent/src/core/tools/hashline.ts
Normal file
|
|
@ -0,0 +1,594 @@
|
|||
/**
|
||||
* Hashline edit mode — a line-addressable edit format using content-hash anchors.
|
||||
*
|
||||
* Each line in a file is identified by its 1-indexed line number and a short
|
||||
* hash derived from the normalized line text (xxHash32, truncated to 2 chars
|
||||
* from a custom nibble alphabet).
|
||||
*
|
||||
* The combined `LINE#ID` reference acts as both an address and a staleness check:
|
||||
* if the file has changed since the caller last read it, hash mismatches are caught
|
||||
* before any mutation occurs.
|
||||
*
|
||||
* Displayed format: `LINENUM#HASH:TEXT`
|
||||
* Reference format: `"LINENUM#HASH"` (e.g. `"5#QQ"`)
|
||||
*
|
||||
* Adapted from Oh My Pi's hashline implementation for Node.js (no Bun dependency).
|
||||
*/
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// xxHash32 — pure JS implementation (no native dependencies)
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
// The five magic primes from the reference xxHash32 implementation.
const PRIME32_1 = 0x9e3779b1;
const PRIME32_2 = 0x85ebca77;
const PRIME32_3 = 0xc2b2ae3d;
const PRIME32_4 = 0x27d4eb2f;
const PRIME32_5 = 0x165667b1;
|
||||
|
||||
function rotl32(val: number, bits: number): number {
|
||||
return ((val << bits) | (val >>> (32 - bits))) >>> 0;
|
||||
}
|
||||
|
||||
function imul32(a: number, b: number): number {
|
||||
return Math.imul(a, b) >>> 0;
|
||||
}
|
||||
|
||||
/**
 * Pure JS xxHash32 operating on a UTF-8 encoded string.
 * Matches Bun.hash.xxHash32(str, seed) behavior.
 *
 * Follows the standard XXH32 structure: a 4-lane accumulator phase for
 * 16-byte stripes, a 4-byte tail loop, a byte tail loop, then avalanche.
 */
function xxHash32(input: string, seed: number): number {
  const buf = Buffer.from(input, "utf-8");
  const len = buf.length;
  let h32: number;
  let i = 0;

  // Accumulator phase: only runs when there is at least one full 16-byte stripe.
  if (len >= 16) {
    let v1 = (seed + PRIME32_1 + PRIME32_2) >>> 0;
    let v2 = (seed + PRIME32_2) >>> 0;
    let v3 = (seed + 0) >>> 0;
    let v4 = (seed - PRIME32_1) >>> 0;

    // Each lane consumes one little-endian u32 per stripe.
    while (i <= len - 16) {
      v1 = (imul32(rotl32((v1 + imul32(buf.readUInt32LE(i), PRIME32_2)) >>> 0, 13), PRIME32_1)) >>> 0;
      i += 4;
      v2 = (imul32(rotl32((v2 + imul32(buf.readUInt32LE(i), PRIME32_2)) >>> 0, 13), PRIME32_1)) >>> 0;
      i += 4;
      v3 = (imul32(rotl32((v3 + imul32(buf.readUInt32LE(i), PRIME32_2)) >>> 0, 13), PRIME32_1)) >>> 0;
      i += 4;
      v4 = (imul32(rotl32((v4 + imul32(buf.readUInt32LE(i), PRIME32_2)) >>> 0, 13), PRIME32_1)) >>> 0;
      i += 4;
    }

    // Merge the four lanes.
    h32 = (rotl32(v1, 1) + rotl32(v2, 7) + rotl32(v3, 12) + rotl32(v4, 18)) >>> 0;
  } else {
    // Short-input path: no stripes, start from the seeded constant.
    h32 = (seed + PRIME32_5) >>> 0;
  }

  h32 = (h32 + len) >>> 0;

  // Tail: remaining whole u32 words.
  while (i <= len - 4) {
    h32 = (h32 + imul32(buf.readUInt32LE(i), PRIME32_3)) >>> 0;
    h32 = imul32(rotl32(h32, 17), PRIME32_4);
    i += 4;
  }

  // Tail: remaining individual bytes.
  while (i < len) {
    h32 = (h32 + imul32(buf[i], PRIME32_5)) >>> 0;
    h32 = imul32(rotl32(h32, 11), PRIME32_1);
    i += 1;
  }

  // Final avalanche mix.
  h32 = imul32(h32 ^ (h32 >>> 15), PRIME32_2);
  h32 = imul32(h32 ^ (h32 >>> 13), PRIME32_3);
  h32 = (h32 ^ (h32 >>> 16)) >>> 0;

  return h32;
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Hash Computation
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
/** A parsed `LINE#ID` reference: 1-indexed line number plus a 2-char content hash. */
export type Anchor = { line: number; hash: string };
/**
 * A single edit operation. `replace` rewrites the anchored line (or the
 * inclusive `pos`..`end` range); `append` inserts after `pos` (or at EOF when
 * `pos` is omitted); `prepend` inserts before `pos` (or at BOF when omitted).
 */
export type HashlineEdit =
  | { op: "replace"; pos: Anchor; end?: Anchor; lines: string[] }
  | { op: "append"; pos?: Anchor; lines: string[] }
  | { op: "prepend"; pos?: Anchor; lines: string[] };

// 16-character nibble alphabet used to render one hash byte as two letters.
const NIBBLE_STR = "ZPMQVRWSNKTXJBYH";

// Precomputed table mapping every byte value (0-255) to its 2-letter tag.
const DICT = Array.from({ length: 256 }, (_, i) => {
  const h = i >>> 4; // high nibble
  const l = i & 0x0f; // low nibble
  return `${NIBBLE_STR[h]}${NIBBLE_STR[l]}`;
});

// Matches any Unicode letter or digit — used to spot "significant" lines.
const RE_SIGNIFICANT = /[\p{L}\p{N}]/u;
|
||||
|
||||
/**
|
||||
* Compute a short hash of a single line.
|
||||
*
|
||||
* Uses xxHash32 on a trailing-whitespace-trimmed, CR-stripped line, truncated to 2 chars
|
||||
* from the nibble alphabet. For lines containing no alphanumeric characters (only
|
||||
* punctuation/symbols/whitespace), the line number is mixed in to reduce hash collisions.
|
||||
*/
|
||||
export function computeLineHash(idx: number, line: string): string {
|
||||
line = line.replace(/\r/g, "").trimEnd();
|
||||
|
||||
let seed = 0;
|
||||
if (!RE_SIGNIFICANT.test(line)) {
|
||||
seed = idx;
|
||||
}
|
||||
return DICT[xxHash32(line, seed) & 0xff];
|
||||
}
|
||||
|
||||
/**
|
||||
* Formats a tag given the line number and text.
|
||||
*/
|
||||
export function formatLineTag(line: number, text: string): string {
|
||||
return `${line}#${computeLineHash(line, text)}`;
|
||||
}
|
||||
|
||||
/**
|
||||
* Format file text with hashline prefixes for display.
|
||||
*
|
||||
* Each line becomes `LINENUM#HASH:TEXT` where LINENUM is 1-indexed.
|
||||
*/
|
||||
export function formatHashLines(text: string, startLine = 1): string {
|
||||
const lines = text.split("\n");
|
||||
return lines
|
||||
.map((line, i) => {
|
||||
const num = startLine + i;
|
||||
return `${formatLineTag(num, line)}:${line}`;
|
||||
})
|
||||
.join("\n");
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse a line reference string like `"5#QQ"` into structured form.
|
||||
*
|
||||
* @throws Error if the format is invalid
|
||||
*/
|
||||
export function parseTag(ref: string): Anchor {
|
||||
const match = ref.match(/^\s*[>+-]*\s*(\d+)\s*#\s*([ZPMQVRWSNKTXJBYH]{2})/);
|
||||
if (!match) {
|
||||
throw new Error(`Invalid line reference "${ref}". Expected format "LINE#ID" (e.g. "5#QQ").`);
|
||||
}
|
||||
const line = Number.parseInt(match[1], 10);
|
||||
if (line < 1) {
|
||||
throw new Error(`Line number must be >= 1, got ${line} in "${ref}".`);
|
||||
}
|
||||
return { line, hash: match[2] };
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Hash Mismatch Error
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
/** One stale reference: the hash the caller sent vs. the file's current hash. */
export interface HashMismatch {
  line: number;
  expected: string;
  actual: string;
}

// Number of context lines shown on each side of a mismatched line.
const MISMATCH_CONTEXT = 2;

/**
 * Error thrown when one or more hashline references have stale hashes.
 * Displays grep-style output with `>>>` markers on mismatched lines,
 * showing the correct `LINE#ID` so the caller can fix all refs at once.
 */
export class HashlineMismatchError extends Error {
  readonly mismatches: HashMismatch[];
  readonly fileLines: string[];
  // Maps each stale "LINE#HASH" string to the corresponding current one.
  readonly remaps: ReadonlyMap<string, string>;
  constructor(
    mismatches: HashMismatch[],
    fileLines: string[],
  ) {
    super(HashlineMismatchError.formatMessage(mismatches, fileLines));
    this.name = "HashlineMismatchError";
    this.mismatches = mismatches;
    this.fileLines = fileLines;
    // Precompute stale-ref → fresh-ref pairs so callers can mechanically
    // rewrite their pending edits without re-reading the file.
    const remaps = new Map<string, string>();
    for (const m of mismatches) {
      const actual = computeLineHash(m.line, fileLines[m.line - 1]);
      remaps.set(`${m.line}#${m.expected}`, `${m.line}#${actual}`);
    }
    this.remaps = remaps;
  }

  /**
   * Build the grep-style error message: each mismatched line with
   * MISMATCH_CONTEXT lines of context, `>>>` marking the stale lines,
   * and `...` separating non-adjacent regions.
   */
  static formatMessage(mismatches: HashMismatch[], fileLines: string[]): string {
    const mismatchSet = new Map<number, HashMismatch>();
    for (const m of mismatches) {
      mismatchSet.set(m.line, m);
    }

    // Collect every line number to display (mismatches plus context).
    const displayLines = new Set<number>();
    for (const m of mismatches) {
      const lo = Math.max(1, m.line - MISMATCH_CONTEXT);
      const hi = Math.min(fileLines.length, m.line + MISMATCH_CONTEXT);
      for (let i = lo; i <= hi; i++) {
        displayLines.add(i);
      }
    }

    const sorted = [...displayLines].sort((a, b) => a - b);
    const lines: string[] = [];

    lines.push(
      `${mismatches.length} line${mismatches.length > 1 ? "s have" : " has"} changed since last read. Use the updated LINE#ID references shown below (>>> marks changed lines).`,
    );
    lines.push("");

    let prevLine = -1;
    for (const lineNum of sorted) {
      // Gap between context regions — show an ellipsis separator.
      if (prevLine !== -1 && lineNum > prevLine + 1) {
        lines.push(" ...");
      }
      prevLine = lineNum;

      // Always print the CURRENT hash so the output doubles as a re-read.
      const text = fileLines[lineNum - 1];
      const hash = computeLineHash(lineNum, text);
      const prefix = `${lineNum}#${hash}`;

      if (mismatchSet.has(lineNum)) {
        lines.push(`>>> ${prefix}:${text}`);
      } else {
        lines.push(`    ${prefix}:${text}`);
      }
    }
    return lines.join("\n");
  }
}
|
||||
|
||||
/**
|
||||
* Validate that a line reference points to an existing line with a matching hash.
|
||||
*/
|
||||
export function validateLineRef(ref: Anchor, fileLines: string[]): void {
|
||||
if (ref.line < 1 || ref.line > fileLines.length) {
|
||||
throw new Error(`Line ${ref.line} does not exist (file has ${fileLines.length} lines)`);
|
||||
}
|
||||
const actualHash = computeLineHash(ref.line, fileLines[ref.line - 1]);
|
||||
if (actualHash !== ref.hash) {
|
||||
throw new HashlineMismatchError([{ line: ref.line, expected: ref.hash, actual: actualHash }], fileLines);
|
||||
}
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Prefix Stripping
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
/** Pattern matching hashline display format prefixes: `LINE#ID:CONTENT` and `#ID:CONTENT` */
|
||||
const HASHLINE_PREFIX_RE = /^\s*(?:>>>|>>)?\s*(?:\d+\s*#\s*|#\s*)[ZPMQVRWSNKTXJBYH]{2}:/;
|
||||
|
||||
/** Pattern matching a unified-diff added-line `+` prefix (but not `++`). */
|
||||
const DIFF_PLUS_RE = /^[+](?![+])/;
|
||||
|
||||
/**
|
||||
* Strip hashline display prefixes and diff `+` markers from replacement lines.
|
||||
*
|
||||
* Models frequently copy the `LINE#ID` prefix from read output into their
|
||||
* replacement content. This strips them heuristically before application.
|
||||
*/
|
||||
export function stripNewLinePrefixes(lines: string[]): string[] {
|
||||
let hashPrefixCount = 0;
|
||||
let diffPlusCount = 0;
|
||||
let nonEmpty = 0;
|
||||
for (const l of lines) {
|
||||
if (l.length === 0) continue;
|
||||
nonEmpty++;
|
||||
if (HASHLINE_PREFIX_RE.test(l)) hashPrefixCount++;
|
||||
if (DIFF_PLUS_RE.test(l)) diffPlusCount++;
|
||||
}
|
||||
if (nonEmpty === 0) return lines;
|
||||
|
||||
const stripHash = hashPrefixCount > 0 && hashPrefixCount === nonEmpty;
|
||||
const stripPlus = !stripHash && diffPlusCount > 0 && diffPlusCount >= nonEmpty * 0.5;
|
||||
if (!stripHash && !stripPlus) return lines;
|
||||
|
||||
return lines.map(l => {
|
||||
if (stripHash) return l.replace(HASHLINE_PREFIX_RE, "");
|
||||
if (stripPlus) return l.replace(DIFF_PLUS_RE, "");
|
||||
return l;
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Parse edit content — handles string, array, or null input.
|
||||
* Strips hashline prefixes and diff markers from model output.
|
||||
*/
|
||||
export function hashlineParseText(edit: string[] | string | null): string[] {
|
||||
if (edit === null) return [];
|
||||
if (typeof edit === "string") {
|
||||
const normalizedEdit = edit.endsWith("\n") ? edit.slice(0, -1) : edit;
|
||||
edit = normalizedEdit.replaceAll("\r", "").split("\n");
|
||||
}
|
||||
return stripNewLinePrefixes(edit);
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Auto-correction Heuristics
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
function maybeAutocorrectEscapedTabIndentation(edits: HashlineEdit[], warnings: string[]): void {
|
||||
for (const edit of edits) {
|
||||
if (edit.lines.length === 0) continue;
|
||||
const hasEscapedTabs = edit.lines.some(line => line.includes("\\t"));
|
||||
if (!hasEscapedTabs) continue;
|
||||
const hasRealTabs = edit.lines.some(line => line.includes("\t"));
|
||||
if (hasRealTabs) continue;
|
||||
let correctedCount = 0;
|
||||
const corrected = edit.lines.map(line =>
|
||||
line.replace(/^((?:\\t)+)/, escaped => {
|
||||
correctedCount += escaped.length / 2;
|
||||
return "\t".repeat(escaped.length / 2);
|
||||
}),
|
||||
);
|
||||
if (correctedCount === 0) continue;
|
||||
edit.lines = corrected;
|
||||
warnings.push(
|
||||
`Auto-corrected escaped tab indentation in edit: converted leading \\t sequence(s) to real tab characters`,
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
const MIN_AUTOCORRECT_LENGTH = 2;
|
||||
|
||||
function shouldAutocorrect(line: string, otherLine: string): boolean {
|
||||
if (!line || line !== otherLine) return false;
|
||||
line = line.trim();
|
||||
if (line.length < MIN_AUTOCORRECT_LENGTH) {
|
||||
return line.endsWith("}") || line.endsWith(")");
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
// Edit Application
|
||||
// ═══════════════════════════════════════════════════════════════════════════
|
||||
|
||||
/**
 * Apply an array of hashline edits to file content.
 *
 * Each edit operation identifies target lines directly (`replace`,
 * `append`, `prepend`). Line references are resolved via parseTag
 * and hashes validated before any mutation.
 *
 * Pipeline: (1) validate every anchor and collect ALL stale hashes before
 * touching anything, (2) run the escaped-tab auto-correction, (3) drop exact
 * duplicate edits, (4) sort bottom-up and splice.
 *
 * Edits are sorted bottom-up (highest effective line first) so earlier
 * splices don't invalidate later line numbers.
 *
 * @returns The modified content and the 1-indexed first changed line number
 * @throws Error for out-of-range or inverted ranges
 * @throws HashlineMismatchError listing every stale reference at once
 */
export function applyHashlineEdits(
  text: string,
  edits: HashlineEdit[],
): {
  lines: string;
  firstChangedLine: number | undefined;
  warnings?: string[];
  noopEdits?: Array<{ editIndex: number; loc: string; current: string }>;
} {
  if (edits.length === 0) {
    return { lines: text, firstChangedLine: undefined };
  }

  const fileLines = text.split("\n");
  // Snapshot for no-op detection — replaces are compared against the
  // ORIGINAL content, not content already modified by other edits.
  const originalFileLines = [...fileLines];
  let firstChangedLine: number | undefined;
  const noopEdits: Array<{ editIndex: number; loc: string; current: string }> = [];
  const warnings: string[] = [];

  // Pre-validate: collect all hash mismatches before mutating
  const mismatches: HashMismatch[] = [];
  // Returns false (and records the mismatch) on a stale hash; throws on
  // out-of-range lines, since those can't be auto-remapped for the caller.
  function validateRef(ref: Anchor): boolean {
    if (ref.line < 1 || ref.line > fileLines.length) {
      throw new Error(`Line ${ref.line} does not exist (file has ${fileLines.length} lines)`);
    }
    const actualHash = computeLineHash(ref.line, fileLines[ref.line - 1]);
    if (actualHash === ref.hash) {
      return true;
    }
    mismatches.push({ line: ref.line, expected: ref.hash, actual: actualHash });
    return false;
  }
  for (const edit of edits) {
    switch (edit.op) {
      case "replace": {
        if (edit.end) {
          // Validate both endpoints so the error report covers every stale ref.
          const startValid = validateRef(edit.pos);
          const endValid = validateRef(edit.end);
          if (!startValid || !endValid) continue;
          if (edit.pos.line > edit.end.line) {
            throw new Error(`Range start line ${edit.pos.line} must be <= end line ${edit.end.line}`);
          }
        } else {
          if (!validateRef(edit.pos)) continue;
        }
        break;
      }
      case "append": {
        if (edit.pos && !validateRef(edit.pos)) continue;
        // An empty insert means "insert one blank line".
        if (edit.lines.length === 0) {
          edit.lines = [""];
        }
        break;
      }
      case "prepend": {
        if (edit.pos && !validateRef(edit.pos)) continue;
        if (edit.lines.length === 0) {
          edit.lines = [""];
        }
        break;
      }
    }
  }
  // All-or-nothing: any stale hash aborts before the file is mutated.
  if (mismatches.length > 0) {
    throw new HashlineMismatchError(mismatches, fileLines);
  }
  maybeAutocorrectEscapedTabIndentation(edits, warnings);

  // Deduplicate identical edits targeting the same line(s)
  const seenEditKeys = new Map<string, number>();
  const dedupIndices = new Set<number>();
  for (let i = 0; i < edits.length; i++) {
    const edit = edits[i];
    let lineKey: string;
    switch (edit.op) {
      case "replace":
        lineKey = edit.end ? `r:${edit.pos.line}:${edit.end.line}` : `s:${edit.pos.line}`;
        break;
      case "append":
        lineKey = edit.pos ? `i:${edit.pos.line}` : "ieof";
        break;
      case "prepend":
        lineKey = edit.pos ? `ib:${edit.pos.line}` : "ibef";
        break;
    }
    // Key = operation + target + replacement text; only exact repeats dedupe.
    const dstKey = `${lineKey}:${edit.lines.join("\n")}`;
    if (seenEditKeys.has(dstKey)) {
      dedupIndices.add(i);
    } else {
      seenEditKeys.set(dstKey, i);
    }
  }
  if (dedupIndices.size > 0) {
    // Remove from the end so earlier indices stay valid while splicing.
    for (let i = edits.length - 1; i >= 0; i--) {
      if (dedupIndices.has(i)) edits.splice(i, 1);
    }
  }

  // Compute sort key (descending) — bottom-up application
  const annotated = edits.map((edit, idx) => {
    let sortLine: number;
    let precedence: number;
    switch (edit.op) {
      case "replace":
        sortLine = edit.end ? edit.end.line : edit.pos.line;
        precedence = 0;
        break;
      case "append":
        // Anchor-less append targets EOF — sort it after everything else.
        sortLine = edit.pos ? edit.pos.line : fileLines.length + 1;
        precedence = 1;
        break;
      case "prepend":
        // Anchor-less prepend targets BOF — sort it before everything else.
        sortLine = edit.pos ? edit.pos.line : 0;
        precedence = 2;
        break;
    }
    return { edit, idx, sortLine, precedence };
  });

  // Highest line first; precedence orders same-line ops (replace, then
  // append, then prepend) so prepend+append on one anchor nest correctly.
  annotated.sort((a, b) => b.sortLine - a.sortLine || a.precedence - b.precedence || a.idx - b.idx);

  function trackFirstChanged(line: number): void {
    if (firstChangedLine === undefined || line < firstChangedLine) {
      firstChangedLine = line;
    }
  }

  // Apply edits bottom-up
  for (const { edit, idx } of annotated) {
    switch (edit.op) {
      case "replace": {
        if (!edit.end) {
          // Single-line replace: detect and report no-ops instead of splicing.
          const origLines = originalFileLines.slice(edit.pos.line - 1, edit.pos.line);
          const newLines = edit.lines;
          if (origLines.length === newLines.length && origLines.every((line, i) => line === newLines[i])) {
            noopEdits.push({
              editIndex: idx,
              loc: `${edit.pos.line}#${edit.pos.hash}`,
              current: origLines.join("\n"),
            });
            break;
          }
          fileLines.splice(edit.pos.line - 1, 1, ...newLines);
          trackFirstChanged(edit.pos.line);
        } else {
          const count = edit.end.line - edit.pos.line + 1;
          const newLines = [...edit.lines];
          // Heuristic: drop a trailing replacement line that exactly
          // duplicates the line just AFTER the replaced range (a common
          // off-by-one where the model re-supplies the next line).
          const trailingReplacementLine = newLines[newLines.length - 1]?.trimEnd();
          const nextSurvivingLine = fileLines[edit.end.line]?.trimEnd();
          if (
            shouldAutocorrect(trailingReplacementLine, nextSurvivingLine) &&
            fileLines[edit.end.line - 1]?.trimEnd() !== trailingReplacementLine
          ) {
            newLines.pop();
            warnings.push(
              `Auto-corrected range replace ${edit.pos.line}#${edit.pos.hash}-${edit.end.line}#${edit.end.hash}: removed trailing replacement line "${trailingReplacementLine}" that duplicated next surviving line`,
            );
          }
          // Mirror heuristic for a leading line duplicating the line just
          // BEFORE the replaced range.
          const leadingReplacementLine = newLines[0]?.trimEnd();
          const prevSurvivingLine = fileLines[edit.pos.line - 2]?.trimEnd();
          if (
            shouldAutocorrect(leadingReplacementLine, prevSurvivingLine) &&
            fileLines[edit.pos.line - 1]?.trimEnd() !== leadingReplacementLine
          ) {
            newLines.shift();
            warnings.push(
              `Auto-corrected range replace ${edit.pos.line}#${edit.pos.hash}-${edit.end.line}#${edit.end.hash}: removed leading replacement line "${leadingReplacementLine}" that duplicated preceding surviving line`,
            );
          }
          fileLines.splice(edit.pos.line - 1, count, ...newLines);
          trackFirstChanged(edit.pos.line);
        }
        break;
      }
      case "append": {
        const inserted = edit.lines;
        if (inserted.length === 0) {
          noopEdits.push({
            editIndex: idx,
            loc: edit.pos ? `${edit.pos.line}#${edit.pos.hash}` : "EOF",
            current: edit.pos ? originalFileLines[edit.pos.line - 1] : "",
          });
          break;
        }
        if (edit.pos) {
          fileLines.splice(edit.pos.line, 0, ...inserted);
          trackFirstChanged(edit.pos.line + 1);
        } else {
          // Appending into a logically empty file replaces the lone "" line.
          if (fileLines.length === 1 && fileLines[0] === "") {
            fileLines.splice(0, 1, ...inserted);
            trackFirstChanged(1);
          } else {
            fileLines.splice(fileLines.length, 0, ...inserted);
            trackFirstChanged(fileLines.length - inserted.length + 1);
          }
        }
        break;
      }
      case "prepend": {
        const inserted = edit.lines;
        if (inserted.length === 0) {
          noopEdits.push({
            editIndex: idx,
            loc: edit.pos ? `${edit.pos.line}#${edit.pos.hash}` : "BOF",
            current: edit.pos ? originalFileLines[edit.pos.line - 1] : "",
          });
          break;
        }
        if (edit.pos) {
          fileLines.splice(edit.pos.line - 1, 0, ...inserted);
          trackFirstChanged(edit.pos.line);
        } else {
          // Prepending into a logically empty file replaces the lone "" line.
          if (fileLines.length === 1 && fileLines[0] === "") {
            fileLines.splice(0, 1, ...inserted);
          } else {
            fileLines.splice(0, 0, ...inserted);
          }
          trackFirstChanged(1);
        }
        break;
      }
    }
  }

  return {
    lines: fileLines.join("\n"),
    firstChangedLine,
    // Optional keys are only present when non-empty.
    ...(warnings.length > 0 ? { warnings } : {}),
    ...(noopEdits.length > 0 ? { noopEdits } : {}),
  };
}
|
||||
|
|
@ -65,6 +65,37 @@ export {
|
|||
type WriteToolOptions,
|
||||
writeTool,
|
||||
} from "./write.js";
|
||||
export {
|
||||
createHashlineEditTool,
|
||||
type HashlineEditInput,
|
||||
type HashlineEditItem,
|
||||
type HashlineEditOperations,
|
||||
type HashlineEditToolDetails,
|
||||
type HashlineEditToolOptions,
|
||||
hashlineEditTool,
|
||||
} from "./hashline-edit.js";
|
||||
export {
|
||||
createHashlineReadTool,
|
||||
type HashlineReadOperations,
|
||||
type HashlineReadToolDetails,
|
||||
type HashlineReadToolInput,
|
||||
type HashlineReadToolOptions,
|
||||
hashlineReadTool,
|
||||
} from "./hashline-read.js";
|
||||
export {
|
||||
type Anchor,
|
||||
applyHashlineEdits,
|
||||
computeLineHash,
|
||||
formatHashLines,
|
||||
formatLineTag,
|
||||
type HashlineEdit,
|
||||
HashlineMismatchError,
|
||||
hashlineParseText,
|
||||
type HashMismatch,
|
||||
parseTag,
|
||||
stripNewLinePrefixes,
|
||||
validateLineRef,
|
||||
} from "./hashline.js";
|
||||
export {
|
||||
createLspTool,
|
||||
type LspToolDetails,
|
||||
|
|
@ -78,6 +109,8 @@ import { type BashToolOptions, bashTool, createBashTool } from "./bash.js";
|
|||
import { createEditTool, editTool } from "./edit.js";
|
||||
import { createFindTool, findTool } from "./find.js";
|
||||
import { createGrepTool, grepTool } from "./grep.js";
|
||||
import { createHashlineEditTool, hashlineEditTool } from "./hashline-edit.js";
|
||||
import { createHashlineReadTool, hashlineReadTool } from "./hashline-read.js";
|
||||
import { createLsTool, lsTool } from "./ls.js";
|
||||
import { createReadTool, type ReadToolOptions, readTool } from "./read.js";
|
||||
import { createWriteTool, writeTool } from "./write.js";
|
||||
|
|
@ -102,8 +135,13 @@ export const allTools = {
|
|||
find: findTool,
|
||||
ls: lsTool,
|
||||
lsp: lspTool,
|
||||
hashline_edit: hashlineEditTool,
|
||||
hashline_read: hashlineReadTool,
|
||||
};
|
||||
|
||||
// Hashline-mode coding tools — read with hash anchors, edit with hash references.
// Default (cwd-less) tool instances; use createHashlineCodingTools for a
// specific working directory.
export const hashlineCodingTools: Tool[] = [hashlineReadTool, bashTool, hashlineEditTool, writeTool];
|
||||
|
||||
export type ToolName = keyof typeof allTools;
|
||||
|
||||
export interface ToolsOptions {
|
||||
|
|
@ -145,5 +183,20 @@ export function createAllTools(cwd: string, options?: ToolsOptions): Record<Tool
|
|||
find: createFindTool(cwd),
|
||||
ls: createLsTool(cwd),
|
||||
lsp: createLspTool(cwd),
|
||||
hashline_edit: createHashlineEditTool(cwd),
|
||||
hashline_read: createHashlineReadTool(cwd, options?.read),
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Create hashline-mode coding tools configured for a specific working directory.
|
||||
* Uses hashline read (LINE#ID prefixed output) and hashline edit (hash-anchor based edits).
|
||||
*/
|
||||
export function createHashlineCodingTools(cwd: string, options?: ToolsOptions): Tool[] {
|
||||
return [
|
||||
createHashlineReadTool(cwd, options?.read),
|
||||
createBashTool(cwd, options?.bash),
|
||||
createHashlineEditTool(cwd),
|
||||
createWriteTool(cwd),
|
||||
];
|
||||
}
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue