Merge pull request #1449 from frizynn/refactor/compaction-helpers-extraction

refactor: extract shared helpers in compaction module
This commit is contained in:
TÂCHES 2026-03-19 15:45:21 -06:00 committed by GitHub
commit dea2f1a426
3 changed files with 121 additions and 127 deletions

View file

@ -9,20 +9,18 @@ import type { AgentMessage } from "@gsd/pi-agent-core";
import type { Model } from "@gsd/pi-ai";
import { completeSimple } from "@gsd/pi-ai";
import { COMPACTION_RESERVE_TOKENS } from "../constants.js";
import {
convertToLlm,
createBranchSummaryMessage,
createCompactionSummaryMessage,
createCustomMessage,
} from "../messages.js";
import { convertToLlm } from "../messages.js";
import type { ReadonlySessionManager, SessionEntry } from "../session-manager.js";
import { estimateTokens } from "./compaction.js";
import {
computeFileLists,
createFileOps,
createSummarizationMessage,
extractFileOpsFromMessage,
extractTextContent,
type FileOperations,
formatFileOperations,
getMessageFromEntry,
SUMMARIZATION_SYSTEM_PROMPT,
serializeConversation,
} from "./utils.js";
@ -134,39 +132,6 @@ export function collectEntriesForBranchSummary(
return { entries, commonAncestorId };
}
// ============================================================================
// Entry to Message Conversion
// ============================================================================
/**
 * Extract AgentMessage from a session entry.
 * Similar to getMessageFromEntry in compaction.ts but also handles compaction entries.
 *
 * NOTE(review): this local copy is deleted by this commit in favor of the shared
 * helper in ./utils.js, which exposes the tool-result skip as a flag.
 *
 * @param entry - Session entry to convert
 * @returns The entry's AgentMessage, or undefined when the entry contributes
 *          nothing to conversation content (settings/label entries, tool results)
 */
function getMessageFromEntry(entry: SessionEntry): AgentMessage | undefined {
switch (entry.type) {
case "message":
// Skip tool results - context is in assistant's tool call
if (entry.message.role === "toolResult") return undefined;
return entry.message;
case "custom_message":
return createCustomMessage(entry.customType, entry.content, entry.display, entry.details, entry.timestamp);
case "branch_summary":
return createBranchSummaryMessage(entry.summary, entry.fromId, entry.timestamp);
case "compaction":
return createCompactionSummaryMessage(entry.summary, entry.tokensBefore, entry.timestamp);
// These don't contribute to conversation content
case "thinking_level_change":
case "model_change":
case "custom":
case "label":
return undefined;
}
}
/**
* Prepare entries for summarization with token budget.
*
@ -206,7 +171,7 @@ export function prepareBranchEntries(entries: SessionEntry[], tokenBudget: numbe
// Second pass: walk from newest to oldest, adding messages until token budget
for (let i = entries.length - 1; i >= 0; i--) {
const entry = entries[i];
const message = getMessageFromEntry(entry);
const message = getMessageFromEntry(entry, /* skipToolResults */ true);
if (!message) continue;
// Extract file ops from assistant messages (tool calls)
@ -310,18 +275,10 @@ export async function generateBranchSummary(
}
const promptText = `<conversation>\n${conversationText}\n</conversation>\n\n${instructions}`;
const summarizationMessages = [
{
role: "user" as const,
content: [{ type: "text" as const, text: promptText }],
timestamp: Date.now(),
},
];
// Call LLM for summarization
const response = await completeSimple(
model,
{ systemPrompt: SUMMARIZATION_SYSTEM_PROMPT, messages: summarizationMessages },
{ systemPrompt: SUMMARIZATION_SYSTEM_PROMPT, messages: createSummarizationMessage(promptText) },
{ apiKey, signal, maxTokens: 2048 },
);
@ -333,10 +290,7 @@ export async function generateBranchSummary(
return { error: response.errorMessage || "Summarization failed" };
}
let summary = response.content
.filter((c): c is { type: "text"; text: string } => c.type === "text")
.map((c) => c.text)
.join("\n");
let summary = extractTextContent(response.content);
// Prepend preamble to provide context about the branch summary
summary = BRANCH_SUMMARY_PREAMBLE + summary;

View file

@ -9,19 +9,18 @@ import type { AgentMessage } from "@gsd/pi-agent-core";
import type { AssistantMessage, Model, Usage } from "@gsd/pi-ai";
import { completeSimple } from "@gsd/pi-ai";
import { COMPACTION_KEEP_RECENT_TOKENS, COMPACTION_RESERVE_TOKENS } from "../constants.js";
import {
convertToLlm,
createBranchSummaryMessage,
createCompactionSummaryMessage,
createCustomMessage,
} from "../messages.js";
import { convertToLlm } from "../messages.js";
import type { CompactionEntry, SessionEntry } from "../session-manager.js";
import {
collectMessages,
computeFileLists,
createFileOps,
createSummarizationMessage,
extractFileOpsFromMessage,
extractTextContent,
type FileOperations,
formatFileOperations,
getMessageFromEntry,
SUMMARIZATION_SYSTEM_PROMPT,
serializeConversation,
} from "./utils.js";
@ -69,30 +68,6 @@ function extractFileOperations(
return fileOps;
}
// ============================================================================
// Message Extraction
// ============================================================================
/**
 * Extract AgentMessage from an entry if it produces one.
 * Returns undefined for entries that don't contribute to LLM context.
 *
 * NOTE(review): this local copy is deleted by this commit in favor of the shared
 * helper in ./utils.js. Unlike the branch-summary variant, it never skips
 * toolResult messages (equivalent to the shared helper's default behavior).
 *
 * @param entry - Session entry to convert
 * @returns The entry's AgentMessage, or undefined for non-content entries
 */
function getMessageFromEntry(entry: SessionEntry): AgentMessage | undefined {
if (entry.type === "message") {
return entry.message;
}
if (entry.type === "custom_message") {
return createCustomMessage(entry.customType, entry.content, entry.display, entry.details, entry.timestamp);
}
if (entry.type === "branch_summary") {
return createBranchSummaryMessage(entry.summary, entry.fromId, entry.timestamp);
}
if (entry.type === "compaction") {
return createCompactionSummaryMessage(entry.summary, entry.tokensBefore, entry.timestamp);
}
// thinking_level_change / model_change / custom / label: no LLM-visible message
return undefined;
}
/** Result from compact() - SessionManager adds uuid/parentUuid when saving */
export interface CompactionResult<T = unknown> {
summary: string;
@ -547,21 +522,13 @@ export async function generateSummary(
}
promptText += basePrompt;
const summarizationMessages = [
{
role: "user" as const,
content: [{ type: "text" as const, text: promptText }],
timestamp: Date.now(),
},
];
const completionOptions = model.reasoning
? { maxTokens, signal, apiKey, reasoning: "high" as const }
: { maxTokens, signal, apiKey };
const response = await completeSimple(
model,
{ systemPrompt: SUMMARIZATION_SYSTEM_PROMPT, messages: summarizationMessages },
{ systemPrompt: SUMMARIZATION_SYSTEM_PROMPT, messages: createSummarizationMessage(promptText) },
completionOptions,
);
@ -569,12 +536,7 @@ export async function generateSummary(
throw new Error(`Summarization failed: ${response.errorMessage || "Unknown error"}`);
}
const textContent = response.content
.filter((c): c is { type: "text"; text: string } => c.type === "text")
.map((c) => c.text)
.join("\n");
return textContent;
return extractTextContent(response.content);
}
// ============================================================================
@ -618,11 +580,7 @@ export function prepareCompaction(
const boundaryEnd = pathEntries.length;
const usageStart = prevCompactionIndex >= 0 ? prevCompactionIndex : 0;
const usageMessages: AgentMessage[] = [];
for (let i = usageStart; i < boundaryEnd; i++) {
const msg = getMessageFromEntry(pathEntries[i]);
if (msg) usageMessages.push(msg);
}
const usageMessages = collectMessages(pathEntries, usageStart, boundaryEnd);
const tokensBefore = estimateContextTokens(usageMessages).tokens;
const cutPoint = findCutPoint(pathEntries, boundaryStart, boundaryEnd, settings.keepRecentTokens);
@ -637,20 +595,12 @@ export function prepareCompaction(
const historyEnd = cutPoint.isSplitTurn ? cutPoint.turnStartIndex : cutPoint.firstKeptEntryIndex;
// Messages to summarize (will be discarded after summary)
const messagesToSummarize: AgentMessage[] = [];
for (let i = boundaryStart; i < historyEnd; i++) {
const msg = getMessageFromEntry(pathEntries[i]);
if (msg) messagesToSummarize.push(msg);
}
const messagesToSummarize = collectMessages(pathEntries, boundaryStart, historyEnd);
// Messages for turn prefix summary (if splitting a turn)
const turnPrefixMessages: AgentMessage[] = [];
if (cutPoint.isSplitTurn) {
for (let i = cutPoint.turnStartIndex; i < cutPoint.firstKeptEntryIndex; i++) {
const msg = getMessageFromEntry(pathEntries[i]);
if (msg) turnPrefixMessages.push(msg);
}
}
const turnPrefixMessages = cutPoint.isSplitTurn
? collectMessages(pathEntries, cutPoint.turnStartIndex, cutPoint.firstKeptEntryIndex)
: [];
// Get previous summary for iterative update
let previousSummary: string | undefined;
@ -789,17 +739,10 @@ async function generateTurnPrefixSummary(
const llmMessages = convertToLlm(messages);
const conversationText = serializeConversation(llmMessages);
const promptText = `<conversation>\n${conversationText}\n</conversation>\n\n${TURN_PREFIX_SUMMARIZATION_PROMPT}`;
const summarizationMessages = [
{
role: "user" as const,
content: [{ type: "text" as const, text: promptText }],
timestamp: Date.now(),
},
];
const response = await completeSimple(
model,
{ systemPrompt: SUMMARIZATION_SYSTEM_PROMPT, messages: summarizationMessages },
{ systemPrompt: SUMMARIZATION_SYSTEM_PROMPT, messages: createSummarizationMessage(promptText) },
{ maxTokens, signal, apiKey },
);
@ -807,8 +750,5 @@ async function generateTurnPrefixSummary(
throw new Error(`Turn prefix summarization failed: ${response.errorMessage || "Unknown error"}`);
}
return response.content
.filter((c): c is { type: "text"; text: string } => c.type === "text")
.map((c) => c.text)
.join("\n");
return extractTextContent(response.content);
}

View file

@ -5,6 +5,12 @@
import type { AgentMessage } from "@gsd/pi-agent-core";
import type { Message } from "@gsd/pi-ai";
import { TOOL_RESULT_MAX_CHARS } from "../constants.js";
import {
createBranchSummaryMessage,
createCompactionSummaryMessage,
createCustomMessage,
} from "../messages.js";
import type { SessionEntry } from "../session-manager.js";
// ============================================================================
// File Operation Tracking
@ -82,6 +88,100 @@ export function formatFileOperations(readFiles: string[], modifiedFiles: string[
return `\n\n${sections.join("\n\n")}`;
}
// ============================================================================
// Message Extraction
// ============================================================================
/**
 * Extract the AgentMessage carried by a session entry, if any.
 *
 * Covers every message-producing entry type: message, custom_message,
 * branch_summary, and compaction. Entries that never reach the LLM context
 * (settings changes, labels, custom markers) yield undefined.
 *
 * @param entry - Session entry to convert
 * @param skipToolResults - When true, toolResult messages are dropped (branch
 *        summarization relies on the assistant's tool-call context instead).
 *        Defaults to false.
 * @returns The entry's AgentMessage, or undefined for non-content entries
 */
export function getMessageFromEntry(entry: SessionEntry, skipToolResults = false): AgentMessage | undefined {
  if (entry.type === "message") {
    // Tool results are optionally elided; the assistant's tool call already
    // carries the relevant context for summarization.
    if (skipToolResults && entry.message.role === "toolResult") return undefined;
    return entry.message;
  }
  if (entry.type === "custom_message") {
    return createCustomMessage(entry.customType, entry.content, entry.display, entry.details, entry.timestamp);
  }
  if (entry.type === "branch_summary") {
    return createBranchSummaryMessage(entry.summary, entry.fromId, entry.timestamp);
  }
  if (entry.type === "compaction") {
    return createCompactionSummaryMessage(entry.summary, entry.tokensBefore, entry.timestamp);
  }
  // thinking_level_change / model_change / custom / label
  return undefined;
}
/**
 * Gather the AgentMessages produced by a half-open range of session entries.
 *
 * Entries that carry no LLM-visible message are silently skipped.
 *
 * @param entries - Session entries array
 * @param startIndex - First index (inclusive)
 * @param endIndex - Last index (exclusive)
 * @param skipToolResults - If true, skips toolResult messages. Default false.
 * @returns Messages in entry order
 */
export function collectMessages(
  entries: SessionEntry[],
  startIndex: number,
  endIndex: number,
  skipToolResults = false,
): AgentMessage[] {
  const messages: AgentMessage[] = [];
  let index = startIndex;
  while (index < endIndex) {
    const message = getMessageFromEntry(entries[index], skipToolResults);
    if (message) messages.push(message);
    index += 1;
  }
  return messages;
}
// ============================================================================
// Text Content Extraction
// ============================================================================
/**
 * Extract text from an array of content blocks, filtering to text-type blocks.
 * Replaces the recurring `.filter(c => c.type === "text").map(c => c.text).join(sep)` pattern.
 *
 * A text block with a missing `text` field contributes an empty string — the
 * same output `Array.prototype.join` produced for `undefined` elements in the
 * old inline pattern, but without relying on that coercion.
 *
 * @param content - Content blocks (e.g. an LLM response's content array)
 * @param separator - Joiner between text blocks. Default "\n".
 * @returns Concatenated text of all text-type blocks
 */
export function extractTextContent(
  content: Array<{ type: string; text?: string }>,
  separator = "\n",
): string {
  // Fix: the previous type predicate asserted `text: string` while only
  // checking `type`, silently lying about `text?: string` inputs. Filter on
  // the checked property only and default missing text explicitly.
  return content
    .filter((c) => c.type === "text")
    .map((c) => c.text ?? "")
    .join(separator);
}
// ============================================================================
// Summarization Message Construction
// ============================================================================
/**
 * Create a single-message array for summarization prompts.
 *
 * Wraps promptText in the standard shape expected by completeSimple:
 * `[{ role: "user", content: [{ type: "text", text }], timestamp }]`.
 *
 * @param promptText - Full prompt text for the summarization request
 * @returns A one-element message array stamped with the current time
 */
export function createSummarizationMessage(
  promptText: string,
): [{ role: "user"; content: [{ type: "text"; text: string }]; timestamp: number }] {
  const textBlock = { type: "text" as const, text: promptText };
  return [{ role: "user", content: [textBlock], timestamp: Date.now() }];
}
// ============================================================================
// Message Serialization
// ============================================================================