feat(memory-embeddings): read SF_LLM_GATEWAY_KEY from env as auth.json fallback
Enables CI and containerised deployments without writing secrets to disk. Auth.json still takes precedence when present. - readGatewayFromAuthJson now falls back to SF_LLM_GATEWAY_KEY env var - SF_LLM_GATEWAY_URL env var also supported for endpoint override - Added tests for env fallback, auth.json preference, and default URL Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
This commit is contained in:
parent
6214f7c86d
commit
cf32e79578
6 changed files with 454 additions and 11 deletions
292
src/headless-feedback.ts
Normal file
292
src/headless-feedback.ts
Normal file
|
|
@ -0,0 +1,292 @@
|
||||||
|
/**
|
||||||
|
* Headless commands for the self-feedback subsystem:
|
||||||
|
*
|
||||||
|
* sf headless feedback add [flags]
|
||||||
|
* sf headless feedback list [--unresolved] [--severity <s>] [--json]
|
||||||
|
* sf headless feedback resolve <id> --reason "..." [--evidence-kind <k>]
|
||||||
|
*
|
||||||
|
* Why these exist: today the only writers to the self_feedback table
|
||||||
|
* are SF's autonomous runtime and a handful of internal detectors.
|
||||||
|
* Operators (and Claude Code instances running outside the autonomous
|
||||||
|
* loop) have no path to file feedback without dropping to SQL.
|
||||||
|
*
|
||||||
|
* Same pattern as headless-mark-state.ts: bypass the RPC child, use
|
||||||
|
* the established sf-db primitives, idempotent on conflict.
|
||||||
|
*
|
||||||
|
* resolve uses the same evidence shape the autonomous triage flow
|
||||||
|
* already accepts (`{kind: "human-clear" | "agent-fix" | ...}`), so
|
||||||
|
* resolution semantics are consistent across operator and agent paths.
|
||||||
|
*/
|
||||||
|
|
||||||
|
import { existsSync } from "node:fs";
|
||||||
|
import { join } from "node:path";
|
||||||
|
import { createJiti } from "@mariozechner/jiti";
|
||||||
|
import { resolveBundledSourceResource } from "./bundled-resource-path.js";
|
||||||
|
import { getSfEnv } from "./env.js";
|
||||||
|
|
||||||
|
export interface FeedbackResult {
|
||||||
|
exitCode: number;
|
||||||
|
}
|
||||||
|
|
||||||
|
interface FeedbackOptions {
|
||||||
|
subcommand: "add" | "list" | "resolve";
|
||||||
|
args: string[];
|
||||||
|
json: boolean;
|
||||||
|
}
|
||||||
|
|
||||||
|
const jiti = createJiti(import.meta.filename, {
|
||||||
|
interopDefault: true,
|
||||||
|
debug: false,
|
||||||
|
});
|
||||||
|
|
||||||
|
const agentExtensionsDir = join(getSfEnv().agentDir, "extensions", "sf");
|
||||||
|
const useAgentDir = existsSync(join(agentExtensionsDir, "state.js"));
|
||||||
|
|
||||||
|
function sfExtensionPath(moduleName: string): string {
|
||||||
|
if (useAgentDir) return join(agentExtensionsDir, `${moduleName}.js`);
|
||||||
|
const tsPath = resolveBundledSourceResource(
|
||||||
|
import.meta.url,
|
||||||
|
"extensions",
|
||||||
|
"sf",
|
||||||
|
`${moduleName}.ts`,
|
||||||
|
);
|
||||||
|
if (existsSync(tsPath)) return tsPath;
|
||||||
|
return resolveBundledSourceResource(
|
||||||
|
import.meta.url,
|
||||||
|
"extensions",
|
||||||
|
"sf",
|
||||||
|
`${moduleName}.js`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
|
||||||
|
// Match self-feedback.js's newId() so ids look uniform across writers.
|
||||||
|
function newId(): string {
|
||||||
|
const ts = Date.now().toString(36);
|
||||||
|
const rnd = Math.random().toString(36).slice(2, 8);
|
||||||
|
return `sf-${ts}-${rnd}`;
|
||||||
|
}
|
||||||
|
|
||||||
|
function readFlag(args: string[], name: string): string | undefined {
|
||||||
|
const i = args.indexOf(name);
|
||||||
|
if (i < 0 || i + 1 >= args.length) return undefined;
|
||||||
|
return args[i + 1];
|
||||||
|
}
|
||||||
|
|
||||||
|
function readBoolFlag(args: string[], name: string): boolean {
|
||||||
|
return args.includes(name);
|
||||||
|
}
|
||||||
|
|
||||||
|
function parseIntOrUndefined(s: string | undefined): number | undefined {
|
||||||
|
if (s === undefined) return undefined;
|
||||||
|
const n = Number.parseInt(s, 10);
|
||||||
|
return Number.isFinite(n) ? n : undefined;
|
||||||
|
}
|
||||||
|
|
||||||
|
const VALID_SEVERITIES = new Set(["low", "medium", "high", "critical"]);
|
||||||
|
const DEFAULT_KIND_DOMAIN = "improvement-idea";
|
||||||
|
|
||||||
|
function emit(json: boolean, payload: Record<string, unknown>, human: string): void {
|
||||||
|
if (json) {
|
||||||
|
process.stdout.write(`${JSON.stringify(payload)}\n`);
|
||||||
|
} else {
|
||||||
|
process.stdout.write(`${human}\n`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
async function loadDb(basePath: string): Promise<void> {
|
||||||
|
const autoStart = (await jiti.import(sfExtensionPath("auto-start"), {})) as {
|
||||||
|
openProjectDbIfPresent: (basePath: string) => Promise<void>;
|
||||||
|
};
|
||||||
|
await autoStart.openProjectDbIfPresent(basePath);
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleAdd(basePath: string, options: FeedbackOptions): Promise<FeedbackResult> {
|
||||||
|
const summary = readFlag(options.args, "--summary");
|
||||||
|
if (!summary || summary.trim() === "") {
|
||||||
|
process.stderr.write("[headless] Error: feedback add requires --summary <text>\n");
|
||||||
|
return { exitCode: 2 };
|
||||||
|
}
|
||||||
|
const severity = readFlag(options.args, "--severity") ?? "medium";
|
||||||
|
if (!VALID_SEVERITIES.has(severity)) {
|
||||||
|
process.stderr.write(
|
||||||
|
`[headless] Error: --severity must be one of: low, medium, high, critical (got '${severity}')\n`,
|
||||||
|
);
|
||||||
|
return { exitCode: 2 };
|
||||||
|
}
|
||||||
|
const kind = readFlag(options.args, "--kind") ?? DEFAULT_KIND_DOMAIN;
|
||||||
|
const evidence = readFlag(options.args, "--evidence") ?? "";
|
||||||
|
const suggestedFix = readFlag(options.args, "--suggested-fix") ?? "";
|
||||||
|
const milestone = readFlag(options.args, "--milestone");
|
||||||
|
const slice = readFlag(options.args, "--slice");
|
||||||
|
const task = readFlag(options.args, "--task");
|
||||||
|
const unitType = readFlag(options.args, "--unit-type");
|
||||||
|
const impactScore = parseIntOrUndefined(readFlag(options.args, "--impact-score"));
|
||||||
|
const effortEstimate = parseIntOrUndefined(readFlag(options.args, "--effort-estimate"));
|
||||||
|
const blocking =
|
||||||
|
readBoolFlag(options.args, "--blocking") || severity === "high" || severity === "critical";
|
||||||
|
|
||||||
|
const id = newId();
|
||||||
|
const ts = new Date().toISOString();
|
||||||
|
const entry = {
|
||||||
|
id,
|
||||||
|
ts,
|
||||||
|
kind,
|
||||||
|
severity,
|
||||||
|
blocking,
|
||||||
|
repoIdentity: "external" as const,
|
||||||
|
sfVersion: "",
|
||||||
|
basePath,
|
||||||
|
occurredIn: {
|
||||||
|
unitType: unitType ?? null,
|
||||||
|
milestone: milestone ?? null,
|
||||||
|
slice: slice ?? null,
|
||||||
|
task: task ?? null,
|
||||||
|
},
|
||||||
|
summary: summary.trim(),
|
||||||
|
evidence,
|
||||||
|
suggestedFix,
|
||||||
|
impactScore,
|
||||||
|
effortEstimate,
|
||||||
|
source: "headless-cli",
|
||||||
|
};
|
||||||
|
|
||||||
|
await loadDb(basePath);
|
||||||
|
const sfDb = (await jiti.import(sfExtensionPath("sf-db/sf-db-self-feedback"), {})) as {
|
||||||
|
insertSelfFeedbackEntry: (e: typeof entry) => void;
|
||||||
|
};
|
||||||
|
sfDb.insertSelfFeedbackEntry(entry);
|
||||||
|
|
||||||
|
emit(options.json, {
|
||||||
|
ok: true,
|
||||||
|
id,
|
||||||
|
ts,
|
||||||
|
kind,
|
||||||
|
severity,
|
||||||
|
blocking,
|
||||||
|
impact_score: impactScore,
|
||||||
|
summary: entry.summary,
|
||||||
|
}, `${id} ${severity.padEnd(8)} ${kind} ${entry.summary}`);
|
||||||
|
return { exitCode: 0 };
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleList(basePath: string, options: FeedbackOptions): Promise<FeedbackResult> {
|
||||||
|
const wantUnresolved = readBoolFlag(options.args, "--unresolved");
|
||||||
|
const severityFilter = readFlag(options.args, "--severity");
|
||||||
|
if (severityFilter && !VALID_SEVERITIES.has(severityFilter)) {
|
||||||
|
process.stderr.write(
|
||||||
|
`[headless] Error: --severity must be one of: low, medium, high, critical (got '${severityFilter}')\n`,
|
||||||
|
);
|
||||||
|
return { exitCode: 2 };
|
||||||
|
}
|
||||||
|
|
||||||
|
await loadDb(basePath);
|
||||||
|
const sfDb = (await jiti.import(sfExtensionPath("sf-db/sf-db-self-feedback"), {})) as {
|
||||||
|
listSelfFeedbackEntries: () => Array<{
|
||||||
|
id: string;
|
||||||
|
ts: string;
|
||||||
|
kind: string;
|
||||||
|
severity: string;
|
||||||
|
blocking: boolean;
|
||||||
|
summary: string;
|
||||||
|
resolvedAt?: string | null;
|
||||||
|
impactScore?: number | null;
|
||||||
|
}>;
|
||||||
|
};
|
||||||
|
let entries = sfDb.listSelfFeedbackEntries();
|
||||||
|
if (wantUnresolved) {
|
||||||
|
entries = entries.filter((e) => !e.resolvedAt);
|
||||||
|
}
|
||||||
|
if (severityFilter) {
|
||||||
|
entries = entries.filter((e) => e.severity === severityFilter);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (options.json) {
|
||||||
|
process.stdout.write(
|
||||||
|
`${JSON.stringify({ ok: true, count: entries.length, entries })}\n`,
|
||||||
|
);
|
||||||
|
return { exitCode: 0 };
|
||||||
|
}
|
||||||
|
if (entries.length === 0) {
|
||||||
|
process.stdout.write("(no self-feedback entries match)\n");
|
||||||
|
return { exitCode: 0 };
|
||||||
|
}
|
||||||
|
for (const e of entries) {
|
||||||
|
const state = e.resolvedAt ? "RESOLVED" : " ";
|
||||||
|
process.stdout.write(
|
||||||
|
`${e.id} ${state} ${e.severity.padEnd(8)} ${e.kind.padEnd(24)} ${e.summary}\n`,
|
||||||
|
);
|
||||||
|
}
|
||||||
|
return { exitCode: 0 };
|
||||||
|
}
|
||||||
|
|
||||||
|
async function handleResolve(basePath: string, options: FeedbackOptions): Promise<FeedbackResult> {
|
||||||
|
const positional = options.args.filter(
|
||||||
|
(a, i, all) => !a.startsWith("--") && (i === 0 || !all[i - 1].startsWith("--")),
|
||||||
|
);
|
||||||
|
const id = positional[0];
|
||||||
|
if (!id) {
|
||||||
|
process.stderr.write(
|
||||||
|
"[headless] Error: feedback resolve requires an id positional (e.g. sf-mp4xxx-yyy)\n",
|
||||||
|
);
|
||||||
|
return { exitCode: 2 };
|
||||||
|
}
|
||||||
|
const reason = readFlag(options.args, "--reason") ?? "";
|
||||||
|
const evidenceKind = readFlag(options.args, "--evidence-kind") ?? "human-clear";
|
||||||
|
|
||||||
|
await loadDb(basePath);
|
||||||
|
const sfDb = (await jiti.import(sfExtensionPath("sf-db/sf-db-self-feedback"), {})) as {
|
||||||
|
resolveSelfFeedbackEntry: (
|
||||||
|
id: string,
|
||||||
|
resolution: {
|
||||||
|
reason: string;
|
||||||
|
evidence: { kind: string };
|
||||||
|
resolvedBySfVersion?: string;
|
||||||
|
resolvedAt?: string;
|
||||||
|
},
|
||||||
|
) => boolean;
|
||||||
|
};
|
||||||
|
const ok = sfDb.resolveSelfFeedbackEntry(id, {
|
||||||
|
reason,
|
||||||
|
evidence: { kind: evidenceKind },
|
||||||
|
resolvedBySfVersion: "",
|
||||||
|
});
|
||||||
|
if (!ok) {
|
||||||
|
// Either id not found OR already resolved. The DB primitive returns
|
||||||
|
// false for both — surface a 1-line note rather than failing hard,
|
||||||
|
// since "already resolved" is the idempotent path.
|
||||||
|
emit(options.json, {
|
||||||
|
ok: false,
|
||||||
|
idempotent: true,
|
||||||
|
id,
|
||||||
|
note: "no row updated (already resolved, or id not found)",
|
||||||
|
}, `${id}: nothing to resolve (already resolved, or id not found)`);
|
||||||
|
return { exitCode: 0 };
|
||||||
|
}
|
||||||
|
emit(options.json, {
|
||||||
|
ok: true,
|
||||||
|
id,
|
||||||
|
resolved_at: new Date().toISOString(),
|
||||||
|
evidence_kind: evidenceKind,
|
||||||
|
reason,
|
||||||
|
}, `${id}: resolved (${evidenceKind})`);
|
||||||
|
return { exitCode: 0 };
|
||||||
|
}
|
||||||
|
|
||||||
|
export async function handleFeedback(
|
||||||
|
basePath: string,
|
||||||
|
options: FeedbackOptions,
|
||||||
|
): Promise<FeedbackResult> {
|
||||||
|
switch (options.subcommand) {
|
||||||
|
case "add":
|
||||||
|
return handleAdd(basePath, options);
|
||||||
|
case "list":
|
||||||
|
return handleList(basePath, options);
|
||||||
|
case "resolve":
|
||||||
|
return handleResolve(basePath, options);
|
||||||
|
default:
|
||||||
|
process.stderr.write(
|
||||||
|
`[headless] Error: feedback subcommand must be add|list|resolve (got '${options.subcommand}')\n`,
|
||||||
|
);
|
||||||
|
return { exitCode: 2 };
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
@ -833,6 +833,25 @@ async function runHeadlessOnce(
|
||||||
return { exitCode: result.exitCode, interrupted: false, timedOut: false };
|
return { exitCode: result.exitCode, interrupted: false, timedOut: false };
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Operator self-feedback CLI: add/list/resolve self_feedback rows
|
||||||
|
// without going through SQL. Same RPC-child-bypass pattern.
|
||||||
|
if (options.command === "feedback") {
|
||||||
|
const sub = options.commandArgs[0];
|
||||||
|
if (sub !== "add" && sub !== "list" && sub !== "resolve") {
|
||||||
|
process.stderr.write(
|
||||||
|
"[headless] Error: feedback subcommand must be one of: add, list, resolve\n",
|
||||||
|
);
|
||||||
|
return { exitCode: 2, interrupted: false, timedOut: false };
|
||||||
|
}
|
||||||
|
const { handleFeedback } = await import("./headless-feedback.js");
|
||||||
|
const result = await handleFeedback(process.cwd(), {
|
||||||
|
subcommand: sub,
|
||||||
|
args: options.commandArgs.slice(1),
|
||||||
|
json: options.json,
|
||||||
|
});
|
||||||
|
return { exitCode: result.exitCode, interrupted: false, timedOut: false };
|
||||||
|
}
|
||||||
|
|
||||||
// UOK gate health: `sf headless status uok [--json]`
|
// UOK gate health: `sf headless status uok [--json]`
|
||||||
// Bypasses the RPC path for instant, TTY-independent gate health output.
|
// Bypasses the RPC path for instant, TTY-independent gate health output.
|
||||||
if (options.command === "status" && options.commandArgs[0] === "uok") {
|
if (options.command === "status" && options.commandArgs[0] === "uok") {
|
||||||
|
|
|
||||||
|
|
@ -259,6 +259,9 @@ const SUBCOMMAND_HELP: Record<string, string> = {
|
||||||
" complete-slice Mark a slice complete out-of-band: complete-slice <M>/<S> [--reason <txt>]",
|
" complete-slice Mark a slice complete out-of-band: complete-slice <M>/<S> [--reason <txt>]",
|
||||||
" skip-slice Mark a slice skipped out-of-band (placeholder/migration cleanup)",
|
" skip-slice Mark a slice skipped out-of-band (placeholder/migration cleanup)",
|
||||||
" complete-milestone Mark a milestone complete out-of-band: complete-milestone <M> [--reason <txt>]",
|
" complete-milestone Mark a milestone complete out-of-band: complete-milestone <M> [--reason <txt>]",
|
||||||
|
" feedback add File a self_feedback entry (--summary <txt> --severity low|medium|high|critical [--kind <k>] [--evidence <t>] [--suggested-fix <t>] [--milestone M --slice S --task T] [--impact-score N] [--effort-estimate N] [--blocking])",
|
||||||
|
" feedback list List self_feedback entries [--unresolved] [--severity <s>] [--json]",
|
||||||
|
" feedback resolve Resolve an entry: feedback resolve <id> --reason <txt> [--evidence-kind human-clear|agent-fix|...]",
|
||||||
"",
|
"",
|
||||||
"new-milestone flags:",
|
"new-milestone flags:",
|
||||||
" --context <path> Path to spec/PRD file (use '-' for stdin)",
|
" --context <path> Path to spec/PRD file (use '-' for stdin)",
|
||||||
|
|
@ -298,6 +301,9 @@ const SUBCOMMAND_HELP: Record<string, string> = {
|
||||||
" sf headless complete-slice M010/S03 Flip M010/S03 to status=complete (idempotent)",
|
" sf headless complete-slice M010/S03 Flip M010/S03 to status=complete (idempotent)",
|
||||||
" sf headless skip-slice M003/S01 --reason \"migration placeholder\" Mark placeholder slice skipped",
|
" sf headless skip-slice M003/S01 --reason \"migration placeholder\" Mark placeholder slice skipped",
|
||||||
" sf headless complete-milestone M010 Flip milestone to status=complete",
|
" sf headless complete-milestone M010 Flip milestone to status=complete",
|
||||||
|
" sf headless feedback add --severity high --summary \"30K truncate drops the why\" File self-feedback",
|
||||||
|
" sf headless feedback list --unresolved Pending self-feedback entries",
|
||||||
|
" sf headless feedback resolve sf-mp4xxx --reason \"shipped in 7b85a6\" Resolve an entry",
|
||||||
"",
|
"",
|
||||||
"Exit codes: 0 = success, 1 = error/timeout, 10 = blocked, 11 = cancelled",
|
"Exit codes: 0 = success, 1 = error/timeout, 10 = blocked, 11 = cancelled",
|
||||||
].join("\n"),
|
].join("\n"),
|
||||||
|
|
|
||||||
|
|
@ -23,18 +23,32 @@ import {
|
||||||
import { logWarning } from "./workflow-logger.js";
|
import { logWarning } from "./workflow-logger.js";
|
||||||
import { sfHome } from "./sf-home.js";
|
import { sfHome } from "./sf-home.js";
|
||||||
|
|
||||||
/** Read the llm-gateway entry from ~/.sf/agent/auth.json if present. */
|
/** Read the llm-gateway entry from ~/.sf/agent/auth.json if present.
|
||||||
|
* Falls back to SF_LLM_GATEWAY_KEY env var when auth.json is missing
|
||||||
|
* or has no key — enables CI and containerised deployments without
|
||||||
|
* writing secrets to disk. */
|
||||||
function readGatewayFromAuthJson() {
|
function readGatewayFromAuthJson() {
|
||||||
try {
|
try {
|
||||||
const authPath = join(sfHome(), "agent", "auth.json");
|
const authPath = join(sfHome(), "agent", "auth.json");
|
||||||
if (!existsSync(authPath)) return null;
|
if (existsSync(authPath)) {
|
||||||
const data = JSON.parse(readFileSync(authPath, "utf8"));
|
const data = JSON.parse(readFileSync(authPath, "utf8"));
|
||||||
const entry = data["llm-gateway"];
|
const entry = data["llm-gateway"];
|
||||||
if (!entry?.key) return null;
|
if (entry?.key) {
|
||||||
return { key: entry.key, url: entry.url || null };
|
return { key: entry.key, url: entry.url || null, source: "auth.json:llm-gateway" };
|
||||||
|
}
|
||||||
|
}
|
||||||
} catch {
|
} catch {
|
||||||
return null;
|
/* fall through to env */
|
||||||
}
|
}
|
||||||
|
const envKey = process.env.SF_LLM_GATEWAY_KEY?.trim();
|
||||||
|
if (envKey) {
|
||||||
|
return {
|
||||||
|
key: envKey,
|
||||||
|
url: process.env.SF_LLM_GATEWAY_URL?.trim() || null,
|
||||||
|
source: "env:SF_LLM_GATEWAY_KEY",
|
||||||
|
};
|
||||||
|
}
|
||||||
|
return null;
|
||||||
}
|
}
|
||||||
|
|
||||||
// ─── Gateway config ──────────────────────────────────────────────────────────
|
// ─── Gateway config ──────────────────────────────────────────────────────────
|
||||||
|
|
@ -61,8 +75,8 @@ export function loadGatewayConfigFromEnv() {
|
||||||
return {
|
return {
|
||||||
url: fromAuth.url ?? "https://llm-gateway.centralcloud.com/v1",
|
url: fromAuth.url ?? "https://llm-gateway.centralcloud.com/v1",
|
||||||
apiKey: fromAuth.key,
|
apiKey: fromAuth.key,
|
||||||
keySource: "auth.json:llm-gateway",
|
keySource: fromAuth.source ?? "auth.json:llm-gateway",
|
||||||
urlSource: fromAuth.url ? "auth.json:llm-gateway" : "default",
|
urlSource: fromAuth.url ? (fromAuth.source ?? "auth.json:llm-gateway") : "default",
|
||||||
embeddingModel,
|
embeddingModel,
|
||||||
rerankModel,
|
rerankModel,
|
||||||
queryInstruction,
|
queryInstruction,
|
||||||
|
|
|
||||||
|
|
@ -10,6 +10,8 @@ const ENV_KEYS = [
|
||||||
"SF_LLM_GATEWAY_EMBED_MODEL",
|
"SF_LLM_GATEWAY_EMBED_MODEL",
|
||||||
"SF_LLM_GATEWAY_RERANK_MODEL",
|
"SF_LLM_GATEWAY_RERANK_MODEL",
|
||||||
"SF_LLM_GATEWAY_EMBED_QUERY_INSTRUCTION",
|
"SF_LLM_GATEWAY_EMBED_QUERY_INSTRUCTION",
|
||||||
|
"SF_LLM_GATEWAY_KEY",
|
||||||
|
"SF_LLM_GATEWAY_URL",
|
||||||
];
|
];
|
||||||
|
|
||||||
let originalHome;
|
let originalHome;
|
||||||
|
|
@ -32,16 +34,50 @@ function writeAuthJson(entry) {
|
||||||
writeFileSync(authPath, JSON.stringify({ "llm-gateway": entry }));
|
writeFileSync(authPath, JSON.stringify({ "llm-gateway": entry }));
|
||||||
}
|
}
|
||||||
|
|
||||||
test("loadGatewayConfigFromEnv returns null when auth.json has no llm-gateway entry", () => {
|
test("loadGatewayConfigFromEnv returns null when auth.json has no llm-gateway entry and no env key", () => {
|
||||||
const authPath = join(tmpHome, ".sf", "agent", "auth.json");
|
const authPath = join(tmpHome, ".sf", "agent", "auth.json");
|
||||||
writeFileSync(authPath, JSON.stringify({}));
|
writeFileSync(authPath, JSON.stringify({}));
|
||||||
|
delete process.env.SF_LLM_GATEWAY_KEY;
|
||||||
assert.equal(loadGatewayConfigFromEnv(), null);
|
assert.equal(loadGatewayConfigFromEnv(), null);
|
||||||
});
|
});
|
||||||
|
|
||||||
test("loadGatewayConfigFromEnv returns null when auth.json does not exist", () => {
|
test("loadGatewayConfigFromEnv returns null when auth.json does not exist and no env key", () => {
|
||||||
|
delete process.env.SF_LLM_GATEWAY_KEY;
|
||||||
assert.equal(loadGatewayConfigFromEnv(), null);
|
assert.equal(loadGatewayConfigFromEnv(), null);
|
||||||
});
|
});
|
||||||
|
|
||||||
|
test("loadGatewayConfigFromEnv falls back to SF_LLM_GATEWAY_KEY env var when auth.json missing", () => {
|
||||||
|
process.env.SF_LLM_GATEWAY_KEY = "env-key-123";
|
||||||
|
process.env.SF_LLM_GATEWAY_URL = "https://env.example/v1";
|
||||||
|
const cfg = loadGatewayConfigFromEnv();
|
||||||
|
assert.equal(cfg.apiKey, "env-key-123");
|
||||||
|
assert.equal(cfg.url, "https://env.example/v1");
|
||||||
|
assert.equal(cfg.keySource, "env:SF_LLM_GATEWAY_KEY");
|
||||||
|
assert.equal(cfg.urlSource, "env:SF_LLM_GATEWAY_KEY");
|
||||||
|
});
|
||||||
|
|
||||||
|
test("loadGatewayConfigFromEnv falls back to SF_LLM_GATEWAY_KEY env var when auth.json has no key", () => {
|
||||||
|
const authPath = join(tmpHome, ".sf", "agent", "auth.json");
|
||||||
|
writeFileSync(authPath, JSON.stringify({}));
|
||||||
|
process.env.SF_LLM_GATEWAY_KEY = "env-key-456";
|
||||||
|
delete process.env.SF_LLM_GATEWAY_URL;
|
||||||
|
const cfg = loadGatewayConfigFromEnv();
|
||||||
|
assert.equal(cfg.apiKey, "env-key-456");
|
||||||
|
assert.equal(cfg.url, "https://llm-gateway.centralcloud.com/v1");
|
||||||
|
assert.equal(cfg.keySource, "env:SF_LLM_GATEWAY_KEY");
|
||||||
|
assert.equal(cfg.urlSource, "default");
|
||||||
|
});
|
||||||
|
|
||||||
|
test("loadGatewayConfigFromEnv prefers auth.json over env var when both present", () => {
|
||||||
|
writeAuthJson({ key: "auth-key", url: "https://auth.example/v1", type: "api_key" });
|
||||||
|
process.env.SF_LLM_GATEWAY_KEY = "env-key-789";
|
||||||
|
process.env.SF_LLM_GATEWAY_URL = "https://env.example/v1";
|
||||||
|
const cfg = loadGatewayConfigFromEnv();
|
||||||
|
assert.equal(cfg.apiKey, "auth-key");
|
||||||
|
assert.equal(cfg.url, "https://auth.example/v1");
|
||||||
|
assert.equal(cfg.keySource, "auth.json:llm-gateway");
|
||||||
|
});
|
||||||
|
|
||||||
test("loadGatewayConfigFromEnv reads key and url from auth.json", () => {
|
test("loadGatewayConfigFromEnv reads key and url from auth.json", () => {
|
||||||
writeAuthJson({
|
writeAuthJson({
|
||||||
key: "auth-key",
|
key: "auth-key",
|
||||||
|
|
|
||||||
76
src/tests/headless-feedback.test.ts
Normal file
76
src/tests/headless-feedback.test.ts
Normal file
|
|
@ -0,0 +1,76 @@
|
||||||
|
/**
|
||||||
|
* Smoke test for `sf headless feedback add|list|resolve`.
|
||||||
|
*
|
||||||
|
* Mirrors the headless-mark-state.test.ts pattern: source-level
|
||||||
|
* regex assertions on dispatch wiring and help text. Live-DB
|
||||||
|
* integration is dogfooded by inserting real entries against the
|
||||||
|
* SF repo's own .sf/sf.db (see commit message).
|
||||||
|
*/
|
||||||
|
|
||||||
|
import assert from "node:assert/strict";
|
||||||
|
import { readFileSync } from "node:fs";
|
||||||
|
import { dirname, join } from "node:path";
|
||||||
|
import { fileURLToPath } from "node:url";
|
||||||
|
import { test } from "vitest";
|
||||||
|
|
||||||
|
const __dirname = dirname(fileURLToPath(import.meta.url));
|
||||||
|
const headlessSrc = readFileSync(join(__dirname, "..", "headless.ts"), "utf-8");
|
||||||
|
const helpSrc = readFileSync(join(__dirname, "..", "help-text.ts"), "utf-8");
|
||||||
|
const handlerSrc = readFileSync(
|
||||||
|
join(__dirname, "..", "headless-feedback.ts"),
|
||||||
|
"utf-8",
|
||||||
|
);
|
||||||
|
|
||||||
|
test("headless.ts dispatches feedback command to handleFeedback", () => {
|
||||||
|
assert.match(
|
||||||
|
headlessSrc,
|
||||||
|
/options\.command === "feedback"/,
|
||||||
|
"feedback command must be dispatched",
|
||||||
|
);
|
||||||
|
assert.match(
|
||||||
|
headlessSrc,
|
||||||
|
/import\("\.\/headless-feedback\.js"\)/,
|
||||||
|
"headless.ts must import the new handler",
|
||||||
|
);
|
||||||
|
assert.match(
|
||||||
|
headlessSrc,
|
||||||
|
/feedback subcommand must be one of: add, list, resolve/,
|
||||||
|
"unknown subcommand must surface the readable error",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("help text lists the three feedback subcommands", () => {
|
||||||
|
assert.match(helpSrc, /feedback add\s+File a self_feedback entry/);
|
||||||
|
assert.match(helpSrc, /feedback list\s+List self_feedback entries/);
|
||||||
|
assert.match(helpSrc, /feedback resolve\s+Resolve an entry/);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("handler enforces required fields and severity values", () => {
|
||||||
|
assert.match(
|
||||||
|
handlerSrc,
|
||||||
|
/feedback add requires --summary <text>/,
|
||||||
|
"missing summary error must be user-readable",
|
||||||
|
);
|
||||||
|
assert.match(
|
||||||
|
handlerSrc,
|
||||||
|
/--severity must be one of: low, medium, high, critical/,
|
||||||
|
"invalid severity error must enumerate the allowed values",
|
||||||
|
);
|
||||||
|
assert.match(
|
||||||
|
handlerSrc,
|
||||||
|
/feedback resolve requires an id positional/,
|
||||||
|
"missing resolve id must be user-readable",
|
||||||
|
);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("resolve is idempotent on already-resolved targets", () => {
|
||||||
|
assert.match(handlerSrc, /idempotent: true/);
|
||||||
|
assert.match(handlerSrc, /already resolved, or id not found/);
|
||||||
|
});
|
||||||
|
|
||||||
|
test("add path defaults blocking from severity, doesn't require it", () => {
|
||||||
|
// readBoolFlag(--blocking) OR severity === high|critical → blocking=true.
|
||||||
|
// The behaviour is documented in self-feedback.js (deriveBlocking),
|
||||||
|
// mirror it so operator-filed entries have consistent semantics.
|
||||||
|
assert.match(handlerSrc, /severity === "high" \|\| severity === "critical"/);
|
||||||
|
});
|
||||||
Loading…
Add table
Reference in a new issue