sf snapshot: pre-dispatch, uncommitted changes after 41m inactivity

Mikael Hugo 2026-05-04 21:59:01 +02:00
parent b8a5a01de4
commit 4053819854
43 changed files with 3436 additions and 3103 deletions

View file

@ -1,6 +1,6 @@
{
"last_session_id": "67e970c5-7790-4d38-ba0b-527b9f349c49",
"last_event_key": "67e970c5-7790-4d38-ba0b-527b9f349c49:transcript:70f7463d95fcfa9de1ead358c8fab10cd302abfc43cc274eb68fa952a0c97675",
"last_event_key": "67e970c5-7790-4d38-ba0b-527b9f349c49:transcript:01389baa63d7cd14460c1725484e72f23651a4b02cc12b87f3b6f1bf6043a8d0",
"last_prompted_session_id": "",
"last_reason": "short-session",
"last_prompted_at": "",
@ -8,5 +8,5 @@
"last_actionable_message_count": 0,
"deep_interview_lock_active": false,
"deep_interview_lock_source": "/home/mhugo/code/singularity-forge/.omg/state/deep-interview.json",
"updated_at": "2026-05-04T17:09:50.283Z"
"updated_at": "2026-05-04T19:57:31.227Z"
}

View file

@ -9,11 +9,17 @@ node_modules/**
**/__pycache__/**
*.pyc
*.egg-info/**
build/**
dist/**
target/**
vendor/**
coverage/**
**/build/**
**/dist/**
**/target/**
**/vendor/**
**/coverage/**
.cache/**
tmp/**
*.log
dist-test/**
packages/*/dist/**
packages/*/target/**
rust-engine/target/**
rust-engine/addon/*.node
**/tsconfig.tsbuildinfo
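The rewritten patterns change scope, not just style: a rooted glob like dist/** only ignores a top-level dist/ directory, while **/dist/** ignores a dist/ directory at any depth (with monorepo paths like packages/*/dist/** kept as explicit extras). A minimal sketch of the difference, assuming the globset crate; the actual ignore-file compiler here may use different options:

use globset::Glob;

fn main() {
    let rooted = Glob::new("dist/**").unwrap().compile_matcher();
    let anywhere = Glob::new("**/dist/**").unwrap().compile_matcher();
    let nested = "packages/core/dist/index.js";
    assert!(!rooted.is_match(nested)); // rooted form misses nested dist/ dirs
    assert!(anywhere.is_match(nested)); // **/ prefix matches at any depth
}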

View file

@ -1,8 +1,13 @@
//! AST-aware structural search and rewrite powered by ast-grep.
use std::{collections::{BTreeMap, BTreeSet, HashMap}, path::{Path, PathBuf}};
use std::{
collections::{BTreeMap, BTreeSet, HashMap},
path::{Path, PathBuf},
};
use ast_grep_core::{Language, MatchStrictness, matcher::Pattern, source::Edit, tree_sitter::LanguageExt};
use ast_grep_core::{
matcher::Pattern, source::Edit, tree_sitter::LanguageExt, Language, MatchStrictness,
};
use ignore::WalkBuilder;
use napi::bindgen_prelude::*;
use napi_derive::napi;
@ -81,7 +86,9 @@ pub struct AstReplaceOptions {
#[napi(object)]
pub struct AstReplaceChange {
pub path: String, pub before: String, pub after: String,
pub path: String,
pub before: String,
pub after: String,
#[napi(js_name = "byteStart")]
pub byte_start: u32,
#[napi(js_name = "byteEnd")]
@ -99,7 +106,10 @@ pub struct AstReplaceChange {
}
#[napi(object)]
pub struct AstReplaceFileChange { pub path: String, pub count: u32 }
pub struct AstReplaceFileChange {
pub path: String,
pub count: u32,
}
#[napi(object)]
pub struct AstReplaceResult {
@ -119,9 +129,17 @@ pub struct AstReplaceResult {
pub parse_errors: Option<Vec<String>>,
}
struct FileCandidate { absolute_path: PathBuf, display_path: String }
struct PendingFileChange { change: AstReplaceChange, edit: Edit<String> }
fn to_u32(value: usize) -> u32 { value.min(u32::MAX as usize) as u32 }
struct FileCandidate {
absolute_path: PathBuf,
display_path: String,
}
struct PendingFileChange {
change: AstReplaceChange,
edit: Edit<String>,
}
fn to_u32(value: usize) -> u32 {
value.min(u32::MAX as usize) as u32
}
static LANG_ALIASES: phf::Map<&'static str, SupportLang> = phf::phf_map! {
"bash" => SupportLang::Bash, "sh" => SupportLang::Bash,
@ -156,20 +174,38 @@ static LANG_ALIASES: phf::Map<&'static str, SupportLang> = phf::phf_map! {
"yaml" => SupportLang::Yaml, "yml" => SupportLang::Yaml, "zig" => SupportLang::Zig,
};
fn supported_lang_list() -> String { let mut keys: Vec<&str> = LANG_ALIASES.keys().copied().collect(); keys.sort_unstable(); keys.join(", ") }
fn supported_lang_list() -> String {
let mut keys: Vec<&str> = LANG_ALIASES.keys().copied().collect();
keys.sort_unstable();
keys.join(", ")
}
fn resolve_supported_lang(value: &str) -> Result<SupportLang> {
let lower = value.to_ascii_lowercase();
LANG_ALIASES.get(lower.as_str()).copied().ok_or_else(|| Error::from_reason(format!("Unsupported language '{value}'. Supported: {}", supported_lang_list())))
LANG_ALIASES.get(lower.as_str()).copied().ok_or_else(|| {
Error::from_reason(format!(
"Unsupported language '{value}'. Supported: {}",
supported_lang_list()
))
})
}
fn resolve_language(lang: Option<&str>, file_path: &Path) -> Result<SupportLang> {
if let Some(lang) = lang.map(str::trim).filter(|l| !l.is_empty()) { return resolve_supported_lang(lang); }
SupportLang::from_path(file_path).ok_or_else(|| Error::from_reason(format!("Unable to infer language from file extension: {}. Specify `lang` explicitly.", file_path.display())))
if let Some(lang) = lang.map(str::trim).filter(|l| !l.is_empty()) {
return resolve_supported_lang(lang);
}
SupportLang::from_path(file_path).ok_or_else(|| {
Error::from_reason(format!(
"Unable to infer language from file extension: {}. Specify `lang` explicitly.",
file_path.display()
))
})
}
fn is_supported_file(file_path: &Path, explicit_lang: Option<&str>) -> bool {
if explicit_lang.is_some() { return true; }
if explicit_lang.is_some() {
return true;
}
resolve_language(None, file_path).is_ok()
}
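Taken together, these helpers give an explicit lang argument priority over the file extension, and only fail when neither yields a supported language. A small illustration of that precedence, using the functions defined above:

fn precedence_demo() {
    let rs = Path::new("src/main.rs");
    assert!(resolve_language(Some("ts"), rs).is_ok()); // explicit lang wins, even against .rs
    assert!(resolve_language(None, rs).is_ok()); // otherwise infer from the extension
    assert!(resolve_language(None, Path::new("notes.xyz")).is_err()); // neither: caller must pass `lang`
}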
@ -178,55 +214,130 @@ fn infer_single_replace_lang(candidates: &[FileCandidate]) -> Result<String> {
let mut unresolved = Vec::new();
for c in candidates {
match resolve_language(None, &c.absolute_path) {
Ok(l) => { inferred.insert(l.canonical_name().to_string()); },
Ok(l) => {
inferred.insert(l.canonical_name().to_string());
}
Err(e) => unresolved.push(format!("{}: {}", c.display_path, e)),
}
}
if !unresolved.is_empty() { return Err(Error::from_reason(format!("`lang` is required for ast_edit when language cannot be inferred from all files:\n{}", unresolved.into_iter().map(|e| format!("- {e}")).collect::<Vec<_>>().join("\n")))); }
if inferred.is_empty() { return Err(Error::from_reason("`lang` is required for ast_edit when no files match path/glob".to_string())); }
if inferred.len() > 1 { return Err(Error::from_reason(format!("`lang` is required for ast_edit when path/glob resolves to multiple languages: {}", inferred.into_iter().collect::<Vec<_>>().join(", ")))); }
if !unresolved.is_empty() {
return Err(Error::from_reason(format!(
"`lang` is required for ast_edit when language cannot be inferred from all files:\n{}",
unresolved
.into_iter()
.map(|e| format!("- {e}"))
.collect::<Vec<_>>()
.join("\n")
)));
}
if inferred.is_empty() {
return Err(Error::from_reason(
"`lang` is required for ast_edit when no files match path/glob".to_string(),
));
}
if inferred.len() > 1 {
return Err(Error::from_reason(format!(
"`lang` is required for ast_edit when path/glob resolves to multiple languages: {}",
inferred.into_iter().collect::<Vec<_>>().join(", ")
)));
}
Ok(inferred.into_iter().next().unwrap())
}
fn parse_strictness(value: Option<&str>) -> Result<MatchStrictness> {
let Some(raw) = value.map(str::trim).filter(|v| !v.is_empty()) else { return Ok(MatchStrictness::Smart) };
raw.parse::<MatchStrictness>().map_err(|e| Error::from_reason(format!("Invalid strictness '{raw}': {e}")))
let Some(raw) = value.map(str::trim).filter(|v| !v.is_empty()) else {
return Ok(MatchStrictness::Smart);
};
raw.parse::<MatchStrictness>()
.map_err(|e| Error::from_reason(format!("Invalid strictness '{raw}': {e}")))
}
fn normalize_search_path(path: Option<String>) -> Result<PathBuf> {
let raw = path.unwrap_or_else(|| ".".into());
let candidate = PathBuf::from(raw.trim());
let absolute = if candidate.is_absolute() { candidate } else { std::env::current_dir().map_err(|e| Error::from_reason(format!("Failed to resolve cwd: {e}")))?.join(candidate) };
let absolute = if candidate.is_absolute() {
candidate
} else {
std::env::current_dir()
.map_err(|e| Error::from_reason(format!("Failed to resolve cwd: {e}")))?
.join(candidate)
};
Ok(std::fs::canonicalize(&absolute).unwrap_or(absolute))
}
fn collect_candidates(path: Option<String>, glob: Option<&str>) -> Result<Vec<FileCandidate>> {
let search_path = normalize_search_path(path)?;
let metadata = std::fs::metadata(&search_path).map_err(|e| Error::from_reason(format!("Path not found: {e}")))?;
let metadata = std::fs::metadata(&search_path)
.map_err(|e| Error::from_reason(format!("Path not found: {e}")))?;
if metadata.is_file() {
let display_path = search_path.file_name().and_then(|n| n.to_str()).map_or_else(|| search_path.to_string_lossy().into_owned(), |s| s.to_string());
return Ok(vec![FileCandidate { absolute_path: search_path, display_path }]);
let display_path = search_path
.file_name()
.and_then(|n| n.to_str())
.map_or_else(
|| search_path.to_string_lossy().into_owned(),
|s| s.to_string(),
);
return Ok(vec![FileCandidate {
absolute_path: search_path,
display_path,
}]);
}
if !metadata.is_dir() {
return Err(Error::from_reason(format!(
"Search path must be a file or directory: {}",
search_path.display()
)));
}
if !metadata.is_dir() { return Err(Error::from_reason(format!("Search path must be a file or directory: {}", search_path.display()))); }
let glob_set = glob_util::try_compile_glob(glob, false)?;
let mentions_node_modules = glob.is_some_and(|v| v.contains("node_modules"));
let walker = WalkBuilder::new(&search_path).hidden(true).git_ignore(true).git_global(true).git_exclude(true).build();
let walker = WalkBuilder::new(&search_path)
.hidden(true)
.git_ignore(true)
.git_global(true)
.git_exclude(true)
.build();
let mut files = Vec::new();
for entry in walker {
let entry = match entry { Ok(e) => e, Err(_) => continue };
if !entry.file_type().is_some_and(|ft| ft.is_file()) { continue; }
let entry = match entry {
Ok(e) => e,
Err(_) => continue,
};
if !entry.file_type().is_some_and(|ft| ft.is_file()) {
continue;
}
let abs = entry.path().to_path_buf();
let relative = abs.strip_prefix(&search_path).map(|p| p.to_string_lossy().replace('\\', "/")).unwrap_or_else(|_| abs.to_string_lossy().into_owned());
if !mentions_node_modules && relative.contains("node_modules") { continue; }
if let Some(ref gs) = glob_set { if !gs.is_match(&relative) { continue; } }
files.push(FileCandidate { absolute_path: abs, display_path: relative });
let relative = abs
.strip_prefix(&search_path)
.map(|p| p.to_string_lossy().replace('\\', "/"))
.unwrap_or_else(|_| abs.to_string_lossy().into_owned());
if !mentions_node_modules && relative.contains("node_modules") {
continue;
}
if let Some(ref gs) = glob_set {
if !gs.is_match(&relative) {
continue;
}
}
files.push(FileCandidate {
absolute_path: abs,
display_path: relative,
});
}
files.sort_by(|a, b| a.display_path.cmp(&b.display_path));
Ok(files)
}
fn compile_pattern(pattern: &str, selector: Option<&str>, strictness: &MatchStrictness, lang: SupportLang) -> Result<Pattern> {
let mut compiled = if let Some(sel) = selector.map(str::trim).filter(|s| !s.is_empty()) { Pattern::contextual(pattern, sel, lang) } else { Pattern::try_new(pattern, lang) }
fn compile_pattern(
pattern: &str,
selector: Option<&str>,
strictness: &MatchStrictness,
lang: SupportLang,
) -> Result<Pattern> {
let mut compiled = if let Some(sel) = selector.map(str::trim).filter(|s| !s.is_empty()) {
Pattern::contextual(pattern, sel, lang)
} else {
Pattern::try_new(pattern, lang)
}
.map_err(|e| Error::from_reason(format!("Invalid pattern: {e}")))?;
compiled.strictness = strictness.clone();
Ok(compiled)
@ -236,153 +347,455 @@ fn apply_edits(content: &str, edits: &[Edit<String>]) -> Result<String> {
let mut sorted: Vec<&Edit<String>> = edits.iter().collect();
sorted.sort_by_key(|e| e.position);
let mut prev_end = 0usize;
for edit in &sorted { if edit.position < prev_end { return Err(Error::from_reason("Overlapping replacements detected".to_string())); } prev_end = edit.position.saturating_add(edit.deleted_length); }
for edit in &sorted {
if edit.position < prev_end {
return Err(Error::from_reason(
"Overlapping replacements detected".to_string(),
));
}
prev_end = edit.position.saturating_add(edit.deleted_length);
}
let mut output = content.to_string();
for edit in sorted.into_iter().rev() {
let start = edit.position; let end = edit.position.saturating_add(edit.deleted_length);
if end > output.len() || start > end { return Err(Error::from_reason("Computed edit range is out of bounds".to_string())); }
let replacement = String::from_utf8(edit.inserted_text.clone()).map_err(|e| Error::from_reason(format!("Replacement text is not valid UTF-8: {e}")))?;
let start = edit.position;
let end = edit.position.saturating_add(edit.deleted_length);
if end > output.len() || start > end {
return Err(Error::from_reason(
"Computed edit range is out of bounds".to_string(),
));
}
let replacement = String::from_utf8(edit.inserted_text.clone())
.map_err(|e| Error::from_reason(format!("Replacement text is not valid UTF-8: {e}")))?;
output.replace_range(start..end, &replacement);
}
Ok(output)
}
fn normalize_pattern_list(patterns: Option<Vec<String>>) -> Result<Vec<String>> {
let mut normalized = Vec::new(); let mut seen = BTreeSet::new();
for raw in patterns.unwrap_or_default() { let p = raw.trim(); if !p.is_empty() && seen.insert(p.to_string()) { normalized.push(p.to_string()); } }
if normalized.is_empty() { return Err(Error::from_reason("`patterns` is required and must include at least one non-empty pattern".to_string())); }
let mut normalized = Vec::new();
let mut seen = BTreeSet::new();
for raw in patterns.unwrap_or_default() {
let p = raw.trim();
if !p.is_empty() && seen.insert(p.to_string()) {
normalized.push(p.to_string());
}
}
if normalized.is_empty() {
return Err(Error::from_reason(
"`patterns` is required and must include at least one non-empty pattern".to_string(),
));
}
Ok(normalized)
}
fn normalize_rewrite_map(rewrites: Option<HashMap<String, String>>) -> Result<Vec<(String, String)>> {
fn normalize_rewrite_map(
rewrites: Option<HashMap<String, String>>,
) -> Result<Vec<(String, String)>> {
let mut normalized = Vec::new();
for (p, r) in rewrites.unwrap_or_default() { if p.is_empty() { return Err(Error::from_reason("`rewrites` keys must be non-empty".to_string())); } normalized.push((p, r)); }
if normalized.is_empty() { return Err(Error::from_reason("`rewrites` is required".to_string())); }
normalized.sort_by(|l, r| l.0.cmp(&r.0)); Ok(normalized)
for (p, r) in rewrites.unwrap_or_default() {
if p.is_empty() {
return Err(Error::from_reason(
"`rewrites` keys must be non-empty".to_string(),
));
}
normalized.push((p, r));
}
if normalized.is_empty() {
return Err(Error::from_reason("`rewrites` is required".to_string()));
}
normalized.sort_by(|l, r| l.0.cmp(&r.0));
Ok(normalized)
}
struct CompiledFindPattern { pattern: String, compiled_by_lang: HashMap<String, Pattern>, compile_errors_by_lang: HashMap<String, String> }
struct ResolvedCandidate { candidate: FileCandidate, language: Option<SupportLang>, language_error: Option<String> }
struct CompiledFindPattern {
pattern: String,
compiled_by_lang: HashMap<String, Pattern>,
compile_errors_by_lang: HashMap<String, String>,
}
struct ResolvedCandidate {
candidate: FileCandidate,
language: Option<SupportLang>,
language_error: Option<String>,
}
fn resolve_candidates_for_find(candidates: Vec<FileCandidate>, lang: Option<&str>) -> Result<(Vec<ResolvedCandidate>, HashMap<String, SupportLang>)> {
let mut resolved = Vec::with_capacity(candidates.len()); let mut languages = HashMap::new();
fn resolve_candidates_for_find(
candidates: Vec<FileCandidate>,
lang: Option<&str>,
) -> Result<(Vec<ResolvedCandidate>, HashMap<String, SupportLang>)> {
let mut resolved = Vec::with_capacity(candidates.len());
let mut languages = HashMap::new();
for candidate in candidates {
match resolve_language(lang, &candidate.absolute_path) {
Ok(language) => { languages.entry(language.canonical_name().to_string()).or_insert(language); resolved.push(ResolvedCandidate { candidate, language: Some(language), language_error: None }); },
Err(err) => resolved.push(ResolvedCandidate { candidate, language: None, language_error: Some(err.to_string()) }),
Ok(language) => {
languages
.entry(language.canonical_name().to_string())
.or_insert(language);
resolved.push(ResolvedCandidate {
candidate,
language: Some(language),
language_error: None,
});
}
Err(err) => resolved.push(ResolvedCandidate {
candidate,
language: None,
language_error: Some(err.to_string()),
}),
}
}
Ok((resolved, languages))
}
fn compile_find_patterns(patterns: &[String], languages: &HashMap<String, SupportLang>, selector: Option<&str>, strictness: &MatchStrictness) -> Result<Vec<CompiledFindPattern>> {
fn compile_find_patterns(
patterns: &[String],
languages: &HashMap<String, SupportLang>,
selector: Option<&str>,
strictness: &MatchStrictness,
) -> Result<Vec<CompiledFindPattern>> {
let mut compiled = Vec::with_capacity(patterns.len());
for pattern in patterns {
let mut by_lang = HashMap::with_capacity(languages.len()); let mut errors = HashMap::new();
for (key, &lang) in languages { match compile_pattern(pattern, selector, strictness, lang) { Ok(p) => { by_lang.insert(key.clone(), p); }, Err(e) => { errors.insert(key.clone(), e.to_string()); } } }
compiled.push(CompiledFindPattern { pattern: pattern.clone(), compiled_by_lang: by_lang, compile_errors_by_lang: errors });
let mut by_lang = HashMap::with_capacity(languages.len());
let mut errors = HashMap::new();
for (key, &lang) in languages {
match compile_pattern(pattern, selector, strictness, lang) {
Ok(p) => {
by_lang.insert(key.clone(), p);
}
Err(e) => {
errors.insert(key.clone(), e.to_string());
}
}
}
compiled.push(CompiledFindPattern {
pattern: pattern.clone(),
compiled_by_lang: by_lang,
compile_errors_by_lang: errors,
});
}
Ok(compiled)
}
#[napi(js_name = "astGrep")]
pub fn ast_grep(options: AstFindOptions) -> Result<AstFindResult> {
let AstFindOptions { patterns, lang, path, glob, selector, strictness, limit, offset, include_meta, context: _ } = options;
let AstFindOptions {
patterns,
lang,
path,
glob,
selector,
strictness,
limit,
offset,
include_meta,
context: _,
} = options;
let normalized_limit = limit.unwrap_or(DEFAULT_FIND_LIMIT).max(1);
let normalized_offset = offset.unwrap_or(0);
let patterns = normalize_pattern_list(patterns)?;
let strictness = parse_strictness(strictness.as_deref())?;
let include_meta = include_meta.unwrap_or(false);
let lang_str = lang.as_deref().map(str::trim).filter(|v| !v.is_empty());
let candidates: Vec<_> = collect_candidates(path, glob.as_deref())?.into_iter().filter(|c| is_supported_file(&c.absolute_path, lang_str)).collect();
let candidates: Vec<_> = collect_candidates(path, glob.as_deref())?
.into_iter()
.filter(|c| is_supported_file(&c.absolute_path, lang_str))
.collect();
let (resolved_candidates, languages) = resolve_candidates_for_find(candidates, lang_str)?;
let compiled_patterns = compile_find_patterns(&patterns, &languages, selector.as_deref(), &strictness)?;
let compiled_patterns =
compile_find_patterns(&patterns, &languages, selector.as_deref(), &strictness)?;
let files_searched = to_u32(resolved_candidates.len());
let mut all_matches = Vec::new(); let mut parse_errors = Vec::new(); let mut total_matches = 0u32; let mut files_with_matches = BTreeSet::new();
let mut all_matches = Vec::new();
let mut parse_errors = Vec::new();
let mut total_matches = 0u32;
let mut files_with_matches = BTreeSet::new();
for resolved in resolved_candidates {
let ResolvedCandidate { candidate, language, language_error } = resolved;
if let Some(error) = language_error.as_deref() { for c in &compiled_patterns { parse_errors.push(format!("{}: {}: {error}", c.pattern, candidate.display_path)); } continue; }
let ResolvedCandidate {
candidate,
language,
language_error,
} = resolved;
if let Some(error) = language_error.as_deref() {
for c in &compiled_patterns {
parse_errors.push(format!(
"{}: {}: {error}",
c.pattern, candidate.display_path
));
}
continue;
}
let Some(language) = language else { continue };
let lang_key = language.canonical_name();
let source = match std::fs::read_to_string(&candidate.absolute_path) { Ok(s) => s, Err(e) => { for c in &compiled_patterns { parse_errors.push(format!("{}: {}: {e}", c.pattern, candidate.display_path)); } continue; } };
let source = match std::fs::read_to_string(&candidate.absolute_path) {
Ok(s) => s,
Err(e) => {
for c in &compiled_patterns {
parse_errors.push(format!("{}: {}: {e}", c.pattern, candidate.display_path));
}
continue;
}
};
let mut runnable: Vec<(&str, &Pattern)> = Vec::new();
for c in &compiled_patterns {
if let Some(e) = c.compile_errors_by_lang.get(lang_key) { parse_errors.push(format!("{}: {}: {e}", c.pattern, candidate.display_path)); continue; }
if let Some(p) = c.compiled_by_lang.get(lang_key) { runnable.push((c.pattern.as_str(), p)); }
if let Some(e) = c.compile_errors_by_lang.get(lang_key) {
parse_errors.push(format!("{}: {}: {e}", c.pattern, candidate.display_path));
continue;
}
if let Some(p) = c.compiled_by_lang.get(lang_key) {
runnable.push((c.pattern.as_str(), p));
}
}
if runnable.is_empty() {
continue;
}
if runnable.is_empty() { continue; }
let ast = language.ast_grep(source);
if ast.root().dfs().any(|node| node.is_error()) { parse_errors.push(format!("{}: parse error (syntax tree contains error nodes)", candidate.display_path)); }
if ast.root().dfs().any(|node| node.is_error()) {
parse_errors.push(format!(
"{}: parse error (syntax tree contains error nodes)",
candidate.display_path
));
}
for (_, pattern) in runnable {
for matched in ast.root().find_all(pattern.clone()) {
total_matches = total_matches.saturating_add(1);
let range = matched.range(); let start = matched.start_pos(); let end = matched.end_pos();
let meta_variables = if include_meta { Some(HashMap::<String, String>::from(matched.get_env().clone())) } else { None };
all_matches.push(AstFindMatch { path: candidate.display_path.clone(), text: matched.text().into_owned(), byte_start: to_u32(range.start), byte_end: to_u32(range.end), start_line: to_u32(start.line().saturating_add(1)), start_column: to_u32(start.column(matched.get_node()).saturating_add(1)), end_line: to_u32(end.line().saturating_add(1)), end_column: to_u32(end.column(matched.get_node()).saturating_add(1)), meta_variables });
let range = matched.range();
let start = matched.start_pos();
let end = matched.end_pos();
let meta_variables = if include_meta {
Some(HashMap::<String, String>::from(matched.get_env().clone()))
} else {
None
};
all_matches.push(AstFindMatch {
path: candidate.display_path.clone(),
text: matched.text().into_owned(),
byte_start: to_u32(range.start),
byte_end: to_u32(range.end),
start_line: to_u32(start.line().saturating_add(1)),
start_column: to_u32(start.column(matched.get_node()).saturating_add(1)),
end_line: to_u32(end.line().saturating_add(1)),
end_column: to_u32(end.column(matched.get_node()).saturating_add(1)),
meta_variables,
});
files_with_matches.insert(candidate.display_path.clone());
}
}
}
all_matches.sort_by(|l, r| l.path.cmp(&r.path).then(l.start_line.cmp(&r.start_line)).then(l.start_column.cmp(&r.start_column)));
let visible: Vec<_> = all_matches.into_iter().skip(normalized_offset as usize).collect();
all_matches.sort_by(|l, r| {
l.path
.cmp(&r.path)
.then(l.start_line.cmp(&r.start_line))
.then(l.start_column.cmp(&r.start_column))
});
let visible: Vec<_> = all_matches
.into_iter()
.skip(normalized_offset as usize)
.collect();
let limit_reached = visible.len() > normalized_limit as usize;
let matches: Vec<_> = visible.into_iter().take(normalized_limit as usize).collect();
Ok(AstFindResult { matches, total_matches, files_with_matches: to_u32(files_with_matches.len()), files_searched, limit_reached, parse_errors: (!parse_errors.is_empty()).then_some(parse_errors) })
let matches: Vec<_> = visible
.into_iter()
.take(normalized_limit as usize)
.collect();
Ok(AstFindResult {
matches,
total_matches,
files_with_matches: to_u32(files_with_matches.len()),
files_searched,
limit_reached,
parse_errors: (!parse_errors.is_empty()).then_some(parse_errors),
})
}
#[napi(js_name = "astEdit")]
pub fn ast_edit(options: AstReplaceOptions) -> Result<AstReplaceResult> {
let AstReplaceOptions { rewrites, lang, path, glob, selector, strictness, dry_run, max_replacements, max_files, fail_on_parse_error } = options;
let AstReplaceOptions {
rewrites,
lang,
path,
glob,
selector,
strictness,
dry_run,
max_replacements,
max_files,
fail_on_parse_error,
} = options;
let rewrite_rules = normalize_rewrite_map(rewrites)?;
let strictness = parse_strictness(strictness.as_deref())?;
let dry_run = dry_run.unwrap_or(true); let max_replacements = max_replacements.unwrap_or(u32::MAX).max(1); let max_files = max_files.unwrap_or(u32::MAX).max(1); let fail_on_parse_error = fail_on_parse_error.unwrap_or(false);
let dry_run = dry_run.unwrap_or(true);
let max_replacements = max_replacements.unwrap_or(u32::MAX).max(1);
let max_files = max_files.unwrap_or(u32::MAX).max(1);
let fail_on_parse_error = fail_on_parse_error.unwrap_or(false);
let lang_str = lang.as_deref().map(str::trim).filter(|v| !v.is_empty());
let candidates: Vec<_> = collect_candidates(path, glob.as_deref())?.into_iter().filter(|c| is_supported_file(&c.absolute_path, lang_str)).collect();
let effective_lang = if let Some(l) = lang_str { l.to_string() } else { infer_single_replace_lang(&candidates)? };
let candidates: Vec<_> = collect_candidates(path, glob.as_deref())?
.into_iter()
.filter(|c| is_supported_file(&c.absolute_path, lang_str))
.collect();
let effective_lang = if let Some(l) = lang_str {
l.to_string()
} else {
infer_single_replace_lang(&candidates)?
};
let language = resolve_supported_lang(&effective_lang)?;
let mut parse_errors = Vec::new(); let mut compiled_rules = Vec::new();
let mut parse_errors = Vec::new();
let mut compiled_rules = Vec::new();
for (pattern, rewrite) in rewrite_rules {
match compile_pattern(&pattern, selector.as_deref(), &strictness, language) { Ok(c) => compiled_rules.push((pattern, rewrite, c)), Err(e) => { if fail_on_parse_error { return Err(e); } parse_errors.push(format!("{pattern}: {e}")); } }
match compile_pattern(&pattern, selector.as_deref(), &strictness, language) {
Ok(c) => compiled_rules.push((pattern, rewrite, c)),
Err(e) => {
if fail_on_parse_error {
return Err(e);
}
if compiled_rules.is_empty() { return Ok(AstReplaceResult { file_changes: vec![], total_replacements: 0, files_touched: 0, files_searched: to_u32(candidates.len()), applied: !dry_run, limit_reached: false, parse_errors: (!parse_errors.is_empty()).then_some(parse_errors), changes: vec![] }); }
let mut changes = Vec::new(); let mut file_counts: BTreeMap<String, u32> = BTreeMap::new(); let mut files_touched = 0u32; let mut limit_reached = false;
parse_errors.push(format!("{pattern}: {e}"));
}
}
}
if compiled_rules.is_empty() {
return Ok(AstReplaceResult {
file_changes: vec![],
total_replacements: 0,
files_touched: 0,
files_searched: to_u32(candidates.len()),
applied: !dry_run,
limit_reached: false,
parse_errors: (!parse_errors.is_empty()).then_some(parse_errors),
changes: vec![],
});
}
let mut changes = Vec::new();
let mut file_counts: BTreeMap<String, u32> = BTreeMap::new();
let mut files_touched = 0u32;
let mut limit_reached = false;
for candidate in &candidates {
let source = match std::fs::read_to_string(&candidate.absolute_path) { Ok(s) => s, Err(e) => { if fail_on_parse_error { return Err(Error::from_reason(format!("{}: {e}", candidate.display_path))); } parse_errors.push(format!("{}: {e}", candidate.display_path)); continue; } };
let source = match std::fs::read_to_string(&candidate.absolute_path) {
Ok(s) => s,
Err(e) => {
if fail_on_parse_error {
return Err(Error::from_reason(format!(
"{}: {e}",
candidate.display_path
)));
}
parse_errors.push(format!("{}: {e}", candidate.display_path));
continue;
}
};
let ast = language.ast_grep(&source);
if ast.root().dfs().any(|n| n.is_error()) { let msg = format!("{}: parse error (syntax tree contains error nodes)", candidate.display_path); if fail_on_parse_error { return Err(Error::from_reason(msg)); } parse_errors.push(msg); continue; }
let mut file_changes = Vec::new(); let mut reached_max = false;
if ast.root().dfs().any(|n| n.is_error()) {
let msg = format!(
"{}: parse error (syntax tree contains error nodes)",
candidate.display_path
);
if fail_on_parse_error {
return Err(Error::from_reason(msg));
}
parse_errors.push(msg);
continue;
}
let mut file_changes = Vec::new();
let mut reached_max = false;
'patterns: for (_pat, rewrite, compiled) in &compiled_rules {
for matched in ast.root().find_all(compiled.clone()) {
if changes.len() + file_changes.len() >= max_replacements as usize { limit_reached = true; reached_max = true; break 'patterns; }
let edit = matched.replace_by(rewrite.as_str()); let range = matched.range(); let start = matched.start_pos(); let end = matched.end_pos();
let after = String::from_utf8(edit.inserted_text.clone()).map_err(|e| Error::from_reason(format!("{}: replacement not valid UTF-8: {e}", candidate.display_path)))?;
file_changes.push(PendingFileChange { change: AstReplaceChange { path: candidate.display_path.clone(), before: matched.text().into_owned(), after, byte_start: to_u32(range.start), byte_end: to_u32(range.end), deleted_length: to_u32(edit.deleted_length), start_line: to_u32(start.line().saturating_add(1)), start_column: to_u32(start.column(matched.get_node()).saturating_add(1)), end_line: to_u32(end.line().saturating_add(1)), end_column: to_u32(end.column(matched.get_node()).saturating_add(1)) }, edit });
if changes.len() + file_changes.len() >= max_replacements as usize {
limit_reached = true;
reached_max = true;
break 'patterns;
}
let edit = matched.replace_by(rewrite.as_str());
let range = matched.range();
let start = matched.start_pos();
let end = matched.end_pos();
let after = String::from_utf8(edit.inserted_text.clone()).map_err(|e| {
Error::from_reason(format!(
"{}: replacement not valid UTF-8: {e}",
candidate.display_path
))
})?;
file_changes.push(PendingFileChange {
change: AstReplaceChange {
path: candidate.display_path.clone(),
before: matched.text().into_owned(),
after,
byte_start: to_u32(range.start),
byte_end: to_u32(range.end),
deleted_length: to_u32(edit.deleted_length),
start_line: to_u32(start.line().saturating_add(1)),
start_column: to_u32(start.column(matched.get_node()).saturating_add(1)),
end_line: to_u32(end.line().saturating_add(1)),
end_column: to_u32(end.column(matched.get_node()).saturating_add(1)),
},
edit,
});
}
}
if file_changes.is_empty() { if reached_max { break; } continue; }
if files_touched >= max_files { limit_reached = true; break; }
if file_changes.is_empty() {
if reached_max {
break;
}
continue;
}
if files_touched >= max_files {
limit_reached = true;
break;
}
files_touched = files_touched.saturating_add(1);
file_counts.insert(candidate.display_path.clone(), to_u32(file_changes.len()));
if !dry_run {
let edits: Vec<Edit<String>> = file_changes.iter().map(|e| Edit { position: e.edit.position, deleted_length: e.edit.deleted_length, inserted_text: e.edit.inserted_text.clone() }).collect();
let edits: Vec<Edit<String>> = file_changes
.iter()
.map(|e| Edit {
position: e.edit.position,
deleted_length: e.edit.deleted_length,
inserted_text: e.edit.inserted_text.clone(),
})
.collect();
let output = apply_edits(&source, &edits)?;
if output != source { std::fs::write(&candidate.absolute_path, output).map_err(|e| Error::from_reason(format!("Failed to write {}: {e}", candidate.display_path)))?; }
if output != source {
std::fs::write(&candidate.absolute_path, output).map_err(|e| {
Error::from_reason(format!("Failed to write {}: {e}", candidate.display_path))
})?;
}
}
changes.extend(file_changes.into_iter().map(|e| e.change));
if reached_max { break; }
if reached_max {
break;
}
let file_changes: Vec<_> = file_counts.into_iter().map(|(p, c)| AstReplaceFileChange { path: p, count: c }).collect();
Ok(AstReplaceResult { file_changes, total_replacements: to_u32(changes.len()), files_touched, files_searched: to_u32(candidates.len()), applied: !dry_run, limit_reached, parse_errors: (!parse_errors.is_empty()).then_some(parse_errors), changes })
}
let file_changes: Vec<_> = file_counts
.into_iter()
.map(|(p, c)| AstReplaceFileChange { path: p, count: c })
.collect();
Ok(AstReplaceResult {
file_changes,
total_replacements: to_u32(changes.len()),
files_touched,
files_searched: to_u32(candidates.len()),
applied: !dry_run,
limit_reached,
parse_errors: (!parse_errors.is_empty()).then_some(parse_errors),
changes,
})
}
#[cfg(test)]
mod tests {
use std::{fs, path::PathBuf, time::{SystemTime, UNIX_EPOCH}};
use super::*;
struct TempTree { root: PathBuf }
impl Drop for TempTree { fn drop(&mut self) { let _ = fs::remove_dir_all(&self.root); } }
use std::{
fs,
path::PathBuf,
time::{SystemTime, UNIX_EPOCH},
};
struct TempTree {
root: PathBuf,
}
impl Drop for TempTree {
fn drop(&mut self) {
let _ = fs::remove_dir_all(&self.root);
}
}
fn make_temp_tree() -> TempTree {
let unique = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos();
let unique = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_nanos();
let root = std::env::temp_dir().join(format!("forge-ast-test-{unique}"));
fs::create_dir_all(root.join("nested")).unwrap();
fs::write(root.join("a.ts"), "const a = 1;\n").unwrap();
@ -391,42 +804,85 @@ mod tests {
}
#[test]
fn resolves_supported_language_aliases() {
assert_eq!(resolve_supported_lang("ts").ok(), Some(SupportLang::TypeScript));
assert_eq!(
resolve_supported_lang("ts").ok(),
Some(SupportLang::TypeScript)
);
assert_eq!(resolve_supported_lang("rs").ok(), Some(SupportLang::Rust));
assert!(resolve_supported_lang("brainfuck").is_err());
}
#[test]
fn applies_non_overlapping_edits() {
let edits = vec![Edit::<String> { position: 6, deleted_length: 6, inserted_text: b"value".to_vec() }, Edit::<String> { position: 15, deleted_length: 2, inserted_text: b"42".to_vec() }];
assert_eq!(apply_edits("const answer = 41;", &edits).unwrap(), "const value = 42;");
let edits = vec![
Edit::<String> {
position: 6,
deleted_length: 6,
inserted_text: b"value".to_vec(),
},
Edit::<String> {
position: 15,
deleted_length: 2,
inserted_text: b"42".to_vec(),
},
];
assert_eq!(
apply_edits("const answer = 41;", &edits).unwrap(),
"const value = 42;"
);
}
#[test]
fn rejects_overlapping_edits() {
let edits = vec![Edit::<String> { position: 1, deleted_length: 3, inserted_text: b"x".to_vec() }, Edit::<String> { position: 2, deleted_length: 1, inserted_text: b"y".to_vec() }];
let edits = vec![
Edit::<String> {
position: 1,
deleted_length: 3,
inserted_text: b"x".to_vec(),
},
Edit::<String> {
position: 2,
deleted_length: 1,
inserted_text: b"y".to_vec(),
},
];
assert!(apply_edits("abcdef", &edits).is_err());
}
#[test]
fn collect_candidates_finds_files() {
let tree = make_temp_tree();
let candidates = collect_candidates(Some(tree.root.to_string_lossy().into_owned()), None).unwrap();
let candidates =
collect_candidates(Some(tree.root.to_string_lossy().into_owned()), None).unwrap();
let paths: Vec<_> = candidates.iter().map(|f| f.display_path.as_str()).collect();
assert!(paths.contains(&"a.ts") && paths.contains(&"nested/b.ts"));
}
#[test]
fn infers_single_replace_lang() {
let tree = make_temp_tree();
let candidates = collect_candidates(Some(tree.root.to_string_lossy().into_owned()), Some("**/*.ts")).unwrap();
assert_eq!(infer_single_replace_lang(&candidates).unwrap(), "typescript");
let candidates = collect_candidates(
Some(tree.root.to_string_lossy().into_owned()),
Some("**/*.ts"),
)
.unwrap();
assert_eq!(
infer_single_replace_lang(&candidates).unwrap(),
"typescript"
);
}
#[test]
fn rejects_mixed_replace_lang() {
let unique = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos();
let unique = SystemTime::now()
.duration_since(UNIX_EPOCH)
.unwrap()
.as_nanos();
let root = std::env::temp_dir().join(format!("forge-ast-mixed-{unique}"));
fs::create_dir_all(&root).unwrap();
fs::write(root.join("a.ts"), "const a = 1;\n").unwrap();
fs::write(root.join("b.rs"), "fn main() {}\n").unwrap();
let candidates = collect_candidates(Some(root.to_string_lossy().into_owned()), None).unwrap();
assert!(infer_single_replace_lang(&candidates).unwrap_err().to_string().contains("multiple languages"));
let candidates =
collect_candidates(Some(root.to_string_lossy().into_owned()), None).unwrap();
assert!(infer_single_replace_lang(&candidates)
.unwrap_err()
.to_string()
.contains("multiple languages"));
let _ = fs::remove_dir_all(&root);
}
}
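One behavior worth spelling out from apply_edits above: edits are sorted by byte position, overlaps are rejected up front, and the splices run right to left so earlier offsets stay valid while later text shifts. A standalone sketch of that invariant with a simplified edit type (not the ast_grep_core::source::Edit used above):

struct SimpleEdit {
    position: usize,
    deleted_length: usize,
    inserted_text: String,
}

fn apply(content: &str, mut edits: Vec<SimpleEdit>) -> Option<String> {
    edits.sort_by_key(|e| e.position);
    let mut prev_end = 0;
    for e in &edits {
        if e.position < prev_end {
            return None; // overlapping replacements
        }
        prev_end = e.position + e.deleted_length;
    }
    let mut out = content.to_string();
    for e in edits.iter().rev() {
        // right-to-left keeps byte offsets stable
        out.replace_range(e.position..e.position + e.deleted_length, &e.inserted_text);
    }
    Some(out)
}

fn main() {
    let edits = vec![
        SimpleEdit { position: 6, deleted_length: 6, inserted_text: "value".into() },
        SimpleEdit { position: 15, deleted_length: 2, inserted_text: "42".into() },
    ];
    assert_eq!(apply("const answer = 41;", edits).unwrap(), "const value = 42;");
}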

View file

@ -8,10 +8,10 @@ mod parsers;
use std::{borrow::Cow, collections::HashMap, fmt, path::Path};
use ast_grep_core::{
Doc, Language, Node,
matcher::{KindMatcher, Pattern, PatternBuilder, PatternError},
meta_var::MetaVariable,
tree_sitter::{LanguageExt, StrDoc, TSLanguage, TSRange},
Doc, Language, Node,
};
/// Implements a stub language (no expando / `pre_process_pattern` needed).
@ -26,8 +26,7 @@ macro_rules! impl_lang {
}
fn field_to_id(&self, field: &str) -> Option<u16> {
self
.get_ts_language()
self.get_ts_language()
.field_id_for_name(field)
.map(|f| f.get())
}
@ -75,8 +74,7 @@ macro_rules! impl_lang_expando {
}
fn field_to_id(&self, field: &str) -> Option<u16> {
self
.get_ts_language()
self.get_ts_language()
.field_id_for_name(field)
.map(|f| f.get())
}
@ -167,8 +165,7 @@ impl Language for Html {
}
fn field_to_id(&self, field: &str) -> Option<u16> {
self
.get_ts_language()
self.get_ts_language()
.field_id_for_name(field)
.map(|f| f.get())
}
@ -240,7 +237,12 @@ fn node_to_range<D: Doc>(node: &Node<D>) -> TSRange {
let end = node.end_pos();
let ep = end.byte_point();
let ep = tree_sitter::Point::new(ep.0, ep.1);
TSRange { start_byte: r.start, end_byte: r.end, start_point: sp, end_point: ep }
TSRange {
start_byte: r.start,
end_byte: r.end,
start_point: sp,
end_point: ep,
}
}
// ── SupportLang enum ────────────────────────────────────────────────────
@ -292,9 +294,9 @@ impl SupportLang {
pub const fn all_langs() -> &'static [Self] {
use SupportLang::*;
&[
Bash, C, Cpp, CSharp, Css, Diff, Elixir, Go, Haskell, Hcl, Html, Java, JavaScript, Json,
Julia, Kotlin, Lua, Make, Markdown, Nix, ObjC, Odin, Php, Python, Regex, Ruby, Rust,
Scala, Solidity, Starlark, Swift, Toml, Tsx, TypeScript, Verilog, Xml, Yaml, Zig,
Bash, C, Cpp, CSharp, Css, Diff, Elixir, Go, Haskell, Hcl, Html, Java, JavaScript,
Json, Julia, Kotlin, Lua, Make, Markdown, Nix, ObjC, Odin, Php, Python, Regex, Ruby,
Rust, Scala, Solidity, Starlark, Swift, Toml, Tsx, TypeScript, Verilog, Xml, Yaml, Zig,
]
}
@ -450,9 +452,9 @@ impl LanguageExt for SupportLang {
const fn extensions(lang: SupportLang) -> &'static [&'static str] {
use SupportLang::*;
match lang {
Bash => {
&["bash", "bats", "cgi", "command", "env", "fcgi", "ksh", "sh", "tmux", "tool", "zsh"]
},
Bash => &[
"bash", "bats", "cgi", "command", "env", "fcgi", "ksh", "sh", "tmux", "tool", "zsh",
],
C => &["c", "h"],
Cpp => &["cc", "hpp", "cpp", "c++", "hh", "cxx", "cu", "ino"],
CSharp => &["cs"],

View file

@ -143,7 +143,11 @@ pub struct DiffResult {
/// - ` N line` for context
/// - ` ... ` for skipped context
#[napi(js_name = "generateDiff")]
pub fn generate_diff(old_content: String, new_content: String, context_lines: Option<u32>) -> DiffResult {
pub fn generate_diff(
old_content: String,
new_content: String,
context_lines: Option<u32>,
) -> DiffResult {
let context = context_lines.unwrap_or(4) as usize;
generate_diff_impl(&old_content, &new_content, context)
}
@ -192,36 +196,59 @@ fn generate_diff_impl(old_content: &str, new_content: &str, context_lines: usize
.iter()
.map(|s| s.to_string())
.collect();
parts.push(Part { tag: PartTag::Equal, lines });
parts.push(Part {
tag: PartTag::Equal,
lines,
});
}
similar::DiffOp::Delete { old_index, old_len, .. } => {
similar::DiffOp::Delete {
old_index, old_len, ..
} => {
let lines: Vec<String> = old_lines[*old_index..*old_index + *old_len]
.iter()
.map(|s| s.to_string())
.collect();
parts.push(Part { tag: PartTag::Removed, lines });
parts.push(Part {
tag: PartTag::Removed,
lines,
});
}
similar::DiffOp::Insert { new_index, new_len, .. } => {
similar::DiffOp::Insert {
new_index, new_len, ..
} => {
let lines: Vec<String> = new_lines[*new_index..*new_index + *new_len]
.iter()
.map(|s| s.to_string())
.collect();
parts.push(Part { tag: PartTag::Added, lines });
parts.push(Part {
tag: PartTag::Added,
lines,
});
}
similar::DiffOp::Replace {
old_index, old_len, new_index, new_len, ..
old_index,
old_len,
new_index,
new_len,
..
} => {
let del_lines: Vec<String> = old_lines[*old_index..*old_index + *old_len]
.iter()
.map(|s| s.to_string())
.collect();
parts.push(Part { tag: PartTag::Removed, lines: del_lines });
parts.push(Part {
tag: PartTag::Removed,
lines: del_lines,
});
let ins_lines: Vec<String> = new_lines[*new_index..*new_index + *new_len]
.iter()
.map(|s| s.to_string())
.collect();
parts.push(Part { tag: PartTag::Added, lines: ins_lines });
parts.push(Part {
tag: PartTag::Added,
lines: ins_lines,
});
}
}
}
@ -274,11 +301,7 @@ fn generate_diff_impl(old_content: &str, new_content: &str, context_lines: usize
}
if skip_start > 0 {
output.push(format!(
" {:>width$} ...",
"",
width = line_num_width
));
output.push(format!(" {:>width$} ...", "", width = line_num_width));
old_line_num += skip_start;
new_line_num += skip_start;
}
@ -291,11 +314,7 @@ fn generate_diff_impl(old_content: &str, new_content: &str, context_lines: usize
}
if skip_end > 0 {
output.push(format!(
" {:>width$} ...",
"",
width = line_num_width
));
output.push(format!(" {:>width$} ...", "", width = line_num_width));
old_line_num += skip_end;
new_line_num += skip_end;
}
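The two skip branches above implement the context elision the doc comment describes: an unchanged run longer than the context window keeps context_lines lines at each edge, emits a single "..." row for the hidden middle, and advances both line counters past it. A reduced sketch of that policy for one equal run (names are illustrative):

fn elide(equal_lines: &[String], context: usize) -> Vec<String> {
    if equal_lines.len() <= 2 * context {
        return equal_lines.to_vec(); // short runs are shown in full
    }
    let mut out = Vec::new();
    out.extend_from_slice(&equal_lines[..context]); // leading context
    out.push("...".to_string()); // stands in for the hidden middle run
    out.extend_from_slice(&equal_lines[equal_lines.len() - context..]); // trailing context
    out
}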

View file

@ -379,9 +379,8 @@ pub fn apply_workspace_edit(
// ── Phase 2: commit ──────────────────────────────────────────────────
let mut file_results: Vec<WorkspaceEditFileResult> = Vec::with_capacity(staged.len());
let mut succeeded = 0usize;
for (final_path, tmp_path, new_bytes, edits_applied) in &staged {
for (succeeded, (final_path, tmp_path, new_bytes, edits_applied)) in staged.iter().enumerate() {
if let Err(e) = fs::rename(tmp_path, final_path) {
// Cleanup remaining staged tmps (including this one if rename failed
// before touching the original).
@ -402,7 +401,6 @@ pub fn apply_workspace_edit(
edits_applied: *edits_applied,
bytes_written: new_bytes.len() as u32,
});
succeeded += 1;
}
// ── fsync parent directories (deduplicated) ──────────────────────────
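The phase-2 loop above is the commit half of a write-temp-then-rename protocol: every file's new bytes were already staged in a temp file, fs::rename atomically swaps each one into place (with cleanup of the remaining temps if any rename fails), and parent directories are fsynced afterwards so the renames survive a crash. A minimal sketch of one file's commit step, assuming Unix filesystem semantics (commit_one is an illustrative name, not this crate's API):

use std::{fs, fs::File, io::Write, path::Path};

fn commit_one(final_path: &Path, bytes: &[u8]) -> std::io::Result<()> {
    let tmp = final_path.with_extension("tmp");
    let mut f = File::create(&tmp)?;
    f.write_all(bytes)?;
    f.sync_all()?; // flush file contents before exposing the path
    fs::rename(&tmp, final_path)?; // atomic replace within one filesystem
    if let Some(dir) = final_path.parent() {
        File::open(dir)?.sync_all()?; // persist the directory entry itself
    }
    Ok(())
}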

View file

@ -225,9 +225,9 @@ fn collect_matches(
let score = score_fuzzy_path(
&entry.path,
is_directory,
&query_lower,
&normalized_query,
&query_chars,
query_lower,
normalized_query,
query_chars,
);
if score == 0 {
continue;

View file

@ -141,22 +141,27 @@ fn parse_frontmatter_map_internal(lines: &[&str]) -> Vec<(String, FmValue)> {
for line in lines {
// Nested object property (4-space indent with key: value)
if line.starts_with("    ") && !line.starts_with("     ") {
if current_array.is_some() && current_obj.is_some() {
if line.starts_with("    ")
&& !line.starts_with("     ")
&& current_array.is_some()
&& current_obj.is_some()
{
let rest = line.trim_start();
if let Some(colon_pos) = rest.find(": ") {
let k = &rest[..colon_pos];
let v = rest[colon_pos + 2..].trim();
if k.chars().all(|c| c.is_alphanumeric() || c == '_') {
current_obj.as_mut().unwrap().push((k.to_string(), v.to_string()));
if let Some(current_obj) = current_obj.as_mut() {
current_obj.push((k.to_string(), v.to_string()));
}
continue;
}
} else if rest.ends_with(':') {
let k = &rest[..rest.len() - 1];
} else if let Some(k) = rest.strip_suffix(':') {
if k.chars().all(|c| c.is_alphanumeric() || c == '_') {
current_obj.as_mut().unwrap().push((k.to_string(), String::new()));
continue;
if let Some(current_obj) = current_obj.as_mut() {
current_obj.push((k.to_string(), String::new()));
}
continue;
}
}
}
@ -187,7 +192,10 @@ fn parse_frontmatter_map_internal(lines: &[&str]) -> Vec<(String, FmValue)> {
}
}
current_array.as_mut().unwrap().push(FmArrayItem::Str(val.to_string()));
current_array
.as_mut()
.unwrap()
.push(FmArrayItem::Str(val.to_string()));
continue;
}
@ -481,8 +489,7 @@ fn parse_roadmap_internal(content: &str) -> NativeRoadmap {
.unwrap_or("")
.to_string();
let sc_section = extract_section_internal(content, "Success Criteria", 2)
.or_else(|| {
let sc_section = extract_section_internal(content, "Success Criteria", 2).or_else(|| {
let idx = content.find("**Success Criteria:**")?;
let rest = &content[idx..];
let next_section = rest.find("\n---");
@ -490,9 +497,7 @@ fn parse_roadmap_internal(content: &str) -> NativeRoadmap {
let first_newline = block.find('\n')?;
Some(block[first_newline + 1..].to_string())
});
let success_criteria = sc_section
.map(|s| parse_bullets(&s))
.unwrap_or_default();
let success_criteria = sc_section.map(|s| parse_bullets(&s)).unwrap_or_default();
let slices = parse_roadmap_slices_internal(content);
let boundary_map = parse_boundary_map_internal(content);
@ -511,7 +516,7 @@ fn parse_roadmap_slices_internal(content: &str) -> Vec<NativeRoadmapSlice> {
Some(idx) => {
let start = idx + "## Slices".len();
let rest = &content[start..];
let rest = rest.trim_start_matches(|c: char| c == '\r' || c == '\n');
let rest = rest.trim_start_matches(['\r', '\n']);
let end = rest.find("\n## ").unwrap_or(rest.len());
rest[..end].trim_end()
}
@ -532,8 +537,8 @@ fn parse_roadmap_slices_internal(content: &str) -> Vec<NativeRoadmapSlice> {
if let Some(ref mut s) = current_slice {
let trimmed = line.trim();
if trimmed.starts_with('>') {
let demo = trimmed[1..].trim();
if let Some(demo) = trimmed.strip_prefix('>') {
let demo = demo.trim();
let demo = if demo.to_lowercase().starts_with("after this:") {
demo["after this:".len()..].trim()
} else {
@ -621,8 +626,7 @@ fn parse_boundary_map_internal(content: &str) -> Vec<NativeBoundaryMapEntry> {
let mut entries = Vec::new();
for (heading, section_content) in h3_sections {
let arrow_pos = heading.find('\u{2192}')
.or_else(|| heading.find("->"));
let arrow_pos = heading.find('\u{2192}').or_else(|| heading.find("->"));
if let Some(pos) = arrow_pos {
let arrow_len = if heading[pos..].starts_with('\u{2192}') {
@ -630,8 +634,16 @@ fn parse_boundary_map_internal(content: &str) -> Vec<NativeBoundaryMapEntry> {
} else {
2
};
let from_slice = heading[..pos].trim().split_whitespace().next().unwrap_or("").to_string();
let to_slice = heading[pos + arrow_len..].trim().split_whitespace().next().unwrap_or("").to_string();
let from_slice = heading[..pos]
.split_whitespace()
.next()
.unwrap_or("")
.to_string();
let to_slice = heading[pos + arrow_len..]
.split_whitespace()
.next()
.unwrap_or("")
.to_string();
let mut produces = String::new();
let mut consumes = String::new();
@ -1057,13 +1069,9 @@ pub fn parse_plan_file(content: String) -> NativePlan {
let id = fm_id.unwrap_or(heading_id);
let goal = extract_bold_field(body, "Goal")
.unwrap_or("")
.to_string();
let goal = extract_bold_field(body, "Goal").unwrap_or("").to_string();
let demo = extract_bold_field(body, "Demo")
.unwrap_or("")
.to_string();
let demo = extract_bold_field(body, "Demo").unwrap_or("").to_string();
let must_haves = extract_section_internal(body, "Must-Haves", 2)
.map(|s| parse_bullets(&s))
@ -1129,10 +1137,7 @@ fn parse_plan_tasks(body: &str) -> Vec<NativeTaskEntry> {
let after_bold = &after_bracket[2 + bold_end + 2..];
let estimate = if let Some(est_start) = after_bold.find("`est:") {
let val_start = est_start + 5;
let val_end = after_bold[val_start..]
.find('`')
.unwrap_or(0)
+ val_start;
let val_end = after_bold[val_start..].find('`').unwrap_or(0) + val_start;
after_bold[val_start..val_end].to_string()
} else {
String::new()
@ -1259,11 +1264,9 @@ pub fn parse_summary_file(content: String) -> NativeSummary {
result
};
let what_happened = extract_section_internal(body, "What Happened", 2)
.unwrap_or_default();
let what_happened = extract_section_internal(body, "What Happened", 2).unwrap_or_default();
let deviations = extract_section_internal(body, "Deviations", 2)
.unwrap_or_default();
let deviations = extract_section_internal(body, "Deviations", 2).unwrap_or_default();
let files_modified = extract_section_internal(body, "Files Created/Modified", 2)
.or_else(|| extract_section_internal(body, "Files Modified", 2))
@ -1327,8 +1330,7 @@ fn parse_summary_frontmatter(fm_map: &[(String, FmValue)]) -> NativeSummaryFront
};
let blocker_str = get_scalar("blocker_discovered");
let blocker_discovered =
blocker_str == "true" || blocker_str == "yes" || blocker_str == "True";
let blocker_discovered = blocker_str == "true" || blocker_str == "yes" || blocker_str == "True";
NativeSummaryFrontmatter {
id: get_scalar("id"),
@ -1359,12 +1361,17 @@ fn parse_files_modified(section: &str) -> Vec<NativeFileModified> {
};
// Parse `path` — description or `path` - description
if text.starts_with('`') {
if let Some(end_tick) = text[1..].find('`') {
let path = text[1..1 + end_tick].to_string();
let rest = text[1 + end_tick + 1..].trim();
let description = if rest.starts_with("—") || rest.starts_with("–") || rest.starts_with('-') {
rest[rest.find(|c: char| c != '—' && c != '–' && c != '-').unwrap_or(rest.len())..].trim().to_string()
if let Some(rest) = text.strip_prefix('`') {
if let Some(end_tick) = rest.find('`') {
let path = rest[..end_tick].to_string();
let rest = rest[end_tick + 1..].trim();
let description =
if rest.starts_with("—") || rest.starts_with("–") || rest.starts_with('-') {
rest[rest
.find(|c: char| c != '—' && c != '–' && c != '-')
.unwrap_or(rest.len())..]
.trim()
.to_string()
} else {
rest.to_string()
};
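A recurring move in the parser cleanups above is replacing manual index slicing (text[1..], &rest[..rest.len() - 1]) with strip_prefix / strip_suffix, which return Option, avoid off-by-one arithmetic, and cannot panic on an empty string or a non-boundary index. A compact before/after:

fn main() {
    let rest = "tags:";
    // Manual form: `rest.len() - 1` underflows (and panics) on an empty string.
    let k_manual = &rest[..rest.len() - 1];
    // Idiomatic form: returns None instead of panicking.
    let k = rest.strip_suffix(':').unwrap();
    assert_eq!(k_manual, k);
}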

View file

@ -36,17 +36,26 @@ fn git_err(context: &str, e: git2::Error) -> Error {
/// Prevents path traversal attacks via patterns like `../../etc/passwd`.
fn validate_path_within_repo(repo_path: &str, file_path: &str) -> Result<std::path::PathBuf> {
let repo_dir = std::fs::canonicalize(repo_path).map_err(|e| {
Error::new(Status::GenericFailure, format!("Failed to canonicalize repo path '{repo_path}': {e}"))
Error::new(
Status::GenericFailure,
format!("Failed to canonicalize repo path '{repo_path}': {e}"),
)
})?;
let full_path = repo_dir.join(file_path);
let canonical = if full_path.exists() {
std::fs::canonicalize(&full_path).map_err(|e| {
Error::new(Status::GenericFailure, format!("Failed to canonicalize path '{file_path}': {e}"))
Error::new(
Status::GenericFailure,
format!("Failed to canonicalize path '{file_path}': {e}"),
)
})?
} else if let Some(parent) = full_path.parent() {
if parent.exists() {
let cp = std::fs::canonicalize(parent).map_err(|e| {
Error::new(Status::GenericFailure, format!("Failed to canonicalize parent of '{file_path}': {e}"))
Error::new(
Status::GenericFailure,
format!("Failed to canonicalize parent of '{file_path}': {e}"),
)
})?;
cp.join(full_path.file_name().unwrap_or_default())
} else {
@ -56,7 +65,10 @@ fn validate_path_within_repo(repo_path: &str, file_path: &str) -> Result<std::pa
full_path.clone()
};
if !canonical.starts_with(&repo_dir) {
return Err(Error::new(Status::GenericFailure, format!("Path '{file_path}' escapes repository boundary")));
return Err(Error::new(
Status::GenericFailure,
format!("Path '{file_path}' escapes repository boundary"),
));
}
Ok(canonical)
}
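The containment check above is sound because both sides are canonicalized before the prefix comparison, so ../ segments and symlinks are resolved away; for files that do not exist yet, the nearest existing parent is canonicalized instead. A reduced sketch of the core check for existing paths (error handling and the parent fallback elided):

use std::{fs, path::{Path, PathBuf}};

fn within_repo(repo: &Path, file: &str) -> Option<PathBuf> {
    let root = fs::canonicalize(repo).ok()?;
    let resolved = fs::canonicalize(root.join(file)).ok()?; // resolves `..` and symlinks
    resolved.starts_with(&root).then_some(resolved) // reject anything outside the repo
}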
@ -159,9 +171,7 @@ pub struct GitMergeResult {
#[napi]
pub fn git_current_branch(repo_path: String) -> Result<Option<String>> {
let repo = open_repo(&repo_path)?;
let head = repo
.head()
.map_err(|e| git_err("Failed to read HEAD", e))?;
let head = repo.head().map_err(|e| git_err("Failed to read HEAD", e))?;
if head.is_branch() {
Ok(head.shorthand().map(String::from))
@ -200,9 +210,7 @@ pub fn git_main_branch(repo_path: String) -> Result<String> {
return Ok("master".to_string());
}
let head = repo
.head()
.map_err(|e| git_err("Failed to read HEAD", e))?;
let head = repo.head().map_err(|e| git_err("Failed to read HEAD", e))?;
Ok(head.shorthand().unwrap_or("HEAD").to_string())
}
@ -358,11 +366,7 @@ pub fn git_has_staged_changes(repo_path: String) -> Result<bool> {
/// When `from_ref` is "HEAD" and `to_ref` is "INDEX", diffs index vs HEAD (staged).
/// Replaces: `git diff --stat HEAD`, `git diff --stat --cached HEAD`
#[napi]
pub fn git_diff_stat(
repo_path: String,
from_ref: String,
to_ref: String,
) -> Result<GitDiffStat> {
pub fn git_diff_stat(repo_path: String, from_ref: String, to_ref: String) -> Result<GitDiffStat> {
let repo = open_repo(&repo_path)?;
let diff = match (from_ref.as_str(), to_ref.as_str()) {
@ -506,17 +510,14 @@ pub fn git_diff_numstat(
// Count added/removed lines per file using the patch API
for (i, _) in diff.deltas().enumerate() {
if let Ok(patch) = git2::Patch::from_diff(&diff, i) {
if let Some(patch) = patch {
let (_, additions, deletions) = patch.line_stats()
.unwrap_or((0, 0, 0));
if let Ok(Some(patch)) = git2::Patch::from_diff(&diff, i) {
let (_, additions, deletions) = patch.line_stats().unwrap_or((0, 0, 0));
if let Some(entry) = results.get_mut(i) {
entry.added = additions as u32;
entry.removed = deletions as u32;
}
}
}
}
Ok(results)
}
@ -783,7 +784,9 @@ pub fn git_ls_files(repo_path: String, pathspec: String) -> Result<Vec<String>>
let mut files = Vec::new();
for entry in index.iter() {
let path = String::from_utf8_lossy(&entry.path).to_string();
if path.starts_with(&pathspec) || (pathspec.ends_with('/') && path.starts_with(pathspec.trim_end_matches('/'))) {
if path.starts_with(&pathspec)
|| (pathspec.ends_with('/') && path.starts_with(pathspec.trim_end_matches('/')))
{
files.push(path);
}
}
@ -1030,11 +1033,7 @@ pub fn git_reset_paths(repo_path: String, paths: Vec<String>) -> Result<()> {
/// Returns the commit SHA.
/// Replaces: `git commit -m <message>`, `git commit --no-verify -F -`
#[napi]
pub fn git_commit(
repo_path: String,
message: String,
allow_empty: Option<bool>,
) -> Result<String> {
pub fn git_commit(repo_path: String, message: String, allow_empty: Option<bool>) -> Result<String> {
let repo = open_repo(&repo_path)?;
let mut index = repo
.index()
@ -1045,8 +1044,7 @@ pub fn git_commit(
let merge_msg_path = repo.path().join("MERGE_MSG");
let squash_msg_path = repo.path().join("SQUASH_MSG");
if merge_msg_path.exists() {
std::fs::read_to_string(&merge_msg_path)
.unwrap_or_else(|_| "Merge commit".to_string())
std::fs::read_to_string(&merge_msg_path).unwrap_or_else(|_| "Merge commit".to_string())
} else if squash_msg_path.exists() {
std::fs::read_to_string(&squash_msg_path)
.unwrap_or_else(|_| "Squash commit".to_string())
@ -1107,8 +1105,12 @@ pub fn git_commit(
for msg_file in &["SQUASH_MSG", "MERGE_MSG"] {
let msg_path = repo.path().join(msg_file);
if msg_path.exists() {
std::fs::remove_file(&msg_path)
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to clean up {msg_file}: {e}")))?;
std::fs::remove_file(&msg_path).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Failed to clean up {msg_file}: {e}"),
)
})?;
}
}
@ -1183,11 +1185,19 @@ pub fn git_checkout_theirs(repo_path: String, paths: Vec<String>) -> Result<()>
.map_err(|e| git_err(&format!("Failed to find blob for '{path}'"), e))?;
let full_path = validate_path_within_repo(&repo_path, path)?;
if let Some(parent) = full_path.parent() {
std::fs::create_dir_all(parent)
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to create directory for '{path}': {e}")))?;
std::fs::create_dir_all(parent).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Failed to create directory for '{path}': {e}"),
)
})?;
}
std::fs::write(&full_path, blob.content())
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to write '{path}': {e}")))?;
std::fs::write(&full_path, blob.content()).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Failed to write '{path}': {e}"),
)
})?;
}
}
@ -1233,7 +1243,11 @@ pub fn git_merge_squash(repo_path: String, branch: String) -> Result<GitMergeRes
let mut checkout_opts = CheckoutBuilder::new();
checkout_opts.safe().allow_conflicts(true);
repo.merge(&[&annotated], Some(&mut merge_opts), Some(&mut checkout_opts))
repo.merge(
&[&annotated],
Some(&mut merge_opts),
Some(&mut checkout_opts),
)
.map_err(|e| git_err("Failed to merge", e))?;
// Check for conflicts
@ -1277,9 +1291,7 @@ pub fn git_merge_abort(repo_path: String) -> Result<()> {
let repo = open_repo(&repo_path)?;
// Reset to HEAD
let head = repo
.head()
.map_err(|e| git_err("Failed to read HEAD", e))?;
let head = repo.head().map_err(|e| git_err("Failed to read HEAD", e))?;
let obj = head
.peel(ObjectType::Commit)
.map_err(|e| git_err("Failed to peel HEAD", e))?;
@ -1321,12 +1333,20 @@ pub fn git_rebase_abort(repo_path: String) -> Result<()> {
// Clean up rebase state directories
if rebase_merge.exists() {
std::fs::remove_dir_all(&rebase_merge)
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to remove rebase-merge state: {e}")))?;
std::fs::remove_dir_all(&rebase_merge).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Failed to remove rebase-merge state: {e}"),
)
})?;
}
if rebase_apply.exists() {
std::fs::remove_dir_all(&rebase_apply)
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to remove rebase-apply state: {e}")))?;
std::fs::remove_dir_all(&rebase_apply).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Failed to remove rebase-apply state: {e}"),
)
})?;
}
}
@ -1341,9 +1361,7 @@ pub fn git_rebase_abort(repo_path: String) -> Result<()> {
pub fn git_reset_hard(repo_path: String) -> Result<()> {
let repo = open_repo(&repo_path)?;
let head = repo
.head()
.map_err(|e| git_err("Failed to read HEAD", e))?;
let head = repo.head().map_err(|e| git_err("Failed to read HEAD", e))?;
let obj = head
.peel(ObjectType::Commit)
.map_err(|e| git_err("Failed to peel HEAD", e))?;
@ -1385,11 +1403,7 @@ pub fn git_branch_delete(repo_path: String, branch: String, force: Option<bool>)
/// Force-reset a branch to point at a target ref.
/// Replaces: `git branch -f <branch> <target>`
#[napi]
pub fn git_branch_force_reset(
repo_path: String,
branch: String,
target: String,
) -> Result<()> {
pub fn git_branch_force_reset(repo_path: String, branch: String, target: String) -> Result<()> {
let repo = open_repo(&repo_path)?;
let target_commit = repo
@ -1446,12 +1460,10 @@ pub fn git_rm_cached(
removed.push(format!("rm '{entry_path}'"));
}
}
} else {
if index.remove_path(Path::new(path)).is_ok() {
} else if index.remove_path(Path::new(path)).is_ok() {
removed.push(format!("rm '{path}'"));
}
}
}
if !removed.is_empty() {
index
@ -1472,13 +1484,18 @@ pub fn git_rm_force(repo_path: String, paths: Vec<String>) -> Result<()> {
.map_err(|e| git_err("Failed to read index", e))?;
for path in &paths {
index.remove_path(Path::new(path))
index
.remove_path(Path::new(path))
.map_err(|e| git_err(&format!("Failed to remove '{path}' from index"), e))?;
// Also delete from working tree (with path traversal validation)
let full_path = validate_path_within_repo(&repo_path, path)?;
if full_path.exists() {
std::fs::remove_file(&full_path)
.map_err(|e| Error::new(Status::GenericFailure, format!("Failed to delete '{path}': {e}")))?;
std::fs::remove_file(&full_path).map_err(|e| {
Error::new(
Status::GenericFailure,
format!("Failed to delete '{path}': {e}"),
)
})?;
}
}
@ -1523,10 +1540,7 @@ pub fn git_worktree_add(
repo.worktree(
&branch, // worktree name
Path::new(&wt_path),
Some(
git2::WorktreeAddOptions::new()
.reference(Some(&reference)),
),
Some(git2::WorktreeAddOptions::new().reference(Some(&reference))),
)
.map_err(|e| git_err(&format!("Failed to add worktree at '{wt_path}'"), e))?;
@ -1615,8 +1629,7 @@ pub fn git_worktree_prune(repo_path: String) -> Result<()> {
pub fn git_revert_commit(repo_path: String, sha: String) -> Result<()> {
let repo = open_repo(&repo_path)?;
let oid = git2::Oid::from_str(&sha)
.map_err(|e| git_err(&format!("Invalid SHA '{sha}'"), e))?;
let oid = git2::Oid::from_str(&sha).map_err(|e| git_err(&format!("Invalid SHA '{sha}'"), e))?;
let commit = repo
.find_commit(oid)

View file

@ -175,8 +175,12 @@ fn run_glob(
}
let mut matches = if config.use_cache {
let scan =
fs_cache::get_or_scan(&config.root, config.include_hidden, config.use_gitignore, &ct)?;
let scan = fs_cache::get_or_scan(
&config.root,
config.include_hidden,
config.use_gitignore,
&ct,
)?;
let mut matches = filter_entries(&scan.entries, &glob_set, &config, on_match, &ct)?;
// Empty-result recheck: if we got zero matches from a cached scan that's old
// enough, force a rescan and try once more before returning empty.
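
A self-contained restatement of that empty-result recheck, with hypothetical names standing in for the real `fs_cache` API:

```rust
use std::time::{Duration, Instant};

struct CachedScan {
    taken_at: Instant,
    entries: Vec<String>,
}

// If a cached scan yields zero matches and is older than `max_age`,
// rescan once and re-filter before trusting "empty".
fn matches_with_recheck(
    cache: &mut CachedScan,
    rescan: impl Fn() -> Vec<String>,
    is_match: impl Fn(&str) -> bool,
    max_age: Duration,
) -> Vec<String> {
    let hits = |entries: &[String]| {
        entries
            .iter()
            .filter(|e| is_match(e.as_str()))
            .cloned()
            .collect::<Vec<String>>()
    };
    let mut found = hits(&cache.entries);
    if found.is_empty() && cache.taken_at.elapsed() > max_age {
        // Zero hits from an old scan: rescan once, then re-filter.
        cache.entries = rescan();
        cache.taken_at = Instant::now();
        found = hits(&cache.entries);
    }
    found
}
```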

View file

@ -81,10 +81,7 @@ mod tests {
#[test]
fn unclosed_brace_gets_closed() {
assert_eq!(
build_glob_pattern("*.{ts,tsx,js", true),
"**/*.{ts,tsx,js}"
);
assert_eq!(build_glob_pattern("*.{ts,tsx,js", true), "**/*.{ts,tsx,js}");
}
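
The auto-closing behavior this test pins down can be sketched as brace-depth counting. This is an assumption about `build_glob_pattern`'s internals (the `true` flag appears to control the separate `**/` prefixing step):

```rust
// Count unmatched '{' and append the missing '}' so the glob compiles.
fn close_unmatched_braces(pattern: &str) -> String {
    let mut depth: usize = 0;
    for c in pattern.chars() {
        match c {
            '{' => depth += 1,
            '}' => depth = depth.saturating_sub(1),
            _ => {}
        }
    }
    let mut out = String::from(pattern);
    out.extend(std::iter::repeat('}').take(depth));
    out
}
```

`close_unmatched_braces("*.{ts,tsx,js")` yields `"*.{ts,tsx,js}"`, matching the test's expectation once `**/` is prefixed.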
#[test]

View file

@ -155,7 +155,19 @@ pub struct HighlightColors {
/// Language alias mappings: (aliases, target syntax name).
/// Used for languages not in syntect's default set or with non-standard names.
const LANG_ALIASES: &[(&[&str], &str)] = &[
(&["ts", "tsx", "typescript", "js", "jsx", "javascript", "mjs", "cjs"], "JavaScript"),
(
&[
"ts",
"tsx",
"typescript",
"js",
"jsx",
"javascript",
"mjs",
"cjs",
],
"JavaScript",
),
(&["py", "python"], "Python"),
(&["rb", "ruby"], "Ruby"),
(&["rs", "rust"], "Rust"),
@ -422,13 +434,13 @@ pub fn highlight_code(code: String, lang: Option<String>, colors: HighlightColor
match op {
ScopeStackOp::Push(scope) => {
scope_stack.push(scope);
},
}
ScopeStackOp::Pop(count) => {
for _ in 0..count {
scope_stack.pop();
}
},
ScopeStackOp::Restore | ScopeStackOp::Clear(_) | ScopeStackOp::Noop => {},
}
ScopeStackOp::Restore | ScopeStackOp::Clear(_) | ScopeStackOp::Noop => {}
}
}

View file

@ -9,9 +9,9 @@
use std::{io::Cursor, sync::Arc};
use image::{
DynamicImage, ImageFormat, ImageReader,
codecs::{jpeg::JpegEncoder, webp::WebPEncoder},
imageops::FilterType,
DynamicImage, ImageFormat, ImageReader,
};
use napi::bindgen_prelude::*;
use napi_derive::napi;
@ -81,7 +81,9 @@ impl NativeImage {
#[napi(js_name = "encode")]
pub fn encode(&self, format: u8, quality: u8) -> task::Async<Vec<u8>> {
let img = Arc::clone(&self.img);
task::blocking("image.encode", (), move |_| encode_image(&img, format, quality))
task::blocking("image.encode", (), move |_| {
encode_image(&img, format, quality)
})
}
/// Resize to exact dimensions. Returns a new NativeImage.
@ -89,7 +91,9 @@ impl NativeImage {
pub fn resize(&self, width: u32, height: u32, filter: SamplingFilter) -> ImageTask {
let img = Arc::clone(&self.img);
task::blocking("image.resize", (), move |_| {
Ok(Self { img: Arc::new(img.resize_exact(width, height, filter.into())) })
Ok(Self {
img: Arc::new(img.resize_exact(width, height, filter.into())),
})
})
}
}
@ -122,27 +126,29 @@ fn encode_image(img: &DynamicImage, format: u8, quality: u8) -> Result<Vec<u8>>
img.write_to(&mut Cursor::new(&mut buffer), ImageFormat::Png)
.map_err(|e| Error::from_reason(format!("Failed to encode PNG: {e}")))?;
Ok(buffer)
},
}
1 => {
let mut buffer = Vec::with_capacity(encode_capacity(w, h, 3)?);
let encoder = JpegEncoder::new_with_quality(&mut buffer, quality);
img.write_with_encoder(encoder)
.map_err(|e| Error::from_reason(format!("Failed to encode JPEG: {e}")))?;
Ok(buffer)
},
}
2 => {
let mut buffer = Vec::with_capacity(encode_capacity(w, h, 4)?);
let encoder = WebPEncoder::new_lossless(&mut buffer);
img.write_with_encoder(encoder)
.map_err(|e| Error::from_reason(format!("Failed to encode WebP: {e}")))?;
Ok(buffer)
},
}
3 => {
let mut buffer = Vec::with_capacity(encode_capacity(w, h, 1)?);
img.write_to(&mut Cursor::new(&mut buffer), ImageFormat::Gif)
.map_err(|e| Error::from_reason(format!("Failed to encode GIF: {e}")))?;
Ok(buffer)
},
_ => Err(Error::from_reason(format!("Invalid image format: {format}"))),
}
_ => Err(Error::from_reason(format!(
"Invalid image format: {format}"
))),
}
}
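
`encode_capacity` itself is elided from this diff; a plausible shape for it, matching its call sites above (3 bytes/pixel for JPEG, 4 for WebP, 1 for GIF) and guarding against overflow:

```rust
// Hypothetical reimplementation: reserve roughly width * height *
// bytes_per_pixel, refusing dimensions that overflow usize.
fn encode_capacity_sketch(w: u32, h: u32, bytes_per_pixel: u32) -> Result<usize, String> {
    (w as u64)
        .checked_mul(h as u64)
        .and_then(|px| px.checked_mul(bytes_per_pixel as u64))
        .and_then(|total| usize::try_from(total).ok())
        .ok_or_else(|| "image dimensions overflow buffer capacity".to_string())
}
```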

View file

@ -169,12 +169,24 @@ fn handle_truncated_value(result: &mut String) {
// at the end after a value-position character
if len > 0 {
let last = bytes[len - 1];
if last.is_ascii_digit() || last == b'.' || last == b'-' || last == b'e' || last == b'E' || last == b'+' {
if last.is_ascii_digit()
|| last == b'.'
|| last == b'-'
|| last == b'e'
|| last == b'E'
|| last == b'+'
{
// Walk backwards to find the start of the number-like token
let mut start = len;
while start > 0 {
let b = bytes[start - 1];
if b.is_ascii_digit() || b == b'.' || b == b'-' || b == b'e' || b == b'E' || b == b'+' {
if b.is_ascii_digit()
|| b == b'.'
|| b == b'-'
|| b == b'e'
|| b == b'E'
|| b == b'+'
{
start -= 1;
} else {
break;
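
The backwards walk above trims a number token that was cut mid-stream, so the remaining prefix can still be closed into valid JSON. As a standalone sketch mirroring that logic:

```rust
// If a truncated JSON string ends mid-number, strip the dangling
// number-like token ([0-9.+-eE]) from the end.
fn trim_dangling_number(result: &mut String) {
    let is_numlike =
        |b: u8| b.is_ascii_digit() || matches!(b, b'.' | b'-' | b'+' | b'e' | b'E');
    let bytes = result.as_bytes();
    let len = bytes.len();
    if len == 0 || !is_numlike(bytes[len - 1]) {
        return;
    }
    let mut start = len;
    while start > 0 && is_numlike(bytes[start - 1]) {
        start -= 1;
    }
    result.truncate(start);
}
```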
@ -228,12 +240,8 @@ fn handle_truncated_value(result: &mut String) {
/// Convert a serde_json::Value to a napi JsUnknown.
fn serde_value_to_napi(env: &Env, value: &serde_json::Value) -> Result<napi::JsUnknown> {
match value {
serde_json::Value::Null => {
env.get_null().map(|v| v.into_unknown())
}
serde_json::Value::Bool(b) => {
env.get_boolean(*b).map(|v| v.into_unknown())
}
serde_json::Value::Null => env.get_null().map(|v| v.into_unknown()),
serde_json::Value::Bool(b) => env.get_boolean(*b).map(|v| v.into_unknown()),
serde_json::Value::Number(n) => {
if let Some(i) = n.as_i64() {
// Use i32 if it fits, otherwise f64
@ -248,9 +256,7 @@ fn serde_value_to_napi(env: &Env, value: &serde_json::Value) -> Result<napi::JsU
env.get_null().map(|v| v.into_unknown())
}
}
serde_json::Value::String(s) => {
env.create_string(s).map(|v| v.into_unknown())
}
serde_json::Value::String(s) => env.create_string(s).map(|v| v.into_unknown()),
serde_json::Value::Array(arr) => {
let mut js_arr = env.create_array_with_length(arr.len())?;
for (idx, item) in arr.iter().enumerate() {

View file

@ -43,7 +43,11 @@ mod platform {
pub fn process_group_id(pid: i32) -> Option<i32> {
// SAFETY: `libc::getpgid` is safe to call with any pid
let pgid = unsafe { libc::getpgid(pid) };
if pgid < 0 { None } else { Some(pgid) }
if pgid < 0 {
None
} else {
Some(pgid)
}
}
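
The function following the doc comment below is elided by the diff; a plausible shape for it using `libc::killpg` (the wrapper name is ours, and the `libc` dependency is already in use in this module):

```rust
// Send `signal` to every process in the group `pgid` at once.
fn signal_process_group(pgid: i32, signal: i32) -> std::io::Result<()> {
    // SAFETY: `libc::killpg` is safe to call with any pgid/signal;
    // failure is reported through errno.
    if unsafe { libc::killpg(pgid, signal) } == 0 {
        Ok(())
    } else {
        Err(std::io::Error::last_os_error())
    }
}
```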
/// Send `signal` to the process group `pgid`.
@ -77,7 +81,11 @@ mod platform {
let mut buffer = vec![0i32; count as usize];
// SAFETY: buffer is correctly sized and aligned for `count` i32 elements.
let actual = unsafe {
proc_listchildpids(pid, buffer.as_mut_ptr(), (buffer.len() * size_of::<i32>()) as i32)
proc_listchildpids(
pid,
buffer.as_mut_ptr(),
(buffer.len() * size_of::<i32>()) as i32,
)
};
if actual <= 0 {
@ -105,7 +113,11 @@ mod platform {
pub fn process_group_id(pid: i32) -> Option<i32> {
// SAFETY: libc::getpgid is safe to call with any pid
let pgid = unsafe { libc::getpgid(pid) };
if pgid < 0 { None } else { Some(pgid) }
if pgid < 0 {
None
} else {
Some(pgid)
}
}
/// Send `signal` to the process group `pgid`.
@ -121,7 +133,10 @@ mod platform {
use std::{collections::HashMap, mem};
#[repr(C)]
#[allow(non_snake_case, reason = "Windows PROCESSENTRY32W field names must match Win32 ABI")]
#[allow(
non_snake_case,
reason = "Windows PROCESSENTRY32W field names must match Win32 ABI"
)]
struct PROCESSENTRY32W {
dwSize: u32,
cntUsage: u32,
@ -167,8 +182,7 @@ mod platform {
if Process32FirstW(snapshot, &raw mut entry) != 0 {
loop {
tree
.entry(entry.th32ParentProcessID)
tree.entry(entry.th32ParentProcessID)
.or_default()
.push(entry.th32ProcessID);
@ -191,11 +205,7 @@ mod platform {
collect_descendants_from_tree(pid as u32, &tree, pids);
}
fn collect_descendants_from_tree(
pid: u32,
tree: &HashMap<u32, Vec<u32>>,
pids: &mut Vec<i32>,
) {
fn collect_descendants_from_tree(pid: u32, tree: &HashMap<u32, Vec<u32>>, pids: &mut Vec<i32>) {
if let Some(children) = tree.get(&pid) {
for &child_pid in children {
pids.push(child_pid as i32);

View file

@ -47,19 +47,15 @@ pub struct StreamChunkResult {
/// strips ANSI escape sequences, removes control characters (except tab and
/// newline), removes carriage returns, and filters Unicode format characters.
#[napi(js_name = "processStreamChunk")]
pub fn process_stream_chunk(
chunk: Buffer,
state: Option<StreamState>,
) -> StreamChunkResult {
pub fn process_stream_chunk(chunk: Buffer, state: Option<StreamState>) -> StreamChunkResult {
let state = state.unwrap_or_default();
let bytes = chunk.as_ref();
// Prepend any pending bytes from previous chunk
let mut input: Vec<u8>;
let src: &[u8] = if !state.utf8_pending.is_empty() || !state.ansi_pending.is_empty() {
input = Vec::with_capacity(
state.ansi_pending.len() + state.utf8_pending.len() + bytes.len(),
);
input =
Vec::with_capacity(state.ansi_pending.len() + state.utf8_pending.len() + bytes.len());
input.extend_from_slice(&state.ansi_pending);
input.extend_from_slice(&state.utf8_pending);
input.extend_from_slice(bytes);
@ -134,7 +130,7 @@ fn find_incomplete_utf8_tail(bytes: &[u8]) -> usize {
// that starts an incomplete sequence.
let len = bytes.len();
// Check at most the last 3 bytes (max UTF-8 continuation trail)
let check_start = if len > 3 { len - 3 } else { 0 };
let check_start = len.saturating_sub(3);
for i in (check_start..len).rev() {
let b = bytes[i];
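
Restated self-contained, the rule this function relies on: a UTF-8 lead byte declares its sequence length, so only the last three bytes can start an incomplete sequence. The real `find_incomplete_utf8_tail` may differ in details:

```rust
// Returns how many trailing bytes to hold back for the next chunk.
fn incomplete_utf8_tail(bytes: &[u8]) -> usize {
    let len = bytes.len();
    for i in (len.saturating_sub(3)..len).rev() {
        let b = bytes[i];
        if b < 0x80 {
            return 0; // ASCII: everything up to here is complete
        }
        let need = match b {
            0xC0..=0xDF => 2, // lead byte of a 2-byte sequence
            0xE0..=0xEF => 3, // 3-byte sequence
            0xF0..=0xF7 => 4, // 4-byte sequence
            _ => continue,    // continuation (or invalid) byte: look further back
        };
        let have = len - i;
        return if have < need { have } else { 0 };
    }
    0
}
```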
@ -326,8 +322,8 @@ fn could_be_incomplete_ansi(bytes: &[u8], pos: usize) -> bool {
// CSI: ESC [ ... <final byte 0x40-0x7E>
b'[' => {
// If we don't see a final byte, it's incomplete
for j in (pos + 2)..bytes.len() {
if (0x40..=0x7E).contains(&bytes[j]) {
for byte in bytes.iter().skip(pos + 2) {
if (0x40..=0x7E).contains(byte) {
return false; // found terminator — it's complete (but malformed since ansi_sequence_len returned None)
}
}
@ -335,11 +331,11 @@ fn could_be_incomplete_ansi(bytes: &[u8], pos: usize) -> bool {
}
// OSC: ESC ] ... (terminated by BEL or ST)
b']' => {
for j in (pos + 2)..bytes.len() {
if bytes[j] == 0x07 {
for (j, byte) in bytes.iter().enumerate().skip(pos + 2) {
if *byte == 0x07 {
return false;
}
if bytes[j] == 0x1B && j + 1 < bytes.len() && bytes[j + 1] == b'\\' {
if *byte == 0x1B && j + 1 < bytes.len() && bytes[j + 1] == b'\\' {
return false;
}
}
@ -347,8 +343,8 @@ fn could_be_incomplete_ansi(bytes: &[u8], pos: usize) -> bool {
}
// DCS, SOS, PM, APC
b'P' | b'X' | b'^' | b'_' => {
for j in (pos + 2)..bytes.len() {
if bytes[j] == 0x1B && j + 1 < bytes.len() && bytes[j + 1] == b'\\' {
for (j, byte) in bytes.iter().enumerate().skip(pos + 2) {
if *byte == 0x1B && j + 1 < bytes.len() && bytes[j + 1] == b'\\' {
return false;
}
}
@ -358,8 +354,8 @@ fn could_be_incomplete_ansi(bytes: &[u8], pos: usize) -> bool {
0x40..=0x7E => false,
// Intermediate bytes (ESC + intermediate + final)
0x20..=0x2F => {
for j in (pos + 2)..bytes.len() {
if (0x30..=0x7E).contains(&bytes[j]) {
for byte in bytes.iter().skip(pos + 2) {
if (0x30..=0x7E).contains(byte) {
return false;
}
}
@ -383,8 +379,8 @@ fn ansi_sequence_len(bytes: &[u8], pos: usize) -> Option<usize> {
match bytes[pos + 1] {
// CSI: ESC [
b'[' => {
for j in (pos + 2)..len {
if (0x40..=0x7E).contains(&bytes[j]) {
for (j, byte) in bytes.iter().enumerate().take(len).skip(pos + 2) {
if (0x40..=0x7E).contains(byte) {
return Some(j - pos + 1);
}
}
@ -392,11 +388,11 @@ fn ansi_sequence_len(bytes: &[u8], pos: usize) -> Option<usize> {
}
// OSC: ESC ]
b']' => {
for j in (pos + 2)..len {
if bytes[j] == 0x07 {
for (j, byte) in bytes.iter().enumerate().take(len).skip(pos + 2) {
if *byte == 0x07 {
return Some(j - pos + 1);
}
if bytes[j] == 0x1B && j + 1 < len && bytes[j + 1] == b'\\' {
if *byte == 0x1B && j + 1 < len && bytes[j + 1] == b'\\' {
return Some(j - pos + 2);
}
}
@ -404,8 +400,8 @@ fn ansi_sequence_len(bytes: &[u8], pos: usize) -> Option<usize> {
}
// DCS, SOS, PM, APC — terminated by ST (ESC \)
b'P' | b'X' | b'^' | b'_' => {
for j in (pos + 2)..len {
if bytes[j] == 0x1B && j + 1 < len && bytes[j + 1] == b'\\' {
for (j, byte) in bytes.iter().enumerate().take(len).skip(pos + 2) {
if *byte == 0x1B && j + 1 < len && bytes[j + 1] == b'\\' {
return Some(j - pos + 2);
}
}
@ -413,8 +409,8 @@ fn ansi_sequence_len(bytes: &[u8], pos: usize) -> Option<usize> {
}
// ESC + intermediates (0x20-0x2F) + final byte (0x30-0x7E)
0x20..=0x2F => {
for j in (pos + 2)..len {
if (0x30..=0x7E).contains(&bytes[j]) {
for (j, byte) in bytes.iter().enumerate().take(len).skip(pos + 2) {
if (0x30..=0x7E).contains(byte) {
return Some(j - pos + 1);
}
}
@ -661,9 +657,8 @@ mod tests {
fn process_chunk(bytes: &[u8], state: Option<StreamState>) -> StreamChunkResult {
let state = state.unwrap_or_default();
let mut input: Vec<u8> = Vec::with_capacity(
state.ansi_pending.len() + state.utf8_pending.len() + bytes.len(),
);
let mut input: Vec<u8> =
Vec::with_capacity(state.ansi_pending.len() + state.utf8_pending.len() + bytes.len());
input.extend_from_slice(&state.ansi_pending);
input.extend_from_slice(&state.utf8_pending);
input.extend_from_slice(bytes);
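
All of the loop rewrites above implement the same terminator rules. The CSI case, reduced to a standalone sketch: after `ESC [`, parameter and intermediate bytes run until the first byte in `0x40..=0x7E` terminates the sequence.

```rust
// Returns the full CSI sequence length, or None if the chunk ends first.
fn csi_len(bytes: &[u8], pos: usize) -> Option<usize> {
    if bytes.get(pos) != Some(&0x1B) || bytes.get(pos + 1) != Some(&b'[') {
        return None;
    }
    for (j, b) in bytes.iter().enumerate().skip(pos + 2) {
        if (0x40..=0x7E).contains(b) {
            return Some(j - pos + 1); // length including ESC and final byte
        }
    }
    None // incomplete: hold these bytes for the next chunk
}
```

For example, `csi_len(b"\x1b[31m", 0)` yields `Some(5)`, while `csi_len(b"\x1b[31", 0)` yields `None`, the same complete/incomplete split that `ansi_sequence_len` and `could_be_incomplete_ansi` manage above.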

View file

@ -208,10 +208,9 @@ fn find_symbol_matches(
) -> Result<Vec<SymbolMatch>> {
let mut compiled: Vec<Pattern> = Vec::new();
for pat_str in patterns {
match Pattern::try_new(pat_str, lang) {
Ok(p) => compiled.push(p),
Err(_) => {} // skip patterns that don't compile for this lang variant
}
if let Ok(p) = Pattern::try_new(pat_str, lang) {
compiled.push(p);
} // skip patterns that don't compile for this lang variant
}
if compiled.is_empty() {
return Err(Error::from_reason(

View file

@ -9,7 +9,7 @@
use std::time::{Duration, Instant};
use napi::{Env, Error, Result, Task, bindgen_prelude::*};
use napi::{bindgen_prelude::*, Env, Error, Result, Task};
// ─────────────────────────────────────────────────────────────────────────────
// Cancellation

View file

@ -10,9 +10,9 @@
use std::cell::RefCell;
use napi::{JsString, bindgen_prelude::*};
use napi::{bindgen_prelude::*, JsString};
use napi_derive::napi;
use smallvec::{SmallVec, smallvec};
use smallvec::{smallvec, SmallVec};
use unicode_segmentation::UnicodeSegmentation;
use unicode_width::{UnicodeWidthChar, UnicodeWidthStr};
@ -107,7 +107,11 @@ struct AnsiState {
impl AnsiState {
#[inline]
const fn new() -> Self {
Self { attrs: 0, fg: COLOR_NONE, bg: COLOR_NONE }
Self {
attrs: 0,
fg: COLOR_NONE,
bg: COLOR_NONE,
}
}
#[inline]
@ -167,7 +171,7 @@ impl AnsiState {
let (idx, ni) = parse_sgr_num_u16(params, i);
i = ni;
0x100 | (idx as ColorVal & 0xff)
},
}
2 => {
let (r, ni) = parse_sgr_num_u16(params, i);
let (g, ni) = parse_sgr_num_u16(params, ni);
@ -177,7 +181,7 @@ impl AnsiState {
| ((r as ColorVal & 0xff) << 16)
| ((g as ColorVal & 0xff) << 8)
| (b as ColorVal & 0xff)
},
}
_ => continue,
};
@ -186,9 +190,9 @@ impl AnsiState {
} else {
self.bg = color;
}
},
}
_ => {},
_ => {}
}
}
}
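
The packed color encoding implied by those masks, sketched below; only the shifts and masks are taken from the code above, while the truecolor flag bit is an assumption:

```rust
type ColorVal = u32;

const COLOR_256_FLAG: ColorVal = 0x100; // 0x100 | index -> 256-color
const COLOR_RGB_FLAG: ColorVal = 0x0100_0000; // flag | 0xRRGGBB -> truecolor

fn pack_256(idx: u8) -> ColorVal {
    COLOR_256_FLAG | ColorVal::from(idx)
}

fn pack_rgb(r: u8, g: u8, b: u8) -> ColorVal {
    COLOR_RGB_FLAG
        | (ColorVal::from(r) << 16)
        | (ColorVal::from(g) << 8)
        | ColorVal::from(b)
}
```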
@ -332,7 +336,7 @@ fn ansi_seq_len_u16(data: &[u16], pos: usize) -> Option<usize> {
}
}
None
},
}
0x5d => {
// ']' OSC
for (i, &b) in data[pos + 2..].iter().enumerate() {
@ -344,7 +348,7 @@ fn ansi_seq_len_u16(data: &[u16], pos: usize) -> Option<usize> {
}
}
None
},
}
0x50 | 0x58 | 0x5e | 0x5f => {
// 'P' DCS, 'X' SOS, '^' PM, '_' APC (terminated by ST)
for (i, &b) in data[pos + 2..].iter().enumerate() {
@ -353,7 +357,7 @@ fn ansi_seq_len_u16(data: &[u16], pos: usize) -> Option<usize> {
}
}
None
},
}
0x20..=0x2f => {
// ESC + intermediates + final byte
for (i, b) in data[pos + 2..].iter().enumerate() {
@ -362,7 +366,7 @@ fn ansi_seq_len_u16(data: &[u16], pos: usize) -> Option<usize> {
}
}
None
},
}
0x40..=0x7e => Some(2),
_ => None,
}
@ -509,7 +513,9 @@ struct Osc8State {
impl Osc8State {
fn new() -> Self {
Self { open_seq: Vec::new() }
Self {
open_seq: Vec::new(),
}
}
fn is_active(&self) -> bool {
@ -518,7 +524,14 @@ impl Osc8State {
/// Write the OSC 8 close sequence: ESC ]8;; BEL
fn write_close(out: &mut Vec<u16>) {
out.extend_from_slice(&[ESC, b']' as u16, b'8' as u16, b';' as u16, b';' as u16, 0x07]);
out.extend_from_slice(&[
ESC,
b']' as u16,
b'8' as u16,
b';' as u16,
b';' as u16,
0x07,
]);
}
/// Write the stored open sequence to re-open the hyperlink.
@ -535,13 +548,14 @@ impl Osc8State {
if seq.len() < 6 {
return false;
}
if seq[0] != ESC || seq[1] != b']' as u16 || seq[2] != b'8' as u16 || seq[3] != b';' as u16 {
if seq[0] != ESC || seq[1] != b']' as u16 || seq[2] != b'8' as u16 || seq[3] != b';' as u16
{
return false;
}
// Find the second semicolon that separates params from URI
let mut second_semi = None;
for i in 4..seq.len() {
if seq[i] == b';' as u16 {
for (i, &item) in seq.iter().enumerate().skip(4) {
if item == b';' as u16 {
second_semi = Some(i);
break;
}
@ -919,7 +933,7 @@ pub fn truncate_to_width(
let (text_w, exceeded) = visible_width_u16_up_to(text, max_width, tab_width);
if !exceeded {
if !pad || text_w == max_width {
return Ok(utf16_to_string(text.to_vec()));
return Ok(utf16_to_string(text));
}
let mut out = Vec::with_capacity(text.len() + (max_width - text_w));
@ -1165,7 +1179,10 @@ pub fn slice_with_width(
let (out, w) =
slice_with_width_impl(line, start_col as usize, length as usize, strict, tab_width);
Ok(SliceResult { text: utf16_to_string(out), width: clamp_u32(w as u64) })
Ok(SliceResult {
text: utf16_to_string(out),
width: clamp_u32(w as u64),
})
}
// ============================================================================
@ -1197,7 +1214,11 @@ fn extract_segments_impl(
let mut after_started = false;
let mut state = AnsiState::new();
let done_col = if after_len == 0 { before_end } else { after_end };
let done_col = if after_len == 0 {
before_end
} else {
after_end
};
while i < line_len && current_col < done_col {
if line[i] == ESC {
@ -1367,9 +1388,7 @@ pub fn sanitize_text(text: JsString) -> Result<String> {
};
if remove_len == 0 {
if u == 0x0d {
remove_len = 1;
} else if u <= 0x1f || u == 0x7f || (0x80..=0x9f).contains(&u) {
if u == 0x0d || u <= 0x1f || u == 0x7f || (0x80..=0x9f).contains(&u) {
remove_len = 1;
} else if (0xd800..=0xdbff).contains(&u) {
if i + 1 < len {
@ -1402,7 +1421,7 @@ pub fn sanitize_text(text: JsString) -> Result<String> {
}
if !did_change {
return Ok(utf16_to_string(data.to_vec()));
return Ok(utf16_to_string(data));
}
if last < len {
out.extend_from_slice(&data[last..]);
@ -1421,7 +1440,9 @@ pub fn sanitize_text(text: JsString) -> Result<String> {
pub fn visible_width_napi(text: JsString, tab_width: Option<u32>) -> Result<u32> {
let text_u16 = text.into_utf16()?;
let tab_width = clamp_tab_width(tab_width);
Ok(clamp_u32(visible_width_u16(text_u16.as_slice(), tab_width) as u64))
Ok(clamp_u32(
visible_width_u16(text_u16.as_slice(), tab_width) as u64
))
}
#[cfg(test)]
@ -1455,12 +1476,18 @@ mod tests {
visible_width_u16(&to_u16("\u{4e16}\u{754c}"), DEFAULT_TAB_WIDTH),
4
);
assert_eq!(visible_width_u16(&to_u16("a\u{4e16}b"), DEFAULT_TAB_WIDTH), 4);
assert_eq!(
visible_width_u16(&to_u16("a\u{4e16}b"), DEFAULT_TAB_WIDTH),
4
);
}
#[test]
fn test_visible_width_emoji() {
assert_eq!(visible_width_u16(&to_u16("\u{1f600}"), DEFAULT_TAB_WIDTH), 2);
assert_eq!(
visible_width_u16(&to_u16("\u{1f600}"), DEFAULT_TAB_WIDTH),
2
);
}
#[test]
@ -1531,9 +1558,8 @@ mod tests {
#[test]
fn test_wrap_text_with_ansi_resets_strike() {
let data = to_u16(
"\x1b[38;5;196m\x1b[48;5;236m\x1b[9mstrikethrough content wraps\x1b[29m\x1b[0m",
);
let data =
to_u16("\x1b[38;5;196m\x1b[48;5;236m\x1b[9mstrikethrough content wraps\x1b[29m\x1b[0m");
let lines = wrap_text_with_ansi_impl(&data, 12, DEFAULT_TAB_WIDTH);
assert!(lines.len() > 1);
@ -1619,17 +1645,33 @@ mod tests {
let data = to_u16(&text);
// Width 10 forces "click here please" (18 chars) to wrap
let lines = wrap_text_with_ansi_impl(&data, 10, DEFAULT_TAB_WIDTH);
assert!(lines.len() >= 2, "Expected wrapping, got {} lines", lines.len());
assert!(
lines.len() >= 2,
"Expected wrapping, got {} lines",
lines.len()
);
let first = String::from_utf16_lossy(&lines[0]);
let second = String::from_utf16_lossy(&lines[1]);
// First line should open the hyperlink and close it at the end
assert!(first.starts_with(&open), "First line should start with OSC 8 open: {:?}", first);
assert!(first.ends_with(close), "First line should end with OSC 8 close: {:?}", first);
assert!(
first.starts_with(&open),
"First line should start with OSC 8 open: {:?}",
first
);
assert!(
first.ends_with(close),
"First line should end with OSC 8 close: {:?}",
first
);
// Second line should re-open the hyperlink
assert!(second.starts_with(&open), "Second line should re-open OSC 8: {:?}", second);
assert!(
second.starts_with(&open),
"Second line should re-open OSC 8: {:?}",
second
);
}
#[test]
@ -1641,19 +1683,31 @@ mod tests {
let text = format!("{}{}{}", open, url, close);
let data = to_u16(&text);
let lines = wrap_text_with_ansi_impl(&data, 40, DEFAULT_TAB_WIDTH);
assert!(lines.len() >= 2, "Expected wrapping, got {} lines", lines.len());
assert!(
lines.len() >= 2,
"Expected wrapping, got {} lines",
lines.len()
);
for (i, line) in lines.iter().enumerate() {
let s = String::from_utf16_lossy(line);
// Every line except possibly the last (which has the close) should
// have the OSC 8 open sequence
assert!(s.contains(&open) || s.contains(close),
"Line {} should contain OSC 8 open or close: {:?}", i, s);
assert!(
s.contains(&open) || s.contains(close),
"Line {} should contain OSC 8 open or close: {:?}",
i,
s
);
}
// Last line should contain the close
let last = String::from_utf16_lossy(lines.last().unwrap());
assert!(last.contains(close), "Last line should contain OSC 8 close: {:?}", last);
assert!(
last.contains(close),
"Last line should contain OSC 8 close: {:?}",
last
);
}
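
The framing these tests assert, reduced to a sketch (the real code works on UTF-16 buffers; these helpers are illustrative only): an OSC 8 hyperlink is `ESC ]8;;URI BEL text ESC ]8;; BEL`, and the wrapper closes and re-opens it around every line break so no link dangles across lines.

```rust
fn osc8_open(uri: &str) -> String {
    format!("\x1b]8;;{uri}\x07")
}

fn osc8_close() -> &'static str {
    "\x1b]8;;\x07"
}

// Frame each wrapped line independently so the hyperlink never spans
// a line break.
fn frame_wrapped_link(parts: &[&str], uri: &str) -> Vec<String> {
    parts
        .iter()
        .map(|part| format!("{}{}{}", osc8_open(uri), part, osc8_close()))
        .collect()
}
```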
#[test]

View file

@ -44,7 +44,11 @@ pub fn truncate_tail(text: String, max_bytes: u32) -> TruncateResult {
// Fast path: fits entirely
if total_bytes <= max {
let line_count = memchr::memchr_iter(b'\n', text.as_bytes()).count()
+ if text.is_empty() || text.ends_with('\n') { 0 } else { 1 };
+ if text.is_empty() || text.ends_with('\n') {
0
} else {
1
};
return TruncateResult {
text,
truncated: false,
@ -73,7 +77,9 @@ pub fn truncate_tail(text: String, max_bytes: u32) -> TruncateResult {
let kept_lines = count_lines(kept);
TruncateResult {
text: std::str::from_utf8(kept).expect("split at newline boundary preserves UTF-8").to_owned(),
text: std::str::from_utf8(kept)
.expect("split at newline boundary preserves UTF-8")
.to_owned(),
truncated: true,
original_lines,
kept_lines,
@ -93,7 +99,11 @@ pub fn truncate_head(text: String, max_bytes: u32) -> TruncateResult {
// Fast path
if total_bytes <= max {
let line_count = memchr::memchr_iter(b'\n', text.as_bytes()).count()
+ if text.is_empty() || text.ends_with('\n') { 0 } else { 1 };
+ if text.is_empty() || text.ends_with('\n') {
0
} else {
1
};
return TruncateResult {
text,
truncated: false,
@ -124,7 +134,9 @@ pub fn truncate_head(text: String, max_bytes: u32) -> TruncateResult {
let kept_lines = count_lines(kept);
TruncateResult {
text: std::str::from_utf8(kept).expect("split at newline boundary preserves UTF-8").to_owned(),
text: std::str::from_utf8(kept)
.expect("split at newline boundary preserves UTF-8")
.to_owned(),
truncated: true,
original_lines,
kept_lines,
@ -138,11 +150,7 @@ pub fn truncate_head(text: String, max_bytes: u32) -> TruncateResult {
/// - `"head"`: keep the end (tail truncation removes head)
/// - `"both"`: keep beginning and end, elide the middle
#[napi(js_name = "truncateOutput")]
pub fn truncate_output(
text: String,
max_bytes: u32,
mode: Option<String>,
) -> TruncateOutputResult {
pub fn truncate_output(text: String, max_bytes: u32, mode: Option<String>) -> TruncateOutputResult {
let max = max_bytes as usize;
if text.len() <= max {

View file

@ -13,7 +13,7 @@ use napi_derive::napi;
use notify::{Config, Event, EventKind, RecommendedWatcher, RecursiveMode, Watcher};
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::sync::{Arc, mpsc};
use std::sync::{mpsc, Arc};
use std::thread::{self, JoinHandle};
use std::time::{Duration, Instant};
@ -81,8 +81,8 @@ fn build_ignore_set(patterns: &[String]) -> std::result::Result<GlobSet, String>
}
fn event_kind(kind: &EventKind) -> Option<&'static str> {
use notify::EventKind::*;
use notify::event::ModifyKind;
use notify::EventKind::*;
match kind {
Create(_) => Some("create"),
@ -182,8 +182,9 @@ pub fn watch_tree(
build_ignore_set(&ignore_patterns).map_err(|e| Error::new(Status::InvalidArg, e))?;
let has_ignores = !ignore_patterns.is_empty();
let tsfn: ThreadsafeFunction<Vec<WatchEvent>> = on_events
.create_threadsafe_function(0, |ctx: ThreadSafeCallContext<Vec<WatchEvent>>| {
let tsfn: ThreadsafeFunction<Vec<WatchEvent>> = on_events.create_threadsafe_function(
0,
|ctx: ThreadSafeCallContext<Vec<WatchEvent>>| {
let events: Vec<WatchEvent> = ctx.value;
let env = ctx.env;
let mut arr = env.create_array_with_length(events.len())?;
@ -194,7 +195,8 @@ pub fn watch_tree(
arr.set_element(i as u32, obj)?;
}
Ok(vec![arr])
})?;
},
)?;
let (sender, receiver) = mpsc::channel();
let mut watcher = RecommendedWatcher::new(
@ -203,7 +205,12 @@ pub fn watch_tree(
},
Config::default(),
)
.map_err(|e| Error::new(Status::GenericFailure, format!("failed to create watcher: {e}")))?;
.map_err(|e| {
Error::new(
Status::GenericFailure,
format!("failed to create watcher: {e}"),
)
})?;
let mode = if recursive {
RecursiveMode::Recursive
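
`build_ignore_set` appears to wrap the globset crate (its `GlobSet` return type above comes from there); a minimal sketch under that assumption:

```rust
use globset::{Glob, GlobSet, GlobSetBuilder};

// Compile each pattern and collect them into one matcher; any invalid
// pattern fails the whole set with a readable error.
fn build_ignore_set_sketch(patterns: &[String]) -> Result<GlobSet, String> {
    let mut builder = GlobSetBuilder::new();
    for p in patterns {
        builder.add(Glob::new(p).map_err(|e| e.to_string())?);
    }
    builder.build().map_err(|e| e.to_string())
}
```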

View file

@ -1,10 +0,0 @@
export interface RemoteConfig {
endpoint: string;
apiKey?: string;
timeout?: number;
}
export function resolveRemoteConfig(): RemoteConfig;
export function resolveRemotePreferenceConfig(hydrateTokens?: boolean): RemoteConfig;
export function getRemoteConfigStatus(): string;
export function isValidChannelId(channel: string, id: string): boolean;

View file

@ -1,8 +0,0 @@
export function setFetchAllowedUrls(hostnames: string[]): void;
export function getFetchAllowedUrls(): string[];
export function isBlockedUrl(url: string): boolean;
export function normalizeQuery(query: string): string;
export function toDedupeKey(url: string): string;
export function extractDomain(url: string): string;
export function detectFreshness(query: string): string | null;
export function detectDomainHints(query: string): string[];

View file

@ -1,2 +0,0 @@
export const SCAFFOLD_FILES: string[];
export function ensureAgenticDocsScaffold(basePath?: string): void;

View file

@ -1,24 +0,0 @@
export const PROJECT_RAG_MCP_SERVER_NAME: string;
export function detectProjectRag(projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): unknown;
export function resolveProjectRagBinary(env?: NodeJS.ProcessEnv): string | null;
export function resolveSiftBinary(env?: NodeJS.ProcessEnv): string | null;
export function detectSift(_projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): unknown;
export function ensureSiftIndexWarmup(projectRoot: string, prefs: Record<string, unknown>, options?: Record<string, unknown>): Promise<unknown>;
export function resolveProjectRagBuildJobs(env?: NodeJS.ProcessEnv): number;
export function findProjectRagSourceDir(projectRoot: string, env?: NodeJS.ProcessEnv): string | null;
export function resolveProjectRagBinaryForProject(projectRoot: string, env?: NodeJS.ProcessEnv): string | null;
export function buildProjectRagMcpServerConfig(projectRoot?: string, env?: NodeJS.ProcessEnv): Record<string, unknown>;
export function buildProjectRagBinary(projectRoot: string, env?: NodeJS.ProcessEnv): boolean;
export function ensureProjectRagMcpConfig(projectRoot: string, env?: NodeJS.ProcessEnv): void;
export function resolveCodebaseIndexerBackendName(prefs: Record<string, unknown>): string;
export function resolveEffectiveCodebaseIndexerBackendName(projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): string;
export function getCodebaseIndexerBackend(prefsOrName: Record<string, unknown> | string): unknown;
export function detectCodebaseIndexer(projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): unknown;
export function formatCodebaseIndexerStatus(projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): string;
export function buildCodeIntelligenceContextBlock(projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): string;
export function formatProjectRagStatus(projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): string;
export function formatSiftStatus(projectRoot: string, prefs: Record<string, unknown>, env?: NodeJS.ProcessEnv): string;
export const PROJECT_RAG_CODEBASE_INDEXER_BACKEND: Record<string, unknown>;
export const SIFT_CODEBASE_INDEXER_BACKEND: Record<string, unknown>;
export const NO_CODEBASE_INDEXER_BACKEND: Record<string, unknown>;
export const CODEBASE_INDEXER_BACKENDS: Record<string, unknown>;

View file

@ -1,15 +0,0 @@
export interface DocCheckResult {
checkedAt: string;
repoRoot: string;
checks: Array<{ file: string; status: string; message?: string }>;
summary: {
total: number;
ok: number;
empty: number;
stub: number;
missing: number;
};
}
export function checkDocsScaffold(repoRoot: string): DocCheckResult;
export function formatDocCheckReport(report: DocCheckResult): string;

View file

@ -1,25 +0,0 @@
export function validateTitle(title: string): string | null;
export function buildStateMarkdown(state: Record<string, unknown>): string;
export interface DoctorIssue {
severity: "error" | "warning";
code: string;
scope: string;
unitId: string;
message: string;
file?: string;
fixable?: boolean;
}
export interface DoctorReport {
ok: boolean;
basePath: string;
issues: DoctorIssue[];
fixesApplied: string[];
timing?: Record<string, number>;
scope?: string;
}
export function runSFDoctor(basePath: string, options?: Record<string, unknown>): Promise<DoctorReport>;
export function formatDoctorReport(report: DoctorReport): string;
export function formatDoctorReportJson(report: DoctorReport): string;

View file

@ -1,6 +0,0 @@
export function isSfGitignored(basePath?: string): boolean;
export function hasGitTrackedSfFiles(basePath?: string): boolean;
export function ensureGitInfoExclude(basePath?: string): void;
export function ensureGitignore(basePath?: string, options?: Record<string, unknown>): void;
export function untrackRuntimeFiles(basePath?: string): void;
export function ensurePreferences(basePath?: string): void;

View file

@ -1,52 +0,0 @@
export function nativeGetCurrentBranch(basePath: string): string;
export function nativeDetectMainBranch(basePath: string): string;
export function nativeBranchExists(basePath: string, branch: string): boolean;
export function nativeHasMergeConflicts(basePath: string): boolean;
export function nativeWorkingTreeStatus(basePath: string): string;
export function nativeHasChanges(basePath: string): boolean;
export function _resetHasChangesCache(): void;
export function nativeCommitCountBetween(basePath: string, fromRef: string, toRef: string): number;
export function nativeIsRepo(basePath: string): boolean;
export function nativeHasStagedChanges(basePath: string): boolean;
export function nativeDiffStat(basePath: string, fromRef: string, toRef: string): string;
export function nativeDiffNameStatus(basePath: string, fromRef: string, toRef: string, pathspec?: string, useMergeBase?: boolean): string[];
export function nativeDiffNumstat(basePath: string, fromRef: string, toRef: string): string;
export function nativeDiffContent(basePath: string, fromRef: string, toRef: string, pathspec?: string, exclude?: string[], useMergeBase?: boolean): string;
export function nativeLogOneline(basePath: string, fromRef?: string, toRef?: string): string[];
export function nativeWorktreeList(basePath: string): string[];
export function nativeBranchList(basePath: string, pattern?: string): string[];
export function nativeBranchListMerged(basePath: string, target: string, pattern?: string): string[];
export function nativeLsFiles(basePath: string, pathspec?: string): string[];
export function nativeForEachRef(basePath: string, prefix?: string): string[];
export function nativeConflictFiles(basePath: string): string[];
export function nativeBatchInfo(basePath: string): Record<string, unknown>;
export function nativeInit(basePath: string, initialBranch?: string): void;
export function nativeAddAll(basePath: string): void;
export function nativeAddTracked(basePath: string): void;
export function nativeAddAllWithExclusions(basePath: string, exclusions: string[]): void;
export function nativeAddPaths(basePath: string, paths: string[]): void;
export function nativeResetPaths(basePath: string, paths: string[]): void;
export function nativeCommit(basePath: string, message: string, options?: Record<string, unknown>): void;
export function nativeCheckoutBranch(basePath: string, branch: string): void;
export function nativeCheckoutTheirs(basePath: string, paths: string[]): void;
export function nativeMergeSquash(basePath: string, branch: string): void;
export function nativeMergeAbort(basePath: string): void;
export function nativeRebaseAbort(basePath: string): void;
export function nativeResetHard(basePath: string): void;
export function nativeResetSoft(basePath: string, target?: string): void;
export function nativeCommitSubject(basePath: string, ref: string): string;
export function nativeBranchDelete(basePath: string, branch: string, force?: boolean): void;
export function nativeBranchForceReset(basePath: string, branch: string, target: string): void;
export function nativeRmCached(basePath: string, paths: string[], recursive?: boolean): void;
export function nativeRmForce(basePath: string, paths: string[]): void;
export function nativeWorktreeAdd(basePath: string, wtPath: string, branch: string, createBranch?: boolean, startPoint?: string): void;
export function nativeWorktreeRemove(basePath: string, wtPath: string, force?: boolean): void;
export function nativeWorktreePrune(basePath: string): void;
export function nativeRevertCommit(basePath: string, sha: string): void;
export function nativeRevertAbort(basePath: string): void;
export function nativeUpdateRef(basePath: string, refname: string, target: string): void;
export function isNativeGitAvailable(): boolean;
export function nativeIsAncestor(basePath: string, ancestor: string, descendant: string): boolean;
export function nativeLastCommitEpoch(basePath: string, ref?: string): number;
export function nativeUnpushedCount(basePath: string, branch: string): number;
export function getCommitsBehindMain(worktreePath: string, mainRef: string): number;

View file

@ -1,30 +0,0 @@
export function clearPathCache(): void;
export function buildMilestoneFileName(milestoneId: string, suffix: string): string;
export function buildSliceFileName(sliceId: string, suffix: string): string;
export function buildTaskFileName(taskId: string, suffix: string): string;
export function resolveDir(parentDir: string, idPrefix: string): string;
export function resolveFile(dir: string, idPrefix: string, suffix: string): string;
export function resolveTaskFiles(tasksDir: string, suffix: string): string[];
export function resolveTaskJsonFiles(tasksDir: string, suffix: string): string[];
export const SF_ROOT_FILES: Record<string, string>;
export function _clearSfRootCache(): void;
export function sfRoot(basePath?: string): string;
export const projectRoot: typeof sfRoot;
export function isRunningOnSelf(basePath?: string): boolean;
export function _resetSelfDetectionCache(): void;
export function sfRuntimeRoot(basePath?: string): string;
export function milestonesDir(basePath?: string): string;
export function resolveRuntimeFile(basePath?: string): string;
export function resolveSfRootFile(basePath: string, key: string): string;
export function relSfRootFile(key: string): string;
export function resolveMilestonePath(basePath: string, milestoneId: string): string;
export function resolveMilestoneFile(basePath: string, milestoneId: string, suffix: string): string;
export function resolveSlicePath(basePath: string, milestoneId: string, sliceId: string): string;
export function resolveSliceFile(basePath: string, milestoneId: string, sliceId: string, suffix: string): string;
export function resolveTasksDir(basePath: string, milestoneId: string, sliceId: string): string;
export function resolveTaskFile(basePath: string, milestoneId: string, sliceId: string, taskId: string, suffix: string): string;
export function relMilestonePath(basePath: string, milestoneId: string): string;
export function relMilestoneFile(basePath: string, milestoneId: string, suffix: string): string;
export function relSlicePath(basePath: string, milestoneId: string, sliceId: string): string;
export function relSliceFile(basePath: string, milestoneId: string, sliceId: string, suffix: string): string;
export function relTaskFile(basePath: string, milestoneId: string, sliceId: string, taskId: string, suffix: string): string;

View file

@ -1,20 +0,0 @@
export function filterModelsByProviderModelAllow(models: unknown[], providerModelAllow: unknown, providerModelBlock: unknown): unknown[];
export function isProviderAllowedByLists(provider: string, allowedProviders: string[], blockedProviders: string[]): boolean;
export function isProviderAllowedForAdvisor(providerKey: string, prefs: Record<string, unknown>): boolean;
export function resolveModelForUnit(unitType: string): string;
export function resolveModelWithFallbacksForUnit(unitType: string, options?: Record<string, unknown>): string;
export function resolveDefaultSessionModel(sessionProvider: string): string;
export function isCustomProvider(provider: string): boolean;
export function getNextFallbackModel(currentModelId: string, modelConfig: Record<string, unknown>): string | null;
export function isTransientNetworkError(errorMsg: string): boolean;
export function validateModelId(modelId: string): boolean;
export function updatePreferencesModels(models: unknown[]): void;
export function updateSubscriptionTokensUsed(provider: string, tokensConsumed: number): void;
export function resolveDynamicRoutingConfig(): Record<string, unknown>;
export function resolvePersistModelChanges(): boolean;
export function resolveAutoSupervisorConfig(): Record<string, unknown>;
export function resolveProfileDefaults(profile: string): Record<string, unknown>;
export function resolveEffectiveProfile(): string;
export function resolveInlineLevel(): string;
export function resolveContextSelection(): string;
export function resolveSearchProviderFromPreferences(): string;

View file

@ -1,19 +0,0 @@
export function resolveSkillDiscoveryMode(): string;
export function resolveSkillStalenessDays(): number;
export function getGlobalSFPreferencesPath(): string;
export function getLegacyGlobalSFPreferencesPath(): string;
export function getProjectSFPreferencesPath(): string;
export function loadGlobalSFPreferences(): Record<string, unknown>;
export function loadProjectSFPreferences(): Record<string, unknown>;
export function loadEffectiveSFPreferences(): {
path: string;
preferences: Record<string, unknown>;
} | null;
export function _resetParseWarningFlag(): void;
export function parsePreferencesMarkdown(content: string): Record<string, unknown>;
export function applyModeDefaults(mode: string, prefs: Record<string, unknown>): Record<string, unknown>;
export function renderPreferencesForSystemPrompt(preferences: Record<string, unknown>, resolutions: Record<string, unknown>): string;
export function resolvePostUnitHooks(): string[];
export function resolvePreDispatchHooks(): string[];
export function getIsolationMode(): string;
export function resolveParallelConfig(prefs: Record<string, unknown>): Record<string, unknown>;

View file

@ -34,7 +34,7 @@ After reflection is confirmed, decide the approach based on the actual scope —
Before asking your first question, do a mandatory investigation pass. This is not optional.
1. **Scout the codebase** — use in-process `grep`, `find`, `ls`, and `lsp` first; use `codebase_search` for Sift-backed hybrid retrieval; use `scout` for broad unfamiliar areas that need a separate explorer. Understand what already exists, what patterns are established, what constraints current code imposes.
1. **Scout the codebase** — use `codebase_search` for conceptual, behavioral, or architectural discovery (e.g. "how does X work?", "where is Y handled?"); use in-process `grep`, `find`, `ls`, and `lsp` for exact identifier matches or structural navigation. Use `scout` for broad unfamiliar areas that need a separate explorer. Understand what already exists, what patterns are established, what constraints current code imposes.
2. **Check library docs — DeepWiki first.** Use `ask_question` / `read_wiki_structure` / `read_wiki_contents` (DeepWiki) as the default for any GitHub-hosted library or framework the user mentioned. Fall back to `resolve_library` / `get_library_docs` (Context7) for npm/pypi/crates packages DeepWiki doesn't have. **Context7 free tier is capped at 1000 req/month — spend those on cases DeepWiki can't cover.** Get current facts about capabilities, constraints, API shapes, version-specific behavior.
3. **Web search**`search-the-web` if the domain is unfamiliar, if you need current best practices, or if the user referenced external services/APIs you need facts about. Use `fetch_page` for full content when snippets aren't enough.

View file

@ -15,7 +15,7 @@ Apply `pm-planning` skill thinking throughout: use Working Backwards to anchor o
### Before your first question round
Do a lightweight targeted investigation so your questions are grounded in reality:
- Scout the codebase with in-process `grep`, `find`, `ls`, and `lsp` first; use `codebase_search` for Sift-backed hybrid retrieval; use `scout` for broad unfamiliar areas that need a separate explorer
- Scout the codebase: use `codebase_search` for conceptual, behavioral, or architectural discovery (e.g. "how does X work?", "where is Y handled?"); use in-process `grep`, `find`, `ls`, and `lsp` for exact identifier matches or structural navigation. Use `scout` for broad unfamiliar areas that need a separate explorer.
- If the `PROJECT CODE INTELLIGENCE` block says Project RAG is configured, use its MCP search tools for broad concept, symbol, schema, and git-history lookup before manually reading files
- Check the roadmap context above (if present) to understand what surrounds this milestone
- **Library docs — DeepWiki first.** Use `ask_question` / `read_wiki_structure` / `read_wiki_contents` (DeepWiki) for any GitHub-hosted library. Fall back to `resolve_library` / `get_library_docs` (Context7) only when DeepWiki doesn't have it (Context7 is capped at 1000 req/month free tier).

View file

@ -11,7 +11,7 @@ Your goal is **not** to center the discussion on tech stack trivia, naming conve
### Before your first question round
Do a lightweight targeted investigation so your questions are grounded in reality:
- Scout the codebase with in-process `grep`, `find`, `ls`, and `lsp` first; use `codebase_search` for Sift-backed hybrid retrieval; use `scout` for broad unfamiliar areas that need a separate explorer
- Scout the codebase: use `codebase_search` for conceptual, behavioral, or architectural discovery (e.g. "how does X work?", "where is Y handled?"); use in-process `grep`, `find`, `ls`, and `lsp` for exact identifier matches or structural navigation. Use `scout` for broad unfamiliar areas that need a separate explorer.
- Check the roadmap context above to understand what surrounds this slice — what comes before, what depends on it
- **Library docs — DeepWiki first.** Use `ask_question` / `read_wiki_structure` / `read_wiki_contents` (DeepWiki) for any GitHub-hosted library. Fall back to `resolve_library` / `get_library_docs` (Context7) only when DeepWiki doesn't have it (Context7 is capped at 1000 req/month free tier).
- Identify the 3-5 biggest behavioural unknowns: things where the user's answer will materially change what gets built

View file

@ -26,7 +26,7 @@ Never fabricate or simulate user input during this discussion. Never generate fa
- Check library docs: **DeepWiki first** (`ask_question` / `read_wiki_structure` / `read_wiki_contents`) for any GitHub-hosted library or framework — AI-indexed, no free-tier cap. Fall back to Context7 (`resolve_library` / `get_library_docs`) for npm/pypi/crates packages DeepWiki doesn't cover. Context7 free tier is 1000 req/month — don't spend those on cases DeepWiki covers.
- Do web searches (`search-the-web`) to verify the landscape — what solutions exist, what's changed recently, what's the current best practice. Use `freshness` for recency-sensitive queries, `domain` to target specific sites. Use `fetch_page` to read the full content of promising URLs when snippets aren't enough. **Budget:** You have a limited number of web searches per turn (typically 3-5). Prefer DeepWiki → Context7 → web search for docs; use `search_and_read` for one-shot topic research. Do NOT repeat the same or similar queries. Distribute searches across turns rather than clustering them.
- Scout the codebase with in-process `grep`, `find`, `ls`, and `lsp` first; use `codebase_search` for Sift-backed hybrid retrieval; use `scout` for broad unfamiliar areas that need a separate explorer. Understand what already exists, what patterns are established, what constraints current code imposes
- Scout the codebase: use `codebase_search` for conceptual, behavioral, or architectural discovery (e.g. "how does X work?", "where is Y handled?"); use in-process `grep`, `find`, `ls`, and `lsp` for exact identifier matches or structural navigation. Use `scout` for broad unfamiliar areas that need a separate explorer. Understand what already exists, what patterns are established, what constraints current code imposes.
Don't go deep — just enough that your next question reflects what's actually true rather than what you assume.

View file

@ -161,7 +161,7 @@ Templates showing the expected format for each artifact type are in:
**Code navigation:** Use `lsp` for definition, type_definition, implementation, references, incoming_calls, outgoing_calls, hover, signature, symbols, rename, code_actions, format, and diagnostics. Falls back gracefully if no server is available. Never `grep` for a symbol definition when `lsp` can resolve it semantically. Never shell out to prettier/rustfmt/gofmt when `lsp format` is available. After editing code, use `lsp diagnostics` to verify no type errors were introduced.
**Codebase exploration:** Prefer in-process SF tools first: `grep` for exact text search, `find`/`ls` for filesystem discovery, and `lsp` for structural navigation. These avoid shelling out and use SF's native backends where available. Use `.sf/CODEBASE.md` for durable orientation. If the `PROJECT CODE INTELLIGENCE` block says Project RAG is configured, use its MCP tools for broad hybrid semantic + BM25 code retrieval before manual file-by-file reading. Use `codebase_search` when Sift-backed hybrid retrieval is a better fit than exact search. Use `subagent` with `scout` for broad unfamiliar subsystem mapping that needs an explorer's judgment. Never read files one-by-one to "explore" — search first, then read what's relevant.
**Codebase exploration:** For conceptual, behavioral, or architectural discovery (e.g. "how does X work?", "where is Y handled?"), use `codebase_search` first. Its hybrid BM25+Vector retrieval is significantly more effective than grep for navigating unfamiliar logic. Use in-process SF tools like `grep` for exact text matches when you already have a specific identifier, and `find`/`ls` for literal filesystem discovery. Use `lsp` for structural navigation (definitions, references). Use `.sf/CODEBASE.md` for durable orientation. If the `PROJECT CODE INTELLIGENCE` block says Project RAG is configured, use its MCP tools for broad hybrid semantic + BM25 code retrieval before manual file-by-file reading. Never read files one-by-one to "explore" — search first, then read what's relevant.
**Swarm dispatch:** Let the system decide whether swarming fits before dispatching multiple execution subagents. Use a 2-3 worker same-model swarm only when the work splits into independent shards with explicit file/directory ownership, shard-local verification, low conflict risk, and clear wall-clock savings. Do not swarm shared-interface edits, lockfiles, migrations, single-failure debugging, or sequence-dependent work. The parent agent remains coordinator: assign ownership, synthesize results, inspect dirty files, resolve conflicts, and run final verification.

View file

@ -1,10 +0,0 @@
export function readRepoMeta(externalPath: string): Record<string, unknown>;
export function isInheritedRepo(basePath?: string): boolean;
export function validateProjectId(id: string): boolean;
export function repoIdentity(basePath?: string): Record<string, unknown>;
export function externalSfRoot(basePath?: string): string | null;
export function externalProjectsRoot(): string;
export function cleanNumberedSfVariants(projectPath: string): string;
export function hasExternalProjectState(externalPath: string): boolean;
export function ensureSfSymlink(projectPath: string): string;
export function isInsideWorktree(cwd: string): boolean;

View file

@ -1,29 +0,0 @@
export interface Span {
id: string;
name: string;
startTime: number;
endTime?: number;
attributes: Record<string, unknown>;
children: Span[];
}
export interface Trace {
id: string;
rootSpan: Span;
startTime: number;
endTime?: number;
attributes: Record<string, unknown>;
}
export function isTraceEnabled(): boolean;
export function initTraceCollector(projectRoot: string, sessionId: string | null | undefined, command: string, model: string | null): Trace | null;
export function flushTrace(projectRoot: string): void;
export function getActiveTrace(): Trace | null;
export function startUnitSpan(unitType: string, unitId: string, attributes?: Record<string, unknown>): Span | null;
export function startToolSpan(parentSpan: Span, toolName: string, toolCallId: string, attributes?: Record<string, unknown>): Span;
export function completeSpan(span: Span, status?: string): void;
export function traceEvent(span: Span, name: string, attrs: Record<string, unknown>): void;
export function traceError(span: Span, message: string, stack?: string): void;
export function findTraceSpan(id: string): Span | null;
export function setTraceCost(inputTokens: number, outputTokens: number, cacheReadTokens: number, cacheWriteTokens: number, costUsd: number): void;
export function setTraceExitCode(code: number): void;

View file

@ -1,16 +0,0 @@
export interface MilestoneRef {
id: string;
title?: string;
}
export interface SFState {
milestones: unknown[];
slices: unknown[];
tasks: unknown[];
activeMilestone?: MilestoneRef;
lastCompletedMilestone?: MilestoneRef;
activeSlice?: MilestoneRef;
activeTask?: MilestoneRef;
phase?: string;
nextAction?: string;
}