Skip to content
Open
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,3 @@
/target
.specsync/

2 changes: 1 addition & 1 deletion Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

6 changes: 4 additions & 2 deletions Cargo.toml
Original file line number Diff line number Diff line change
@@ -1,14 +1,16 @@
[package]
name = "specsync"
version = "3.2.0"
version = "3.3.0"
edition = "2024"
rust-version = "1.85"
description = "Bidirectional spec-to-code validation with schema column checking — 11 languages, single binary"
license = "MIT"
readme = "README.md"
homepage = "https://github.com/CorvidLabs/spec-sync"
repository = "https://github.com/CorvidLabs/spec-sync"
keywords = ["spec", "documentation", "validation", "coverage"]
keywords = ["spec", "documentation", "validation", "coverage", "code-quality"]
categories = ["development-tools", "command-line-utilities"]
exclude = ["tests/", "specs/", ".github/", ".specsync/"]

[[bin]]
name = "specsync"
Expand Down
80 changes: 80 additions & 0 deletions src/ai.rs
Original file line number Diff line number Diff line change
Expand Up @@ -640,6 +640,86 @@ fn run_provider(
}
}

/// Build a prompt for regenerating a spec when requirements have changed.
///
/// The prompt embeds the current spec, the updated requirements, and (when
/// available) the module's source files. Each source file is truncated to
/// `FILE_BUDGET` bytes and files are skipped entirely once `TOTAL_BUDGET`
/// bytes of source have been embedded. Truncation always lands on a UTF-8
/// character boundary, so multibyte content cannot cause a slice panic.
fn build_regen_prompt(
    module_name: &str,
    current_spec: &str,
    requirements: &str,
    source_contents: &[(String, String)],
) -> String {
    /// Per-file cap on embedded source, in bytes.
    const FILE_BUDGET: usize = 30_000;
    /// Total cap across all embedded source files, in bytes.
    const TOTAL_BUDGET: usize = 150_000;

    let mut prompt = format!(
        "You are updating a module specification for `{module_name}` because its requirements have changed.\n\n\
        ## Current Spec\n\n```markdown\n{current_spec}\n```\n\n\
        ## Updated Requirements\n\n```markdown\n{requirements}\n```\n\n"
    );

    if !source_contents.is_empty() {
        prompt.push_str("## Source Files\n\n");
        let mut total_len = 0usize;
        for (path, content) in source_contents {
            if total_len > TOTAL_BUDGET {
                prompt.push_str(&format!("(Skipping {path} — size budget exceeded)\n\n"));
                continue;
            }
            // `&content[..30_000]` would panic if byte 30 000 fell inside a
            // multibyte UTF-8 sequence; truncate on a char boundary instead.
            let truncated = truncate_on_char_boundary(content, FILE_BUDGET);
            prompt.push_str(&format!("### `{path}`\n\n```\n{truncated}\n```\n\n"));
            total_len += truncated.len();
        }
    }

    prompt.push_str(
        "## Instructions\n\n\
        Re-validate and update the spec to reflect the new requirements. Preserve the existing \
        YAML frontmatter fields (module, version, status, files, db_tables, depends_on) and \
        bump the version by 1. Keep the same markdown structure and section headings. \
        Focus on updating:\n\
        - Purpose section (if the module's role has changed)\n\
        - Public API table (if the interface should change)\n\
        - Invariants (if constraints have changed)\n\
        - Behavioral Examples (if behavior expectations have changed)\n\
        - Error Cases (if error handling should change)\n\n\
        Output ONLY the complete updated spec as valid markdown with YAML frontmatter. \
        Do not wrap in code fences.\n",
    );

    prompt
}

/// Return the longest prefix of `s` that is at most `max_bytes` long and
/// ends on a UTF-8 character boundary (so slicing it is always safe).
fn truncate_on_char_boundary(s: &str, max_bytes: usize) -> &str {
    if s.len() <= max_bytes {
        return s;
    }
    let mut end = max_bytes;
    // Walk back at most 3 bytes to the start of the current char.
    while !s.is_char_boundary(end) {
        end -= 1;
    }
    &s[..end]
}

/// Regenerate a spec file using AI when requirements have drifted.
///
/// Reads the current spec and the requirements document, gathers the source
/// files listed in the spec's frontmatter, builds a regeneration prompt, and
/// runs it through the configured provider. Returns the post-processed spec
/// text or a human-readable error string.
pub fn regenerate_spec_with_ai(
    module_name: &str,
    spec_path: &Path,
    requirements_path: &Path,
    root: &Path,
    config: &SpecSyncConfig,
    provider: &ResolvedProvider,
) -> Result<String, String> {
    let current_spec =
        fs::read_to_string(spec_path).map_err(|e| format!("Cannot read spec: {e}"))?;
    let requirements = fs::read_to_string(requirements_path)
        .map_err(|e| format!("Cannot read requirements: {e}"))?;

    // Gather the contents of every source file listed in the spec's
    // frontmatter; files that cannot be read are silently skipped, matching
    // the best-effort behavior callers expect.
    let source_contents: Vec<(String, String)> =
        crate::hash_cache::extract_frontmatter_files(&current_spec)
            .iter()
            .filter_map(|file| {
                fs::read_to_string(root.join(file))
                    .ok()
                    .map(|content| (file.clone(), content))
            })
            .collect();

    let prompt = build_regen_prompt(module_name, &current_spec, &requirements, &source_contents);
    let raw = run_provider(
        provider,
        &prompt,
        config.ai_timeout.unwrap_or(DEFAULT_AI_TIMEOUT_SECS),
    )?;
    postprocess_spec(&raw)
}

/// Strip code fences and validate frontmatter.
fn postprocess_spec(raw: &str) -> Result<String, String> {
let mut spec = raw.to_string();
Expand Down
218 changes: 218 additions & 0 deletions src/archive.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,218 @@
use colored::Colorize;
use std::fs;
use std::path::Path;

use crate::validator::find_spec_files;

/// Result of archiving tasks in a single tasks.md file.
// Public result type: derive Debug (and Clone) so callers can log,
// inspect, and duplicate results without hand-written impls.
#[derive(Debug, Clone)]
pub struct ArchiveResult {
    /// Path of the tasks.md file, relative to the project root when possible.
    pub tasks_path: String,
    /// Number of completed tasks moved into the `## Archive` section.
    pub archived_count: usize,
}

/// Archive completed tasks across all companion tasks.md files.
/// Moves `- [x]` items to an `## Archive` section at the bottom.
///
/// For each spec file, the companion `tasks.md` in the same directory is
/// rewritten in place (unless `dry_run` is set). Files that are missing,
/// unreadable, or have no completed tasks are skipped. Write failures are
/// reported to stderr and do not abort processing of the remaining files.
pub fn archive_tasks(root: &Path, specs_dir: &Path, dry_run: bool) -> Vec<ArchiveResult> {
    let mut results = Vec::new();

    for spec_path in &find_spec_files(specs_dir) {
        // The companion tasks.md lives next to the spec file.
        let Some(spec_dir) = spec_path.parent() else {
            continue;
        };
        let tasks_path = spec_dir.join("tasks.md");
        if !tasks_path.exists() {
            continue;
        }
        let Ok(content) = fs::read_to_string(&tasks_path) else {
            continue;
        };

        // Prefer a root-relative path in output; fall back to the full path.
        let rel_path = tasks_path
            .strip_prefix(root)
            .unwrap_or(&tasks_path)
            .to_string_lossy()
            .to_string();

        let Some((new_content, count)) = archive_completed_tasks(&content) else {
            continue;
        };
        if count == 0 {
            continue;
        }
        if !dry_run {
            if let Err(e) = fs::write(&tasks_path, &new_content) {
                eprintln!(
                    "{} Failed to write {}: {e}",
                    "error:".red().bold(),
                    rel_path
                );
                continue;
            }
        }
        results.push(ArchiveResult {
            tasks_path: rel_path,
            archived_count: count,
        });
    }

    results
}

/// Archive completed tasks in a tasks.md file.
/// Returns (new_content, archived_count) if any tasks were archived.
fn archive_completed_tasks(content: &str) -> Option<(String, usize)> {
let mut completed_tasks: Vec<String> = Vec::new();
let mut remaining_lines: Vec<String> = Vec::new();
let mut in_archive = false;
let mut existing_archive: Vec<String> = Vec::new();

for line in content.lines() {
let trimmed = line.trim();

// Track if we're in the archive section
if trimmed == "## Archive" {
in_archive = true;
continue;
}
if in_archive {
if trimmed.starts_with("## ") {
// Exited archive section into next section
in_archive = false;
remaining_lines.push(line.to_string());
} else {
existing_archive.push(line.to_string());
}
continue;
}

// Check for completed tasks outside the archive section
if trimmed.starts_with("- [x]") || trimmed.starts_with("- [X]") {
completed_tasks.push(line.to_string());
} else {
remaining_lines.push(line.to_string());
}
}

if completed_tasks.is_empty() {
return None;
}

let count = completed_tasks.len();

// Build new content: remaining lines + archive section
let mut new_content = remaining_lines.join("\n");

// Ensure trailing newline before archive section
if !new_content.ends_with('\n') {
new_content.push('\n');
}
new_content.push('\n');
new_content.push_str("## Archive\n\n");

// Add existing archive entries first
for line in &existing_archive {
if !line.trim().is_empty() {
new_content.push_str(line);
new_content.push('\n');
}
}

// Add newly archived tasks
for task in &completed_tasks {
new_content.push_str(task);
new_content.push('\n');
}

Some((new_content, count))
}

/// Count completed tasks across all tasks.md files (for warnings in check command).
///
/// Only tasks *outside* a `## Archive` section are counted: this number is
/// used to warn that completed tasks still need archiving, and counting
/// previously-archived items would keep the warning firing forever after an
/// archive run. Section tracking matches `archive_completed_tasks`.
pub fn count_completed_tasks(specs_dir: &Path) -> usize {
    let spec_files = find_spec_files(specs_dir);
    let mut total = 0;

    for spec_path in &spec_files {
        let Some(spec_dir) = spec_path.parent() else {
            continue;
        };
        let tasks_path = spec_dir.join("tasks.md");
        if !tasks_path.exists() {
            continue;
        }
        if let Ok(content) = fs::read_to_string(&tasks_path) {
            total += count_unarchived_completed(&content);
        }
    }

    total
}

/// Count `- [x]` / `- [X]` lines that are not inside a `## Archive` section.
fn count_unarchived_completed(content: &str) -> usize {
    let mut in_archive = false;
    let mut count = 0;
    for line in content.lines() {
        let t = line.trim();
        if t == "## Archive" {
            in_archive = true;
            continue;
        }
        if in_archive {
            // Any other `## ` heading ends the archive section.
            if t.starts_with("## ") {
                in_archive = false;
            } else {
                continue;
            }
        }
        if t.starts_with("- [x]") || t.starts_with("- [X]") {
            count += 1;
        }
    }
    count
}

#[cfg(test)]
mod tests {
    use super::*;

    // Completed tasks should move out of the Tasks section into a new
    // `## Archive` section at the bottom, leaving open tasks in place.
    #[test]
    fn test_archive_completed_tasks() {
        let content = r#"---
spec: test.spec.md
---

## Tasks

- [ ] Uncompleted task
- [x] Done task 1
- [ ] Another open task
- [x] Done task 2

## Gaps

Nothing here.
"#;

        let (new_content, count) = archive_completed_tasks(content).unwrap();
        assert_eq!(count, 2);
        assert!(new_content.contains("## Archive"));
        assert!(new_content.contains("- [x] Done task 1"));
        assert!(new_content.contains("- [x] Done task 2"));
        assert!(new_content.contains("- [ ] Uncompleted task"));
        // Archived tasks should not appear in the Tasks section
        assert!(!new_content[..new_content.find("## Archive").unwrap()].contains("- [x]"));
    }

    // A file with no completed tasks yields None (nothing to archive,
    // file is left untouched by the caller).
    #[test]
    fn test_archive_no_completed() {
        let content = r#"## Tasks

- [ ] Open task
"#;

        assert!(archive_completed_tasks(content).is_none());
    }

    // Newly completed tasks are appended after entries already present in
    // an existing `## Archive` section; nothing previously archived is lost.
    #[test]
    fn test_archive_preserves_existing() {
        let content = r#"## Tasks

- [x] New done task

## Archive

- [x] Previously archived
"#;

        let (new_content, count) = archive_completed_tasks(content).unwrap();
        assert_eq!(count, 1);
        assert!(new_content.contains("- [x] Previously archived"));
        assert!(new_content.contains("- [x] New done task"));
    }
}
Loading
Loading