diff --git a/Cargo.toml b/Cargo.toml index 0812eacf..f33ade11 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -81,6 +81,7 @@ tower = "0.5.3" tower-http = { version = "0.6.8", features = ["fs"] } rust-embed = { version = "8.11.0", features = ["mime-guess"] } mime_guess = "2.0.5" +tempfile = "3.27.0" # TUI ratatui = "0.30.0" @@ -104,7 +105,6 @@ base64 = "0.22.1" pager = "0.16.1" [dev-dependencies] -tempfile = "3.27.0" serial_test = "3.4.0" tokio = { version = "1.50.0", features = [ "rt-multi-thread", diff --git a/README.md b/README.md index c7f609db..7cfa63a5 100644 --- a/README.md +++ b/README.md @@ -118,61 +118,27 @@ Update your `claude_desktop_config.json` as follows: #### AI Hook Forwarding -Libra can record Claude Code and Gemini CLI sessions as `ai_session` history -objects. The recommended setup is to install Libra's hook forwarding once in -your project, then use your AI CLI normally. +Libra can import Claude Agent SDK managed sessions and provider-side replay +artifacts through the `claude-sdk` command group. -Claude Code: +Run a managed session through the bundled helper: ```bash -libra hooks claude install +libra claude-sdk run --prompt "Inspect src/lib.rs and summarize the bridge state" ``` -Gemini CLI: +Sync Claude provider session metadata into Libra snapshots: ```bash -libra hooks gemini install +libra claude-sdk sync-sessions +libra claude-sdk hydrate-session --provider-session-id session-a +libra claude-sdk build-evidence-input --provider-session-id session-a ``` -After installation, Libra writes hook settings into the provider's project -config: +The `--cwd` flag controls which project directory Claude SDK queries, but all +artifacts and history are persisted into the current Libra repository. -- Claude Code: `.claude/settings.json` -- Gemini CLI: `.gemini/settings.json` - -Those generated entries call the resolved Libra binary with provider lifecycle -subcommands such as `hooks claude ` and `hooks gemini `. 
- -Useful follow-up commands: - -```bash -# Check whether hooks are installed -libra hooks claude is-installed -libra hooks gemini is-installed - -# Remove Libra-managed hooks -libra hooks claude uninstall -libra hooks gemini uninstall -``` - -By default, install writes the absolute path of the current `libra` binary into -provider hook settings. If you want hooks to call a different local binary, pass -an explicit binary path: - -```bash -libra hooks claude install --binary-path "/absolute/path/to/libra" -libra hooks gemini install --binary-path "/absolute/path/to/libra" -``` - -Provider-specific notes: - -- Claude Code supports `--timeout`, for example: - `libra hooks claude install --timeout 15` -- Gemini CLI does **not** support `--timeout` -- Install / uninstall / is-installed must be run inside a Libra repository - -Once installed, use Claude Code or Gemini CLI as usual. When a session ends, -Libra persists it as an `ai_session` object that you can inspect later with: +Persisted provider session and evidence artifacts are inspectable with: ```bash libra cat-file --ai-list ai_session diff --git a/src/cli.rs b/src/cli.rs index 2b6397ec..aa01a40a 100644 --- a/src/cli.rs +++ b/src/cli.rs @@ -72,12 +72,15 @@ enum Commands { Init(command::init::InitArgs), #[command(about = "Clone a repository into a new directory")] Clone(command::clone::CloneArgs), + #[command( + name = "claude-sdk", + about = "Run or import Claude Agent SDK managed sessions" + )] + ClaudeSdk(command::claude_sdk::ClaudeSdkArgs), #[command(about = "Start Libra Code interactive TUI (with background web server)")] Code(command::code::CodeArgs), #[command(about = "Connect to Codex app-server via WebSocket")] AgentCodex(command::agent_codex::AgentCodexArgs), - #[command(about = "Unified provider hook ingestion and setup")] - Hooks(command::hooks::HooksCommand), // The rest of the commands require a repository to be present #[command(about = "Add file contents to the index")] @@ -433,7 +436,7 @@ pub 
async fn parse_async(args: Option<&[&str]>) -> CliResult<()> { }, }; match &args.command { - Commands::Init(_) | Commands::Clone(_) | Commands::Hooks(_) => {} + Commands::Init(_) | Commands::Clone(_) => {} // Config global/system scopes don't require a repository Commands::Config(cfg) if cfg.global || cfg.system => {} _ => { @@ -456,13 +459,13 @@ pub async fn parse_async(args: Option<&[&str]>) -> CliResult<()> { })?; // restore working directory as original_dir } Commands::Clone(args) => command::clone::execute_safe(args).await?, //clone will use init internally,so we don't need to set hash kind here again - Commands::Code(args) => command::code::execute(args).await, Commands::AgentCodex(args) => command::agent_codex::execute(args) .await .map_err(|e| CliError::fatal(e.to_string()))?, - Commands::Hooks(cmd) => command::hooks::execute(cmd) + Commands::ClaudeSdk(args) => command::claude_sdk::execute(args) .await .map_err(|e| CliError::fatal(e.to_string()))?, + Commands::Code(args) => command::code::execute(args).await, Commands::Add(args) => command::add::execute_safe(args).await?, Commands::Rm(args) => command::remove::execute_safe(args).await?, Commands::Restore(args) => command::restore::execute_safe(args).await?, diff --git a/src/command/cat_file.rs b/src/command/cat_file.rs index b6887d5e..2f785479 100644 --- a/src/command/cat_file.rs +++ b/src/command/cat_file.rs @@ -117,6 +117,8 @@ const AI_OBJECT_TYPES: &[&str] = &[ "decision", "snapshot", "ai_session", + "provider_session", + "evidence_input", ]; const TAG_REF_PREFIX: &str = "refs/tags/"; @@ -502,6 +504,10 @@ async fn ai_pretty_print(uuid: &str) { println!("hash: {}", hash); if type_name == "ai_session" { print_ai_session_summary(&value); + } else if type_name == "provider_session" { + print_provider_session_summary(&value); + } else if type_name == "evidence_input" { + print_evidence_input_summary(&value); } println!("---"); println!( @@ -526,6 +532,18 @@ fn print_ai_session_summary(value: 
&serde_json::Value) { } } +fn print_provider_session_summary(value: &serde_json::Value) { + for line in provider_session_summary_lines(value) { + println!("{line}"); + } +} + +fn print_evidence_input_summary(value: &serde_json::Value) { + for line in evidence_input_summary_lines(value) { + println!("{line}"); + } +} + fn ai_session_summary_lines(value: &serde_json::Value) -> Vec { let mut lines = Vec::new(); @@ -604,6 +622,108 @@ fn ai_session_summary_lines(value: &serde_json::Value) -> Vec { lines } +fn provider_session_summary_lines(value: &serde_json::Value) -> Vec { + let mut lines = Vec::new(); + + if let Some(schema) = value.get("schema").and_then(serde_json::Value::as_str) { + lines.push(format!("schema: {schema}")); + } + if let Some(provider) = value.get("provider").and_then(serde_json::Value::as_str) { + lines.push(format!("provider: {provider}")); + } + if let Some(object_id) = value.get("objectId").and_then(serde_json::Value::as_str) { + lines.push(format!("object_id: {object_id}")); + } + if let Some(provider_session_id) = value + .get("providerSessionId") + .and_then(serde_json::Value::as_str) + { + lines.push(format!("provider_session_id: {provider_session_id}")); + } + if let Some(summary) = value.get("summary").and_then(serde_json::Value::as_str) { + lines.push(format!("summary: {summary}")); + } + if let Some(cwd) = value.get("cwd").and_then(serde_json::Value::as_str) { + lines.push(format!("cwd: {cwd}")); + } + if let Some(message_sync) = value.get("messageSync") { + if let Some(message_count) = message_sync + .get("messageCount") + .and_then(serde_json::Value::as_u64) + { + lines.push(format!("message_count: {message_count}")); + } + if let Some(first_kind) = message_sync + .get("firstMessageKind") + .and_then(serde_json::Value::as_str) + { + lines.push(format!("first_message_kind: {first_kind}")); + } + if let Some(last_kind) = message_sync + .get("lastMessageKind") + .and_then(serde_json::Value::as_str) + { + 
lines.push(format!("last_message_kind: {last_kind}")); + } + } + + lines +} + +fn evidence_input_summary_lines(value: &serde_json::Value) -> Vec<String> { + let mut lines = Vec::new(); + + if let Some(schema) = value.get("schema").and_then(serde_json::Value::as_str) { + lines.push(format!("schema: {schema}")); + } + if let Some(provider) = value.get("provider").and_then(serde_json::Value::as_str) { + lines.push(format!("provider: {provider}")); + } + if let Some(object_id) = value.get("objectId").and_then(serde_json::Value::as_str) { + lines.push(format!("object_id: {object_id}")); + } + if let Some(provider_session_id) = value + .get("providerSessionId") + .and_then(serde_json::Value::as_str) + { + lines.push(format!("provider_session_id: {provider_session_id}")); + } + if let Some(summary) = value.get("summary").and_then(serde_json::Value::as_str) { + lines.push(format!("summary: {summary}")); + } + if let Some(message_count) = value + .get("messageOverview") + .and_then(|overview| overview.get("messageCount")) + .and_then(serde_json::Value::as_u64) + { + lines.push(format!("message_count: {message_count}")); + } + if let Some(assistant_count) = value + .get("contentOverview") + .and_then(|overview| overview.get("assistantMessageCount")) + .and_then(serde_json::Value::as_u64) + { + lines.push(format!("assistant_message_count: {assistant_count}")); + } + if let Some(tool_count) = value + .get("contentOverview") + .and_then(|overview| overview.get("observedTools")) + .and_then(serde_json::Value::as_object) + .map(|tools| tools.len()) + { + lines.push(format!("observed_tool_count: {tool_count}")); + } + if let Some(has_structured_output) = value + .get("runtimeSignals") + .and_then(|signals| signals.get("hasStructuredOutput")) + .and_then(serde_json::Value::as_bool) + { + lines.push(format!("has_structured_output: {has_structured_output}")); + } + + lines +} + /// Print the AI object type for a UUID. 
async fn ai_show_type(uuid: &str) { let hm = build_history_manager().await; diff --git a/src/command/claude_sdk.rs b/src/command/claude_sdk.rs new file mode 100644 index 00000000..08c5e9bb --- /dev/null +++ b/src/command/claude_sdk.rs @@ -0,0 +1,2075 @@ +//! Claude Agent SDK managed-mode command surface. + +use std::{ + collections::{BTreeMap, BTreeSet}, + path::{Path, PathBuf}, + process::Stdio, + sync::Arc, +}; + +use anyhow::{Context, Result, anyhow, bail}; +use chrono::Utc; +use clap::{Args, Parser, Subcommand}; +use serde::{Deserialize, Serialize}; +use serde_json::{Value, json}; +use tempfile::TempDir; +use tokio::{fs, io::AsyncWriteExt, process::Command}; + +use crate::{ + internal::{ + ai::{ + history::HistoryManager, + intentspec::{ + IntentDraft, ResolveContext, RiskLevel, persist_intentspec, render_summary, + repair_intentspec, resolve_intentspec, validate_intentspec, + }, + mcp::server::LibraMcpServer, + providers::claude_sdk::managed::{ + ClaudeManagedArtifact, PersistedManagedArtifactOutcome, persist_managed_artifact, + }, + }, + db, + head::Head, + }, + utils::{object::write_git_object, storage::local::LocalStorage, util}, +}; + +const DEFAULT_MODEL: &str = "claude-sonnet-4-5-20250929"; +const INTENT_EXTRACTIONS_DIR: &str = "intent-extractions"; +const INTENT_RESOLUTIONS_DIR: &str = "intent-resolutions"; +const INTENT_INPUTS_DIR: &str = "intent-inputs"; +const PROVIDER_SESSIONS_DIR: &str = "provider-sessions"; +const EVIDENCE_INPUTS_DIR: &str = "evidence-inputs"; +const EMBEDDED_HELPER_SOURCE: &str = include_str!("../internal/ai/providers/claude_sdk/helper.cjs"); + +#[derive(Parser, Debug)] +pub struct ClaudeSdkArgs { + #[command(subcommand)] + command: ClaudeSdkSubcommand, +} + +#[derive(Subcommand, Debug)] +enum ClaudeSdkSubcommand { + #[command( + about = "Import a raw Claude SDK managed artifact and persist Libra bridge artifacts" + )] + Import(ImportArtifactArgs), + #[command(about = "Run a Claude Agent SDK managed session through the bundled 
Node helper")] + Run(RunManagedArgs), + #[command( + name = "sync-sessions", + about = "Sync Claude SDK provider session metadata into Libra-managed provider session snapshots" + )] + SyncSessions(SyncSessionsArgs), + #[command( + name = "hydrate-session", + about = "Fetch Claude SDK session messages and update a Libra-managed provider session snapshot" + )] + HydrateSession(HydrateSessionArgs), + #[command( + name = "build-evidence-input", + about = "Build an EvidenceInput-style artifact from a hydrated Claude provider session" + )] + BuildEvidenceInput(BuildEvidenceInputArgs), + #[command( + name = "resolve-extraction", + about = "Resolve a persisted intent extraction artifact into a validated IntentSpec preview artifact" + )] + ResolveExtraction(ResolveExtractionArgs), + #[command( + name = "persist-intent", + about = "Persist a resolved IntentSpec preview into Libra intent history" + )] + PersistIntent(PersistIntentArgs), +} + +#[derive(Args, Debug)] +struct ImportArtifactArgs { + #[arg(long, help = "Path to a raw Claude managed artifact JSON file")] + artifact: PathBuf, +} + +#[derive(Args, Debug)] +struct RunManagedArgs { + #[arg(long, help = "Prompt text for the managed Claude SDK session")] + prompt: Option<String>, + #[arg(long, help = "Read the prompt text from a UTF-8 file")] + prompt_file: Option<PathBuf>, + #[arg(long, help = "Working directory for the Claude SDK session")] + cwd: Option<PathBuf>, + #[arg(long, default_value = DEFAULT_MODEL, help = "Claude model identifier")] + model: String, + #[arg( + long, + default_value = "default", + help = "Claude SDK permission mode passed to query()" + )] + permission_mode: String, + #[arg( + long, + help = "Optional helper timeout in seconds; when reached, Libra persists a partial managed artifact if available" + )] + timeout_seconds: Option<u64>, + #[arg( + long = "tool", + help = "Tool name to enable and allow for the managed Claude SDK session" + )] + tools: Vec<String>, + #[arg( + long, + default_value_t = true, + action = 
clap::ArgAction::Set, + help = "Whether the helper should auto-approve requested tools; set to false for live permission/decision probing" + )] + auto_approve_tools: bool, + #[arg( + long, + default_value_t = false, + action = clap::ArgAction::Set, + help = "Whether the helper should request SDKPartialAssistantMessage stream_event messages" + )] + include_partial_messages: bool, + #[arg( + long, + default_value_t = false, + action = clap::ArgAction::Set, + help = "Whether the helper should request prompt_suggestion messages after result events" + )] + prompt_suggestions: bool, + #[arg( + long, + default_value_t = false, + action = clap::ArgAction::Set, + help = "Whether the helper should request agent-generated task_progress summaries for subagents" + )] + agent_progress_summaries: bool, + #[arg( + long, + help = "Optional path to a custom helper script; defaults to the embedded helper" + )] + helper_path: Option, + #[arg( + long, + default_value = "node", + help = "Node.js executable used to run the helper" + )] + node_binary: String, +} + +#[derive(Args, Debug)] +struct SyncSessionsArgs { + #[arg(long, help = "Working directory used as the Claude SDK project dir")] + cwd: Option, + #[arg( + long, + help = "Optional provider session id to sync; defaults to all sessions in the project" + )] + provider_session_id: Option, + #[arg(long, help = "Maximum number of sessions to request from Claude SDK")] + limit: Option, + #[arg( + long, + default_value_t = 0, + help = "Number of sessions to skip before syncing" + )] + offset: usize, + #[arg( + long, + default_value_t = true, + action = clap::ArgAction::Set, + help = "Whether Claude SDK should include sessions from git worktrees when cwd is inside a repo" + )] + include_worktrees: bool, + #[arg( + long, + help = "Optional path to a custom helper script; defaults to the embedded helper" + )] + helper_path: Option, + #[arg( + long, + default_value = "node", + help = "Node.js executable used to run the helper" + )] + 
node_binary: String, +} + +#[derive(Args, Debug)] +struct HydrateSessionArgs { + #[arg(long, help = "Working directory used as the Claude SDK project dir")] + cwd: Option, + #[arg(long, help = "Provider session id to hydrate from Claude SDK")] + provider_session_id: String, + #[arg(long, help = "Maximum number of session messages to request")] + limit: Option, + #[arg( + long, + default_value_t = 0, + help = "Number of session messages to skip before hydrating" + )] + offset: usize, + #[arg( + long, + help = "Optional path to a custom helper script; defaults to the embedded helper" + )] + helper_path: Option, + #[arg( + long, + default_value = "node", + help = "Node.js executable used to run the helper" + )] + node_binary: String, +} + +#[derive(Args, Debug)] +struct BuildEvidenceInputArgs { + #[arg(long, help = "Provider session id to derive an evidence input from")] + provider_session_id: String, + #[arg( + long, + help = "Optional output path for the evidence input artifact; defaults to .libra/evidence-inputs/.json" + )] + output: Option, +} + +#[derive(Args, Debug)] +struct ResolveExtractionArgs { + #[arg(long, help = "Path to a persisted intent extraction JSON file")] + extraction: Option, + #[arg( + long, + help = "Resolve the extraction stored at .libra/intent-extractions/.json" + )] + ai_session_id: Option, + #[arg( + long, + help = "Override risk level (low|medium|high); defaults to extraction risk level or medium" + )] + risk_level: Option, + #[arg( + long, + default_value = "claude-sdk", + help = "createdBy.id used in the resolved IntentSpec preview" + )] + created_by_id: String, + #[arg( + long, + help = "Optional output path for the resolved artifact; defaults to .libra/intent-resolutions/.json" + )] + output: Option, +} + +#[derive(Args, Debug)] +struct PersistIntentArgs { + #[arg(long, help = "Path to a resolved intent preview JSON file")] + resolution: Option, + #[arg( + long, + help = "Persist the resolution stored at 
.libra/intent-resolutions/.json" + )] + ai_session_id: Option, + #[arg( + long, + help = "Optional output path for the persisted-intent binding artifact; defaults to .libra/intent-inputs/.json" + )] + output: Option, +} + +#[derive(Debug, Serialize)] +struct ClaudeSdkCommandOutput { + ok: bool, + #[serde(rename = "mode")] + command_mode: &'static str, + #[serde(rename = "providerSessionId")] + provider_session_id: String, + #[serde(rename = "aiSessionId")] + ai_session_id: String, + #[serde(rename = "aiSessionObjectHash")] + ai_session_object_hash: String, + #[serde(rename = "alreadyPersisted")] + already_persisted: bool, + #[serde( + rename = "intentExtractionPath", + skip_serializing_if = "Option::is_none" + )] + intent_extraction_path: Option, + #[serde(rename = "rawArtifactPath")] + raw_artifact_path: String, + #[serde(rename = "auditBundlePath")] + audit_bundle_path: String, +} + +#[derive(Debug, Serialize)] +struct ResolveExtractionCommandOutput { + ok: bool, + #[serde(rename = "mode")] + command_mode: &'static str, + #[serde(rename = "aiSessionId", skip_serializing_if = "Option::is_none")] + ai_session_id: Option, + #[serde(rename = "extractionPath")] + extraction_path: String, + #[serde(rename = "resolvedSpecPath")] + resolved_spec_path: String, + #[serde(rename = "riskLevel")] + risk_level: String, + summary: String, +} + +#[derive(Debug, Serialize)] +struct PersistIntentCommandOutput { + ok: bool, + #[serde(rename = "mode")] + command_mode: &'static str, + #[serde(rename = "aiSessionId", skip_serializing_if = "Option::is_none")] + ai_session_id: Option, + #[serde(rename = "resolutionPath")] + resolution_path: String, + #[serde(rename = "intentId")] + intent_id: String, + #[serde(rename = "bindingPath")] + binding_path: String, + summary: String, +} + +#[derive(Debug, Serialize)] +struct HydrateSessionCommandOutput { + ok: bool, + #[serde(rename = "mode")] + command_mode: &'static str, + #[serde(rename = "providerSessionId")] + provider_session_id: String, 
+ #[serde(rename = "objectId")] + object_id: String, + #[serde(rename = "artifactPath")] + artifact_path: String, + #[serde(rename = "messagesArtifactPath")] + messages_artifact_path: String, + #[serde(rename = "messageCount")] + message_count: usize, +} + +#[derive(Debug, Serialize)] +struct BuildEvidenceInputCommandOutput { + ok: bool, + #[serde(rename = "mode")] + command_mode: &'static str, + #[serde(rename = "providerSessionId")] + provider_session_id: String, + #[serde(rename = "providerSessionObjectId")] + provider_session_object_id: String, + #[serde(rename = "objectId")] + object_id: String, + #[serde(rename = "artifactPath")] + artifact_path: String, + #[serde(rename = "objectHash")] + object_hash: String, + #[serde(rename = "messageCount")] + message_count: usize, +} + +#[derive(Debug, Serialize)] +struct ManagedHelperRequest { + mode: &'static str, + prompt: String, + cwd: String, + model: String, + #[serde(rename = "permissionMode")] + permission_mode: String, + #[serde(rename = "timeoutSeconds", skip_serializing_if = "Option::is_none")] + timeout_seconds: Option, + tools: Vec, + #[serde(rename = "allowedTools")] + allowed_tools: Vec, + #[serde(rename = "autoApproveTools")] + auto_approve_tools: bool, + #[serde(rename = "includePartialMessages")] + include_partial_messages: bool, + #[serde(rename = "promptSuggestions")] + prompt_suggestions: bool, + #[serde(rename = "agentProgressSummaries")] + agent_progress_summaries: bool, + #[serde(rename = "outputSchema")] + output_schema: Value, +} + +#[derive(Debug, Serialize)] +struct SessionCatalogHelperRequest { + mode: &'static str, + cwd: String, + #[serde(skip_serializing_if = "Option::is_none")] + limit: Option, + offset: usize, + #[serde(rename = "includeWorktrees")] + include_worktrees: bool, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct SessionMessagesHelperRequest { + mode: &'static str, + cwd: String, + #[serde(rename = "providerSessionId")] + provider_session_id: String, + 
#[serde(skip_serializing_if = "Option::is_none")] + limit: Option, + offset: usize, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct ClaudeSdkSessionInfo { + #[serde(rename = "sessionId")] + session_id: String, + summary: String, + #[serde(rename = "lastModified")] + last_modified: i64, + #[serde(rename = "fileSize", default)] + file_size: Option, + #[serde(rename = "customTitle", default)] + custom_title: Option, + #[serde(rename = "firstPrompt", default)] + first_prompt: Option, + #[serde(rename = "gitBranch", default)] + git_branch: Option, + #[serde(default)] + cwd: Option, + #[serde(default)] + tag: Option, + #[serde(rename = "createdAt", default)] + created_at: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct PersistedProviderSessionSnapshot { + schema: String, + object_type: String, + provider: String, + #[serde(rename = "providerSessionId")] + provider_session_id: String, + #[serde(rename = "objectId")] + object_id: String, + summary: String, + #[serde( + rename = "customTitle", + default, + skip_serializing_if = "Option::is_none" + )] + custom_title: Option, + #[serde( + rename = "firstPrompt", + default, + skip_serializing_if = "Option::is_none" + )] + first_prompt: Option, + #[serde(rename = "gitBranch", default, skip_serializing_if = "Option::is_none")] + git_branch: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + cwd: Option, + #[serde(default, skip_serializing_if = "Option::is_none")] + tag: Option, + #[serde(rename = "createdAt", default, skip_serializing_if = "Option::is_none")] + created_at: Option, + #[serde(rename = "lastModified")] + last_modified: i64, + #[serde(rename = "fileSize", default, skip_serializing_if = "Option::is_none")] + file_size: Option, + #[serde(rename = "capturedAt")] + captured_at: String, + #[serde( + rename = "messageSync", + default, + skip_serializing_if = "Option::is_none" + )] + message_sync: Option, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, 
Deserialize)] +struct ProviderSessionMessageSync { + #[serde(rename = "artifactPath")] + artifact_path: String, + #[serde(rename = "messageCount")] + message_count: usize, + #[serde(rename = "kindCounts")] + kind_counts: BTreeMap, + #[serde( + rename = "firstMessageKind", + default, + skip_serializing_if = "Option::is_none" + )] + first_message_kind: Option, + #[serde( + rename = "lastMessageKind", + default, + skip_serializing_if = "Option::is_none" + )] + last_message_kind: Option, + offset: usize, + #[serde(skip_serializing_if = "Option::is_none")] + limit: Option, + #[serde(rename = "capturedAt")] + captured_at: String, +} + +#[derive(Debug, Serialize, Deserialize)] +struct ProviderSessionMessagesArtifact { + schema: String, + #[serde(rename = "providerSessionId")] + provider_session_id: String, + #[serde(rename = "objectId")] + object_id: String, + offset: usize, + #[serde(skip_serializing_if = "Option::is_none")] + limit: Option, + #[serde(rename = "capturedAt")] + captured_at: String, + messages: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +struct PersistedEvidenceInputArtifact { + schema: String, + object_type: String, + provider: String, + #[serde(rename = "objectId")] + object_id: String, + #[serde(rename = "providerSessionId")] + provider_session_id: String, + #[serde(rename = "providerSessionObjectId")] + provider_session_object_id: String, + summary: String, + #[serde(rename = "sourceArtifacts")] + source_artifacts: EvidenceInputSourceArtifacts, + #[serde(rename = "messageOverview")] + message_overview: EvidenceInputMessageOverview, + #[serde(rename = "contentOverview")] + content_overview: EvidenceInputContentOverview, + #[serde(rename = "runtimeSignals")] + runtime_signals: EvidenceInputRuntimeSignals, + #[serde(rename = "latestResult", skip_serializing_if = "Option::is_none")] + latest_result: Option, + #[serde(rename = "capturedAt")] + captured_at: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +struct 
EvidenceInputSourceArtifacts { + #[serde(rename = "providerSessionPath")] + provider_session_path: String, + #[serde(rename = "messagesPath")] + messages_path: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +struct EvidenceInputMessageOverview { + #[serde(rename = "messageCount")] + message_count: usize, + #[serde(rename = "kindCounts")] + kind_counts: BTreeMap, + #[serde(rename = "firstMessageKind", skip_serializing_if = "Option::is_none")] + first_message_kind: Option, + #[serde(rename = "lastMessageKind", skip_serializing_if = "Option::is_none")] + last_message_kind: Option, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +struct EvidenceInputContentOverview { + #[serde(rename = "assistantMessageCount")] + assistant_message_count: usize, + #[serde(rename = "userMessageCount")] + user_message_count: usize, + #[serde(rename = "observedTools")] + observed_tools: BTreeMap, + #[serde(rename = "observedPaths")] + observed_paths: Vec, + #[serde(rename = "assistantTextPreviews")] + assistant_text_previews: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +struct EvidenceInputRuntimeSignals { + #[serde(rename = "resultMessageCount")] + result_message_count: usize, + #[serde(rename = "toolRuntimeCount")] + tool_runtime_count: usize, + #[serde(rename = "taskRuntimeCount")] + task_runtime_count: usize, + #[serde(rename = "partialAssistantEventCount")] + partial_assistant_event_count: usize, + #[serde(rename = "hasStructuredOutput")] + has_structured_output: bool, + #[serde(rename = "hasPermissionDenials")] + has_permission_denials: bool, +} + +#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)] +struct EvidenceInputLatestResult { + #[serde(skip_serializing_if = "Option::is_none")] + subtype: Option, + #[serde(rename = "stopReason", skip_serializing_if = "Option::is_none")] + stop_reason: Option, + #[serde(rename = "durationMs", skip_serializing_if = "Option::is_none")] + duration_ms: Option, 
+ #[serde(rename = "durationApiMs", skip_serializing_if = "Option::is_none")] + duration_api_ms: Option, + #[serde(rename = "totalCostUsd", skip_serializing_if = "Option::is_none")] + total_cost_usd: Option, + #[serde(rename = "numTurns", skip_serializing_if = "Option::is_none")] + num_turns: Option, + #[serde(rename = "permissionDenialCount")] + permission_denial_count: usize, +} + +#[derive(Debug, Serialize)] +struct SyncSessionsCommandOutput { + ok: bool, + #[serde(rename = "mode")] + command_mode: &'static str, + #[serde(rename = "syncedCount")] + synced_count: usize, + sessions: Vec, +} + +#[derive(Debug, Serialize)] +struct SyncSessionRecord { + #[serde(rename = "providerSessionId")] + provider_session_id: String, + #[serde(rename = "objectId")] + object_id: String, + #[serde(rename = "artifactPath")] + artifact_path: String, + #[serde(rename = "objectHash")] + object_hash: String, +} + +trait HelperResponse { + type Output; + + fn parse_response(stdout: &str, stderr: &str) -> Result; +} + +impl HelperResponse for ManagedHelperRequest { + type Output = ClaudeManagedArtifact; + + fn parse_response(stdout: &str, stderr: &str) -> Result { + serde_json::from_str(stdout.trim()).with_context(|| { + format!( + "failed to parse Claude SDK helper output as a managed artifact (stderr: {})", + stderr.trim() + ) + }) + } +} + +impl HelperResponse for SessionCatalogHelperRequest { + type Output = Vec; + + fn parse_response(stdout: &str, stderr: &str) -> Result { + serde_json::from_str(stdout.trim()).with_context(|| { + format!( + "failed to parse Claude SDK helper output as a session catalog response (stderr: {})", + stderr.trim() + ) + }) + } +} + +impl HelperResponse for SessionMessagesHelperRequest { + type Output = Vec; + + fn parse_response(stdout: &str, stderr: &str) -> Result { + serde_json::from_str(stdout.trim()).with_context(|| { + format!( + "failed to parse Claude SDK helper output as a session messages response (stderr: {})", + stderr.trim() + ) + }) + } +} + 
+#[derive(Debug, Deserialize)] +struct PersistedIntentExtractionArtifact { + schema: String, + #[serde(rename = "ai_session_id")] + ai_session_id: String, + source: String, + extraction: IntentDraft, +} + +#[derive(Debug, Serialize)] +struct ResolvedIntentSpecArtifact { + schema: &'static str, + #[serde(rename = "aiSessionId", skip_serializing_if = "Option::is_none")] + ai_session_id: Option, + #[serde(rename = "extractionPath")] + extraction_path: String, + #[serde(rename = "extractionSource")] + extraction_source: String, + #[serde(rename = "riskLevel")] + risk_level: RiskLevel, + summary: String, + intentspec: crate::internal::ai::intentspec::IntentSpec, +} + +#[derive(Debug, Deserialize)] +struct PersistedIntentResolutionArtifact { + schema: String, + #[serde(rename = "aiSessionId", default)] + ai_session_id: Option, + #[serde(rename = "extractionPath")] + extraction_path: String, + #[serde(rename = "extractionSource")] + extraction_source: String, + #[serde(rename = "riskLevel")] + risk_level: RiskLevel, + summary: String, + intentspec: crate::internal::ai::intentspec::IntentSpec, +} + +#[derive(Debug, Serialize)] +struct PersistedIntentInputBindingArtifact { + schema: &'static str, + #[serde(rename = "aiSessionId", skip_serializing_if = "Option::is_none")] + ai_session_id: Option, + #[serde(rename = "resolutionPath")] + resolution_path: String, + #[serde(rename = "extractionPath")] + extraction_path: String, + #[serde(rename = "extractionSource")] + extraction_source: String, + #[serde(rename = "riskLevel")] + risk_level: RiskLevel, + #[serde(rename = "intentId")] + intent_id: String, + summary: String, +} + +#[derive(Debug)] +struct EmbeddedHelperDir { + _temp_dir: TempDir, +} + +pub async fn execute(args: ClaudeSdkArgs) -> Result<()> { + match args.command { + ClaudeSdkSubcommand::Import(args) => import_artifact(args).await, + ClaudeSdkSubcommand::Run(args) => run_managed(args).await, + ClaudeSdkSubcommand::SyncSessions(args) => sync_sessions(args).await, + 
ClaudeSdkSubcommand::HydrateSession(args) => hydrate_session(args).await,
        ClaudeSdkSubcommand::BuildEvidenceInput(args) => build_evidence_input(args).await,
        ClaudeSdkSubcommand::ResolveExtraction(args) => resolve_extraction(args).await,
        ClaudeSdkSubcommand::PersistIntent(args) => persist_intent(args).await,
    }
}

/// Resolves the Libra storage directory with the uniform "must be run inside a
/// Libra repository" error shared by every `claude-sdk` subcommand.
fn require_storage_path() -> Result<PathBuf> {
    util::try_get_storage_path(None)
        .context("claude-sdk commands must be run inside a Libra repository")
}

/// Returns the explicitly requested working directory when given; otherwise
/// falls back to the process working directory. The fallback is only evaluated
/// (and can only fail) when no explicit `--cwd` was passed.
fn resolve_cwd(cwd: Option<PathBuf>) -> Result<PathBuf> {
    match cwd {
        Some(dir) => Ok(dir),
        None => std::env::current_dir().context("failed to read current directory"),
    }
}

/// Imports a managed-session artifact from a JSON file on disk and persists it
/// into the current Libra repository.
async fn import_artifact(args: ImportArtifactArgs) -> Result<()> {
    let storage_path = require_storage_path()?;
    let artifact = read_artifact(&args.artifact).await?;
    let outcome = persist_managed_artifact(&storage_path, &artifact).await?;
    print_result("import", &outcome)?;
    Ok(())
}

/// Runs a managed Claude SDK session through the bundled Node helper and
/// persists the resulting artifact into the current repository.
async fn run_managed(args: RunManagedArgs) -> Result<()> {
    let storage_path = require_storage_path()?;
    let prompt = resolve_prompt(&args)?;
    let cwd = resolve_cwd(args.cwd)?;
    let helper_request = ManagedHelperRequest {
        mode: "query",
        prompt,
        cwd: cwd.to_string_lossy().to_string(),
        model: args.model,
        permission_mode: args.permission_mode,
        timeout_seconds: args.timeout_seconds,
        tools: args.tools.clone(),
        allowed_tools: args.tools.clone(),
        // Auto-approval is only honored when an explicit tool list exists.
        auto_approve_tools: args.auto_approve_tools && !args.tools.is_empty(),
        include_partial_messages: args.include_partial_messages,
        prompt_suggestions: args.prompt_suggestions,
        agent_progress_summaries: args.agent_progress_summaries,
        output_schema: managed_output_schema(),
    };

    // Keep the temp-dir guard alive for the duration of the helper run.
    let (_temp_helper_dir, helper_path) = materialize_helper(args.helper_path.as_deref()).await?;
    let artifact = invoke_helper(&args.node_binary, &helper_path, &helper_request).await?;
    let outcome = persist_managed_artifact(&storage_path, &artifact).await?;
    print_result("run", &outcome)?;
    Ok(())
}

/// Fetches the Claude SDK session catalog via the Node helper and persists one
/// snapshot per (optionally id-filtered) provider session.
async fn sync_sessions(args: SyncSessionsArgs) -> Result<()> {
    let storage_path = require_storage_path()?;
    let cwd = resolve_cwd(args.cwd)?;
    let helper_request = SessionCatalogHelperRequest {
        mode: "listSessions",
        cwd: cwd.to_string_lossy().to_string(),
        limit: args.limit,
        offset: args.offset,
        include_worktrees: args.include_worktrees,
    };

    let (_temp_helper_dir, helper_path) = materialize_helper(args.helper_path.as_deref()).await?;
    let sessions: Vec<ClaudeSdkSessionInfo> =
        invoke_helper_json(&args.node_binary, &helper_path, &helper_request)
            .await
            .context("failed to fetch Claude SDK session catalog")?;

    let filtered_sessions: Vec<ClaudeSdkSessionInfo> = match args.provider_session_id.as_deref() {
        Some(session_id) => sessions
            .into_iter()
            .filter(|session| session.session_id == session_id)
            .collect(),
        None => sessions,
    };

    let mut synced = Vec::new();
    for session in filtered_sessions {
        synced.push(persist_provider_session_snapshot(&storage_path, session).await?);
    }

    println!(
        "{}",
        serde_json::to_string_pretty(&SyncSessionsCommandOutput {
            ok: true,
            command_mode: "sync-sessions",
            synced_count: synced.len(),
            sessions: synced,
        })
        .context("failed to serialize sync-sessions output")?
    );

    Ok(())
}

/// Downloads the message transcript for an already-synced provider session,
/// stores it as a sibling artifact, and records a message-sync summary back
/// onto the session snapshot.
async fn hydrate_session(args: HydrateSessionArgs) -> Result<()> {
    let storage_path = require_storage_path()?;
    let cwd = resolve_cwd(args.cwd)?;
    let object_id = build_provider_session_object_id(&args.provider_session_id)?;
    let artifact_path = provider_session_artifact_path(&storage_path, &object_id);
    // Require a prior `sync-sessions` so hydration always has a snapshot to
    // attach its message summary to.
    let mut snapshot = read_persisted_provider_session_snapshot(&artifact_path)
        .await
        .with_context(|| {
            format!(
                "failed to read provider session snapshot '{}'; run `claude-sdk sync-sessions --provider-session-id {}` first",
                artifact_path.display(),
                args.provider_session_id
            )
        })?;

    let helper_request = SessionMessagesHelperRequest {
        mode: "getSessionMessages",
        cwd: cwd.to_string_lossy().to_string(),
        provider_session_id: args.provider_session_id.clone(),
        limit: args.limit,
        offset: args.offset,
    };
    let (_temp_helper_dir, helper_path) = materialize_helper(args.helper_path.as_deref()).await?;
    let messages: Vec<Value> = invoke_helper_json(&args.node_binary, &helper_path, &helper_request)
        .await
        .context("failed to fetch Claude SDK session messages")?;

    let captured_at = Utc::now().to_rfc3339();
    let messages_artifact_path = provider_session_messages_artifact_path(&storage_path, &object_id);
    let messages_artifact = ProviderSessionMessagesArtifact {
        schema: "libra.provider_session_messages.v1".to_string(),
        provider_session_id: args.provider_session_id.clone(),
        object_id: object_id.clone(),
        offset: args.offset,
        limit: args.limit,
        captured_at: captured_at.clone(),
        messages,
    };
    write_pretty_json_file(&messages_artifact_path, &messages_artifact)
        .await
        .with_context(|| {
            format!(
                "failed to write provider session messages artifact '{}'",
                messages_artifact_path.display()
            )
        })?;

    snapshot.captured_at = captured_at.clone();
    snapshot.message_sync = Some(build_provider_session_message_sync(
        &messages_artifact_path,
        &messages_artifact.messages,
        args.offset,
        args.limit,
        captured_at,
    ));
    let sync_record = upsert_provider_session_snapshot(&storage_path, &snapshot).await?;

    println!(
        "{}",
        serde_json::to_string_pretty(&HydrateSessionCommandOutput {
            ok: true,
            command_mode: "hydrate-session",
            provider_session_id: snapshot.provider_session_id,
            object_id: snapshot.object_id,
            artifact_path: sync_record.artifact_path,
            messages_artifact_path: messages_artifact_path.to_string_lossy().to_string(),
            message_count: messages_artifact.messages.len(),
        })
        .context("failed to serialize hydrate-session output")?
    );

    Ok(())
}

/// Derives an `evidence_input` artifact from a synced + hydrated provider
/// session and persists it (idempotently) into the repository.
async fn build_evidence_input(args: BuildEvidenceInputArgs) -> Result<()> {
    let storage_path = require_storage_path()?;
    let provider_session_object_id = build_provider_session_object_id(&args.provider_session_id)?;
    let provider_session_path =
        provider_session_artifact_path(&storage_path, &provider_session_object_id);
    let snapshot = read_persisted_provider_session_snapshot(&provider_session_path)
        .await
        .with_context(|| {
            format!(
                "failed to read provider session snapshot '{}'; run `claude-sdk sync-sessions --provider-session-id {}` first",
                provider_session_path.display(),
                args.provider_session_id
            )
        })?;
    let message_sync = snapshot.message_sync.as_ref().ok_or_else(|| {
        anyhow!(
            "provider session '{}' has no hydrated messages; run `claude-sdk hydrate-session --provider-session-id {}` first",
            snapshot.object_id,
            args.provider_session_id
        )
    })?;
    let messages_path = PathBuf::from(&message_sync.artifact_path);
    let messages_artifact = read_provider_session_messages_artifact(&messages_path)
        .await
        .with_context(|| {
            format!(
                "failed to read provider session messages artifact '{}'",
                messages_path.display()
            )
        })?;

    let object_id = build_evidence_input_object_id(&args.provider_session_id)?;
    let default_artifact_path = evidence_input_artifact_path(&storage_path, &object_id);
    let comparison_path = args.output.as_deref().unwrap_or(&default_artifact_path);
    let mut artifact = build_evidence_input_artifact(
        &snapshot,
        &provider_session_path,
        &messages_artifact,
        &messages_path,
        object_id,
        Utc::now().to_rfc3339(),
    );
    // Keep `captured_at` stable when nothing meaningful changed, so repeated
    // runs do not mint spurious new object hashes.
    if let Some(existing_artifact) = read_existing_evidence_input_artifact(comparison_path).await?
        && evidence_input_artifact_matches(&existing_artifact, &artifact)
    {
        artifact.captured_at = existing_artifact.captured_at;
    }
    let artifact_path = args.output.unwrap_or(default_artifact_path);
    let record = persist_evidence_input_artifact(&storage_path, &artifact_path, &artifact).await?;

    println!(
        "{}",
        serde_json::to_string_pretty(&BuildEvidenceInputCommandOutput {
            ok: true,
            command_mode: "build-evidence-input",
            provider_session_id: artifact.provider_session_id.clone(),
            provider_session_object_id: artifact.provider_session_object_id.clone(),
            object_id: artifact.object_id.clone(),
            artifact_path: record.artifact_path,
            object_hash: record.object_hash,
            message_count: artifact.message_overview.message_count,
        })
        .context("failed to serialize build-evidence-input output")?
    );

    Ok(())
}

/// Converts a persisted intent extraction into a validated, repaired, and
/// persisted IntentSpec resolution artifact.
async fn resolve_extraction(args: ResolveExtractionArgs) -> Result<()> {
    let storage_path = require_storage_path()?;
    let extraction_path = resolve_extraction_path(&storage_path, &args)?;
    let persisted = read_persisted_extraction(&extraction_path).await?;
    if persisted.schema != "libra.intent_extraction.v1" {
        bail!(
            "unsupported extraction schema '{}' in '{}'",
            persisted.schema,
            extraction_path.display()
        );
    }

    let risk_level = select_risk_level(
        args.risk_level.as_deref(),
        persisted.extraction.risk.level.clone(),
    )?;
    let working_dir =
        util::try_working_dir().context("failed to resolve repository working directory")?;
    let base_ref = current_head_sha().await;

    let mut spec = resolve_intentspec(
        persisted.extraction,
        risk_level.clone(),
        ResolveContext {
            working_dir: working_dir.display().to_string(),
            base_ref,
            created_by_id: args.created_by_id,
        },
    );

    // Bounded validate/repair loop: give the repair pass at most three
    // attempts to converge before surfacing the remaining issues.
    let mut issues = validate_intentspec(&spec);
    for _ in 0..3 {
        if issues.is_empty() {
            break;
        }
        repair_intentspec(&mut spec, &issues);
        issues = validate_intentspec(&spec);
    }
    if !issues.is_empty() {
        let report = issues
            .iter()
            .map(|issue| format!("{}: {}", issue.path, issue.message))
            .collect::<Vec<_>>()
            .join("; ");
        bail!("resolved draft is still invalid after repair: {report}");
    }

    let summary = render_summary(&spec, None);
    let resolved_artifact = ResolvedIntentSpecArtifact {
        schema: "libra.intent_resolution.v1",
        ai_session_id: Some(persisted.ai_session_id.clone()),
        extraction_path: extraction_path.to_string_lossy().to_string(),
        extraction_source: persisted.source,
        risk_level: risk_level.clone(),
        summary: summary.clone(),
        intentspec: spec,
    };
    let output_path = match args.output {
        Some(path) => path,
        None => storage_path
            .join(INTENT_RESOLUTIONS_DIR)
            .join(format!("{}.json", persisted.ai_session_id)),
    };
    write_pretty_json_file(&output_path, &resolved_artifact).await?;

    let payload = ResolveExtractionCommandOutput {
        ok: true,
        command_mode: "resolve-extraction",
        ai_session_id: Some(persisted.ai_session_id),
        extraction_path: extraction_path.to_string_lossy().to_string(),
        resolved_spec_path: output_path.to_string_lossy().to_string(),
        risk_level: risk_level_label(&risk_level).to_string(),
        summary,
    };
    println!(
        "{}",
        serde_json::to_string_pretty(&payload)
            .context("failed to serialize resolve-extraction output")?
    );
    Ok(())
}

/// Persists a resolved IntentSpec as a first-class intent object and writes an
/// input-binding artifact linking it back to its extraction sources.
async fn persist_intent(args: PersistIntentArgs) -> Result<()> {
    let storage_path = require_storage_path()?;
    let resolution_path = resolve_resolution_path(&storage_path, &args)?;
    let resolved = read_persisted_resolution(&resolution_path).await?;
    if resolved.schema != "libra.intent_resolution.v1" {
        bail!(
            "unsupported resolution schema '{}' in '{}'",
            resolved.schema,
            resolution_path.display()
        );
    }

    let mcp_server = init_local_mcp_server(&storage_path).await?;
    let intent_id = persist_intentspec(&resolved.intentspec, mcp_server.as_ref()).await?;

    let binding_artifact = PersistedIntentInputBindingArtifact {
        schema: "libra.intent_input_binding.v1",
        ai_session_id: resolved.ai_session_id.clone(),
        resolution_path: resolution_path.to_string_lossy().to_string(),
        extraction_path: resolved.extraction_path.clone(),
        extraction_source: resolved.extraction_source.clone(),
        risk_level: resolved.risk_level,
        intent_id: intent_id.clone(),
        summary: resolved.summary.clone(),
    };
    // Bindings are keyed by AI session when known, otherwise by intent id.
    let binding_path = match args.output {
        Some(path) => path,
        None => storage_path.join(INTENT_INPUTS_DIR).join(format!(
            "{}.json",
            resolved
                .ai_session_id
                .clone()
                .unwrap_or_else(|| intent_id.clone())
        )),
    };
    write_pretty_json_file(&binding_path, &binding_artifact).await?;

    let payload = PersistIntentCommandOutput {
        ok: true,
        command_mode: "persist-intent",
        ai_session_id: resolved.ai_session_id,
        resolution_path: resolution_path.to_string_lossy().to_string(),
        intent_id,
        binding_path: binding_path.to_string_lossy().to_string(),
        summary: binding_artifact.summary,
    };
    println!(
        "{}",
        serde_json::to_string_pretty(&payload)
            .context("failed to serialize persist-intent output")?
    );
    Ok(())
}

/// Reads and parses a managed-session artifact from disk.
/// NOTE(review): the concrete artifact type was lost in transit; it is
/// expressed here as the helper-response type associated with
/// `ManagedHelperRequest` — confirm against the type definitions.
async fn read_artifact(path: &Path) -> Result<<ManagedHelperRequest as HelperResponse>::Response> {
    let content = fs::read_to_string(path)
        .await
        .with_context(|| format!("failed to read managed artifact '{}'", path.display()))?;
    serde_json::from_str(&content)
        .with_context(|| format!("failed to parse managed artifact '{}'", path.display()))
}

/// Reads a persisted intent extraction (`libra.intent_extraction.v1`).
/// NOTE(review): the return type was lost in transit; the name is inferred
/// from the fields used by `resolve_extraction` — confirm.
async fn read_persisted_extraction(path: &Path) -> Result<PersistedExtractionArtifact> {
    let content = fs::read_to_string(path)
        .await
        .with_context(|| format!("failed to read persisted extraction '{}'", path.display()))?;
    serde_json::from_str(&content)
        .with_context(|| format!("failed to parse persisted extraction '{}'", path.display()))
}

/// Reads a persisted intent resolution (`libra.intent_resolution.v1`) as
/// written by `resolve_extraction`.
/// NOTE(review): the return type was lost in transit; a dedicated owned
/// "persisted" struct is assumed because `ResolvedIntentSpecArtifact` carries
/// a `&'static str` schema — confirm.
async fn read_persisted_resolution(path: &Path) -> Result<PersistedResolutionArtifact> {
    let content = fs::read_to_string(path)
        .await
        .with_context(|| format!("failed to read persisted resolution '{}'", path.display()))?;
    serde_json::from_str(&content)
        .with_context(|| format!("failed to parse persisted resolution '{}'", path.display()))
}

/// Resolves the managed-run prompt from exactly one of `--prompt` /
/// `--prompt-file`.
fn resolve_prompt(args: &RunManagedArgs) -> Result<String> {
    match (&args.prompt, &args.prompt_file) {
        (Some(prompt), None) => Ok(prompt.clone()),
        (None, Some(path)) => std::fs::read_to_string(path)
            .with_context(|| format!("failed to read prompt file '{}'", path.display())),
        (Some(_), Some(_)) => bail!("pass either --prompt or --prompt-file, not both"),
        (None, None) => bail!("missing prompt; pass --prompt or --prompt-file"),
    }
}

/// Resolves the extraction artifact path from exactly one of `--extraction` /
/// `--ai-session-id`.
fn resolve_extraction_path(storage_path: &Path, args: &ResolveExtractionArgs) -> Result<PathBuf> {
    match (&args.extraction, &args.ai_session_id) {
        (Some(path), None) => Ok(path.clone()),
        (None, Some(ai_session_id)) => Ok(storage_path
            .join(INTENT_EXTRACTIONS_DIR)
            .join(format!("{ai_session_id}.json"))),
        (Some(_), Some(_)) => bail!("pass either --extraction or --ai-session-id, not both"),
        (None, None) => bail!("missing extraction input; pass --extraction or --ai-session-id"),
    }
}

/// Resolves the resolution artifact path from exactly one of `--resolution` /
/// `--ai-session-id`.
fn resolve_resolution_path(storage_path: &Path, args: &PersistIntentArgs) -> Result<PathBuf> {
    match (&args.resolution, &args.ai_session_id) {
        (Some(path), None) => Ok(path.clone()),
        (None, Some(ai_session_id)) => Ok(storage_path
            .join(INTENT_RESOLUTIONS_DIR)
            .join(format!("{ai_session_id}.json"))),
        (Some(_), Some(_)) => bail!("pass either --resolution or --ai-session-id, not both"),
        (None, None) => bail!("missing resolution input; pass --resolution or --ai-session-id"),
    }
}

/// Picks the effective risk level: an explicit CLI override wins, then the
/// level suggested by the extraction draft, then `Medium`.
fn select_risk_level(
    override_value: Option<&str>,
    draft_level: Option<RiskLevel>,
) -> Result<RiskLevel> {
    if let Some(raw) = override_value {
        return parse_risk_level(raw);
    }
    Ok(draft_level.unwrap_or(RiskLevel::Medium))
}

/// Parses a user-supplied risk level (case-insensitive, whitespace-trimmed).
fn parse_risk_level(raw: &str) -> Result<RiskLevel> {
    match raw.trim().to_ascii_lowercase().as_str() {
        "low" => Ok(RiskLevel::Low),
        "medium" => Ok(RiskLevel::Medium),
        "high" => Ok(RiskLevel::High),
        other => bail!("unsupported risk level '{other}'; expected one of low, medium, high"),
    }
}

/// Lowercase label for a risk level, used in command output payloads.
fn risk_level_label(level: &RiskLevel) -> &'static str {
    match level {
        RiskLevel::Low => "low",
        RiskLevel::Medium => "medium",
        RiskLevel::High => "high",
    }
}

/// Best-effort HEAD commit hash; falls back to the literal "HEAD" when no
/// commit is available (e.g. an empty repository).
async fn current_head_sha() -> String {
    Head::current_commit()
        .await
        .map(|hash| hash.to_string())
        .unwrap_or_else(|| "HEAD".to_string())
}

/// Builds a process-local `LibraMcpServer` backed by the repository's object
/// store and SQLite database, used for history append/lookup operations.
async fn init_local_mcp_server(storage_dir: &Path) -> Result<Arc<LibraMcpServer>> {
    let objects_dir = storage_dir.join("objects");

    fs::create_dir_all(&objects_dir).await.with_context(|| {
        format!(
            "failed to create local MCP storage directory '{}'",
            objects_dir.display()
        )
    })?;

    let db_path = storage_dir.join("libra.db");
    let db_path_str = db_path
        .to_str()
        .ok_or_else(|| anyhow!("database path '{}' is not valid UTF-8", db_path.display()))?;
    // NOTE(review): presumably the SQLite connection string wants forward
    // slashes on Windows — confirm against `db::establish_connection`.
    #[cfg(target_os = "windows")]
    let db_path_string = db_path_str.replace("\\", "/");
    #[cfg(target_os = "windows")]
    let db_path_str = db_path_string.as_str();

    let db_conn = Arc::new(
        db::establish_connection(db_path_str)
            .await
            .with_context(|| format!("failed to connect to database '{}'", db_path.display()))?,
    );
    let storage = Arc::new(LocalStorage::new(objects_dir));
    let history_manager = Arc::new(HistoryManager::new(
        storage.clone(),
        storage_dir.to_path_buf(),
        db_conn,
    ));

    Ok(Arc::new(LibraMcpServer::new(
        Some(history_manager),
        Some(storage),
    )))
}

/// Serializes `value` as pretty-printed JSON to `path`, creating parent
/// directories as needed.
async fn write_pretty_json_file<T>(path: &Path, value: &T) -> Result<()>
where
    T: Serialize,
{
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent).await.with_context(|| {
            format!(
                "failed to create directory for resolved artifact '{}'",
                parent.display()
            )
        })?;
    }
    let payload = serde_json::to_vec_pretty(value).context("failed to serialize JSON artifact")?;
    fs::write(path, payload)
        .await
        .with_context(|| format!("failed to write JSON artifact '{}'", path.display()))
}

/// Validates that a provider session id is safe to embed in object ids and
/// file names: non-empty, at most 128 chars, `[A-Za-z0-9._-]` only.
fn validate_provider_session_id(provider_session_id: &str) -> Result<()> {
    if provider_session_id.is_empty() {
        bail!("invalid provider session id: must not be empty");
    }
    if provider_session_id.len() > 128 {
        bail!("invalid provider session id: exceeds 128 characters");
    }
    if !provider_session_id
        .chars()
        .all(|char| char.is_ascii_alphanumeric() || matches!(char, '.' | '_' | '-'))
    {
        bail!("invalid provider session id: only [A-Za-z0-9._-] is allowed");
    }
    Ok(())
}

/// Object id under which a provider session snapshot is tracked in history.
fn build_provider_session_object_id(provider_session_id: &str) -> Result<String> {
    validate_provider_session_id(provider_session_id)?;
    Ok(format!("claude_provider_session__{provider_session_id}"))
}

/// On-disk path of a provider session snapshot artifact.
fn provider_session_artifact_path(storage_path: &Path, object_id: &str) -> PathBuf {
    storage_path
        .join(PROVIDER_SESSIONS_DIR)
        .join(format!("{object_id}.json"))
}

/// On-disk path of the message transcript artifact for a provider session.
fn provider_session_messages_artifact_path(storage_path: &Path, object_id: &str) -> PathBuf {
    storage_path
        .join(PROVIDER_SESSIONS_DIR)
        .join(format!("{object_id}.messages.json"))
}

/// Object id under which an evidence-input artifact is tracked in history.
fn build_evidence_input_object_id(provider_session_id: &str) -> Result<String> {
    validate_provider_session_id(provider_session_id)?;
    Ok(format!("claude_evidence_input__{provider_session_id}"))
}

/// Default on-disk path of an evidence-input artifact.
fn evidence_input_artifact_path(storage_path: &Path, object_id: &str) -> PathBuf {
    storage_path
        .join(EVIDENCE_INPUTS_DIR)
        .join(format!("{object_id}.json"))
}

/// Reads and parses a provider session snapshot from disk.
async fn read_persisted_provider_session_snapshot(
    path: &Path,
) -> Result<PersistedProviderSessionSnapshot> {
    let content = fs::read_to_string(path).await.with_context(|| {
        format!(
            "failed to read provider session snapshot '{}'",
            path.display()
        )
    })?;
    serde_json::from_str(&content).with_context(|| {
        format!(
            "failed to parse provider session snapshot '{}'",
            path.display()
        )
    })
}

/// Reads an evidence-input artifact if one already exists at `path`.
async fn read_existing_evidence_input_artifact(
    path: &Path,
) -> Result<Option<PersistedEvidenceInputArtifact>> {
    if !path.exists() {
        return Ok(None);
    }

    let content = fs::read_to_string(path).await.with_context(|| {
        format!(
            "failed to read evidence input artifact '{}'",
            path.display()
        )
    })?;
    let artifact = serde_json::from_str(&content).with_context(|| {
        format!(
            "failed to parse evidence input artifact '{}'",
            path.display()
        )
    })?;
    Ok(Some(artifact))
}

/// Content equality for evidence-input artifacts, deliberately ignoring
/// `captured_at` so unchanged rebuilds stay idempotent.
fn evidence_input_artifact_matches(
    existing: &PersistedEvidenceInputArtifact,
    candidate: &PersistedEvidenceInputArtifact,
) -> bool {
    existing.schema == candidate.schema
        && existing.object_type == candidate.object_type
        && existing.provider == candidate.provider
        && existing.object_id == candidate.object_id
        && existing.provider_session_id == candidate.provider_session_id
        && existing.provider_session_object_id == candidate.provider_session_object_id
        && existing.summary == candidate.summary
        && existing.source_artifacts == candidate.source_artifacts
        && existing.message_overview == candidate.message_overview
        && existing.content_overview == candidate.content_overview
        && existing.runtime_signals == candidate.runtime_signals
        && existing.latest_result == candidate.latest_result
}

/// Reads and parses a provider session message transcript from disk.
async fn read_provider_session_messages_artifact(
    path: &Path,
) -> Result<ProviderSessionMessagesArtifact> {
    let content = fs::read_to_string(path).await.with_context(|| {
        format!(
            "failed to read provider session messages artifact '{}'",
            path.display()
        )
    })?;
    serde_json::from_str(&content).with_context(|| {
        format!(
            "failed to parse provider session messages artifact '{}'",
            path.display()
        )
    })
}

/// Summarizes a fetched message transcript (counts by kind, first/last kind)
/// for embedding into the session snapshot.
fn build_provider_session_message_sync(
    artifact_path: &Path,
    messages: &[Value],
    offset: usize,
    limit: Option<usize>,
    captured_at: String,
) -> ProviderSessionMessageSync {
    let mut kind_counts: BTreeMap<String, usize> = BTreeMap::new();
    let mut kinds = Vec::new();
    for message in messages {
        if let Some(kind) = provider_message_kind(message) {
            *kind_counts.entry(kind.clone()).or_insert(0) += 1;
            kinds.push(kind);
        }
    }

    ProviderSessionMessageSync {
        artifact_path: artifact_path.to_string_lossy().to_string(),
        message_count: messages.len(),
        kind_counts,
        first_message_kind: kinds.first().cloned(),
        last_message_kind: kinds.last().cloned(),
        offset,
        limit,
        captured_at,
    }
}

/// Message kind string: `type` or `type:subtype` when a subtype is present.
fn provider_message_kind(message: &Value) -> Option<String> {
    let message_type = message.get("type").and_then(Value::as_str)?;
    match message.get("subtype").and_then(Value::as_str) {
        Some(subtype) => Some(format!("{message_type}:{subtype}")),
        None => Some(message_type.to_string()),
    }
}

/// Walks the full message transcript and distills it into the evidence-input
/// artifact: message/content overviews, runtime signals, and the latest
/// `result` message summary.
fn build_evidence_input_artifact(
    snapshot: &PersistedProviderSessionSnapshot,
    provider_session_path: &Path,
    messages_artifact: &ProviderSessionMessagesArtifact,
    messages_path: &Path,
    object_id: String,
    captured_at: String,
) -> PersistedEvidenceInputArtifact {
    let mut assistant_message_count = 0usize;
    let mut user_message_count = 0usize;
    let mut observed_tools: BTreeMap<String, usize> = BTreeMap::new();
    let mut observed_paths: BTreeSet<String> = BTreeSet::new();
    let mut assistant_text_previews = Vec::new();
    let mut result_message_count = 0usize;
    let mut tool_runtime_count = 0usize;
    let mut task_runtime_count = 0usize;
    let mut partial_assistant_event_count = 0usize;
    let mut has_structured_output = false;
    let mut has_permission_denials = false;
    let mut latest_result = None;

    for message in &messages_artifact.messages {
        match message.get("type").and_then(Value::as_str) {
            Some("assistant") => {
                assistant_message_count += 1;
                collect_message_content_evidence(
                    message,
                    &mut observed_tools,
                    &mut observed_paths,
                    &mut assistant_text_previews,
                );
            }
            Some("user") => {
                user_message_count += 1;
                // User content still contributes tools/paths, but its text is
                // never previewed (throwaway Vec).
                collect_message_content_evidence(
                    message,
                    &mut observed_tools,
                    &mut observed_paths,
                    &mut Vec::new(),
                );
            }
            Some("result") => {
                result_message_count += 1;
                has_structured_output |= message.get("structured_output").is_some();
                let permission_denial_count = message
                    .get("permission_denials")
                    .and_then(Value::as_array)
                    .map_or(0, Vec::len);
                has_permission_denials |= permission_denial_count > 0;
                // Later result messages overwrite earlier ones on purpose.
                latest_result = Some(EvidenceInputLatestResult {
                    subtype: message
                        .get("subtype")
                        .and_then(Value::as_str)
                        .map(ToOwned::to_owned),
                    stop_reason: message
                        .get("stop_reason")
                        .and_then(Value::as_str)
                        .map(ToOwned::to_owned),
                    duration_ms: message.get("duration_ms").and_then(Value::as_u64),
                    duration_api_ms: message.get("duration_api_ms").and_then(Value::as_u64),
                    total_cost_usd: message.get("total_cost_usd").and_then(Value::as_f64),
                    num_turns: message.get("num_turns").and_then(Value::as_u64),
                    permission_denial_count,
                });
            }
            Some("tool_progress" | "tool_use_summary") => {
                tool_runtime_count += 1;
                if let Some(tool_name) = message.get("tool_name").and_then(Value::as_str) {
                    *observed_tools.entry(tool_name.to_string()).or_insert(0) += 1;
                }
            }
            // Deliberately ordered before the `stream_event` arm: any message
            // carrying a task_* subtype counts as task runtime, whatever its
            // top-level type.
            _ if matches!(
                message.get("subtype").and_then(Value::as_str),
                Some("task_started" | "task_progress" | "task_notification")
            ) =>
            {
                task_runtime_count += 1;
            }
            Some("stream_event") => {
                partial_assistant_event_count += 1;
            }
            _ => {}
        }
    }

    PersistedEvidenceInputArtifact {
        schema: "libra.evidence_input.v1".to_string(),
        object_type: "evidence_input".to_string(),
        provider: "claude".to_string(),
        object_id,
        provider_session_id: snapshot.provider_session_id.clone(),
        provider_session_object_id: snapshot.object_id.clone(),
        summary: snapshot.summary.clone(),
        source_artifacts: EvidenceInputSourceArtifacts {
            provider_session_path: provider_session_path.to_string_lossy().to_string(),
            messages_path: messages_path.to_string_lossy().to_string(),
        },
        message_overview: EvidenceInputMessageOverview {
            message_count: messages_artifact.messages.len(),
            kind_counts: snapshot
                .message_sync
                .as_ref()
                .map(|sync| sync.kind_counts.clone())
                .unwrap_or_default(),
            first_message_kind: snapshot
                .message_sync
                .as_ref()
                .and_then(|sync| sync.first_message_kind.clone()),
            last_message_kind: snapshot
                .message_sync
                .as_ref()
                .and_then(|sync| sync.last_message_kind.clone()),
        },
        content_overview: EvidenceInputContentOverview {
            assistant_message_count,
            user_message_count,
            observed_tools,
            observed_paths: observed_paths.into_iter().collect(),
            assistant_text_previews,
        },
        runtime_signals: EvidenceInputRuntimeSignals {
            result_message_count,
            tool_runtime_count,
            task_runtime_count,
            partial_assistant_event_count,
            has_structured_output,
            has_permission_denials,
        },
        latest_result,
        captured_at,
    }
}

/// Extracts tool usage, touched paths, and (up to three) text previews from a
/// message's content blocks.
fn collect_message_content_evidence(
    message: &Value,
    observed_tools: &mut BTreeMap<String, usize>,
    observed_paths: &mut BTreeSet<String>,
    assistant_text_previews: &mut Vec<String>,
) {
    let blocks = message
        .get("message")
        .and_then(|inner| inner.get("content"))
        .and_then(Value::as_array);
    let Some(blocks) = blocks else {
        return;
    };

    for block in blocks {
        if let Some(text) = block.get("text").and_then(Value::as_str)
            && assistant_text_previews.len() < 3
        {
            let normalized = normalize_text_preview(text);
            if !normalized.is_empty() {
                assistant_text_previews.push(normalized);
            }
        }

        if block.get("type").and_then(Value::as_str) == Some("tool_use") {
            if let Some(tool_name) = block.get("name").and_then(Value::as_str) {
                *observed_tools.entry(tool_name.to_string()).or_insert(0) += 1;
            }
            if let Some(input) = block.get("input") {
                collect_path_candidates(input, observed_paths);
            }
        }
    }
}

/// Recursively collects path-like strings out of a tool-input JSON value,
/// looking at well-known scalar keys and list keys.
fn collect_path_candidates(value: &Value, observed_paths: &mut BTreeSet<String>) {
    match value {
        Value::Object(map) => {
            for (key, nested) in map {
                if matches!(
                    key.as_str(),
                    "file_path"
                        | "path"
                        | "cwd"
                        | "worktree_path"
                        | "trigger_file_path"
                        | "parent_file_path"
                ) {
                    if let Some(path) = nested.as_str() {
                        observed_paths.insert(path.to_string());
                    }
                } else if matches!(key.as_str(), "paths" | "files") {
                    if let Some(items) = nested.as_array() {
                        for item in items {
                            if let Some(path) = item.as_str() {
                                observed_paths.insert(path.to_string());
                            }
                        }
                    }
                } else {
                    collect_path_candidates(nested, observed_paths);
                }
            }
        }
        Value::Array(items) => {
            for item in items {
                collect_path_candidates(item, observed_paths);
            }
        }
        _ => {}
    }
}

/// Collapses whitespace and truncates (by chars, not bytes) to a short
/// preview, appending "..." when truncated.
fn normalize_text_preview(text: &str) -> String {
    const PREVIEW_CHAR_LIMIT: usize = 180;
    let compact = text.split_whitespace().collect::<Vec<_>>().join(" ");
    if compact.chars().count() <= PREVIEW_CHAR_LIMIT {
        compact
    } else {
        let preview: String = compact.chars().take(PREVIEW_CHAR_LIMIT).collect();
        format!("{preview}...")
    }
}

/// Persists (or refreshes) a provider-session snapshot, preserving the
/// existing `captured_at` and message-sync state when nothing changed.
async fn persist_provider_session_snapshot(
    storage_path: &Path,
    session: ClaudeSdkSessionInfo,
) -> Result<SyncSessionRecord> {
    let existing_snapshot =
        read_existing_provider_session_snapshot(storage_path, &session.session_id).await?;
    let object_id = build_provider_session_object_id(&session.session_id)?;
    let mut snapshot = PersistedProviderSessionSnapshot {
        schema: "libra.provider_session.v3".to_string(),
        object_type: "provider_session".to_string(),
        provider: "claude".to_string(),
        object_id,
        provider_session_id: session.session_id,
        summary: session.summary,
        custom_title: session.custom_title,
        first_prompt: session.first_prompt,
        git_branch: session.git_branch,
        cwd: session.cwd,
        tag: session.tag,
        created_at: session.created_at,
        last_modified: session.last_modified,
        file_size: session.file_size,
        captured_at: Utc::now().to_rfc3339(),
        // Message-sync state survives catalog refreshes.
        message_sync: existing_snapshot
            .as_ref()
            .and_then(|snapshot| snapshot.message_sync.clone()),
    };
    if let Some(existing_snapshot) = &existing_snapshot
        && provider_session_snapshot_matches(existing_snapshot, &snapshot)
    {
        snapshot.captured_at = existing_snapshot.captured_at.clone();
    }

    upsert_provider_session_snapshot(storage_path, &snapshot).await
}

/// Writes the snapshot artifact, stores it as a git blob object, and appends
/// it to the `provider_session` history only when the content hash changed.
async fn upsert_provider_session_snapshot(
    storage_path: &Path,
    snapshot: &PersistedProviderSessionSnapshot,
) -> Result<SyncSessionRecord> {
    let artifact_path = provider_session_artifact_path(storage_path, &snapshot.object_id);
    write_pretty_json_file(&artifact_path, snapshot)
        .await
        .with_context(|| {
            format!(
                "failed to write provider session snapshot '{}'",
                artifact_path.display()
            )
        })?;

    let payload = serde_json::to_vec_pretty(snapshot)
        .context("failed to serialize provider session snapshot")?;
    let object_hash = write_git_object(storage_path, "blob", &payload)
        .context("failed to write provider session snapshot object")?;
    let mcp_server = init_local_mcp_server(storage_path).await?;
    let history = mcp_server
        .intent_history_manager
        .as_ref()
        .ok_or_else(|| anyhow!("local MCP history manager is unavailable"))?;
    let existing_hash = history
        .get_object_hash("provider_session", &snapshot.object_id)
        .await
        .context("failed to inspect existing provider session history")?;
    // Append only on content change, keeping the history deduplicated.
    if existing_hash != Some(object_hash) {
        history
            .append("provider_session", &snapshot.object_id, object_hash)
            .await
            .context("failed to append provider session snapshot to history")?;
    }

    Ok(SyncSessionRecord {
        provider_session_id: snapshot.provider_session_id.clone(),
        object_id: snapshot.object_id.clone(),
        artifact_path: artifact_path.to_string_lossy().to_string(),
        object_hash: object_hash.to_string(),
    })
}

/// Writes the evidence-input artifact, stores it as a git blob object, and
/// appends it to the `evidence_input` history only when the hash changed.
async fn persist_evidence_input_artifact(
    storage_path: &Path,
    artifact_path: &Path,
    artifact: &PersistedEvidenceInputArtifact,
) -> Result<SyncSessionRecord> {
    write_pretty_json_file(artifact_path, artifact)
        .await
        .with_context(|| {
            format!(
                "failed to write evidence input artifact '{}'",
                artifact_path.display()
            )
        })?;

    let payload = serde_json::to_vec_pretty(artifact)
        .context("failed to serialize evidence input artifact")?;
    let object_hash = write_git_object(storage_path, "blob", &payload)
        .context("failed to write evidence input object")?;
    let mcp_server = init_local_mcp_server(storage_path).await?;
    let history = mcp_server
        .intent_history_manager
        .as_ref()
        .ok_or_else(|| anyhow!("local MCP history manager is unavailable"))?;
    let existing_hash = history
        .get_object_hash("evidence_input", &artifact.object_id)
        .await
        .context("failed to inspect existing evidence input history")?;
    if existing_hash != Some(object_hash) {
        history
            .append("evidence_input", &artifact.object_id, object_hash)
            .await
            .context("failed to append evidence input to history")?;
    }

    Ok(SyncSessionRecord {
        provider_session_id: artifact.provider_session_id.clone(),
        object_id: artifact.object_id.clone(),
        artifact_path: artifact_path.to_string_lossy().to_string(),
        object_hash: object_hash.to_string(),
    })
}

/// Loads the snapshot for a provider session if one was synced before.
async fn read_existing_provider_session_snapshot(
    storage_path: &Path,
    provider_session_id: &str,
) -> Result<Option<PersistedProviderSessionSnapshot>> {
    let object_id = build_provider_session_object_id(provider_session_id)?;
    let artifact_path = provider_session_artifact_path(storage_path, &object_id);
    if !artifact_path.exists() {
        return Ok(None);
    }

    let snapshot = read_persisted_provider_session_snapshot(&artifact_path)
        .await
        .with_context(|| {
            format!(
                "failed to refresh provider session snapshot '{}'",
                artifact_path.display()
            )
        })?;
    Ok(Some(snapshot))
}

/// Content equality for provider session snapshots, deliberately ignoring
/// `captured_at` so unchanged catalog refreshes stay idempotent.
fn provider_session_snapshot_matches(
    existing: &PersistedProviderSessionSnapshot,
    candidate: &PersistedProviderSessionSnapshot,
) -> bool {
    existing.schema == candidate.schema
        && existing.object_type == candidate.object_type
        && existing.provider == candidate.provider
        && existing.provider_session_id == candidate.provider_session_id
        && existing.object_id == candidate.object_id
        && existing.summary == candidate.summary
        && existing.custom_title == candidate.custom_title
        && existing.first_prompt == candidate.first_prompt
        && existing.git_branch == candidate.git_branch
        && existing.cwd == candidate.cwd
        && existing.tag == candidate.tag
        && existing.created_at == candidate.created_at
        && existing.last_modified == candidate.last_modified
        && existing.file_size == candidate.file_size
        && existing.message_sync == candidate.message_sync
}

/// Resolves the Node helper script to run: an explicit path wins; otherwise
/// the embedded helper source is written into a fresh temp dir whose guard
/// must be kept alive by the caller for the helper's lifetime.
async fn materialize_helper(
    helper_path: Option<&Path>,
) -> Result<(Option<EmbeddedHelperDir>, PathBuf)> {
    if let Some(path) = helper_path {
        return Ok((None, path.to_path_buf()));
    }

    let temp_dir = tempfile::Builder::new()
        .prefix("libra-claude-sdk-helper-")
        .tempdir()
        .context("failed to create temporary helper directory")?;
    let helper_path = temp_dir.path().join("libra-claude-managed-helper.cjs");
    // create_new guards against anything racing us inside the fresh temp dir.
    let mut helper_file = fs::OpenOptions::new()
        .write(true)
        .create_new(true)
        .open(&helper_path)
        .await
        .with_context(|| format!("failed to create helper '{}'", helper_path.display()))?;
    helper_file
        .write_all(EMBEDDED_HELPER_SOURCE.as_bytes())
        .await
        .with_context(|| format!("failed to write helper '{}'", helper_path.display()))?;
    Ok((
        Some(EmbeddedHelperDir {
            _temp_dir: temp_dir,
        }),
        helper_path,
    ))
}

/// Invokes the helper in managed-run mode.
/// NOTE(review): the concrete response type was lost in transit; expressed via
/// the `HelperResponse` projection — confirm against the trait definition.
async fn invoke_helper(
    node_binary: &str,
    helper_path: &Path,
    request: &ManagedHelperRequest,
) -> Result<<ManagedHelperRequest as HelperResponse>::Response> {
    invoke_helper_json(node_binary, helper_path, request)
        .await
        .context("failed to invoke Claude SDK managed helper")
}

/// Runs the Node helper with `request` serialized onto stdin, and parses the
/// response from stdout/stderr through the request's `HelperResponse` impl.
/// NOTE(review): the generic parameter list was lost in transit; this
/// reconstruction assumes `HelperResponse` carries an associated `Response`
/// type — confirm against the trait definition.
async fn invoke_helper_json<T>(
    node_binary: &str,
    helper_path: &Path,
    request: &T,
) -> Result<T::Response>
where
    T: Serialize + HelperResponse,
{
    let serialized_request =
        serde_json::to_vec(request).context("failed to serialize helper request")?;
    let mut child = Command::new(node_binary)
        .arg(helper_path)
        .stdin(Stdio::piped())
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .with_context(|| {
            format!(
                "failed to start Claude SDK helper with '{}' '{}'",
                node_binary,
                helper_path.display()
            )
        })?;

    // Write the request, then drop stdin so the helper observes EOF.
    if let Some(mut stdin) = child.stdin.take() {
        stdin
            .write_all(&serialized_request)
            .await
            .context("failed to send request to Claude SDK helper")?;
    }

    let output = child
        .wait_with_output()
        .await
        .context("failed to wait for Claude SDK helper process")?;
    let stdout = String::from_utf8(output.stdout).context("helper stdout is not valid UTF-8")?;
    let stderr = String::from_utf8(output.stderr).context("helper stderr is not valid UTF-8")?;

    if !output.status.success() {
        let detail = if stderr.trim().is_empty() {
            "helper exited with a non-zero status".to_string()
        } else {
            stderr.trim().to_string()
        };
        return Err(anyhow!("Claude SDK helper failed: {detail}"));
    }

    T::parse_response(stdout.trim(), stderr.trim())
}

/// JSON schema the managed helper asks Claude to emit as structured output.
fn managed_output_schema() -> Value {
    json!({
        "type": "object",
        "additionalProperties": false,
        "required": [
            "summary",
            "problemStatement",
            "changeType",
            "objectives",
            "successCriteria",
            "riskRationale"
        ],
        "properties": {
            "summary": { "type": "string" },
            "problemStatement": { "type": "string" },
            "changeType": { "type": "string" },
            "objectives": {
                "type": "array",
                "minItems": 1,
                "items": { "type": "string" }
            },
            "inScope": {
                "type": "array",
                "items": { "type": "string" }
            },
            "outOfScope": {
                "type": "array",
                "items": { "type": "string" }
            },
            "touchHints": {
                "type": "object",
                "additionalProperties": false,
                "properties": {
                    "files": {
                        "type": "array",
                        "items": { "type": "string" }
                    },
                    "symbols": {
                        "type": "array",
                        "items": { "type": "string" }
                    },
                    "apis": {
                        "type": "array",
                        "items": { "type": "string" }
                    }
                }
            },
            "successCriteria": {
                "type": "array",
                "minItems": 1,
                "items": { "type": "string" }
            },
            "fastChecks": {
                "type": "array",
                "items": {
                    "type": "object",
                    "additionalProperties": false,
                    "required": ["id", "kind"],
                    "properties": {
                        "id": { "type": "string" },
                        "kind": {
                            "type": "string",
                            "enum": ["command", "testSuite", "policy"]
                        },
                        "command": { "type": "string" },
                        "timeoutSeconds": { "type": "integer", "minimum": 1 },
                        "expectedExitCode": { "type": "integer" },
                        "required": { "type": "boolean" },
                        "artifactsProduced": {
                            "type": "array",
                            "items": { "type": "string" }
                        }
                    }
                }
            },
            "integrationChecks": {
                "type": "array",
                "items": {
                    "type": "object",
                    "additionalProperties": false,
                    "required": ["id", "kind"],
                    "properties": {
                        "id": { "type": "string" },
                        "kind": {
                            "type": "string",
                            "enum": ["command", "testSuite", "policy"]
                        },
                        "command": { "type": "string" },
                        "timeoutSeconds": { "type": "integer",
"minimum": 1 }, + "expectedExitCode": { "type": "integer" }, + "required": { "type": "boolean" }, + "artifactsProduced": { + "type": "array", + "items": { "type": "string" } + } + } + } + }, + "securityChecks": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "required": ["id", "kind"], + "properties": { + "id": { "type": "string" }, + "kind": { + "type": "string", + "enum": ["command", "testSuite", "policy"] + }, + "command": { "type": "string" }, + "timeoutSeconds": { "type": "integer", "minimum": 1 }, + "expectedExitCode": { "type": "integer" }, + "required": { "type": "boolean" }, + "artifactsProduced": { + "type": "array", + "items": { "type": "string" } + } + } + } + }, + "releaseChecks": { + "type": "array", + "items": { + "type": "object", + "additionalProperties": false, + "required": ["id", "kind"], + "properties": { + "id": { "type": "string" }, + "kind": { + "type": "string", + "enum": ["command", "testSuite", "policy"] + }, + "command": { "type": "string" }, + "timeoutSeconds": { "type": "integer", "minimum": 1 }, + "expectedExitCode": { "type": "integer" }, + "required": { "type": "boolean" }, + "artifactsProduced": { + "type": "array", + "items": { "type": "string" } + } + } + } + }, + "riskRationale": { "type": "string" }, + "riskFactors": { + "type": "array", + "items": { "type": "string" } + }, + "riskLevel": { + "type": "string", + "enum": ["low", "medium", "high"] + } + } + }) +} + +fn print_result(mode: &'static str, outcome: &PersistedManagedArtifactOutcome) -> Result<()> { + let payload = ClaudeSdkCommandOutput { + ok: true, + command_mode: mode, + provider_session_id: outcome.provider_session_id.clone(), + ai_session_id: outcome.ai_session_id.clone(), + ai_session_object_hash: outcome.ai_session_object_hash.clone(), + already_persisted: outcome.already_persisted, + intent_extraction_path: outcome.intent_extraction_path.clone(), + raw_artifact_path: outcome.raw_artifact_path.clone(), + 
audit_bundle_path: outcome.audit_bundle_path.clone(), + }; + println!( + "{}", + serde_json::to_string_pretty(&payload) + .context("failed to serialize Claude SDK command output")? + ); + Ok(()) +} diff --git a/src/command/hooks.rs b/src/command/hooks.rs deleted file mode 100644 index 77d2a621..00000000 --- a/src/command/hooks.rs +++ /dev/null @@ -1,121 +0,0 @@ -//! Unified hook command entrypoint for provider adapters. - -use anyhow::{Result, anyhow, bail}; -use clap::{Parser, Subcommand}; - -use crate::internal::ai::hooks::{ - ProviderHookCommand, ProviderInstallOptions, find_provider, process_hook_event_from_stdin, - supported_provider_names, -}; - -#[derive(Parser, Debug)] -pub struct HooksCommand { - #[arg(help = "Hook provider identifier")] - pub provider: String, - #[command(subcommand)] - pub command: HookSubcommand, -} - -#[derive(Subcommand, Debug)] -pub enum HookSubcommand { - #[command(about = "Handle SessionStart lifecycle event")] - SessionStart(HookEventArgs), - #[command(about = "Handle TurnStart lifecycle event")] - Prompt(HookEventArgs), - #[command(about = "Handle ToolUse lifecycle event")] - ToolUse(HookEventArgs), - #[command(about = "Handle TurnEnd lifecycle event")] - Stop(HookEventArgs), - #[command(about = "Handle SessionEnd lifecycle event")] - SessionEnd(HookEventArgs), - #[command(about = "Handle ModelUpdate lifecycle event")] - ModelUpdate(HookEventArgs), - #[command(about = "Handle Compaction lifecycle event")] - Compaction(HookEventArgs), - #[command(about = "Install provider hook forwarding into provider settings")] - Install(InstallHookArgs), - #[command(about = "Uninstall provider hook forwarding from provider settings")] - Uninstall, - #[command(about = "Print whether provider hooks are installed")] - IsInstalled, -} - -#[derive(Parser, Debug, Clone)] -pub struct HookEventArgs {} - -#[derive(Parser, Debug, Clone)] -pub struct InstallHookArgs { - #[arg( - long, - help = "Absolute or relative path to the Libra binary that provider 
hooks should execute" - )] - pub binary_path: Option, - #[arg( - long, - help = "Optional timeout in seconds for providers that support command-level hook timeouts" - )] - pub timeout: Option, -} - -pub async fn execute(cmd: HooksCommand) -> Result<()> { - let supported = supported_provider_names().join(", "); - let provider = find_provider(&cmd.provider).ok_or_else(|| { - anyhow!( - "unsupported hook provider '{}'; supported providers: {}", - cmd.provider, - supported, - ) - })?; - - match cmd.command { - HookSubcommand::SessionStart(_) => { - execute_event_command(provider, ProviderHookCommand::SessionStart).await - } - HookSubcommand::Prompt(_) => { - execute_event_command(provider, ProviderHookCommand::Prompt).await - } - HookSubcommand::ToolUse(_) => { - execute_event_command(provider, ProviderHookCommand::ToolUse).await - } - HookSubcommand::ModelUpdate(_) => { - execute_event_command(provider, ProviderHookCommand::ModelUpdate).await - } - HookSubcommand::Compaction(_) => { - execute_event_command(provider, ProviderHookCommand::Compaction).await - } - HookSubcommand::Stop(_) => execute_event_command(provider, ProviderHookCommand::Stop).await, - HookSubcommand::SessionEnd(_) => { - execute_event_command(provider, ProviderHookCommand::SessionEnd).await - } - HookSubcommand::Install(args) => provider.install_hooks(&ProviderInstallOptions { - binary_path: args.binary_path, - timeout_secs: args.timeout, - }), - HookSubcommand::Uninstall => provider.uninstall_hooks(), - HookSubcommand::IsInstalled => { - println!( - "{}", - if provider.hooks_are_installed()? 
{ - "true" - } else { - "false" - } - ); - Ok(()) - } - } -} - -async fn execute_event_command( - provider: &dyn crate::internal::ai::hooks::HookProvider, - command: ProviderHookCommand, -) -> Result<()> { - if !provider.supported_commands().contains(&command) { - bail!( - "provider '{}' does not support '{}'", - provider.provider_name(), - command - ); - } - process_hook_event_from_stdin(command.lifecycle_event_kind(), provider).await -} diff --git a/src/command/mod.rs b/src/command/mod.rs index 98b3f05d..41c80860 100644 --- a/src/command/mod.rs +++ b/src/command/mod.rs @@ -7,6 +7,7 @@ pub mod branch; pub mod cat_file; pub mod checkout; pub mod cherry_pick; +pub mod claude_sdk; pub mod clean; pub mod clone; pub mod cloud; @@ -16,7 +17,6 @@ pub mod config; pub mod describe; pub mod diff; pub mod fetch; -pub mod hooks; pub mod index_pack; pub mod init; pub mod lfs; diff --git a/src/internal/ai/hooks/provider.rs b/src/internal/ai/hooks/provider.rs index 7761e099..69c6f9e8 100644 --- a/src/internal/ai/hooks/provider.rs +++ b/src/internal/ai/hooks/provider.rs @@ -1,10 +1,12 @@ //! Provider contracts for lifecycle hook ingestion and setup. 
-use std::fmt; +use std::{fmt, path::Path}; use anyhow::Result; +use serde_json::Value; use super::lifecycle::{LifecycleEvent, LifecycleEventKind, SessionHookEnvelope}; +use crate::internal::ai::session::SessionState; pub const CANONICAL_DEDUP_IDENTITY_KEYS: &[&str] = &[ "event_id", @@ -76,6 +78,19 @@ pub trait HookProvider: Sync { ) -> Result; fn dedup_identity_keys(&self) -> &'static [&'static str]; fn lifecycle_fallback_events(&self) -> &'static [&'static str]; + fn command_output(&self, _command: ProviderHookCommand) -> Option { + None + } + fn post_process_event( + &self, + _command: ProviderHookCommand, + _storage_path: &Path, + _session: &mut SessionState, + _envelope: &SessionHookEnvelope, + _event: &LifecycleEvent, + ) -> Result<()> { + Ok(()) + } fn install_hooks(&self, options: &ProviderInstallOptions) -> Result<()>; fn uninstall_hooks(&self) -> Result<()>; fn hooks_are_installed(&self) -> Result; diff --git a/src/internal/ai/hooks/providers/claude/mod.rs b/src/internal/ai/hooks/providers/claude/mod.rs index 6a5a717b..e413f8d0 100644 --- a/src/internal/ai/hooks/providers/claude/mod.rs +++ b/src/internal/ai/hooks/providers/claude/mod.rs @@ -1,5 +1,4 @@ //! Claude Code lifecycle hook provider facade. 
- mod parser; mod settings; diff --git a/src/internal/ai/hooks/providers/claude/settings.rs b/src/internal/ai/hooks/providers/claude/settings.rs index 55051d36..08089e84 100644 --- a/src/internal/ai/hooks/providers/claude/settings.rs +++ b/src/internal/ai/hooks/providers/claude/settings.rs @@ -76,7 +76,6 @@ pub(super) fn install_claude_hooks(options: &ProviderInstallOptions) -> Result<( settings_path.display() ); } - Ok(()) } diff --git a/src/internal/ai/hooks/runtime.rs b/src/internal/ai/hooks/runtime.rs index 516f8467..7f2a1606 100644 --- a/src/internal/ai/hooks/runtime.rs +++ b/src/internal/ai/hooks/runtime.rs @@ -71,6 +71,7 @@ fn redact_session_id(session_id: &str) -> String { } pub async fn process_hook_event_from_stdin( + command: super::provider::ProviderHookCommand, expected_kind: LifecycleEventKind, provider: &dyn HookProvider, ) -> Result<()> { @@ -114,6 +115,7 @@ pub async fn process_hook_event_from_stdin( crate::internal::ai::session::SessionStore::from_storage_path(&storage_path); let ai_session_id = build_ai_session_id(provider.provider_name(), &envelope.session_id); + let recovered_from_out_of_order = event.kind != LifecycleEventKind::SessionStart; let _session_lock = session_store .lock_session(&ai_session_id) .with_context(|| { @@ -129,9 +131,11 @@ pub async fn process_hook_event_from_stdin( let mut recovered = SessionState::new(&process_cwd_str); recovered.id = ai_session_id.clone(); recovered.working_dir = process_cwd_str.clone(); - recovered - .metadata - .insert("recovered_from_out_of_order".to_string(), json!(true)); + if recovered_from_out_of_order { + recovered + .metadata + .insert("recovered_from_out_of_order".to_string(), json!(true)); + } recovered } Err(err) if err.kind() == std::io::ErrorKind::InvalidData => { @@ -209,6 +213,9 @@ pub async fn process_hook_event_from_stdin( } apply_hook_event(&mut session, &envelope, &event, provider.provider_name()); + provider + .post_process_event(command, &storage_path, &mut session, &envelope, 
&event) + .context("provider hook post-processing failed")?; if let Some(event_key) = dedup_key { append_processed_event_key(&mut session, event_key); } diff --git a/src/internal/ai/providers/claude_sdk/helper.cjs b/src/internal/ai/providers/claude_sdk/helper.cjs new file mode 100644 index 00000000..ff3f65c9 --- /dev/null +++ b/src/internal/ai/providers/claude_sdk/helper.cjs @@ -0,0 +1,236 @@ +const path = require('path'); +const { createRequire } = require('module'); + +async function readStdin() { + const chunks = []; + for await (const chunk of process.stdin) { + chunks.push(Buffer.isBuffer(chunk) ? chunk : Buffer.from(chunk)); + } + return Buffer.concat(chunks).toString('utf8'); +} + +function findLastResultMessage(messages) { + for (let index = messages.length - 1; index >= 0; index -= 1) { + if (messages[index] && messages[index].type === 'result') { + return messages[index]; + } + } + return null; +} + +function loadClaudeAgentSdk(cwd) { + const attempts = []; + const moduleOverride = process.env.LIBRA_CLAUDE_AGENT_SDK_MODULE; + if (moduleOverride) { + attempts.push(() => require(moduleOverride)); + } + + const resolverCwd = path.resolve(cwd || process.cwd()); + attempts.push(() => + createRequire(path.join(resolverCwd, '__libra_claude_sdk__.cjs'))( + '@anthropic-ai/claude-agent-sdk' + ) + ); + attempts.push(() => require('@anthropic-ai/claude-agent-sdk')); + + let lastError = null; + for (const attempt of attempts) { + try { + const sdk = attempt(); + if (sdk && typeof sdk.query === 'function') { + return sdk; + } + lastError = new Error('resolved module does not export query()'); + } catch (error) { + lastError = error; + } + } + + const detail = lastError && lastError.message ? lastError.message : String(lastError); + throw new Error( + 'failed to load @anthropic-ai/claude-agent-sdk; install it in the working directory, configure NODE_PATH, or set LIBRA_CLAUDE_AGENT_SDK_MODULE. 
Last error: ' + + detail + ); +} + +function buildHooks(hookEvents) { + const recordHook = (hookName) => async (input) => { + hookEvents.push({ hook: hookName, input }); + return { continue: true }; + }; + + const hookNames = [ + 'SessionStart', + 'UserPromptSubmit', + 'PreToolUse', + 'PostToolUse', + 'PostToolUseFailure', + 'Notification', + 'SessionEnd', + 'Stop', + 'SubagentStart', + 'SubagentStop', + 'PreCompact', + 'PostCompact', + 'PermissionRequest', + 'Setup', + 'TeammateIdle', + 'TaskCompleted', + 'Elicitation', + 'ElicitationResult', + 'ConfigChange', + 'WorktreeCreate', + 'WorktreeRemove', + 'InstructionsLoaded', + ]; + + return Object.fromEntries( + hookNames.map((hookName) => [hookName, [{ hooks: [recordHook(hookName)] }]]) + ); +} + +async function main() { + const requestBody = await readStdin(); + if (!requestBody.trim()) { + throw new Error('managed helper request is empty'); + } + + const request = JSON.parse(requestBody); + const sdk = loadClaudeAgentSdk(request.cwd); + if (request.mode === 'listSessions') { + if (typeof sdk.listSessions !== 'function') { + throw new Error('resolved @anthropic-ai/claude-agent-sdk does not export listSessions()'); + } + const sessions = await sdk.listSessions({ + dir: request.cwd, + limit: request.limit, + offset: request.offset, + includeWorktrees: request.includeWorktrees, + }); + process.stdout.write(JSON.stringify(sessions)); + return; + } + if (request.mode === 'getSessionMessages') { + if (typeof sdk.getSessionMessages !== 'function') { + throw new Error( + 'resolved @anthropic-ai/claude-agent-sdk does not export getSessionMessages()' + ); + } + const messages = await sdk.getSessionMessages(request.providerSessionId, { + limit: request.limit, + offset: request.offset, + }); + process.stdout.write(JSON.stringify(messages)); + return; + } + const { query } = sdk; + const hookEvents = []; + const messages = []; + let helperTimedOut = false; + let helperError = null; + + const options = { + cwd: request.cwd, + 
model: request.model, + settingSources: ['user'], + permissionMode: request.permissionMode, + hooks: buildHooks(hookEvents), + includePartialMessages: request.includePartialMessages === true, + promptSuggestions: request.promptSuggestions === true, + agentProgressSummaries: request.agentProgressSummaries === true, + }; + + if (Array.isArray(request.tools) && request.tools.length > 0) { + options.tools = request.tools; + if (request.autoApproveTools) { + options.canUseTool = async (toolName, input, permissionOptions) => { + hookEvents.push({ + hook: 'CanUseTool', + input: { + tool_name: toolName, + tool_input: input, + tool_use_id: permissionOptions && permissionOptions.toolUseID ? permissionOptions.toolUseID : null, + agent_id: permissionOptions && permissionOptions.agentID ? permissionOptions.agentID : null, + blocked_path: + permissionOptions && permissionOptions.blockedPath ? permissionOptions.blockedPath : null, + decision_reason: + permissionOptions && permissionOptions.decisionReason ? permissionOptions.decisionReason : null, + suggestions: + permissionOptions && Array.isArray(permissionOptions.suggestions) + ? permissionOptions.suggestions + : [], + }, + }); + return { behavior: 'allow' }; + }; + } else { + options.allowedTools = Array.isArray(request.allowedTools) ? request.allowedTools : request.tools; + } + } + + if (request.outputSchema) { + options.outputFormat = { + type: 'json_schema', + schema: request.outputSchema, + }; + } + + const stream = query({ + prompt: request.prompt, + options, + }); + + const iterator = stream[Symbol.asyncIterator](); + let timeoutId = null; + let timeoutPromise = null; + if (typeof request.timeoutSeconds === 'number' && request.timeoutSeconds > 0) { + timeoutPromise = new Promise((resolve) => { + timeoutId = setTimeout(() => resolve({ __libraTimeout: true }), request.timeoutSeconds * 1000); + }); + } + + try { + while (true) { + const nextMessage = iterator.next(); + const step = timeoutPromise ? 
await Promise.race([nextMessage, timeoutPromise]) : await nextMessage; + if (step && step.__libraTimeout) { + helperTimedOut = true; + if (typeof iterator.return === 'function') { + try { + await iterator.return(); + } catch (_) {} + } + break; + } + if (!step || step.done) { + break; + } + messages.push(step.value); + } + } catch (error) { + helperError = error && error.message ? error.message : String(error); + } finally { + if (timeoutId) { + clearTimeout(timeoutId); + } + } + + const artifact = { + cwd: request.cwd, + prompt: request.prompt, + helperTimedOut, + helperError, + hookEvents, + messages, + resultMessage: findLastResultMessage(messages), + }; + + process.stdout.write(JSON.stringify(artifact)); +} + +main().catch((error) => { + const detail = + error && error.stack ? error.stack : error && error.message ? error.message : String(error); + process.stderr.write(detail); + process.exitCode = 1; +}); diff --git a/src/internal/ai/providers/claude_sdk/managed.rs b/src/internal/ai/providers/claude_sdk/managed.rs new file mode 100644 index 00000000..09c9abb1 --- /dev/null +++ b/src/internal/ai/providers/claude_sdk/managed.rs @@ -0,0 +1,3137 @@ +//! Claude Agent SDK managed-mode artifact parsing and bridge conversion. 
+ +use std::{ + collections::{HashMap, HashSet}, + path::{Path, PathBuf}, + sync::Arc, + time::{SystemTime, UNIX_EPOCH}, +}; + +use anyhow::{Context, Result, anyhow, bail}; +use chrono::Utc; +use serde::{Deserialize, Serialize}; +use serde_json::{Value, json}; +use tokio::{fs, io::AsyncWriteExt}; + +use crate::{ + internal::{ + ai::{ + history::HistoryManager, + hooks::{ + lifecycle::normalize_json_value, + runtime::{AI_SESSION_SCHEMA, AI_SESSION_TYPE, build_ai_session_id}, + }, + intentspec::{ + DraftAcceptance, DraftCheck, DraftIntent, DraftRisk, IntentDraft, RiskLevel, + types::{ChangeType, TouchHints}, + }, + session::SessionState, + }, + db, + }, + utils::{object::write_git_object, storage::local::LocalStorage}, +}; + +const MANAGED_SOURCE_NAME: &str = "claude_agent_sdk_managed"; +const MANAGED_AUDIT_BUNDLE_SCHEMA: &str = "libra.claude_managed_audit_bundle.v1"; +const MANAGED_INTENT_EXTRACTION_SOURCE: &str = "claude_agent_sdk_managed.structured_output"; +const MANAGED_ARTIFACTS_DIR: &str = "managed-artifacts"; +const AUDIT_BUNDLES_DIR: &str = "audit-bundles"; +const INTENT_EXTRACTIONS_DIR: &str = "intent-extractions"; +const NORMALIZED_EVENTS_KEY: &str = "normalized_events"; +const RAW_HOOK_EVENTS_KEY: &str = "raw_hook_events"; +const SESSION_PHASE_METADATA_KEY: &str = "session_phase"; + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ClaudeManagedArtifact { + pub cwd: String, + #[serde(default)] + pub prompt: Option, + #[serde(rename = "helperTimedOut", default)] + pub helper_timed_out: bool, + #[serde(rename = "helperError", default)] + pub helper_error: Option, + #[serde(rename = "hookEvents", default)] + pub hook_events: Vec, + #[serde(default)] + pub messages: Vec, + #[serde(rename = "resultMessage", default)] + pub result_message: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ClaudeManagedHookEvent { + pub hook: String, + #[serde(default)] + pub input: Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] 
+pub struct ClaudeManagedResultMessage { + #[serde(default)] + pub r#type: Option, + #[serde(default)] + pub subtype: Option, + #[serde(default)] + pub is_error: Option, + #[serde(default)] + pub session_id: Option, + #[serde(default)] + pub stop_reason: Option, + #[serde(default)] + pub duration_ms: Option, + #[serde(default)] + pub duration_api_ms: Option, + #[serde(default)] + pub num_turns: Option, + #[serde(default)] + pub result: Option, + #[serde(default)] + pub total_cost_usd: Option, + #[serde(default)] + pub usage: Option, + #[serde(rename = "modelUsage", default)] + pub model_usage: Option, + #[serde(default)] + pub permission_denials: Option, + #[serde(default)] + pub structured_output: Option, + #[serde(default)] + pub fast_mode_state: Option, + #[serde(default)] + pub uuid: Option, +} + +#[derive(Debug, Clone)] +pub struct ManagedArtifactIngestion { + pub session: SessionState, + pub intent_extraction: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PersistedManagedArtifactOutcome { + #[serde(rename = "providerSessionId")] + pub provider_session_id: String, + #[serde(rename = "aiSessionId")] + pub ai_session_id: String, + #[serde(rename = "aiSessionObjectHash")] + pub ai_session_object_hash: String, + #[serde(rename = "alreadyPersisted")] + pub already_persisted: bool, + #[serde( + rename = "intentExtractionPath", + default, + skip_serializing_if = "Option::is_none" + )] + pub intent_extraction_path: Option, + #[serde(rename = "rawArtifactPath")] + pub raw_artifact_path: String, + #[serde(rename = "auditBundlePath")] + pub audit_bundle_path: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedAuditBundle { + pub schema: String, + pub provider: String, + #[serde(rename = "managedSource")] + pub managed_source: String, + #[serde(rename = "aiSessionId")] + pub ai_session_id: String, + #[serde(rename = "providerSessionId")] + pub provider_session_id: String, + #[serde(rename = "generatedAt")] + pub 
generated_at: String, + #[serde(rename = "rawArtifact")] + pub raw_artifact: ClaudeManagedArtifact, + pub bridge: ManagedBridgeArtifacts, + #[serde(rename = "fieldProvenance", default)] + pub field_provenance: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedBridgeArtifacts { + #[serde(rename = "sessionState")] + pub session_state: SessionState, + #[serde(rename = "aiSession")] + pub ai_session: Value, + #[serde(rename = "objectCandidates")] + pub object_candidates: ManagedObjectCandidates, + #[serde(rename = "intentExtraction")] + pub intent_extraction: ManagedDraftExtractionReport, + #[serde( + rename = "intentExtractionArtifact", + default, + skip_serializing_if = "Option::is_none" + )] + pub intent_extraction_artifact: Option, + #[serde(rename = "toolInvocations", default)] + pub tool_invocations: Vec, + #[serde(rename = "touchHints", default)] + pub touch_hints: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedObjectCandidates { + #[serde(rename = "threadId")] + pub thread_id: String, + #[serde(rename = "runSnapshot")] + pub run_snapshot: ManagedRunSnapshot, + #[serde(rename = "runEvent")] + pub run_event: ManagedRunEvent, + #[serde(rename = "provenanceSnapshot")] + pub provenance_snapshot: ManagedProvenanceSnapshot, + #[serde(rename = "providerInitSnapshot")] + pub provider_init_snapshot: ManagedProviderInitSnapshot, + #[serde( + rename = "runUsageEvent", + default, + skip_serializing_if = "Option::is_none" + )] + pub run_usage_event: Option, + #[serde(rename = "toolInvocationEvents", default)] + pub tool_invocation_events: Vec, + #[serde(rename = "toolRuntimeEvents", default)] + pub tool_runtime_events: Vec, + #[serde(rename = "assistantRuntimeEvents", default)] + pub assistant_runtime_events: Vec, + #[serde(rename = "taskRuntimeEvents", default)] + pub task_runtime_events: Vec, + #[serde(rename = "decisionRuntimeEvents", default)] + pub decision_runtime_events: Vec, + #[serde(rename = 
"contextRuntimeEvents", default)] + pub context_runtime_events: Vec, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedRunSnapshot { + pub id: String, + #[serde(rename = "threadId")] + pub thread_id: String, + #[serde(rename = "startedAt")] + pub started_at: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedRunEvent { + pub id: String, + #[serde(rename = "runId")] + pub run_id: String, + pub status: String, + pub at: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedProvenanceSnapshot { + pub id: String, + #[serde(rename = "runId")] + pub run_id: String, + pub provider: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub model: Option, + pub parameters: Value, + #[serde(rename = "createdAt")] + pub created_at: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedProviderInitSnapshot { + pub id: String, + #[serde(rename = "runId")] + pub run_id: String, + #[serde(rename = "threadId")] + pub thread_id: String, + #[serde( + rename = "apiKeySource", + default, + skip_serializing_if = "Option::is_none" + )] + pub api_key_source: Option, + #[serde( + rename = "claudeCodeVersion", + default, + skip_serializing_if = "Option::is_none" + )] + pub claude_code_version: Option, + #[serde( + rename = "outputStyle", + default, + skip_serializing_if = "Option::is_none" + )] + pub output_style: Option, + #[serde(default)] + pub agents: Vec, + #[serde(default)] + pub skills: Vec, + #[serde(rename = "slashCommands", default)] + pub slash_commands: Vec, + #[serde(rename = "mcpServers", default)] + pub mcp_servers: Vec, + #[serde(default)] + pub plugins: Vec, + #[serde( + rename = "fastModeState", + default, + skip_serializing_if = "Option::is_none" + )] + pub fast_mode_state: Option, + #[serde(rename = "capturedAt")] + pub captured_at: String, +} + +#[derive(Debug, 
Clone, Serialize, Deserialize)] +pub struct ManagedRunUsageEvent { + #[serde(rename = "runId")] + pub run_id: String, + #[serde(rename = "threadId")] + pub thread_id: String, + pub at: String, + pub usage: Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedToolInvocationEvent { + pub id: String, + #[serde(rename = "runId")] + pub run_id: String, + #[serde(rename = "threadId")] + pub thread_id: String, + pub tool: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub server: Option, + pub status: String, + pub at: String, + pub payload: Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedSemanticRuntimeEvent { + pub id: String, + #[serde(rename = "runId")] + pub run_id: String, + #[serde(rename = "threadId")] + pub thread_id: String, + #[serde(rename = "semanticObject")] + pub semantic_object: String, + pub kind: String, + pub source: String, + pub at: String, + pub payload: Value, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedDraftExtractionReport { + pub status: String, + pub source: String, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub error: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedToolInvocation { + #[serde(rename = "toolUseId")] + pub tool_use_id: String, + #[serde(rename = "toolName", default, skip_serializing_if = "Option::is_none")] + pub tool_name: Option, + #[serde(rename = "toolInput", default, skip_serializing_if = "Option::is_none")] + pub tool_input: Option, + #[serde( + rename = "toolResponse", + default, + skip_serializing_if = "Option::is_none" + )] + pub tool_response: Option, + #[serde( + rename = "transcriptPath", + default, + skip_serializing_if = "Option::is_none" + )] + pub transcript_path: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ManagedFieldProvenance { + #[serde(rename = "fieldPath")] + pub field_path: String, + #[serde(rename = "sourceLayer")] 
+ pub source_layer: String, + #[serde(rename = "sourcePath")] + pub source_path: String, + pub value: Value, + #[serde(default, skip_serializing_if = "Option::is_none")] + pub note: Option, +} + +#[derive(Debug, Clone, Deserialize)] +struct StructuredIntentExtractionOutput { + summary: String, + #[serde(rename = "problemStatement")] + problem_statement: String, + #[serde(rename = "changeType")] + change_type: ChangeType, + objectives: Vec, + #[serde(rename = "inScope", default)] + in_scope: Vec, + #[serde(rename = "outOfScope", default)] + out_of_scope: Vec, + #[serde(rename = "touchHints", default)] + touch_hints: Option, + #[serde(rename = "successCriteria")] + success_criteria: Vec, + #[serde(rename = "fastChecks", default)] + fast_checks: Vec, + #[serde(rename = "integrationChecks", default)] + integration_checks: Vec, + #[serde(rename = "securityChecks", default)] + security_checks: Vec, + #[serde(rename = "releaseChecks", default)] + release_checks: Vec, + #[serde(rename = "riskRationale")] + risk_rationale: String, + #[serde(rename = "riskFactors", default)] + risk_factors: Vec, + #[serde(rename = "riskLevel", default)] + risk_level: Option, +} + +#[derive(Debug, Clone)] +struct ManagedSystemInit { + session_id: String, + cwd: String, + model: Option, + permission_mode: Option, + tools: Vec, + api_key_source: Option, + claude_code_version: Option, + output_style: Option, + agents: Vec, + skills: Vec, + slash_commands: Vec, + mcp_servers: Vec, + plugins: Vec, + fast_mode_state: Option, +} + +#[derive(Debug, Clone, Default)] +struct ToolHookPair { + tool_use_id: String, + tool_name: Option, + tool_input: Option, + tool_response: Option, + transcript_path: Option, + saw_pre: bool, + saw_post: bool, +} + +#[derive(Debug, Clone)] +struct IntentExtractionOutcome { + extraction: Option, + error: Option, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PersistedManagedIntentExtraction { + pub schema: String, + #[serde(rename = "ai_session_id")] + 
pub ai_session_id: String, + pub source: String, + pub extraction: IntentDraft, +} + +pub fn ingest_managed_artifact( + artifact: &ClaudeManagedArtifact, +) -> Result { + let mut session = build_bridge_session(artifact)?; + let intent_extraction = + extract_intent_extraction_from_result(artifact.result_message.as_ref())?; + if intent_extraction.is_some() { + session.metadata.insert( + "intent_extraction_source".to_string(), + json!("structured_output"), + ); + } + + Ok(ManagedArtifactIngestion { + session, + intent_extraction, + }) +} + +pub fn build_managed_audit_bundle(artifact: &ClaudeManagedArtifact) -> Result { + let system_init = extract_system_init(artifact) + .context("managed artifact does not contain a valid system init message")?; + let session = build_bridge_session(artifact)?; + let extraction_outcome = extract_intent_extraction_outcome(artifact.result_message.as_ref()); + let tool_invocations = merge_tool_hook_events(&artifact.hook_events) + .into_iter() + .map(ManagedToolInvocation::from) + .collect::>(); + let object_candidates = build_object_candidates(&session, &system_init, artifact)?; + let touch_hints = collect_touch_hints(&tool_invocations, &system_init.cwd); + let ai_session = build_managed_ai_session_payload(&session); + let intent_extraction_artifact = extraction_outcome + .extraction + .clone() + .map(|extraction| PersistedManagedIntentExtraction::new(session.id.clone(), extraction)); + let intent_extraction = ManagedDraftExtractionReport { + status: extraction_outcome.status_label().to_string(), + source: "result.structured_output".to_string(), + error: extraction_outcome.error.clone(), + }; + let field_provenance = build_field_provenance( + &system_init, + artifact, + &extraction_outcome, + &tool_invocations, + &touch_hints, + )?; + + Ok(ManagedAuditBundle { + schema: MANAGED_AUDIT_BUNDLE_SCHEMA.to_string(), + provider: "claude".to_string(), + managed_source: MANAGED_SOURCE_NAME.to_string(), + ai_session_id: session.id.clone(), + 
provider_session_id: system_init.session_id, + generated_at: Utc::now().to_rfc3339(), + raw_artifact: artifact.clone(), + bridge: ManagedBridgeArtifacts { + session_state: session, + ai_session, + object_candidates, + intent_extraction, + intent_extraction_artifact, + tool_invocations, + touch_hints, + }, + field_provenance, + }) +} + +pub fn extract_intent_extraction_from_result( + result_message: Option<&ClaudeManagedResultMessage>, +) -> Result> { + let outcome = extract_intent_extraction_outcome(result_message); + if let Some(error) = outcome.error { + return Err(anyhow!(error)); + } + Ok(outcome.extraction) +} + +pub async fn persist_managed_artifact( + storage_path: &Path, + artifact: &ClaudeManagedArtifact, +) -> Result { + let bundle = build_managed_audit_bundle(artifact)?; + let ai_session_id = bundle.ai_session_id.clone(); + let provider_session_id = bundle.provider_session_id.clone(); + + let objects_dir = storage_path.join("objects"); + fs::create_dir_all(&objects_dir).await.with_context(|| { + format!( + "failed to create objects directory '{}'", + objects_dir.display() + ) + })?; + + let storage = Arc::new(LocalStorage::new(objects_dir)); + let db_conn = Arc::new(db::get_db_conn_instance().await.clone()); + let history_manager = HistoryManager::new(storage, storage_path.to_path_buf(), db_conn); + + let ai_session_payload = normalize_json_value(bundle.bridge.ai_session.clone()); + let blob_data = serde_json::to_vec(&ai_session_payload) + .context("failed to serialize ai_session payload")?; + let blob_hash = write_git_object(storage_path, "blob", &blob_data) + .context("failed to write ai_session git blob")?; + let existing_object_hash = history_manager + .get_object_hash(AI_SESSION_TYPE, &ai_session_id) + .await?; + + let (ai_session_object_hash, already_persisted) = if let Some(existing) = existing_object_hash { + (existing.to_string(), true) + } else { + history_manager + .append(AI_SESSION_TYPE, &ai_session_id, blob_hash) + .await + .with_context(|| 
{ + format!( + "failed to append ai_session '{}' to '{}'", + ai_session_id, + history_manager.ref_name() + ) + })?; + (blob_hash.to_string(), false) + }; + + let raw_artifact_path = write_pretty_json_artifact( + &storage_path.join(MANAGED_ARTIFACTS_DIR), + &ai_session_id, + artifact, + ) + .await?; + let audit_bundle_path = write_pretty_json_artifact( + &storage_path.join(AUDIT_BUNDLES_DIR), + &ai_session_id, + &bundle, + ) + .await?; + let intent_extraction_path = match &bundle.bridge.intent_extraction_artifact { + Some(intent_extraction_artifact) => Some( + write_pretty_json_artifact( + &storage_path.join(INTENT_EXTRACTIONS_DIR), + &ai_session_id, + intent_extraction_artifact, + ) + .await? + .to_string_lossy() + .to_string(), + ), + None => { + delete_generated_artifact_if_exists( + &storage_path.join(INTENT_EXTRACTIONS_DIR), + &ai_session_id, + ) + .await?; + None + } + }; + Ok(PersistedManagedArtifactOutcome { + provider_session_id, + ai_session_id, + ai_session_object_hash, + already_persisted, + intent_extraction_path, + raw_artifact_path: raw_artifact_path.to_string_lossy().to_string(), + audit_bundle_path: audit_bundle_path.to_string_lossy().to_string(), + }) +} + +impl ClaudeManagedArtifact { + fn prompt_text(&self) -> Option { + self.hook_events + .iter() + .find(|event| event.hook == "UserPromptSubmit") + .and_then(|event| event.input.get("prompt")) + .and_then(Value::as_str) + .map(ToString::to_string) + .or_else(|| self.prompt.clone()) + } + + fn transcript_path(&self) -> Option { + self.hook_events.iter().find_map(|event| { + event + .input + .get("transcript_path") + .and_then(Value::as_str) + .map(ToString::to_string) + }) + } + + fn assistant_text_messages(&self) -> Vec { + self.messages + .iter() + .filter(|message| message.get("type").and_then(Value::as_str) == Some("assistant")) + .flat_map(|message| { + message + .get("message") + .and_then(|value| value.get("content")) + .and_then(Value::as_array) + .into_iter() + .flatten() + 
.filter_map(|content| { + if content.get("type").and_then(Value::as_str) != Some("text") { + return None; + } + content + .get("text") + .and_then(Value::as_str) + .map(str::trim) + .filter(|text| !text.is_empty()) + .map(ToString::to_string) + }) + .collect::>() + }) + .collect() + } + + fn has_stop_hook(&self) -> bool { + self.hook_events.iter().any(|event| event.hook == "Stop") + } +} + +fn extract_system_init(artifact: &ClaudeManagedArtifact) -> Result { + let message = artifact + .messages + .iter() + .find(|message| { + message.get("type").and_then(Value::as_str) == Some("system") + && message.get("subtype").and_then(Value::as_str) == Some("init") + }) + .ok_or_else(|| anyhow!("system init message is missing"))?; + + let session_id = message + .get("session_id") + .and_then(Value::as_str) + .filter(|value| !value.trim().is_empty()) + .ok_or_else(|| anyhow!("system init message is missing session_id"))? + .to_string(); + validate_managed_session_id(&session_id)?; + let cwd = message + .get("cwd") + .and_then(Value::as_str) + .filter(|value| !value.trim().is_empty()) + .map(ToString::to_string) + .unwrap_or_else(|| artifact.cwd.clone()); + let model = message + .get("model") + .and_then(Value::as_str) + .map(ToString::to_string); + let permission_mode = message + .get("permissionMode") + .and_then(Value::as_str) + .map(ToString::to_string); + let tools = message + .get("tools") + .and_then(Value::as_array) + .map(|items| { + items + .iter() + .filter_map(Value::as_str) + .map(ToString::to_string) + .collect::>() + }) + .unwrap_or_default(); + let api_key_source = message + .get("apiKeySource") + .and_then(Value::as_str) + .map(ToString::to_string); + let claude_code_version = message + .get("claude_code_version") + .and_then(Value::as_str) + .map(ToString::to_string); + let output_style = message + .get("output_style") + .and_then(Value::as_str) + .map(ToString::to_string); + let agents = message + .get("agents") + .and_then(Value::as_array) + .map(|items| { + 
items + .iter() + .filter_map(Value::as_str) + .map(ToString::to_string) + .collect::>() + }) + .unwrap_or_default(); + let skills = message + .get("skills") + .and_then(Value::as_array) + .map(|items| { + items + .iter() + .filter_map(Value::as_str) + .map(ToString::to_string) + .collect::>() + }) + .unwrap_or_default(); + let slash_commands = message + .get("slash_commands") + .and_then(Value::as_array) + .map(|items| { + items + .iter() + .filter_map(Value::as_str) + .map(ToString::to_string) + .collect::>() + }) + .unwrap_or_default(); + let mcp_servers = message + .get("mcp_servers") + .and_then(Value::as_array) + .cloned() + .unwrap_or_default(); + let plugins = message + .get("plugins") + .and_then(Value::as_array) + .cloned() + .unwrap_or_default(); + let fast_mode_state = message + .get("fast_mode_state") + .and_then(Value::as_str) + .map(ToString::to_string); + + Ok(ManagedSystemInit { + session_id, + cwd, + model, + permission_mode, + tools, + api_key_source, + claude_code_version, + output_style, + agents, + skills, + slash_commands, + mcp_servers, + plugins, + fast_mode_state, + }) +} + +fn validate_managed_session_id(session_id: &str) -> Result<()> { + if session_id.len() > 128 { + bail!("invalid managed session_id: exceeds 128 characters"); + } + if !session_id + .chars() + .all(|char| char.is_ascii_alphanumeric() || matches!(char, '.' 
| '_' | '-')) + { + bail!("invalid managed session_id: only [A-Za-z0-9._-] is allowed"); + } + Ok(()) +} + +fn append_raw_hook_events(session: &mut SessionState, hook_events: &[ClaudeManagedHookEvent]) { + let items = hook_events + .iter() + .map(|event| { + json!({ + "hook_event_name": event.hook, + "session_id": event.input.get("session_id"), + "cwd": event.input.get("cwd"), + "transcript_path": event.input.get("transcript_path"), + "extra": event.input, + "timestamp": Utc::now().to_rfc3339(), + }) + }) + .collect::>(); + session + .metadata + .insert(RAW_HOOK_EVENTS_KEY.to_string(), Value::Array(items)); +} + +fn merge_tool_hook_events(hook_events: &[ClaudeManagedHookEvent]) -> Vec { + let mut order = Vec::new(); + let mut seen = HashSet::new(); + let mut pairs = HashMap::::new(); + + for event in hook_events { + let Some(tool_use_id) = event + .input + .get("tool_use_id") + .and_then(Value::as_str) + .map(ToString::to_string) + else { + continue; + }; + + if seen.insert(tool_use_id.clone()) { + order.push(tool_use_id.clone()); + } + + let pair = pairs + .entry(tool_use_id.clone()) + .or_insert_with(|| ToolHookPair { + tool_use_id: tool_use_id.clone(), + ..ToolHookPair::default() + }); + match event.hook.as_str() { + "PreToolUse" => { + pair.saw_pre = true; + pair.tool_name = event + .input + .get("tool_name") + .and_then(Value::as_str) + .map(ToString::to_string); + pair.tool_input = event.input.get("tool_input").cloned(); + pair.transcript_path = event + .input + .get("transcript_path") + .and_then(Value::as_str) + .map(ToString::to_string); + } + "PostToolUse" => { + pair.saw_post = true; + if pair.tool_name.is_none() { + pair.tool_name = event + .input + .get("tool_name") + .and_then(Value::as_str) + .map(ToString::to_string); + } + if pair.tool_input.is_none() { + pair.tool_input = event.input.get("tool_input").cloned(); + } + pair.tool_response = event.input.get("tool_response").cloned(); + if pair.transcript_path.is_none() { + pair.transcript_path = event 
+ .input + .get("transcript_path") + .and_then(Value::as_str) + .map(ToString::to_string); + } + } + _ => {} + } + } + + order + .into_iter() + .filter_map(|id| pairs.remove(&id)) + .collect::>() +} + +fn build_object_candidates( + session: &SessionState, + system_init: &ManagedSystemInit, + artifact: &ClaudeManagedArtifact, +) -> Result { + let thread_id = session.id.clone(); + let run_id = format!("{thread_id}::run"); + let run_started_at = session.created_at.to_rfc3339(); + let observed_at = session.updated_at.to_rfc3339(); + let run_status = managed_run_status(artifact); + let run_error = managed_run_error(artifact); + let provenance_parameters = build_provenance_parameters(system_init, artifact); + let tool_invocation_events = merge_tool_hook_events(&artifact.hook_events) + .into_iter() + .map(|tool| { + ManagedToolInvocationEvent::from_tool_hook_pair(&thread_id, &run_id, &observed_at, tool) + }) + .collect::>(); + let provider_init_snapshot = + build_provider_init_snapshot(&thread_id, &run_id, &observed_at, system_init); + let run_usage_event = artifact + .result_message + .as_ref() + .and_then(|result| result.usage.clone()) + .map(|usage| ManagedRunUsageEvent { + run_id: run_id.clone(), + thread_id: thread_id.clone(), + at: observed_at.clone(), + usage, + }); + let tool_runtime_events = + build_tool_runtime_events(&thread_id, &run_id, &observed_at, artifact); + let assistant_runtime_events = + build_assistant_runtime_events(&thread_id, &run_id, &observed_at, artifact); + let task_runtime_events = + build_task_runtime_events(&thread_id, &run_id, &observed_at, artifact); + let decision_runtime_events = + build_decision_runtime_events(&thread_id, &run_id, &observed_at, artifact); + let context_runtime_events = + build_context_runtime_events(&thread_id, &run_id, &observed_at, artifact); + + Ok(ManagedObjectCandidates { + thread_id: thread_id.clone(), + run_snapshot: ManagedRunSnapshot { + id: run_id.clone(), + thread_id: thread_id.clone(), + started_at: 
run_started_at, + }, + run_event: ManagedRunEvent { + id: format!("{run_id}::status"), + run_id: run_id.clone(), + status: run_status, + at: observed_at.clone(), + error: run_error, + }, + provenance_snapshot: ManagedProvenanceSnapshot { + id: format!("{run_id}::provenance"), + run_id, + provider: "claude".to_string(), + model: system_init.model.clone(), + parameters: provenance_parameters, + created_at: session.created_at.to_rfc3339(), + }, + provider_init_snapshot, + run_usage_event, + tool_invocation_events, + tool_runtime_events, + assistant_runtime_events, + task_runtime_events, + decision_runtime_events, + context_runtime_events, + }) +} + +fn build_tool_runtime_events( + thread_id: &str, + run_id: &str, + observed_at: &str, + artifact: &ClaudeManagedArtifact, +) -> Vec { + let mut events = Vec::new(); + + for (index, message) in artifact.messages.iter().enumerate() { + let Some(message_type) = message.get("type").and_then(Value::as_str) else { + continue; + }; + if !matches!(message_type, "tool_progress" | "tool_use_summary") { + continue; + } + + let id = message + .get("uuid") + .and_then(Value::as_str) + .map(ToString::to_string) + .or_else(|| { + message + .get("tool_use_id") + .and_then(Value::as_str) + .map(ToString::to_string) + }) + .unwrap_or_else(|| format!("{run_id}::{message_type}::{index}")); + events.push(ManagedSemanticRuntimeEvent { + id, + run_id: run_id.to_string(), + thread_id: thread_id.to_string(), + semantic_object: "Tool".to_string(), + kind: message_type.to_string(), + source: "stream".to_string(), + at: observed_at.to_string(), + payload: message.clone(), + }); + } + + events +} + +fn build_assistant_runtime_events( + thread_id: &str, + run_id: &str, + observed_at: &str, + artifact: &ClaudeManagedArtifact, +) -> Vec { + let mut events = Vec::new(); + + for (index, message) in artifact.messages.iter().enumerate() { + if message.get("type").and_then(Value::as_str) != Some("stream_event") { + continue; + } + + let kind = message + 
.get("event") + .and_then(|event| event.get("type")) + .and_then(Value::as_str) + .unwrap_or("stream_event"); + events.push(ManagedSemanticRuntimeEvent { + id: message + .get("uuid") + .and_then(Value::as_str) + .map(ToString::to_string) + .unwrap_or_else(|| format!("{run_id}::{kind}::{index}")), + run_id: run_id.to_string(), + thread_id: thread_id.to_string(), + semantic_object: "Assistant".to_string(), + kind: kind.to_string(), + source: "stream".to_string(), + at: observed_at.to_string(), + payload: message.clone(), + }); + } + + events +} + +fn build_provider_init_snapshot( + thread_id: &str, + run_id: &str, + observed_at: &str, + system_init: &ManagedSystemInit, +) -> ManagedProviderInitSnapshot { + ManagedProviderInitSnapshot { + id: format!("{run_id}::provider-init"), + run_id: run_id.to_string(), + thread_id: thread_id.to_string(), + api_key_source: system_init.api_key_source.clone(), + claude_code_version: system_init.claude_code_version.clone(), + output_style: system_init.output_style.clone(), + agents: system_init.agents.clone(), + skills: system_init.skills.clone(), + slash_commands: system_init.slash_commands.clone(), + mcp_servers: system_init.mcp_servers.clone(), + plugins: system_init.plugins.clone(), + fast_mode_state: system_init.fast_mode_state.clone(), + captured_at: observed_at.to_string(), + } +} + +fn build_task_runtime_events( + thread_id: &str, + run_id: &str, + observed_at: &str, + artifact: &ClaudeManagedArtifact, +) -> Vec { + let mut events = Vec::new(); + + for (index, message) in artifact.messages.iter().enumerate() { + let Some(subtype) = message.get("subtype").and_then(Value::as_str) else { + continue; + }; + if !matches!( + subtype, + "task_started" | "task_progress" | "task_notification" + ) { + continue; + } + + events.push(ManagedSemanticRuntimeEvent { + id: message + .get("uuid") + .and_then(Value::as_str) + .map(ToString::to_string) + .unwrap_or_else(|| format!("{run_id}::{subtype}::{index}")), + run_id: run_id.to_string(), + 
thread_id: thread_id.to_string(), + semantic_object: "Task".to_string(), + kind: subtype.to_string(), + source: "stream".to_string(), + at: observed_at.to_string(), + payload: message.clone(), + }); + } + + for (index, hook_event) in artifact.hook_events.iter().enumerate() { + if !matches!( + hook_event.hook.as_str(), + "SubagentStart" | "SubagentStop" | "TaskCompleted" | "TeammateIdle" + ) { + continue; + } + + events.push(ManagedSemanticRuntimeEvent { + id: hook_event + .input + .get("task_id") + .or_else(|| hook_event.input.get("agent_id")) + .and_then(Value::as_str) + .map(ToString::to_string) + .unwrap_or_else(|| format!("{run_id}::{}::{index}", hook_event.hook)), + run_id: run_id.to_string(), + thread_id: thread_id.to_string(), + semantic_object: "Task".to_string(), + kind: hook_event.hook.to_string(), + source: "hook".to_string(), + at: observed_at.to_string(), + payload: hook_event.input.clone(), + }); + } + + events +} + +fn build_decision_runtime_events( + thread_id: &str, + run_id: &str, + observed_at: &str, + artifact: &ClaudeManagedArtifact, +) -> Vec { + let mut events = Vec::new(); + + for (index, hook_event) in artifact.hook_events.iter().enumerate() { + if !matches!( + hook_event.hook.as_str(), + "PermissionRequest" | "Elicitation" | "ElicitationResult" | "CanUseTool" + ) { + continue; + } + + events.push(ManagedSemanticRuntimeEvent { + id: hook_event + .input + .get("tool_use_id") + .or_else(|| hook_event.input.get("elicitation_id")) + .and_then(Value::as_str) + .map(ToString::to_string) + .unwrap_or_else(|| format!("{run_id}::{}::{index}", hook_event.hook)), + run_id: run_id.to_string(), + thread_id: thread_id.to_string(), + semantic_object: "Decision".to_string(), + kind: hook_event.hook.to_string(), + source: "hook".to_string(), + at: observed_at.to_string(), + payload: hook_event.input.clone(), + }); + } + + if let Some(permission_denials) = artifact + .result_message + .as_ref() + .and_then(|result| result.permission_denials.clone()) + { + 
let denial_count = permission_denials.as_array().map(Vec::len).unwrap_or(0); + if denial_count > 0 { + events.push(ManagedSemanticRuntimeEvent { + id: format!("{run_id}::permission-denials"), + run_id: run_id.to_string(), + thread_id: thread_id.to_string(), + semantic_object: "Decision".to_string(), + kind: "permission_denials".to_string(), + source: "result".to_string(), + at: observed_at.to_string(), + payload: permission_denials, + }); + } + } + + events +} + +fn build_context_runtime_events( + thread_id: &str, + run_id: &str, + observed_at: &str, + artifact: &ClaudeManagedArtifact, +) -> Vec { + let mut events = Vec::new(); + + for (index, message) in artifact.messages.iter().enumerate() { + let message_type = message.get("type").and_then(Value::as_str); + let subtype = message.get("subtype").and_then(Value::as_str); + let is_context_message = + matches!( + (message_type, subtype), + ( + Some("system"), + Some("status" | "compact_boundary" | "files_persisted") + ) + ) || matches!(message_type, Some("rate_limit_event" | "prompt_suggestion")); + if !is_context_message { + continue; + } + + let kind = subtype.unwrap_or_else(|| message_type.unwrap_or("unknown")); + events.push(ManagedSemanticRuntimeEvent { + id: message + .get("uuid") + .and_then(Value::as_str) + .map(ToString::to_string) + .unwrap_or_else(|| format!("{run_id}::{kind}::{index}")), + run_id: run_id.to_string(), + thread_id: thread_id.to_string(), + semantic_object: "Context".to_string(), + kind: kind.to_string(), + source: "stream".to_string(), + at: observed_at.to_string(), + payload: message.clone(), + }); + } + + for (index, hook_event) in artifact.hook_events.iter().enumerate() { + if !matches!( + hook_event.hook.as_str(), + "PreCompact" + | "PostCompact" + | "InstructionsLoaded" + | "ConfigChange" + | "WorktreeCreate" + | "WorktreeRemove" + ) { + continue; + } + + events.push(ManagedSemanticRuntimeEvent { + id: hook_event + .input + .get("file_path") + .or_else(|| 
hook_event.input.get("worktree_path")) + .and_then(Value::as_str) + .map(ToString::to_string) + .unwrap_or_else(|| format!("{run_id}::{}::{index}", hook_event.hook)), + run_id: run_id.to_string(), + thread_id: thread_id.to_string(), + semantic_object: "Context".to_string(), + kind: hook_event.hook.to_string(), + source: "hook".to_string(), + at: observed_at.to_string(), + payload: hook_event.input.clone(), + }); + } + + events +} + +fn build_provenance_parameters( + system_init: &ManagedSystemInit, + artifact: &ClaudeManagedArtifact, +) -> Value { + json!({ + "cwd": system_init.cwd, + "permissionMode": system_init.permission_mode, + "tools": system_init.tools, + "apiKeySource": system_init.api_key_source, + "claudeCodeVersion": system_init.claude_code_version, + "outputStyle": system_init.output_style, + "agents": system_init.agents, + "skills": system_init.skills, + "slashCommands": system_init.slash_commands, + "mcpServers": system_init.mcp_servers, + "plugins": system_init.plugins, + "fastModeState": system_init.fast_mode_state, + "durationMs": artifact.result_message.as_ref().and_then(|result| result.duration_ms), + "durationApiMs": artifact.result_message.as_ref().and_then(|result| result.duration_api_ms), + "numTurns": artifact.result_message.as_ref().and_then(|result| result.num_turns), + "stopReason": artifact.result_message.as_ref().and_then(|result| result.stop_reason.clone()), + "totalCostUsd": artifact.result_message.as_ref().and_then(|result| result.total_cost_usd), + }) +} + +fn managed_run_status(artifact: &ClaudeManagedArtifact) -> String { + match artifact.result_message.as_ref() { + Some(result) + if result.is_error == Some(true) + || matches!(result.subtype.as_deref(), Some("error" | "failed")) => + { + "failed".to_string() + } + Some(_) => "completed".to_string(), + None if artifact.helper_timed_out => "timed_out".to_string(), + None if artifact.helper_error.is_some() => "failed".to_string(), + None => "running".to_string(), + } +} + +fn 
managed_run_error(artifact: &ClaudeManagedArtifact) -> Option { + if let Some(result) = artifact.result_message.as_ref() { + if result.is_error != Some(true) + && !matches!(result.subtype.as_deref(), Some("error" | "failed")) + { + return artifact.helper_error.clone(); + } + return result + .result + .clone() + .filter(|value| !value.trim().is_empty()) + .or_else(|| Some("managed Claude SDK run reported an error".to_string())); + } + artifact.helper_error.clone() +} + +fn append_tool_event(session: &mut SessionState, tool_event: &ToolHookPair) { + let entry = session + .metadata + .entry("tool_events".to_string()) + .or_insert_with(|| Value::Array(Vec::new())); + let Value::Array(items) = entry else { + session.metadata.insert( + "tool_events".to_string(), + Value::Array(vec![json!({ + "tool_use_id": tool_event.tool_use_id, + "name": tool_event.tool_name, + "input": tool_event.tool_input, + "response": tool_event.tool_response, + "transcript_path": tool_event.transcript_path, + "timestamp": Utc::now().to_rfc3339(), + })]), + ); + return; + }; + + items.push(json!({ + "tool_use_id": tool_event.tool_use_id, + "name": tool_event.tool_name, + "input": tool_event.tool_input, + "response": tool_event.tool_response, + "transcript_path": tool_event.transcript_path, + "timestamp": Utc::now().to_rfc3339(), + })); +} + +fn append_normalized_event(session: &mut SessionState, event: Value) { + let entry = session + .metadata + .entry(NORMALIZED_EVENTS_KEY.to_string()) + .or_insert_with(|| Value::Array(Vec::new())); + let Value::Array(items) = entry else { + session + .metadata + .insert(NORMALIZED_EVENTS_KEY.to_string(), Value::Array(vec![event])); + return; + }; + items.push(event); +} + +fn build_bridge_session(artifact: &ClaudeManagedArtifact) -> Result { + let system_init = extract_system_init(artifact) + .context("managed artifact does not contain a valid system init message")?; + let provider_session_id = system_init.session_id.clone(); + let mut session = 
SessionState::new(&system_init.cwd); + session.id = build_ai_session_id("claude", &provider_session_id); + session.working_dir = system_init.cwd.clone(); + session.summary = artifact + .result_message + .as_ref() + .and_then(|result| result.structured_output.as_ref()) + .and_then(|output| output.get("summary")) + .and_then(Value::as_str) + .unwrap_or_default() + .to_string(); + + session + .metadata + .insert("provider".to_string(), json!("claude")); + session.metadata.insert( + "provider_session_id".to_string(), + json!(provider_session_id.clone()), + ); + session + .metadata + .insert("managed_source".to_string(), json!(MANAGED_SOURCE_NAME)); + + if let Some(model) = &system_init.model { + session.metadata.insert("model".to_string(), json!(model)); + } + if let Some(permission_mode) = &system_init.permission_mode { + session + .metadata + .insert("permission_mode".to_string(), json!(permission_mode)); + } + if !system_init.tools.is_empty() { + session + .metadata + .insert("available_tools".to_string(), json!(system_init.tools)); + } + + if let Some(transcript_path) = artifact.transcript_path() { + session + .metadata + .insert("transcript_path".to_string(), json!(transcript_path)); + } + + append_normalized_event( + &mut session, + json!({ + "provider": "claude", + "kind": "session_start", + "timestamp": Utc::now().to_rfc3339(), + "prompt": Value::Null, + "tool_name": Value::Null, + "assistant_message": Value::Null, + "has_model": system_init.model.is_some(), + "has_tool_input": false, + "has_tool_response": false, + }), + ); + + append_raw_hook_events(&mut session, &artifact.hook_events); + + if let Some(prompt) = artifact.prompt_text() { + session.add_user_message(&prompt); + append_normalized_event( + &mut session, + json!({ + "provider": "claude", + "kind": "turn_start", + "timestamp": Utc::now().to_rfc3339(), + "prompt": prompt, + "tool_name": Value::Null, + "assistant_message": Value::Null, + "has_model": false, + "has_tool_input": false, + 
"has_tool_response": false, + }), + ); + } + + for assistant_text in artifact.assistant_text_messages() { + session.add_assistant_message(&assistant_text); + session + .metadata + .insert("last_assistant_message".to_string(), json!(assistant_text)); + } + + for tool_event in merge_tool_hook_events(&artifact.hook_events) { + append_tool_event(&mut session, &tool_event); + append_normalized_event( + &mut session, + json!({ + "provider": "claude", + "kind": "tool_use", + "timestamp": Utc::now().to_rfc3339(), + "prompt": Value::Null, + "tool_name": tool_event.tool_name, + "assistant_message": Value::Null, + "has_model": false, + "has_tool_input": tool_event.tool_input.is_some(), + "has_tool_response": tool_event.tool_response.is_some(), + }), + ); + } + + if artifact.has_stop_hook() { + let last_assistant_message = session + .metadata + .get("last_assistant_message") + .cloned() + .unwrap_or(Value::Null); + append_normalized_event( + &mut session, + json!({ + "provider": "claude", + "kind": "turn_end", + "timestamp": Utc::now().to_rfc3339(), + "prompt": Value::Null, + "tool_name": Value::Null, + "assistant_message": last_assistant_message, + "has_model": false, + "has_tool_input": false, + "has_tool_response": false, + }), + ); + } + + if let Some(result) = &artifact.result_message { + if let Some(usage) = &result.usage { + session.metadata.insert("usage".to_string(), usage.clone()); + } + if let Some(total_cost_usd) = result.total_cost_usd { + session + .metadata + .insert("total_cost_usd".to_string(), json!(total_cost_usd)); + } + if let Some(duration_ms) = result.duration_ms { + session + .metadata + .insert("duration_ms".to_string(), json!(duration_ms)); + } + if let Some(stop_reason) = &result.stop_reason { + session + .metadata + .insert("stop_reason".to_string(), json!(stop_reason)); + } + + append_normalized_event( + &mut session, + json!({ + "provider": "claude", + "kind": "session_end", + "timestamp": Utc::now().to_rfc3339(), + "prompt": Value::Null, + 
"tool_name": Value::Null, + "assistant_message": Value::Null, + "has_model": false, + "has_tool_input": false, + "has_tool_response": false, + }), + ); + session + .metadata + .insert(SESSION_PHASE_METADATA_KEY.to_string(), json!("ended")); + } else if artifact.helper_timed_out || artifact.helper_error.is_some() { + session + .metadata + .insert(SESSION_PHASE_METADATA_KEY.to_string(), json!("stopped")); + } else { + session + .metadata + .insert(SESSION_PHASE_METADATA_KEY.to_string(), json!("active")); + } + + if artifact.helper_timed_out { + session + .metadata + .insert("managed_helper_timed_out".to_string(), json!(true)); + } + if let Some(helper_error) = &artifact.helper_error { + session + .metadata + .insert("managed_helper_error".to_string(), json!(helper_error)); + } + + session.metadata.insert( + "provider_runtime".to_string(), + build_provider_runtime_metadata(&session, &system_init, artifact), + ); + + Ok(session) +} + +fn build_provider_runtime_metadata( + session: &SessionState, + system_init: &ManagedSystemInit, + artifact: &ClaudeManagedArtifact, +) -> Value { + let thread_id = session.id.clone(); + let run_id = format!("{thread_id}::run"); + let observed_at = session.updated_at.to_rfc3339(); + + json!({ + "providerInit": build_provider_init_snapshot(&thread_id, &run_id, &observed_at, system_init), + "taskRuntimeEvents": build_task_runtime_events(&thread_id, &run_id, &observed_at, artifact), + "decisionRuntimeEvents": build_decision_runtime_events(&thread_id, &run_id, &observed_at, artifact), + "contextRuntimeEvents": build_context_runtime_events(&thread_id, &run_id, &observed_at, artifact), + }) +} + +fn extract_intent_extraction_outcome( + result_message: Option<&ClaudeManagedResultMessage>, +) -> IntentExtractionOutcome { + let Some(result_message) = result_message else { + return IntentExtractionOutcome { + extraction: None, + error: None, + }; + }; + let Some(structured_output) = result_message.structured_output.clone() else { + return 
IntentExtractionOutcome { + extraction: None, + error: None, + }; + }; + + match serde_json::from_value::(structured_output) { + Ok(output) => IntentExtractionOutcome { + extraction: Some(IntentDraft { + intent: DraftIntent { + summary: output.summary, + problem_statement: output.problem_statement, + change_type: output.change_type, + objectives: output.objectives, + in_scope: output.in_scope, + out_of_scope: output.out_of_scope, + touch_hints: output.touch_hints, + }, + acceptance: DraftAcceptance { + success_criteria: output.success_criteria, + fast_checks: output.fast_checks, + integration_checks: output.integration_checks, + security_checks: output.security_checks, + release_checks: output.release_checks, + }, + risk: DraftRisk { + rationale: output.risk_rationale, + factors: output.risk_factors, + level: output.risk_level, + }, + }), + error: None, + }, + Err(err) => IntentExtractionOutcome { + extraction: None, + error: Some(format!( + "managed result structured_output does not match the intent extraction bridge schema: {err}" + )), + }, + } +} + +fn build_managed_ai_session_payload(session: &SessionState) -> Value { + let events = session + .metadata + .get(NORMALIZED_EVENTS_KEY) + .and_then(Value::as_array) + .cloned() + .unwrap_or_default(); + let raw_events = session + .metadata + .get(RAW_HOOK_EVENTS_KEY) + .and_then(Value::as_array) + .cloned() + .unwrap_or_default(); + let phase = session + .metadata + .get(SESSION_PHASE_METADATA_KEY) + .and_then(Value::as_str) + .unwrap_or("active"); + let provider_session_id = session + .metadata + .get("provider_session_id") + .and_then(Value::as_str) + .unwrap_or(&session.id); + let transcript_path = session + .metadata + .get("transcript_path") + .and_then(Value::as_str); + let last_assistant_message = session + .metadata + .get("last_assistant_message") + .and_then(Value::as_str); + + json!({ + "schema": AI_SESSION_SCHEMA, + "object_type": AI_SESSION_TYPE, + "provider": "claude", + "ai_session_id": session.id, + 
"provider_session_id": provider_session_id, + "state_machine": { + "phase": phase, + "status": phase_status_label(phase), + "event_count": events.len(), + "tool_use_count": count_events(&events, "tool_use"), + "compaction_count": count_events(&events, "compaction"), + "started_at": first_event_timestamp(&events, "session_start"), + "ended_at": first_event_timestamp(&events, "session_end"), + "updated_at": session.updated_at.to_rfc3339(), + }, + "summary": { + "message_count": session.messages.len(), + "user_message_count": session.messages.iter().filter(|message| message.role == "user").count(), + "assistant_message_count": session.messages.iter().filter(|message| message.role == "assistant").count(), + "last_assistant_message": last_assistant_message, + }, + "transcript": { + "path": transcript_path, + "raw_event_count": raw_events.len(), + }, + "events": events, + "raw_hook_events": raw_events, + "session": session, + "ingest_meta": { + "source": MANAGED_SOURCE_NAME, + "provider": "claude", + "ingested_at": Utc::now().to_rfc3339(), + } + }) +} + +fn phase_status_label(phase: &str) -> &'static str { + match phase { + "active" => "running", + "stopped" => "idle", + "ended" => "ended", + _ => "running", + } +} + +fn count_events(events: &[Value], kind: &str) -> usize { + events + .iter() + .filter(|value| value.get("kind").and_then(Value::as_str) == Some(kind)) + .count() +} + +fn first_event_timestamp(events: &[Value], kind: &str) -> Option<String> { + events + .iter() + .find(|value| value.get("kind").and_then(Value::as_str) == Some(kind)) + .and_then(|value| value.get("timestamp")) + .and_then(Value::as_str) + .map(ToString::to_string) +} + +fn collect_touch_hints(tool_invocations: &[ManagedToolInvocation], cwd: &str) -> Vec<String> { + let mut hints = Vec::new(); + let mut seen = HashSet::new(); + + for tool in tool_invocations { + for candidate in extract_file_candidates(tool) { + let normalized = normalize_hint_path(&candidate, cwd); + if normalized.is_empty() || 
!seen.insert(normalized.clone()) { + continue; + } + hints.push(normalized); + } + } + + hints +} + +fn extract_file_candidates(tool: &ManagedToolInvocation) -> Vec<String> { + let mut candidates = Vec::new(); + + if let Some(input) = &tool.tool_input + && let Some(file_path) = input.get("file_path").and_then(Value::as_str) + { + candidates.push(file_path.to_string()); + } + + if let Some(response) = &tool.tool_response + && let Some(file_path) = response + .get("file") + .and_then(|value| value.get("filePath")) + .and_then(Value::as_str) + { + candidates.push(file_path.to_string()); + } + + candidates +} + +fn normalize_hint_path(path: &str, cwd: &str) -> String { + let trimmed = path.trim(); + if trimmed.is_empty() { + return String::new(); + } + + let normalized_path = trimmed.replace('\\', "/").trim_end_matches('/').to_string(); + let normalized_cwd = cwd + .trim() + .replace('\\', "/") + .trim_end_matches('/') + .to_string(); + if normalized_path.is_empty() || normalized_path == normalized_cwd { + return String::new(); + } + + let cwd_prefix = format!("{normalized_cwd}/"); + if let Some(relative) = normalized_path.strip_prefix(&cwd_prefix) { + return relative.to_string(); + } + + normalized_path +} + +fn build_field_provenance( + system_init: &ManagedSystemInit, + artifact: &ClaudeManagedArtifact, + extraction_outcome: &IntentExtractionOutcome, + tool_invocations: &[ManagedToolInvocation], + touch_hints: &[String], +) -> Result<Vec<ManagedFieldProvenance>> { + let mut entries = Vec::new(); + + push_field_provenance( + &mut entries, + "meta.providerSessionId", + "system(init)", + "$.messages[type=system,subtype=init].session_id", + json!(system_init.session_id), + None, + ); + push_field_provenance( + &mut entries, + "meta.cwd", + "system(init)", + "$.messages[type=system,subtype=init].cwd", + json!(system_init.cwd), + None, + ); + if let Some(model) = &system_init.model { + push_field_provenance( + &mut entries, + "meta.model", + "system(init)", + "$.messages[type=system,subtype=init].model", + 
json!(model), + None, + ); + } + if let Some(permission_mode) = &system_init.permission_mode { + push_field_provenance( + &mut entries, + "meta.permissionMode", + "system(init)", + "$.messages[type=system,subtype=init].permissionMode", + json!(permission_mode), + None, + ); + } + if !system_init.tools.is_empty() { + push_field_provenance( + &mut entries, + "meta.availableTools", + "system(init)", + "$.messages[type=system,subtype=init].tools", + json!(system_init.tools), + None, + ); + } + if let Some(api_key_source) = &system_init.api_key_source { + push_field_provenance( + &mut entries, + "meta.apiKeySource", + "system(init)", + "$.messages[type=system,subtype=init].apiKeySource", + json!(api_key_source), + Some("Provider-native runtime fact for auth/provenance classification.".to_string()), + ); + } + if let Some(claude_code_version) = &system_init.claude_code_version { + push_field_provenance( + &mut entries, + "meta.claudeCodeVersion", + "system(init)", + "$.messages[type=system,subtype=init].claude_code_version", + json!(claude_code_version), + Some( + "Provider-native runtime fact for provider capability/runtime debugging." + .to_string(), + ), + ); + } + if let Some(output_style) = &system_init.output_style { + push_field_provenance( + &mut entries, + "meta.outputStyle", + "system(init)", + "$.messages[type=system,subtype=init].output_style", + json!(output_style), + Some("Maps to provider capability/runtime presentation facts rather than formal intent semantics.".to_string()), + ); + } + if !system_init.skills.is_empty() { + push_field_provenance( + &mut entries, + "meta.skills", + "system(init)", + "$.messages[type=system,subtype=init].skills", + json!(system_init.skills), + Some( + "Provider-native runtime fact; supports provenance/capability reconstruction." 
+ .to_string(), + ), + ); + } + if !system_init.agents.is_empty() { + push_field_provenance( + &mut entries, + "meta.agents", + "system(init)", + "$.messages[type=system,subtype=init].agents", + json!(system_init.agents), + Some( + "Provider-native runtime fact; supports Task/subagent runtime mapping.".to_string(), + ), + ); + } + if let Some(transcript_path) = artifact.transcript_path() { + push_field_provenance( + &mut entries, + "transcript.path", + "hooks", + "$.hookEvents[*].input.transcript_path", + json!(transcript_path), + None, + ); + } + + if let Some(result) = &artifact.result_message { + if let Some(usage) = &result.usage { + push_field_provenance( + &mut entries, + "usage", + "result", + "$.resultMessage.usage", + usage.clone(), + None, + ); + } + if let Some(total_cost_usd) = result.total_cost_usd { + push_field_provenance( + &mut entries, + "usage.totalCostUsd", + "result", + "$.resultMessage.total_cost_usd", + json!(total_cost_usd), + None, + ); + } + if let Some(duration_ms) = result.duration_ms { + push_field_provenance( + &mut entries, + "usage.durationMs", + "result", + "$.resultMessage.duration_ms", + json!(duration_ms), + None, + ); + } + if let Some(stop_reason) = &result.stop_reason { + push_field_provenance( + &mut entries, + "usage.stopReason", + "result", + "$.resultMessage.stop_reason", + json!(stop_reason), + None, + ); + } + } + + if let Some(extraction) = &extraction_outcome.extraction { + push_field_provenance( + &mut entries, + "intent.summary", + "result.structured_output", + "$.resultMessage.structured_output.summary", + json!(extraction.intent.summary), + None, + ); + push_field_provenance( + &mut entries, + "intent.problemStatement", + "result.structured_output", + "$.resultMessage.structured_output.problemStatement", + json!(extraction.intent.problem_statement), + None, + ); + push_field_provenance( + &mut entries, + "intent.changeType", + "result.structured_output", + "$.resultMessage.structured_output.changeType", + 
json!(extraction.intent.change_type), + None, + ); + push_field_provenance( + &mut entries, + "intent.objectives", + "result.structured_output", + "$.resultMessage.structured_output.objectives", + serde_json::to_value(&extraction.intent.objectives) + .context("failed to serialize managed objectives provenance")?, + None, + ); + push_field_provenance( + &mut entries, + "intent.inScope", + "result.structured_output", + "$.resultMessage.structured_output.inScope", + serde_json::to_value(&extraction.intent.in_scope) + .context("failed to serialize managed inScope provenance")?, + None, + ); + push_field_provenance( + &mut entries, + "intent.outOfScope", + "result.structured_output", + "$.resultMessage.structured_output.outOfScope", + serde_json::to_value(&extraction.intent.out_of_scope) + .context("failed to serialize managed outOfScope provenance")?, + None, + ); + push_field_provenance( + &mut entries, + "acceptance.successCriteria", + "result.structured_output", + "$.resultMessage.structured_output.successCriteria", + serde_json::to_value(&extraction.acceptance.success_criteria) + .context("failed to serialize managed successCriteria provenance")?, + None, + ); + push_field_provenance( + &mut entries, + "risk.rationale", + "result.structured_output", + "$.resultMessage.structured_output.riskRationale", + json!(extraction.risk.rationale), + None, + ); + if !extraction.risk.factors.is_empty() { + push_field_provenance( + &mut entries, + "risk.factors", + "result.structured_output", + "$.resultMessage.structured_output.riskFactors", + serde_json::to_value(&extraction.risk.factors) + .context("failed to serialize managed riskFactors provenance")?, + None, + ); + } + if let Some(level) = &extraction.risk.level { + push_field_provenance( + &mut entries, + "risk.level", + "result.structured_output", + "$.resultMessage.structured_output.riskLevel", + json!(level), + None, + ); + } + } + + if !tool_invocations.is_empty() { + push_field_provenance( + &mut entries, + 
"evidence.toolInvocations", + "hooks", + "$.hookEvents[hook=PreToolUse|PostToolUse]", + serde_json::to_value(tool_invocations) + .context("failed to serialize managed tool invocation provenance")?, + Some("Merged by tool_use_id from paired PreToolUse/PostToolUse hooks.".to_string()), + ); + } + + if !touch_hints.is_empty() { + push_field_provenance( + &mut entries, + "intent.touchHints", + "hooks+tool_evidence", + "$.hookEvents[*].input.tool_input.file_path | $.hookEvents[*].input.tool_response.file.filePath", + serde_json::to_value(touch_hints) + .context("failed to serialize managed touchHints provenance")?, + Some("Derived from explicit file targets observed in tool input/response.".to_string()), + ); + } + + let task_runtime_events = + build_task_runtime_events("thread", "run", &Utc::now().to_rfc3339(), artifact); + if !task_runtime_events.is_empty() { + push_field_provenance( + &mut entries, + "runtime.taskEvents", + "stream+hooks", + "$.messages[subtype=task_*] | $.hookEvents[hook=SubagentStart|SubagentStop|TaskCompleted|TeammateIdle]", + serde_json::to_value(&task_runtime_events) + .context("failed to serialize managed task runtime provenance")?, + Some("Provider-native runtime facts that can currently map onto Task/subagent lifecycle, but are not formal Task snapshots.".to_string()), + ); + } + + let tool_runtime_events = + build_tool_runtime_events("thread", "run", &Utc::now().to_rfc3339(), artifact); + if !tool_runtime_events.is_empty() { + push_field_provenance( + &mut entries, + "runtime.toolEvents", + "stream", + "$.messages[type=tool_progress|tool_use_summary]", + serde_json::to_value(&tool_runtime_events) + .context("failed to serialize managed tool runtime provenance")?, + Some("Provider-native tool execution progress and summary events; these complement hook-based tool invocation facts.".to_string()), + ); + } + + let assistant_runtime_events = + build_assistant_runtime_events("thread", "run", &Utc::now().to_rfc3339(), artifact); + if 
!assistant_runtime_events.is_empty() { + push_field_provenance( + &mut entries, + "runtime.assistantEvents", + "stream", + "$.messages[type=stream_event]", + serde_json::to_value(&assistant_runtime_events) + .context("failed to serialize managed assistant runtime provenance")?, + Some("Provider-native partial assistant stream events; these are raw incremental output facts, not finalized assistant messages.".to_string()), + ); + } + + let decision_runtime_events = + build_decision_runtime_events("thread", "run", &Utc::now().to_rfc3339(), artifact); + if !decision_runtime_events.is_empty() { + push_field_provenance( + &mut entries, + "runtime.decisionEvents", + "hooks+result", + "$.hookEvents[hook=PermissionRequest|CanUseTool|Elicitation|ElicitationResult] | $.resultMessage.permission_denials", + serde_json::to_value(&decision_runtime_events) + .context("failed to serialize managed decision runtime provenance")?, + Some("Provider-native runtime facts for permission/human-gate surfaces; they are pre-decision evidence, not formal Decision objects.".to_string()), + ); + } + + let context_runtime_events = + build_context_runtime_events("thread", "run", &Utc::now().to_rfc3339(), artifact); + if !context_runtime_events.is_empty() { + push_field_provenance( + &mut entries, + "runtime.contextEvents", + "stream+hooks", + "$.messages[type=system,subtype=status|compact_boundary|files_persisted] | $.messages[type=rate_limit_event|prompt_suggestion] | $.hookEvents[hook=PreCompact|PostCompact|InstructionsLoaded|ConfigChange|WorktreeCreate|WorktreeRemove]", + serde_json::to_value(&context_runtime_events) + .context("failed to serialize managed context runtime provenance")?, + Some("Provider-native runtime facts for context maintenance and environment mutation; these support ContextFrame/ContextSnapshot reasoning later.".to_string()), + ); + } + + Ok(entries) +} + +fn push_field_provenance( + entries: &mut Vec<ManagedFieldProvenance>, + field_path: &str, + source_layer: &str, + source_path: &str, + value: 
Value, + note: Option<String>, +) { + entries.push(ManagedFieldProvenance { + field_path: field_path.to_string(), + source_layer: source_layer.to_string(), + source_path: source_path.to_string(), + value, + note, + }); +} + +impl IntentExtractionOutcome { + fn status_label(&self) -> &'static str { + if self.extraction.is_some() { + "accepted" + } else if self.error.is_some() { + "invalid" + } else { + "missing" + } + } +} + +impl PersistedManagedIntentExtraction { + fn new(ai_session_id: String, extraction: IntentDraft) -> Self { + Self { + schema: "libra.intent_extraction.v1".to_string(), + ai_session_id, + source: MANAGED_INTENT_EXTRACTION_SOURCE.to_string(), + extraction, + } + } +} + +async fn write_pretty_json_artifact<T>( + directory: &Path, + artifact_id: &str, + value: &T, +) -> Result<PathBuf> +where + T: Serialize, +{ + fs::create_dir_all(directory).await.with_context(|| { + format!( + "failed to create managed artifact directory '{}'", + directory.display() + ) + })?; + let destination = directory.join(format!("{artifact_id}.json")); + let payload = + serde_json::to_vec_pretty(value).context("failed to serialize managed JSON artifact")?; + write_atomic_file(&destination, &payload).await?; + Ok(destination) +} + +async fn delete_generated_artifact_if_exists(directory: &Path, artifact_id: &str) -> Result<()> { + let destination = directory.join(format!("{artifact_id}.json")); + match fs::remove_file(&destination).await { + Ok(()) => Ok(()), + Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(()), + Err(err) => Err(err).with_context(|| { + format!( + "failed to remove stale managed artifact '{}'", + destination.display() + ) + }), + } +} + +async fn write_atomic_file(destination: &Path, data: &[u8]) -> Result<()> { + let parent = destination.parent().ok_or_else(|| { + anyhow!( + "managed artifact path '{}' does not have a parent directory", + destination.display() + ) + })?; + fs::create_dir_all(parent) + .await + .with_context(|| format!("failed to create parent 
directory '{}'", parent.display()))?; + + let file_name = destination + .file_name() + .and_then(|name| name.to_str()) + .ok_or_else(|| { + anyhow!( + "managed artifact path '{}' does not have a valid file name", + destination.display() + ) + })?; + let unique_suffix = format!( + "{}.{}", + std::process::id(), + SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_nanos() + ); + let temp_path = parent.join(format!(".{file_name}.{unique_suffix}.tmp")); + + let mut temp_file = fs::OpenOptions::new() + .write(true) + .create_new(true) + .open(&temp_path) + .await + .with_context(|| { + format!( + "failed to create temporary managed artifact '{}'", + temp_path.display() + ) + })?; + temp_file.write_all(data).await.with_context(|| { + format!( + "failed to write temporary managed artifact '{}'", + temp_path.display() + ) + })?; + + #[cfg(windows)] + { + if fs::try_exists(destination).await.unwrap_or(false) { + match fs::remove_file(destination).await { + Ok(()) => {} + Err(err) if err.kind() == std::io::ErrorKind::NotFound => {} + Err(err) => { + let _ = fs::remove_file(&temp_path).await; + return Err(err).with_context(|| { + format!( + "failed to replace existing managed artifact '{}'", + destination.display() + ) + }); + } + } + } + } + + if let Err(err) = fs::rename(&temp_path, destination).await { + let _ = fs::remove_file(&temp_path).await; + return Err(err).with_context(|| { + format!( + "failed to finalize managed artifact '{}' -> '{}'", + temp_path.display(), + destination.display() + ) + }); + } + + Ok(()) +} + +impl From for ManagedToolInvocation { + fn from(value: ToolHookPair) -> Self { + Self { + tool_use_id: value.tool_use_id, + tool_name: value.tool_name, + tool_input: value.tool_input, + tool_response: value.tool_response, + transcript_path: value.transcript_path, + } + } +} + +impl ManagedToolInvocationEvent { + fn from_tool_hook_pair(thread_id: &str, run_id: &str, at: &str, value: ToolHookPair) -> Self { + let status = if 
value.saw_post { + "completed" + } else if value.saw_pre { + "in_progress" + } else { + "pending" + }; + + Self { + id: value.tool_use_id, + run_id: run_id.to_string(), + thread_id: thread_id.to_string(), + tool: value.tool_name.unwrap_or_else(|| "unknown".to_string()), + server: None, + status: status.to_string(), + at: at.to_string(), + payload: json!({ + "input": value.tool_input, + "response": value.tool_response, + "transcriptPath": value.transcript_path, + }), + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn ingest_managed_artifact_builds_bridge_session_and_draft() { + let artifact: ClaudeManagedArtifact = serde_json::from_value(json!({ + "cwd": "/repo", + "prompt": "Implement the managed mode bridge", + "hookEvents": [ + { + "hook": "UserPromptSubmit", + "input": { + "session_id": "sdk-session-1", + "transcript_path": "/tmp/managed-transcript.jsonl", + "cwd": "/repo", + "permission_mode": "plan", + "hook_event_name": "UserPromptSubmit", + "prompt": "Implement the managed mode bridge" + } + }, + { + "hook": "PreToolUse", + "input": { + "session_id": "sdk-session-1", + "transcript_path": "/tmp/managed-transcript.jsonl", + "cwd": "/repo", + "hook_event_name": "PreToolUse", + "tool_name": "Read", + "tool_input": {"file_path": "/repo/src/lib.rs"}, + "tool_use_id": "tool-1" + } + }, + { + "hook": "PostToolUse", + "input": { + "session_id": "sdk-session-1", + "transcript_path": "/tmp/managed-transcript.jsonl", + "cwd": "/repo", + "hook_event_name": "PostToolUse", + "tool_name": "Read", + "tool_input": {"file_path": "/repo/src/lib.rs"}, + "tool_response": {"file":{"filePath":"/repo/src/lib.rs"}}, + "tool_use_id": "tool-1" + } + }, + { + "hook": "Stop", + "input": { + "session_id": "sdk-session-1", + "transcript_path": "/tmp/managed-transcript.jsonl", + "cwd": "/repo", + "hook_event_name": "Stop" + } + } + ], + "messages": [ + { + "type": "system", + "subtype": "init", + "cwd": "/repo", + "session_id": "sdk-session-1", + "tools": ["Read", 
"StructuredOutput"], + "model": "claude-sonnet-4-5-20250929", + "permissionMode": "plan" + }, + { + "type": "assistant", + "message": { + "role": "assistant", + "content": [ + {"type": "text", "text": "I will inspect the repository and prepare the bridge."} + ] + } + } + ], + "resultMessage": { + "type": "result", + "subtype": "success", + "is_error": false, + "session_id": "sdk-session-1", + "stop_reason": "end_turn", + "duration_ms": 1200, + "duration_api_ms": 1000, + "num_turns": 1, + "result": "ok", + "total_cost_usd": 0.02, + "usage": {"input_tokens": 100, "output_tokens": 50}, + "modelUsage": {"claude-sonnet-4-5-20250929": {"inputTokens": 100, "outputTokens": 50}}, + "permission_denials": [], + "structured_output": { + "summary": "Build the managed mode bridge", + "problemStatement": "Libra needs a stable SDK-managed ingestion bridge", + "changeType": "feature", + "objectives": ["Bridge SDK events", "Persist ai_session"], + "inScope": ["src/internal/ai/providers/claude_sdk"], + "outOfScope": ["UI redesign"], + "successCriteria": ["Session is persisted", "Intent extraction is derived"], + "riskRationale": "Low risk because the bridge is additive", + "riskFactors": ["new adapter path"], + "riskLevel": "low" + }, + "fast_mode_state": null, + "uuid": "result-1" + } + })) + .expect("fixture should deserialize"); + + let ingested = ingest_managed_artifact(&artifact).expect("ingestion should succeed"); + + assert_eq!(ingested.session.id, "claude__sdk-session-1"); + assert_eq!(ingested.session.summary, "Build the managed mode bridge"); + assert_eq!( + ingested.session.metadata.get("transcript_path"), + Some(&json!("/tmp/managed-transcript.jsonl")) + ); + assert_eq!( + ingested.session.metadata.get("session_phase"), + Some(&json!("ended")) + ); + assert_eq!(ingested.session.messages.len(), 2); + assert_eq!(ingested.session.messages[0].role, "user"); + assert_eq!(ingested.session.messages[1].role, "assistant"); + assert_eq!( + ingested + .session + .metadata + 
.get("tool_events") + .and_then(Value::as_array) + .map(Vec::len), + Some(1) + ); + assert_eq!( + ingested + .session + .metadata + .get("tool_events") + .and_then(Value::as_array) + .and_then(|items| items.first()) + .and_then(|value| value.get("tool_use_id")) + .and_then(Value::as_str), + Some("tool-1") + ); + assert_eq!( + ingested + .session + .metadata + .get(NORMALIZED_EVENTS_KEY) + .and_then(Value::as_array) + .map(Vec::len), + Some(5) + ); + + let extraction = ingested + .intent_extraction + .expect("intent extraction should be present"); + assert_eq!(extraction.intent.summary, "Build the managed mode bridge"); + assert_eq!(extraction.intent.change_type, ChangeType::Feature); + assert_eq!(extraction.acceptance.success_criteria.len(), 2); + assert_eq!(extraction.risk.level, Some(RiskLevel::Low)); + } + + #[test] + fn extract_intent_extraction_from_result_returns_none_when_output_missing() { + let result = ClaudeManagedResultMessage { + r#type: Some("result".to_string()), + subtype: Some("success".to_string()), + is_error: Some(false), + session_id: Some("sdk-session-2".to_string()), + stop_reason: Some("end_turn".to_string()), + duration_ms: None, + duration_api_ms: None, + num_turns: None, + result: None, + total_cost_usd: None, + usage: None, + model_usage: None, + permission_denials: None, + structured_output: None, + fast_mode_state: None, + uuid: None, + }; + + let extraction = extract_intent_extraction_from_result(Some(&result)) + .expect("extraction should succeed"); + assert!(extraction.is_none()); + } + + #[test] + fn structured_output_accepts_optional_future_fields() { + let result = ClaudeManagedResultMessage { + r#type: Some("result".to_string()), + subtype: Some("success".to_string()), + is_error: Some(false), + session_id: Some("sdk-session-3".to_string()), + stop_reason: Some("end_turn".to_string()), + duration_ms: Some(10), + duration_api_ms: Some(8), + num_turns: Some(1), + result: Some("ok".to_string()), + total_cost_usd: Some(0.001), + 
usage: Some(json!({"input_tokens": 1, "output_tokens": 1})), + model_usage: None, + permission_denials: None, + structured_output: Some(json!({ + "summary": "Harden adapter", + "problemStatement": "Need a stronger managed ingestion contract", + "changeType": "security", + "objectives": ["Validate fields"], + "successCriteria": ["Contract stays strict"], + "riskRationale": "Moderate because ingestion bugs can hide data", + "fastChecks": [ + { + "id": "unit", + "kind": "command", + "command": "cargo test managed", + "required": true, + "artifactsProduced": ["test-log"] + } + ] + })), + fast_mode_state: None, + uuid: None, + }; + + let extraction = extract_intent_extraction_from_result(Some(&result)) + .expect("extraction should succeed") + .expect("extraction should exist"); + + assert_eq!(extraction.intent.change_type, ChangeType::Security); + assert_eq!(extraction.acceptance.fast_checks.len(), 1); + assert_eq!( + extraction.acceptance.fast_checks[0].kind, + crate::internal::ai::intentspec::types::CheckKind::Command + ); + assert!(extraction.intent.in_scope.is_empty()); + assert!(extraction.intent.out_of_scope.is_empty()); + } + + #[test] + fn build_managed_audit_bundle_includes_ai_session_and_provenance() { + let artifact: ClaudeManagedArtifact = serde_json::from_value(json!({ + "cwd": "/repo", + "prompt": "Implement the managed mode bridge", + "hookEvents": [ + { + "hook": "UserPromptSubmit", + "input": { + "session_id": "sdk-session-1", + "transcript_path": "/tmp/managed-transcript.jsonl", + "cwd": "/repo", + "permission_mode": "plan", + "hook_event_name": "UserPromptSubmit", + "prompt": "Implement the managed mode bridge" + } + }, + { + "hook": "PreToolUse", + "input": { + "session_id": "sdk-session-1", + "transcript_path": "/tmp/managed-transcript.jsonl", + "cwd": "/repo", + "hook_event_name": "PreToolUse", + "tool_name": "Read", + "tool_input": {"file_path": "/repo/src/lib.rs"}, + "tool_use_id": "tool-1" + } + }, + { + "hook": "PostToolUse", + "input": { + 
"session_id": "sdk-session-1", + "transcript_path": "/tmp/managed-transcript.jsonl", + "cwd": "/repo", + "hook_event_name": "PostToolUse", + "tool_name": "Read", + "tool_input": {"file_path": "/repo/src/lib.rs"}, + "tool_response": {"file":{"filePath":"/repo/src/lib.rs"}}, + "tool_use_id": "tool-1" + } + }, + { + "hook": "Stop", + "input": { + "session_id": "sdk-session-1", + "transcript_path": "/tmp/managed-transcript.jsonl", + "cwd": "/repo", + "hook_event_name": "Stop" + } + } + ], + "messages": [ + { + "type": "system", + "subtype": "init", + "cwd": "/repo", + "session_id": "sdk-session-1", + "tools": ["Read", "StructuredOutput"], + "model": "claude-sonnet-4-5-20250929", + "permissionMode": "plan" + }, + { + "type": "assistant", + "message": { + "role": "assistant", + "content": [ + {"type": "text", "text": "I will inspect the repository and prepare the bridge."} + ] + } + } + ], + "resultMessage": { + "type": "result", + "subtype": "success", + "is_error": false, + "session_id": "sdk-session-1", + "stop_reason": "end_turn", + "duration_ms": 1200, + "duration_api_ms": 1000, + "num_turns": 1, + "result": "ok", + "total_cost_usd": 0.02, + "usage": {"input_tokens": 100, "output_tokens": 50}, + "modelUsage": {"claude-sonnet-4-5-20250929": {"inputTokens": 100, "outputTokens": 50}}, + "permission_denials": [], + "structured_output": { + "summary": "Build the managed mode bridge", + "problemStatement": "Libra needs a stable SDK-managed ingestion bridge", + "changeType": "feature", + "objectives": ["Bridge SDK events", "Persist ai_session"], + "inScope": ["src/internal/ai/providers/claude_sdk"], + "outOfScope": ["UI redesign"], + "successCriteria": ["Session is persisted", "Intent extraction is derived"], + "riskRationale": "Low risk because the bridge is additive", + "riskFactors": ["new adapter path"], + "riskLevel": "low" + }, + "fast_mode_state": null, + "uuid": "result-1" + } + })) + .expect("fixture should deserialize"); + + let bundle = 
build_managed_audit_bundle(&artifact).expect("bundle should build"); + + assert_eq!(bundle.schema, MANAGED_AUDIT_BUNDLE_SCHEMA); + assert_eq!(bundle.ai_session_id, "claude__sdk-session-1"); + assert_eq!(bundle.provider_session_id, "sdk-session-1"); + assert_eq!(bundle.bridge.intent_extraction.status, "accepted"); + assert_eq!(bundle.bridge.touch_hints, vec!["src/lib.rs".to_string()]); + assert_eq!(bundle.bridge.tool_invocations.len(), 1); + assert_eq!(bundle.bridge.tool_invocations[0].tool_use_id, "tool-1"); + assert_eq!( + bundle.bridge.object_candidates.thread_id, + "claude__sdk-session-1" + ); + assert_eq!( + bundle.bridge.object_candidates.run_snapshot.id, + "claude__sdk-session-1::run" + ); + assert_eq!( + bundle.bridge.object_candidates.run_snapshot.started_at, + bundle.bridge.session_state.created_at.to_rfc3339() + ); + assert_eq!( + bundle.bridge.object_candidates.run_event.status, + "completed" + ); + assert_eq!( + bundle.bridge.object_candidates.run_event.at, + bundle.bridge.session_state.updated_at.to_rfc3339() + ); + assert_eq!( + bundle.bridge.object_candidates.provenance_snapshot.provider, + "claude" + ); + assert_eq!( + bundle + .bridge + .object_candidates + .provenance_snapshot + .created_at, + bundle.bridge.session_state.created_at.to_rfc3339() + ); + assert_eq!( + bundle + .bridge + .object_candidates + .provenance_snapshot + .model + .as_deref(), + Some("claude-sonnet-4-5-20250929") + ); + assert_eq!( + bundle + .bridge + .object_candidates + .run_usage_event + .as_ref() + .map(|event| event.run_id.as_str()), + Some("claude__sdk-session-1::run") + ); + assert_eq!( + bundle.bridge.object_candidates.tool_invocation_events.len(), + 1 + ); + assert_eq!( + bundle.bridge.object_candidates.tool_invocation_events[0].status, + "completed" + ); + assert_eq!( + bundle.bridge.object_candidates.tool_invocation_events[0].at, + bundle.bridge.session_state.updated_at.to_rfc3339() + ); + assert_eq!(bundle.bridge.intent_extraction.status, "accepted"); + 
assert_eq!( + bundle + .bridge + .intent_extraction_artifact + .as_ref() + .map(|artifact| artifact.schema.as_str()), + Some("libra.intent_extraction.v1") + ); + assert_eq!(bundle.bridge.ai_session["schema"], json!(AI_SESSION_SCHEMA)); + assert!( + bundle.field_provenance.iter().any(|entry| { + entry.field_path == "intent.summary" + && entry.source_layer == "result.structured_output" + && entry.value == json!("Build the managed mode bridge") + }), + "expected intent.summary provenance from structured_output" + ); + assert!( + bundle.field_provenance.iter().any(|entry| { + entry.field_path == "intent.touchHints" + && entry.source_layer == "hooks+tool_evidence" + }), + "expected derived touchHints provenance" + ); + } + + #[test] + fn build_managed_audit_bundle_surfaces_invalid_structured_output_without_failing() { + let artifact: ClaudeManagedArtifact = serde_json::from_str(include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/data/ai/claude_managed_probe_like.json" + ))) + .expect("fixture should deserialize"); + + let bundle = build_managed_audit_bundle(&artifact).expect("bundle should build"); + + assert_eq!(bundle.provider_session_id, "fixture-probe-session"); + assert_eq!(bundle.ai_session_id, "claude__fixture-probe-session"); + assert_eq!(bundle.bridge.intent_extraction.status, "invalid"); + assert!(bundle.bridge.intent_extraction_artifact.is_none()); + assert_eq!( + bundle.bridge.object_candidates.run_snapshot.id, + "claude__fixture-probe-session::run" + ); + assert_eq!( + bundle.bridge.object_candidates.run_event.status, + "completed" + ); + assert_eq!( + bundle.bridge.object_candidates.tool_invocation_events.len(), + 3 + ); + assert!( + bundle + .bridge + .intent_extraction + .error + .as_deref() + .unwrap_or_default() + .contains("intent extraction bridge schema") + ); + assert_eq!(bundle.bridge.tool_invocations.len(), 3); + assert!( + bundle + .bridge + .touch_hints + .contains(&"package.json".to_string()), + "expected package.json hint from 
probe-like Read tool evidence" + ); + assert_eq!(bundle.bridge.ai_session["schema"], json!(AI_SESSION_SCHEMA)); + assert!( + bundle.field_provenance.iter().any(|entry| { + entry.field_path == "usage.durationMs" && entry.value == json!(3479) + }), + "expected duration provenance from result payload" + ); + } + + #[test] + fn build_managed_audit_bundle_rejects_invalid_session_id() { + let artifact: ClaudeManagedArtifact = serde_json::from_value(json!({ + "cwd": "/repo", + "hookEvents": [], + "messages": [ + { + "type": "system", + "subtype": "init", + "cwd": "/repo", + "session_id": "../escape", + "tools": ["Read"], + "model": "claude-sonnet-4-5-20250929", + "permissionMode": "default" + } + ], + "resultMessage": { + "type": "result", + "subtype": "success", + "session_id": "../escape", + "stop_reason": "end_turn" + } + })) + .expect("fixture should deserialize"); + + let err = build_managed_audit_bundle(&artifact).expect_err("bundle should reject"); + assert!( + err.chain() + .any(|cause| cause.to_string().contains("invalid managed session_id")), + "unexpected error: {err:#}" + ); + } + + #[test] + fn build_managed_audit_bundle_maps_official_runtime_facts_to_semantic_candidates() { + let artifact: ClaudeManagedArtifact = serde_json::from_value(json!({ + "cwd": "/repo", + "hookEvents": [ + { + "hook": "PermissionRequest", + "input": { + "session_id": "sdk-session-runtime", + "cwd": "/repo", + "hook_event_name": "PermissionRequest", + "tool_name": "Bash", + "tool_input": {"command": "cargo test"} + } + }, + { + "hook": "CanUseTool", + "input": { + "tool_name": "Edit", + "tool_input": {"file_path": "src/lib.rs"}, + "tool_use_id": "tool-edit-1", + "agent_id": "general-purpose", + "blocked_path": null, + "decision_reason": "auto-approved by Libra managed helper", + "suggestions": [] + } + }, + { + "hook": "Elicitation", + "input": { + "session_id": "sdk-session-runtime", + "cwd": "/repo", + "hook_event_name": "Elicitation", + "mcp_server_name": "review-gate", + 
"message": "Approve release checks?", + "elicitation_id": "elic-1" + } + }, + { + "hook": "InstructionsLoaded", + "input": { + "session_id": "sdk-session-runtime", + "cwd": "/repo", + "hook_event_name": "InstructionsLoaded", + "file_path": ".claude/CLAUDE.md", + "memory_type": "Project", + "load_reason": "session_start" + } + }, + { + "hook": "TaskCompleted", + "input": { + "session_id": "sdk-session-runtime", + "cwd": "/repo", + "hook_event_name": "TaskCompleted", + "task_id": "task-1", + "task_subject": "inspect bridge" + } + } + ], + "messages": [ + { + "type": "system", + "subtype": "init", + "cwd": "/repo", + "session_id": "sdk-session-runtime", + "tools": ["Read", "StructuredOutput"], + "model": "claude-sonnet-4-5-20250929", + "permissionMode": "default", + "apiKeySource": "oauth", + "claude_code_version": "2.1.76", + "output_style": "default", + "agents": ["general-purpose", "Plan"], + "skills": ["context7"], + "slash_commands": ["review"], + "mcp_servers": [{"name":"review-gate","status":"connected"}], + "plugins": [{"name":"team-plugin","path":"/plugins/team-plugin"}], + "fast_mode_state": "off" + }, + { + "type": "system", + "subtype": "task_started", + "session_id": "sdk-session-runtime", + "task_id": "task-1", + "description": "Inspect bridge" + }, + { + "type": "system", + "subtype": "status", + "session_id": "sdk-session-runtime", + "status": "compacting" + }, + { + "type": "tool_progress", + "session_id": "sdk-session-runtime", + "uuid": "tool-progress-1", + "tool_use_id": "tool-edit-1", + "tool_name": "Edit", + "parent_tool_use_id": null, + "elapsed_time_seconds": 0.8 + }, + { + "type": "stream_event", + "session_id": "sdk-session-runtime", + "uuid": "stream-runtime-1", + "parent_tool_use_id": null, + "event": { + "type": "content_block_delta", + "delta": {"type": "text_delta", "text": "Inspecting bridge"} + } + }, + { + "type": "rate_limit_event", + "session_id": "sdk-session-runtime", + "rate_limit_info": {"status":"allowed_warning"} + } + ], + 
"resultMessage": { + "type": "result", + "subtype": "success", + "is_error": false, + "session_id": "sdk-session-runtime", + "stop_reason": "end_turn", + "permission_denials": [ + { + "tool_name": "Bash", + "tool_use_id": "tool-bash-1", + "tool_input": {"command": "cargo test"} + } + ] + } + })) + .expect("fixture should deserialize"); + + let bundle = build_managed_audit_bundle(&artifact).expect("bundle should build"); + + assert_eq!( + bundle + .bridge + .object_candidates + .provider_init_snapshot + .api_key_source + .as_deref(), + Some("oauth") + ); + assert_eq!( + bundle + .bridge + .object_candidates + .provider_init_snapshot + .claude_code_version + .as_deref(), + Some("2.1.76") + ); + assert_eq!( + bundle.bridge.object_candidates.task_runtime_events.len(), + 2, + "task_started stream + TaskCompleted hook should both map to Task runtime candidates" + ); + assert!( + bundle + .bridge + .object_candidates + .tool_runtime_events + .iter() + .all(|event| event.semantic_object == "Tool") + ); + assert_eq!( + bundle.bridge.object_candidates.tool_runtime_events.len(), + 1, + "tool_progress should map to Tool runtime candidates" + ); + assert!( + bundle + .bridge + .object_candidates + .assistant_runtime_events + .iter() + .all(|event| event.semantic_object == "Assistant") + ); + assert_eq!( + bundle + .bridge + .object_candidates + .assistant_runtime_events + .len(), + 1, + "stream_event should map to Assistant runtime candidates" + ); + assert!( + bundle + .bridge + .object_candidates + .task_runtime_events + .iter() + .all(|event| event.semantic_object == "Task") + ); + assert_eq!( + bundle + .bridge + .object_candidates + .decision_runtime_events + .len(), + 4, + "PermissionRequest + CanUseTool + Elicitation + permission_denials should map to Decision runtime candidates" + ); + assert!( + bundle + .bridge + .object_candidates + .decision_runtime_events + .iter() + .all(|event| event.semantic_object == "Decision") + ); + assert_eq!( + 
bundle.bridge.object_candidates.context_runtime_events.len(), + 3, + "status + rate_limit_event + InstructionsLoaded should map to Context runtime candidates" + ); + assert!( + bundle + .bridge + .object_candidates + .context_runtime_events + .iter() + .all(|event| event.semantic_object == "Context") + ); + assert_eq!( + bundle.bridge.session_state.metadata["provider_runtime"]["providerInit"]["apiKeySource"], + json!("oauth") + ); + assert!( + bundle + .field_provenance + .iter() + .any(|entry| entry.field_path == "runtime.toolEvents"), + "expected tool runtime provenance" + ); + assert!( + bundle + .field_provenance + .iter() + .any(|entry| entry.field_path == "runtime.assistantEvents"), + "expected assistant runtime provenance" + ); + assert!( + bundle + .field_provenance + .iter() + .any(|entry| entry.field_path == "runtime.taskEvents"), + "expected task runtime provenance" + ); + assert!( + bundle + .field_provenance + .iter() + .any(|entry| entry.field_path == "runtime.decisionEvents"), + "expected decision runtime provenance" + ); + assert!( + bundle + .field_provenance + .iter() + .any(|entry| entry.field_path == "runtime.contextEvents"), + "expected context runtime provenance" + ); + } + + #[test] + fn normalize_hint_path_respects_path_boundaries() { + assert_eq!( + normalize_hint_path("/repo/src/lib.rs", "/repo"), + "src/lib.rs" + ); + assert_eq!( + normalize_hint_path("/repo2/src/lib.rs", "/repo"), + "/repo2/src/lib.rs" + ); + assert_eq!( + normalize_hint_path("C:\\repo\\src\\lib.rs", "C:\\repo"), + "src/lib.rs" + ); + } +} diff --git a/src/internal/ai/providers/claude_sdk/mod.rs b/src/internal/ai/providers/claude_sdk/mod.rs new file mode 100644 index 00000000..e702636e --- /dev/null +++ b/src/internal/ai/providers/claude_sdk/mod.rs @@ -0,0 +1,3 @@ +//! Claude Agent SDK provider runtime support. 
+ +pub mod managed; diff --git a/src/internal/ai/providers/mod.rs b/src/internal/ai/providers/mod.rs index 27577004..01d15a1c 100644 --- a/src/internal/ai/providers/mod.rs +++ b/src/internal/ai/providers/mod.rs @@ -32,6 +32,7 @@ //! | `codex` | OpenAI Codex | Bearer token | `http://localhost:8080` | pub mod anthropic; +pub mod claude_sdk; pub mod codex; pub mod deepseek; pub mod gemini; diff --git a/tests/command/ai_hook_test.rs b/tests/command/ai_hook_test.rs deleted file mode 100644 index a4ed3159..00000000 --- a/tests/command/ai_hook_test.rs +++ /dev/null @@ -1,917 +0,0 @@ -//! Integration tests for the unified `hooks` ingestion surface. - -use std::{ - fs, - path::{Path, PathBuf}, - process::Command, -}; - -use libra::{ - command::init::{InitArgs, init}, - internal::ai::history::HistoryManager, - utils::test, -}; -use serde_json::json; -use serial_test::serial; -use tempfile::tempdir; - -fn run_hook_in( - workdir: &Path, - provider: &str, - subcmd: &str, - payload: &str, -) -> std::process::Output { - let mut cmd = Command::new(env!("CARGO_BIN_EXE_libra")); - cmd.current_dir(workdir) - .arg("hooks") - .arg(provider) - .arg(subcmd) - .stdin(std::process::Stdio::piped()) - .stdout(std::process::Stdio::piped()) - .stderr(std::process::Stdio::piped()); - - let mut child = cmd.spawn().expect("spawn failed"); - { - use std::io::Write; - let stdin = child.stdin.as_mut().expect("stdin missing"); - stdin - .write_all(payload.as_bytes()) - .expect("write stdin failed"); - } - child.wait_with_output().expect("wait failed") -} - -fn run_hook( - temp: &tempfile::TempDir, - provider: &str, - subcmd: &str, - payload: &str, -) -> std::process::Output { - run_hook_in(temp.path(), provider, subcmd, payload) -} - -fn ai_session_id(provider: &str, provider_session_id: &str) -> String { - format!("{provider}__{provider_session_id}") -} - -fn session_file(repo_root: &Path, provider: &str, id: &str) -> PathBuf { - repo_root - .join(".libra") - .join("sessions") - 
.join(format!("{}.json", ai_session_id(provider, id))) -} - -async fn build_history_manager(temp: &tempfile::TempDir) -> HistoryManager { - let _guard = test::ChangeDirGuard::new(temp.path()); - let db = libra::internal::db::get_db_conn_instance().await; - HistoryManager::new_with_ref( - std::sync::Arc::new(libra::utils::storage::local::LocalStorage::new( - temp.path().join(".libra").join("objects"), - )), - temp.path().join(".libra"), - std::sync::Arc::new(db.clone()), - "libra/intent", - ) -} - -async fn assert_basic_flow_and_persisted(provider: &str) { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let session_id = "session-1"; - let cwd = temp.path().to_string_lossy().to_string(); - - let events = vec![ - ( - "session-start", - json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd - }) - .to_string(), - ), - ( - "prompt", - json!({ - "hook_event_name": "UserPromptSubmit", - "session_id": session_id, - "cwd": cwd, - "prompt": "hello" - }) - .to_string(), - ), - ( - "tool-use", - json!({ - "hook_event_name": "PostToolUse", - "session_id": session_id, - "cwd": cwd, - "tool_name": "Read", - "tool_input": {"path": "a.txt"}, - "tool_response": {"ok": true} - }) - .to_string(), - ), - ( - "stop", - json!({ - "hook_event_name": "Stop", - "session_id": session_id, - "cwd": cwd, - "last_assistant_message": "done" - }) - .to_string(), - ), - ( - "session-end", - json!({ - "hook_event_name": "SessionEnd", - "session_id": session_id, - "cwd": cwd - }) - .to_string(), - ), - ]; - - for (subcmd, payload) in events { - let out = run_hook(&temp, provider, subcmd, &payload); - assert!( - out.status.success(), - "{subcmd} failed, stderr: {}", - String::from_utf8_lossy(&out.stderr) - ); - } - - let session_path = session_file(temp.path(), provider, session_id); - assert!( - !session_path.exists(), - "session file should be removed after successful persistence" - ); - - let history_manager = 
build_history_manager(&temp).await; - let head = history_manager.resolve_history_head().await.unwrap(); - assert!(head.is_some(), "expected libra/intent ref to exist"); - - let ai_object_id = ai_session_id(provider, session_id); - let object_hash = history_manager - .get_object_hash("ai_session", &ai_object_id) - .await - .unwrap(); - assert!(object_hash.is_some(), "ai_session object should exist"); - - let ai_type = Command::new(env!("CARGO_BIN_EXE_libra")) - .current_dir(temp.path()) - .args(["cat-file", "--ai-type", &ai_object_id]) - .output() - .expect("failed to execute cat-file --ai-type"); - assert!(ai_type.status.success()); - assert_eq!( - String::from_utf8_lossy(&ai_type.stdout).trim(), - "ai_session" - ); - - let ai_pretty = Command::new(env!("CARGO_BIN_EXE_libra")) - .current_dir(temp.path()) - .args(["cat-file", "--ai", &ai_object_id]) - .output() - .expect("failed to execute cat-file --ai"); - assert!(ai_pretty.status.success()); - let ai_pretty_stdout = String::from_utf8_lossy(&ai_pretty.stdout); - assert!(ai_pretty_stdout.contains("type: ai_session")); - assert!(ai_pretty_stdout.contains("schema: libra.ai_session.v2")); - assert!(ai_pretty_stdout.contains(&format!("provider: {provider}"))); - assert!(ai_pretty_stdout.contains("phase: ended")); - assert!(ai_pretty_stdout.contains("transcript_raw_event_count")); - assert!(ai_pretty_stdout.contains(&format!("\"provider\": \"{provider}\""))); - - let ai_list = Command::new(env!("CARGO_BIN_EXE_libra")) - .current_dir(temp.path()) - .args(["cat-file", "--ai-list", "ai_session"]) - .output() - .expect("failed to execute cat-file --ai-list"); - assert!(ai_list.status.success()); - assert!(String::from_utf8_lossy(&ai_list.stdout).contains(&ai_object_id)); - - let ai_list_types = Command::new(env!("CARGO_BIN_EXE_libra")) - .current_dir(temp.path()) - .args(["cat-file", "--ai-list-types"]) - .output() - .expect("failed to execute cat-file --ai-list-types"); - assert!(ai_list_types.status.success()); - 
assert!(String::from_utf8_lossy(&ai_list_types.stdout).contains("ai_session")); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_normal_flow_and_persisted() { - assert_basic_flow_and_persisted("gemini").await; - assert_basic_flow_and_persisted("claude").await; -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_use_repo_object_format_for_persistence() { - let temp = tempdir().unwrap(); - test::setup_clean_testing_env_in(temp.path()); - let _guard = test::ChangeDirGuard::new(temp.path()); - init(InitArgs { - bare: false, - initial_branch: Some("main".to_string()), - template: None, - repo_directory: temp.path().to_string_lossy().to_string(), - quiet: true, - shared: None, - separate_libra_dir: None, - object_format: Some("sha256".to_string()), - ref_format: None, - from_git_repository: None, - vault: false, - }) - .await - .unwrap(); - - let provider = "gemini"; - let session_id = "session-sha256"; - let cwd = temp.path().to_string_lossy().to_string(); - let start = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - let end = json!({ - "hook_event_name": "SessionEnd", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - - assert!( - run_hook(&temp, provider, "session-start", &start) - .status - .success() - ); - assert!( - run_hook(&temp, provider, "session-end", &end) - .status - .success() - ); - - let history_manager = build_history_manager(&temp).await; - let object_hash = history_manager - .get_object_hash("ai_session", &ai_session_id(provider, session_id)) - .await - .unwrap() - .expect("ai_session object should exist"); - assert_eq!( - object_hash.to_string().len(), - 64, - "sha256 repo should persist 64-char object hash" - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_out_of_order_recovers() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = "session-recover"; - let cwd = 
temp.path().to_string_lossy().to_string(); - let tool = json!({ - "hook_event_name": "PostToolUse", - "session_id": session_id, - "cwd": cwd, - "tool_name": "Read" - }) - .to_string(); - - let out = run_hook(&temp, provider, "tool-use", &tool); - assert!(out.status.success()); - - let session_json = fs::read_to_string(session_file(temp.path(), provider, session_id)).unwrap(); - let session: serde_json::Value = serde_json::from_str(&session_json).unwrap(); - assert_eq!( - session["metadata"]["recovered_from_out_of_order"], - serde_json::json!(true) - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_duplicate_event_dedup() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = "session-dedup"; - let cwd = temp.path().to_string_lossy().to_string(); - let start = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - let prompt = json!({ - "hook_event_name": "UserPromptSubmit", - "session_id": session_id, - "cwd": cwd, - "event_id": "evt-1", - "prompt": "hello" - }) - .to_string(); - - assert!( - run_hook(&temp, provider, "session-start", &start) - .status - .success() - ); - assert!( - run_hook(&temp, provider, "prompt", &prompt) - .status - .success() - ); - assert!( - run_hook(&temp, provider, "prompt", &prompt) - .status - .success() - ); - - let session_json = fs::read_to_string(session_file(temp.path(), provider, session_id)).unwrap(); - let session: serde_json::Value = serde_json::from_str(&session_json).unwrap(); - let messages = session["messages"].as_array().unwrap(); - let user_count = messages - .iter() - .filter(|m| m["role"] == "user" && m["content"] == "hello") - .count(); - assert_eq!(user_count, 1, "duplicate prompt should be ignored"); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_repeated_payload_without_identity_is_not_deduped() { - let temp = tempdir().unwrap(); - 
test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = "session-no-identity"; - let cwd = temp.path().to_string_lossy().to_string(); - let start = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - let prompt = json!({ - "hook_event_name": "UserPromptSubmit", - "session_id": session_id, - "cwd": cwd, - "prompt": "hello" - }) - .to_string(); - - assert!( - run_hook(&temp, provider, "session-start", &start) - .status - .success() - ); - assert!( - run_hook(&temp, provider, "prompt", &prompt) - .status - .success() - ); - assert!( - run_hook(&temp, provider, "prompt", &prompt) - .status - .success() - ); - - let session_json = fs::read_to_string(session_file(temp.path(), provider, session_id)).unwrap(); - let session: serde_json::Value = serde_json::from_str(&session_json).unwrap(); - let messages = session["messages"].as_array().unwrap(); - let user_count = messages - .iter() - .filter(|m| m["role"] == "user" && m["content"] == "hello") - .count(); - assert_eq!( - user_count, 2, - "without identity, duplicate payload must be kept" - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_lifecycle_without_identity_is_deduped() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = "session-lifecycle-dedup"; - let cwd = temp.path().to_string_lossy().to_string(); - let start = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - - assert!( - run_hook(&temp, provider, "session-start", &start) - .status - .success() - ); - assert!( - run_hook(&temp, provider, "session-start", &start) - .status - .success() - ); - - let session_json = fs::read_to_string(session_file(temp.path(), provider, session_id)).unwrap(); - let session: serde_json::Value = serde_json::from_str(&session_json).unwrap(); - let raw_events = 
session["metadata"]["raw_hook_events"].as_array().unwrap(); - assert_eq!( - raw_events.len(), - 1, - "duplicate lifecycle event should be deduped even without identity" - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_repeat_session_end_is_idempotent() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = "session-end-repeat"; - let cwd = temp.path().to_string_lossy().to_string(); - - let start = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - let end = json!({ - "hook_event_name": "SessionEnd", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - - assert!( - run_hook(&temp, provider, "session-start", &start) - .status - .success() - ); - assert!( - run_hook(&temp, provider, "session-end", &end) - .status - .success() - ); - let history_manager = build_history_manager(&temp).await; - let head_after_first = history_manager.resolve_history_head().await.unwrap(); - - assert!( - run_hook(&temp, provider, "session-end", &end) - .status - .success() - ); - let head_after_second = history_manager.resolve_history_head().await.unwrap(); - - assert!( - !session_file(temp.path(), provider, session_id).exists(), - "session file should be removed after successful persistence" - ); - assert_eq!( - head_after_first, head_after_second, - "repeated SessionEnd should not create extra history commits" - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_stop_accepts_session_stop_event_name() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = "session-stop-alias"; - let cwd = temp.path().to_string_lossy().to_string(); - let start = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - let stop_alias = json!({ - "hook_event_name": "SessionStop", - "session_id": session_id, - 
"cwd": cwd, - "last_assistant_message": "done" - }) - .to_string(); - - assert!( - run_hook(&temp, provider, "session-start", &start) - .status - .success() - ); - assert!( - run_hook(&temp, provider, "stop", &stop_alias) - .status - .success() - ); - - let session_json = fs::read_to_string(session_file(temp.path(), provider, session_id)).unwrap(); - let session: serde_json::Value = serde_json::from_str(&session_json).unwrap(); - assert_eq!( - session["metadata"]["session_phase"], - serde_json::json!("stopped") - ); - assert_eq!( - session["metadata"]["last_assistant_message"], - serde_json::json!("done") - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_use_repo_root_session_storage_from_subdir() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let nested = temp.path().join("nested").join("deeper"); - fs::create_dir_all(&nested).unwrap(); - - let session_id = "session-subdir"; - let cwd = nested.to_string_lossy().to_string(); - let start = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - let end = json!({ - "hook_event_name": "SessionEnd", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - - assert!( - run_hook_in(&nested, provider, "session-start", &start) - .status - .success() - ); - - let root_session_path = session_file(temp.path(), provider, session_id); - assert!( - root_session_path.exists(), - "session should be stored at repo root" - ); - assert!( - !nested.join(".libra").exists(), - "subdir must not create nested .libra directory" - ); - - assert!( - run_hook_in(&nested, provider, "session-end", &end) - .status - .success() - ); - assert!( - !root_session_path.exists(), - "session file should be cleaned after successful persistence" - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_set_hook_cwd_mismatch_metadata() { - let temp = tempdir().unwrap(); - 
test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = "session-cwd-mismatch"; - let payload = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": "/mismatch/path" - }) - .to_string(); - - assert!( - run_hook(&temp, provider, "session-start", &payload) - .status - .success() - ); - let session_json = fs::read_to_string(session_file(temp.path(), provider, session_id)).unwrap(); - let session: serde_json::Value = serde_json::from_str(&session_json).unwrap(); - - assert_eq!(session["metadata"]["hook_cwd_mismatch"], json!(true)); - assert_eq!( - session["metadata"]["hook_reported_cwd"], - json!("/mismatch/path") - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_recover_from_corrupt_session_file() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = "session-corrupt"; - let ai_id = ai_session_id(provider, session_id); - let sessions_dir = temp.path().join(".libra").join("sessions"); - fs::create_dir_all(&sessions_dir).unwrap(); - let corrupt_path = sessions_dir.join(format!("{ai_id}.json")); - fs::write(&corrupt_path, "{\n \"id\": \"broken\"\n}\n}").unwrap(); - - let payload = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": temp.path().to_string_lossy().to_string() - }) - .to_string(); - - let out = run_hook(&temp, provider, "session-start", &payload); - assert!( - out.status.success(), - "hook should recover from malformed session, stderr: {}", - String::from_utf8_lossy(&out.stderr) - ); - - let repaired_json = - fs::read_to_string(session_file(temp.path(), provider, session_id)).unwrap(); - let repaired: serde_json::Value = serde_json::from_str(&repaired_json).unwrap(); - assert_eq!( - repaired["metadata"]["recovered_from_corrupt_session"], - json!(true) - ); - assert!( - repaired["metadata"]["recovery_error"] - .as_str() - .is_some_and(|value| !value.is_empty()) - ); - - 
let backup_count = fs::read_dir(&sessions_dir) - .unwrap() - .filter_map(Result::ok) - .map(|entry| entry.file_name().to_string_lossy().to_string()) - .filter(|name| name.starts_with(&format!("{ai_id}.corrupt.")) && name.ends_with(".json")) - .count(); - assert_eq!( - backup_count, 1, - "expected one archived backup for malformed session file" - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_concurrent_events_do_not_corrupt_session_file() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = "session-concurrent-hooks"; - let cwd = temp.path().to_string_lossy().to_string(); - let start = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd, - "source": "startup" - }) - .to_string(); - let out = run_hook(&temp, provider, "session-start", &start); - assert!( - out.status.success(), - "session-start failed, stderr: {}", - String::from_utf8_lossy(&out.stderr) - ); - - let jobs_per_type = 8usize; - let total_jobs = jobs_per_type * 2; - let barrier = std::sync::Arc::new(std::sync::Barrier::new(total_jobs)); - let mut handles = Vec::with_capacity(total_jobs); - - for index in 0..jobs_per_type { - let workdir = temp.path().to_path_buf(); - let barrier_clone = std::sync::Arc::clone(&barrier); - let payload = json!({ - "hook_event_name": "BeforeModel", - "session_id": session_id, - "cwd": cwd, - "sequence": index, - "llm_request": { "model": format!("gemini-2.5-pro-{index}") } - }) - .to_string(); - handles.push(std::thread::spawn(move || { - barrier_clone.wait(); - run_hook_in(&workdir, provider, "model-update", &payload) - })); - } - - for index in 0..jobs_per_type { - let workdir = temp.path().to_path_buf(); - let barrier_clone = std::sync::Arc::clone(&barrier); - let payload = json!({ - "hook_event_name": "PreCompress", - "session_id": session_id, - "cwd": cwd, - "sequence": jobs_per_type + index - }) - .to_string(); - 
handles.push(std::thread::spawn(move || { - barrier_clone.wait(); - run_hook_in(&workdir, provider, "compaction", &payload) - })); - } - - for handle in handles { - let output = handle.join().expect("concurrent hook thread panicked"); - assert!( - output.status.success(), - "concurrent hook failed, stderr: {}", - String::from_utf8_lossy(&output.stderr) - ); - } - - let session_path = session_file(temp.path(), provider, session_id); - let session_json = fs::read_to_string(&session_path).unwrap(); - let session: serde_json::Value = serde_json::from_str(&session_json).unwrap(); - assert!( - session["metadata"] - .get("recovered_from_corrupt_session") - .is_none(), - "session file should not be corrupted under concurrent hook writes" - ); - assert_eq!( - session["metadata"]["compaction_count"], - json!(jobs_per_type as u64) - ); - - let sessions_dir = temp.path().join(".libra").join("sessions"); - let backup_count = fs::read_dir(&sessions_dir) - .unwrap() - .filter_map(Result::ok) - .map(|entry| entry.file_name().to_string_lossy().to_string()) - .filter(|name| { - name.starts_with(&format!("{}.corrupt.", ai_session_id(provider, session_id))) - && name.ends_with(".json") - }) - .count(); - assert_eq!(backup_count, 0, "no corrupt backup should be generated"); - - let end = json!({ - "hook_event_name": "SessionEnd", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - let out = run_hook(&temp, provider, "session-end", &end); - assert!( - out.status.success(), - "session-end failed, stderr: {}", - String::from_utf8_lossy(&out.stderr) - ); - assert!( - !session_path.exists(), - "session file should be cleaned after persistence" - ); - - let history_manager = build_history_manager(&temp).await; - let object_hash = history_manager - .get_object_hash("ai_session", &ai_session_id(provider, session_id)) - .await - .unwrap(); - assert!( - object_hash.is_some(), - "ai_session object should exist after concurrent hook ingestion" - ); -} - -#[test] -fn 
test_ai_hooks_reject_empty_stdin() { - let temp = tempdir().unwrap(); - let out = run_hook(&temp, "gemini", "session-start", ""); - assert!( - !out.status.success(), - "invalid input should return non-zero exit status" - ); - assert!( - String::from_utf8_lossy(&out.stderr).contains("hook input is empty"), - "stderr: {}", - String::from_utf8_lossy(&out.stderr) - ); -} - -#[test] -fn test_ai_hooks_reject_invalid_json() { - let temp = tempdir().unwrap(); - let out = run_hook(&temp, "gemini", "session-start", "{invalid"); - assert!( - !out.status.success(), - "invalid input should return non-zero exit status" - ); - assert!( - String::from_utf8_lossy(&out.stderr).contains("invalid hook JSON payload"), - "stderr: {}", - String::from_utf8_lossy(&out.stderr) - ); -} - -#[test] -fn test_ai_hooks_reject_missing_required_field() { - let temp = tempdir().unwrap(); - let out = run_hook( - &temp, - "gemini", - "session-start", - &json!({ - "hook_event_name": "SessionStart", - "cwd": "/tmp" - }) - .to_string(), - ); - assert!( - !out.status.success(), - "invalid input should return non-zero exit status" - ); - let stderr = String::from_utf8_lossy(&out.stderr); - assert!( - stderr.contains("missing required field: session_id") - || stderr.contains("missing field `session_id`"), - "stderr: {stderr}" - ); -} - -#[test] -fn test_ai_hooks_reject_oversized_stdin() { - let temp = tempdir().unwrap(); - let huge = "x".repeat(1_048_577); - let out = run_hook(&temp, "gemini", "session-start", &huge); - assert!( - !out.status.success(), - "invalid input should return non-zero exit status" - ); - assert!( - String::from_utf8_lossy(&out.stderr).contains("hook input exceeds 1048576 bytes"), - "stderr: {}", - String::from_utf8_lossy(&out.stderr) - ); -} - -#[tokio::test] -#[serial] -async fn test_ai_hooks_session_end_persist_failure_returns_error() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let provider = "gemini"; - let session_id = 
"session-persist-fail"; - let cwd = temp.path().to_string_lossy().to_string(); - let start = json!({ - "hook_event_name": "SessionStart", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - let end = json!({ - "hook_event_name": "SessionEnd", - "session_id": session_id, - "cwd": cwd - }) - .to_string(); - - assert!( - run_hook(&temp, provider, "session-start", &start) - .status - .success() - ); - - let objects_path = temp.path().join(".libra").join("objects"); - fs::remove_dir_all(&objects_path).unwrap(); - fs::write(&objects_path, "not-a-directory").unwrap(); - - let out = run_hook(&temp, provider, "session-end", &end); - assert!( - !out.status.success(), - "persistence failure should return non-zero exit status" - ); - assert!( - String::from_utf8_lossy(&out.stderr).contains("failed to persist session history"), - "stderr: {}", - String::from_utf8_lossy(&out.stderr) - ); - - let session_json = fs::read_to_string(session_file(temp.path(), provider, session_id)).unwrap(); - let session: serde_json::Value = serde_json::from_str(&session_json).unwrap(); - assert_eq!(session["metadata"]["persist_failed"], json!(true)); - assert_eq!(session["metadata"]["persisted"], json!(false)); -} diff --git a/tests/command/claude_sdk_test.rs b/tests/command/claude_sdk_test.rs new file mode 100644 index 00000000..e53dcd0d --- /dev/null +++ b/tests/command/claude_sdk_test.rs @@ -0,0 +1,1995 @@ +use std::{ + fs, + io::Write, + path::{Path, PathBuf}, + process::{Output, Stdio}, + sync::Arc, +}; + +use git_internal::internal::object::intent::Intent; +use libra::{ + internal::{ + ai::history::{AI_REF, HistoryManager}, + model::reference::{self, ConfigKind}, + }, + utils::{storage::local::LocalStorage, storage_ext::StorageExt, test}, +}; +use sea_orm::{ColumnTrait, EntityTrait, QueryFilter}; +use serde_json::{Value, json}; +use serial_test::serial; +use tempfile::tempdir; + +use super::{assert_cli_success, run_libra_command}; + +const PROBE_LIKE_ARTIFACT: &str = 
include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/data/ai/claude_managed_probe_like.json" +)); +const SEMANTIC_FULL_TEMPLATE: &str = include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/data/ai/claude_managed_semantic_full_template.json" +)); +const PLAN_TASK_ONLY_TEMPLATE: &str = include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/data/ai/claude_managed_plan_task_only_template.json" +)); +const PLAN_PROMPT: &str = include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/tests/data/ai/claude_sdk_plan_prompt.txt" +)); + +const DEFAULT_MANAGED_PROMPT: &str = "Bridge a managed Claude SDK session into Libra artifacts."; + +fn parse_stdout_json(output: &Output, context: &str) -> Value { + serde_json::from_slice(&output.stdout) + .unwrap_or_else(|err| panic!("{context}: failed to parse stdout JSON: {err}")) +} + +fn read_json_file(path: &Path) -> Value { + let body = fs::read_to_string(path) + .unwrap_or_else(|err| panic!("failed to read JSON file '{}': {err}", path.display())); + serde_json::from_str(&body) + .unwrap_or_else(|err| panic!("failed to parse JSON file '{}': {err}", path.display())) +} + +async fn read_history_head(repo: &Path, history: &HistoryManager) -> String { + assert_eq!(history.ref_name(), AI_REF); + let db_path = repo.join(".libra/libra.db"); + let db_conn = libra::internal::db::establish_connection( + db_path.to_str().expect("db path should be valid UTF-8"), + ) + .await + .expect("failed to connect test database"); + let row = reference::Entity::find() + .filter(reference::Column::Name.eq(AI_REF)) + .filter(reference::Column::Kind.eq(ConfigKind::Branch)) + .one(&db_conn) + .await + .expect("failed to query AI history ref") + .expect("AI history ref should exist"); + row.commit.expect("AI history ref should point to a commit") +} + +fn write_shell_helper(path: &Path, artifact_path: &Path) { + let artifact_rendered = artifact_path.to_string_lossy().replace('\'', r#"'\''"#); + let script = format!("#!/bin/sh\ncat 
'{artifact_rendered}'\n"); + fs::write(path, script) + .unwrap_or_else(|err| panic!("failed to write helper script '{}': {err}", path.display())); +} + +fn write_request_capture_shell_helper(path: &Path, artifact_path: &Path, request_path: &Path) { + let artifact_rendered = artifact_path.to_string_lossy().replace('\'', r#"'\''"#); + let request_rendered = request_path.to_string_lossy().replace('\'', r#"'\''"#); + let script = format!("#!/bin/sh\ncat > '{request_rendered}'\ncat '{artifact_rendered}'\n"); + fs::write(path, script) + .unwrap_or_else(|err| panic!("failed to write helper script '{}': {err}", path.display())); +} + +fn write_json_response_capture_shell_helper( + path: &Path, + response_path: &Path, + request_path: &Path, +) { + let response_rendered = response_path.to_string_lossy().replace('\'', r#"'\''"#); + let request_rendered = request_path.to_string_lossy().replace('\'', r#"'\''"#); + let script = format!("#!/bin/sh\ncat > '{request_rendered}'\ncat '{response_rendered}'\n"); + fs::write(path, script) + .unwrap_or_else(|err| panic!("failed to write helper script '{}': {err}", path.display())); +} + +fn replace_template_slots(node: &mut Value, replacements: &[(&str, Value)]) { + match node { + Value::Array(items) => { + for item in items { + replace_template_slots(item, replacements); + } + } + Value::Object(map) => { + for value in map.values_mut() { + replace_template_slots(value, replacements); + } + } + Value::String(slot) => { + if let Some((_, replacement)) = replacements.iter().find(|(key, _)| *key == slot) { + *node = replacement.clone(); + } + } + _ => {} + } +} + +fn managed_artifact_from_template( + template: &str, + repo: &Path, + touched_file: &Path, + prompt: &str, +) -> Value { + let mut artifact: Value = serde_json::from_str(template) + .unwrap_or_else(|err| panic!("failed to parse managed artifact template: {err}")); + let replacements = [ + ("__CWD__", json!(repo.to_string_lossy().to_string())), + ( + "__TOUCHED_FILE__", + 
json!(touched_file.to_string_lossy().to_string()), + ), + ("__PROMPT__", json!(prompt)), + ]; + replace_template_slots(&mut artifact, &replacements); + artifact +} + +fn semantic_full_artifact(repo: &Path, touched_file: &Path) -> Value { + managed_artifact_from_template( + SEMANTIC_FULL_TEMPLATE, + repo, + touched_file, + DEFAULT_MANAGED_PROMPT, + ) +} + +fn plan_task_only_artifact(repo: &Path, touched_file: &Path) -> Value { + managed_artifact_from_template(PLAN_TASK_ONLY_TEMPLATE, repo, touched_file, PLAN_PROMPT) +} + +fn timed_out_partial_artifact(repo: &Path, touched_file: &Path) -> Value { + let mut artifact = semantic_full_artifact(repo, touched_file); + let object = artifact + .as_object_mut() + .expect("semantic full artifact should be an object"); + object.insert("helperTimedOut".to_string(), json!(true)); + object.insert( + "helperError".to_string(), + json!("Claude SDK helper timed out"), + ); + object.insert("resultMessage".to_string(), Value::Null); + artifact +} + +async fn load_intent_history(repo: &Path) -> (Arc<LocalStorage>, HistoryManager) { + let libra_dir = repo.join(".libra"); + let storage = Arc::new(LocalStorage::new(libra_dir.join("objects"))); + let db_conn = Arc::new( + libra::internal::db::establish_connection( + libra_dir + .join("libra.db") + .to_str() + .expect("db path should be valid UTF-8"), + ) + .await + .expect("failed to connect test database"), + ); + let history = HistoryManager::new(storage.clone(), libra_dir, db_conn); + (storage, history) +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_import_persists_bridge_artifacts_and_is_idempotent() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let artifact_path = repo.path().join("probe-like-artifact.json"); + fs::write(&artifact_path, PROBE_LIKE_ARTIFACT).expect("failed to stage managed artifact"); + + let first = run_libra_command( + &[ + "claude-sdk", + "import", + "--artifact", + 
artifact_path.to_str().expect("artifact path utf-8"), + ], + repo.path(), + ); + assert_cli_success(&first, "claude-sdk import should succeed"); + let first_json = parse_stdout_json(&first, "first import"); + + assert_eq!(first_json["ok"], json!(true)); + assert_eq!(first_json["mode"], json!("import")); + assert_eq!(first_json["alreadyPersisted"], json!(false)); + assert!( + first_json["intentExtractionPath"].is_null(), + "probe-like fixture should not yield an intent extraction" + ); + + let ai_session_id = first_json["aiSessionId"] + .as_str() + .expect("aiSessionId should be present"); + let raw_artifact_path = PathBuf::from( + first_json["rawArtifactPath"] + .as_str() + .expect("rawArtifactPath"), + ); + let audit_bundle_path = PathBuf::from( + first_json["auditBundlePath"] + .as_str() + .expect("auditBundlePath"), + ); + + assert!( + raw_artifact_path.exists(), + "raw artifact should be materialized" + ); + assert!( + audit_bundle_path.exists(), + "audit bundle should be materialized" + ); + + let audit_bundle = read_json_file(&audit_bundle_path); + assert_eq!( + audit_bundle["bridge"]["intentExtraction"]["status"], + json!("invalid") + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["runSnapshot"]["id"], + json!(format!("{ai_session_id}::run")) + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["runEvent"]["status"], + json!("completed") + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["provenanceSnapshot"]["provider"], + json!("claude") + ); + assert_eq!( + audit_bundle["bridge"]["aiSession"]["schema"], + json!("libra.ai_session.v2") + ); + + let ai_type = run_libra_command(&["cat-file", "--ai-type", ai_session_id], repo.path()); + assert_cli_success(&ai_type, "cat-file --ai-type should succeed"); + assert_eq!( + String::from_utf8_lossy(&ai_type.stdout).trim(), + "ai_session" + ); + + let ai_pretty = run_libra_command(&["cat-file", "--ai", ai_session_id], repo.path()); + assert_cli_success(&ai_pretty, "cat-file --ai 
should succeed"); + let ai_pretty_stdout = String::from_utf8_lossy(&ai_pretty.stdout); + assert!(ai_pretty_stdout.contains("schema: libra.ai_session.v2")); + assert!(ai_pretty_stdout.contains("provider: claude")); + + let second = run_libra_command( + &[ + "claude-sdk", + "import", + "--artifact", + artifact_path.to_str().expect("artifact path utf-8"), + ], + repo.path(), + ); + assert_cli_success(&second, "second claude-sdk import should succeed"); + let second_json = parse_stdout_json(&second, "second import"); + assert_eq!(second_json["alreadyPersisted"], json!(true)); + assert_eq!(second_json["aiSessionId"], json!(ai_session_id)); + assert_eq!( + second_json["aiSessionObjectHash"], + first_json["aiSessionObjectHash"] + ); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_run_with_custom_helper_persists_intent_extraction() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let touched_file = repo.path().join("src").join("lib.rs"); + fs::create_dir_all(touched_file.parent().expect("source file parent")).expect("mkdir src"); + fs::write(&touched_file, "pub fn managed_bridge() {}\n").expect("write source file"); + + let artifact_json = semantic_full_artifact(repo.path(), &touched_file); + + let artifact_path = repo.path().join("managed-run-artifact.json"); + fs::write( + &artifact_path, + serde_json::to_vec_pretty(&artifact_json).expect("serialize test artifact"), + ) + .expect("write test artifact"); + + let helper_path = repo.path().join("fake-managed-helper.sh"); + write_shell_helper(&helper_path, &artifact_path); + + let run = run_libra_command( + &[ + "claude-sdk", + "run", + "--prompt", + DEFAULT_MANAGED_PROMPT, + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success(&run, "claude-sdk run should succeed"); + let run_json = parse_stdout_json(&run, "claude-sdk run"); + + 
assert_eq!(run_json["ok"], json!(true)); + assert_eq!(run_json["mode"], json!("run")); + assert_eq!(run_json["alreadyPersisted"], json!(false)); + + let ai_session_id = run_json["aiSessionId"] + .as_str() + .expect("aiSessionId should be present"); + let intent_extraction_path = PathBuf::from( + run_json["intentExtractionPath"] + .as_str() + .expect("intentExtractionPath"), + ); + let raw_artifact_path = PathBuf::from( + run_json["rawArtifactPath"] + .as_str() + .expect("rawArtifactPath"), + ); + let audit_bundle_path = PathBuf::from( + run_json["auditBundlePath"] + .as_str() + .expect("auditBundlePath"), + ); + + assert!( + intent_extraction_path.exists(), + "intent extraction should be persisted" + ); + assert!( + raw_artifact_path.exists(), + "raw artifact should be materialized" + ); + assert!( + audit_bundle_path.exists(), + "audit bundle should be materialized" + ); + + let intent_extraction = read_json_file(&intent_extraction_path); + assert_eq!( + intent_extraction["schema"], + json!("libra.intent_extraction.v1") + ); + assert_eq!( + intent_extraction["extraction"]["intent"]["summary"], + json!("Persist the Claude SDK managed bridge") + ); + + let audit_bundle = read_json_file(&audit_bundle_path); + assert_eq!( + audit_bundle["bridge"]["intentExtraction"]["status"], + json!("accepted") + ); + assert_eq!( + audit_bundle["bridge"]["intentExtractionArtifact"]["schema"], + json!("libra.intent_extraction.v1") + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["runSnapshot"]["id"], + json!(format!("{ai_session_id}::run")) + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["runEvent"]["status"], + json!("completed") + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["toolInvocationEvents"][0]["tool"], + json!("Read") + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["providerInitSnapshot"]["apiKeySource"], + json!("oauth") + ); + assert_eq!( + 
audit_bundle["bridge"]["objectCandidates"]["providerInitSnapshot"]["claudeCodeVersion"], + json!("2.1.76") + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["taskRuntimeEvents"] + .as_array() + .map(Vec::len), + Some(4) + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["toolRuntimeEvents"] + .as_array() + .map(Vec::len), + Some(2) + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["assistantRuntimeEvents"] + .as_array() + .map(Vec::len), + Some(2) + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["decisionRuntimeEvents"] + .as_array() + .map(Vec::len), + Some(4) + ); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["contextRuntimeEvents"] + .as_array() + .map(Vec::len), + Some(7) + ); + assert_eq!( + audit_bundle["bridge"]["sessionState"]["metadata"]["provider_runtime"]["providerInit"]["apiKeySource"], + json!("oauth") + ); + assert!( + audit_bundle["bridge"]["touchHints"] + .as_array() + .is_some_and(|items| items.iter().any(|item| item == "src/lib.rs")), + "touch hints should include the repo-relative file observed from tool evidence" + ); + assert!( + audit_bundle["fieldProvenance"] + .as_array() + .is_some_and(|entries| entries + .iter() + .any(|entry| entry["fieldPath"] == "runtime.assistantEvents")), + "assistant stream runtime facts should be recorded in field provenance" + ); + + let ai_pretty = run_libra_command(&["cat-file", "--ai", ai_session_id], repo.path()); + assert_cli_success(&ai_pretty, "cat-file --ai should succeed for run path"); + let ai_pretty_stdout = String::from_utf8_lossy(&ai_pretty.stdout); + assert!(ai_pretty_stdout.contains("schema: libra.ai_session.v2")); + assert!(ai_pretty_stdout.contains("provider: claude")); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_run_can_disable_auto_tool_approval() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let touched_file = 
repo.path().join("src").join("lib.rs"); + fs::create_dir_all(touched_file.parent().expect("source file parent")).expect("mkdir src"); + fs::write(&touched_file, "pub fn managed_bridge() {}\n").expect("write source file"); + + let artifact_path = repo.path().join("managed-run-artifact.json"); + fs::write( + &artifact_path, + serde_json::to_vec_pretty(&semantic_full_artifact(repo.path(), &touched_file)) + .expect("serialize test artifact"), + ) + .expect("write test artifact"); + + let request_path = repo.path().join("helper-request.json"); + let helper_path = repo.path().join("capture-managed-helper.sh"); + write_request_capture_shell_helper(&helper_path, &artifact_path, &request_path); + + let run = run_libra_command( + &[ + "claude-sdk", + "run", + "--prompt", + DEFAULT_MANAGED_PROMPT, + "--tool", + "Read", + "--auto-approve-tools", + "false", + "--include-partial-messages", + "true", + "--prompt-suggestions", + "true", + "--agent-progress-summaries", + "true", + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &run, + "claude-sdk run should succeed when auto tool approval is disabled", + ); + + let helper_request = read_json_file(&request_path); + assert_eq!(helper_request["tools"], json!(["Read"])); + assert_eq!(helper_request["allowedTools"], json!(["Read"])); + assert_eq!(helper_request["autoApproveTools"], json!(false)); + assert_eq!(helper_request["includePartialMessages"], json!(true)); + assert_eq!(helper_request["promptSuggestions"], json!(true)); + assert_eq!(helper_request["agentProgressSummaries"], json!(true)); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_sync_sessions_persists_provider_session_snapshots() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let response_path = repo.path().join("session-catalog.json"); + fs::write( + &response_path, + 
serde_json::to_vec_pretty(&json!([ + { + "sessionId": "session-a", + "summary": "Claude session A", + "lastModified": 1742025600000i64, + "fileSize": 2048, + "customTitle": "A title", + "firstPrompt": "Add tests", + "gitBranch": "main", + "cwd": repo.path().to_string_lossy().to_string(), + "tag": "review", + "createdAt": 1742022000000i64 + }, + { + "sessionId": "session-b", + "summary": "Claude session B", + "lastModified": 1742029200000i64, + "cwd": repo.path().to_string_lossy().to_string() + } + ])) + .expect("serialize session catalog"), + ) + .expect("write session catalog response"); + + let request_path = repo.path().join("session-catalog-request.json"); + let helper_path = repo.path().join("fake-session-helper.sh"); + write_json_response_capture_shell_helper(&helper_path, &response_path, &request_path); + + let sync = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--limit", + "10", + "--offset", + "2", + "--include-worktrees", + "false", + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success(&sync, "claude-sdk sync-sessions should succeed"); + let sync_json = parse_stdout_json(&sync, "claude-sdk sync-sessions"); + assert_eq!(sync_json["ok"], json!(true)); + assert_eq!(sync_json["mode"], json!("sync-sessions")); + assert_eq!(sync_json["syncedCount"], json!(2)); + + let helper_request = read_json_file(&request_path); + assert_eq!(helper_request["mode"], json!("listSessions")); + assert_eq!(helper_request["limit"], json!(10)); + assert_eq!(helper_request["offset"], json!(2)); + assert_eq!(helper_request["includeWorktrees"], json!(false)); + + let first_artifact = PathBuf::from( + sync_json["sessions"][0]["artifactPath"] + .as_str() + .expect("artifactPath should be present"), + ); + assert!( + first_artifact.exists(), + "provider session artifact should exist" + ); + let first_snapshot = read_json_file(&first_artifact); + 
assert_eq!(first_snapshot["schema"], json!("libra.provider_session.v3")); + assert_eq!(first_snapshot["provider"], json!("claude")); + assert_eq!(first_snapshot["providerSessionId"], json!("session-a")); + assert_eq!( + first_snapshot["objectId"], + json!("claude_provider_session__session-a") + ); + assert_eq!(first_snapshot["summary"], json!("Claude session A")); + + let (_, history) = load_intent_history(repo.path()).await; + let sessions = history + .list_objects("provider_session") + .await + .expect("should list provider_session objects"); + assert_eq!( + sessions.len(), + 2, + "should persist provider session snapshots" + ); + + let ai_pretty = run_libra_command( + &["cat-file", "--ai", "claude_provider_session__session-a"], + repo.path(), + ); + assert_cli_success( + &ai_pretty, + "cat-file --ai should succeed for provider_session object", + ); + let ai_pretty_stdout = String::from_utf8_lossy(&ai_pretty.stdout); + assert!(ai_pretty_stdout.contains("type: provider_session")); + assert!(ai_pretty_stdout.contains("schema: libra.provider_session.v3")); + assert!(ai_pretty_stdout.contains("provider: claude")); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_sync_sessions_preserves_existing_message_sync() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let catalog_response_path = repo.path().join("session-catalog.json"); + fs::write( + &catalog_response_path, + serde_json::to_vec_pretty(&json!([ + { + "sessionId": "session-a", + "summary": "Claude session A", + "lastModified": 1742025600000i64, + "cwd": repo.path().to_string_lossy().to_string() + } + ])) + .expect("serialize session catalog"), + ) + .expect("write session catalog response"); + let catalog_request_path = repo.path().join("session-catalog-request.json"); + let catalog_helper_path = repo.path().join("fake-session-catalog-helper.sh"); + write_json_response_capture_shell_helper( + &catalog_helper_path, + 
&catalog_response_path, + &catalog_request_path, + ); + + let sync = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--helper-path", + catalog_helper_path + .to_str() + .expect("catalog helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success(&sync, "initial claude-sdk sync-sessions should succeed"); + + let messages_response_path = repo.path().join("session-messages.json"); + fs::write( + &messages_response_path, + serde_json::to_vec_pretty(&json!([ + {"type": "user", "session_id": "session-a"}, + {"type": "assistant", "session_id": "session-a"}, + {"type": "result", "subtype": "success", "session_id": "session-a"} + ])) + .expect("serialize session messages"), + ) + .expect("write session messages response"); + let messages_request_path = repo.path().join("session-messages-request.json"); + let messages_helper_path = repo.path().join("fake-session-messages-helper.sh"); + write_json_response_capture_shell_helper( + &messages_helper_path, + &messages_response_path, + &messages_request_path, + ); + + let hydrate = run_libra_command( + &[ + "claude-sdk", + "hydrate-session", + "--provider-session-id", + "session-a", + "--helper-path", + messages_helper_path + .to_str() + .expect("messages helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success(&hydrate, "claude-sdk hydrate-session should succeed"); + + let resync = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--helper-path", + catalog_helper_path + .to_str() + .expect("catalog helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success(&resync, "repeat claude-sdk sync-sessions should succeed"); + + let build = run_libra_command( + &[ + "claude-sdk", + "build-evidence-input", + "--provider-session-id", + "session-a", + ], + repo.path(), + ); + assert_cli_success( + &build, + "build-evidence-input should still succeed after re-syncing a hydrated session", + ); + + 
let snapshot_path = repo + .path() + .join(".libra/provider-sessions/claude_provider_session__session-a.json"); + let snapshot = read_json_file(&snapshot_path); + assert_eq!(snapshot["messageSync"]["messageCount"], json!(3)); + assert_eq!( + snapshot["messageSync"]["lastMessageKind"], + json!("result:success") + ); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_sync_sessions_skips_history_append_when_snapshot_is_unchanged() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let response_path = repo.path().join("session-catalog.json"); + fs::write( + &response_path, + serde_json::to_vec_pretty(&json!([ + { + "sessionId": "session-a", + "summary": "Claude session A", + "lastModified": 1742025600000i64, + "cwd": repo.path().to_string_lossy().to_string() + } + ])) + .expect("serialize session catalog"), + ) + .expect("write session catalog response"); + + let request_path = repo.path().join("session-catalog-request.json"); + let helper_path = repo.path().join("fake-session-helper.sh"); + write_json_response_capture_shell_helper(&helper_path, &response_path, &request_path); + + let first = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success(&first, "initial sync-sessions should succeed"); + + let (_, history) = load_intent_history(repo.path()).await; + let first_head = read_history_head(repo.path(), &history).await; + + let second = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success(&second, "repeat sync-sessions should succeed"); + + let second_head = read_history_head(repo.path(), &history).await; + assert_eq!( + second_head, first_head, + "unchanged sync-sessions runs should not 
append a new AI history commit" + ); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_sync_sessions_keeps_history_in_current_repo_when_cwd_is_overridden() { + let repo = tempdir().expect("failed to create repo root"); + let external_project = tempdir().expect("failed to create external project root"); + test::setup_with_new_libra_in(repo.path()).await; + + let response_path = repo.path().join("session-catalog.json"); + fs::write( + &response_path, + serde_json::to_vec_pretty(&json!([ + { + "sessionId": "session-a", + "summary": "Claude session A", + "lastModified": 1742025600000i64, + "cwd": external_project.path().to_string_lossy().to_string() + } + ])) + .expect("serialize session catalog"), + ) + .expect("write session catalog response"); + + let request_path = repo.path().join("session-catalog-request.json"); + let helper_path = repo.path().join("fake-session-helper.sh"); + write_json_response_capture_shell_helper(&helper_path, &response_path, &request_path); + + let sync = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--cwd", + external_project + .path() + .to_str() + .expect("external cwd utf-8"), + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &sync, + "claude-sdk sync-sessions should persist into the current repo even with --cwd override", + ); + + let (_, history) = load_intent_history(repo.path()).await; + let sessions = history + .list_objects("provider_session") + .await + .expect("should list provider_session objects from current repo"); + assert_eq!(sessions.len(), 1); + + assert!( + !external_project.path().join(".libra/libra.db").exists(), + "sync-sessions should not create a shadow Libra repo under the overridden cwd" + ); +} + +#[test] +fn test_claude_sdk_helper_resolves_project_local_sdk_from_relative_cwd() { + let repo = tempdir().expect("failed to create repo root"); + let module_dir = repo + .path() + 
.join("node_modules") + .join("@anthropic-ai") + .join("claude-agent-sdk"); + fs::create_dir_all(&module_dir).expect("failed to create fake sdk module directory"); + fs::write( + module_dir.join("index.js"), + r#"exports.query = async function* () {}; +exports.listSessions = async () => ([{ + sessionId: "session-relative", + summary: "Relative cwd session", + lastModified: 1742025600000, + cwd: process.cwd() +}]);"#, + ) + .expect("failed to write fake sdk module"); + + let helper_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("src") + .join("internal") + .join("ai") + .join("providers") + .join("claude_sdk") + .join("helper.cjs"); + let mut child = std::process::Command::new("node") + .arg(&helper_path) + .current_dir(repo.path()) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("failed to spawn helper with node"); + + let request = br#"{"mode":"listSessions","cwd":".","offset":0,"includeWorktrees":true}"#; + child + .stdin + .as_mut() + .expect("child stdin should exist") + .write_all(request) + .expect("failed to send request to helper"); + let output = child.wait_with_output().expect("failed to wait on helper"); + assert!( + output.status.success(), + "helper should resolve project-local sdk from relative cwd: {}", + String::from_utf8_lossy(&output.stderr) + ); + + let sync_json: Value = + serde_json::from_slice(&output.stdout).expect("helper stdout should be valid JSON"); + assert_eq!(sync_json.as_array().map(Vec::len), Some(1)); + assert_eq!(sync_json[0]["sessionId"], json!("session-relative")); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_hydrate_session_updates_provider_session_with_messages() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let catalog_response_path = repo.path().join("session-catalog.json"); + fs::write( + &catalog_response_path, + serde_json::to_vec_pretty(&json!([ + { + "sessionId": "session-a", + 
"summary": "Claude session A", + "lastModified": 1742025600000i64, + "cwd": repo.path().to_string_lossy().to_string() + } + ])) + .expect("serialize session catalog"), + ) + .expect("write session catalog response"); + let catalog_request_path = repo.path().join("session-catalog-request.json"); + let catalog_helper_path = repo.path().join("fake-session-catalog-helper.sh"); + write_json_response_capture_shell_helper( + &catalog_helper_path, + &catalog_response_path, + &catalog_request_path, + ); + + let sync = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--helper-path", + catalog_helper_path + .to_str() + .expect("catalog helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &sync, + "claude-sdk sync-sessions should succeed before hydration", + ); + + let messages_response_path = repo.path().join("session-messages.json"); + fs::write( + &messages_response_path, + serde_json::to_vec_pretty(&json!([ + { + "type": "system", + "subtype": "init", + "session_id": "session-a", + "uuid": "msg-1" + }, + { + "type": "user", + "session_id": "session-a" + }, + { + "type": "assistant", + "session_id": "session-a", + "uuid": "msg-2" + }, + { + "type": "tool_progress", + "tool_use_id": "tool-1", + "tool_name": "Read", + "elapsed_time_seconds": 1, + "session_id": "session-a", + "uuid": "msg-3" + }, + { + "type": "result", + "subtype": "success", + "session_id": "session-a", + "uuid": "msg-4", + "duration_ms": 10, + "duration_api_ms": 8, + "is_error": false, + "num_turns": 1, + "result": "ok", + "stop_reason": "end_turn", + "total_cost_usd": 0.01, + "usage": {} + } + ])) + .expect("serialize session messages"), + ) + .expect("write session messages response"); + let messages_request_path = repo.path().join("session-messages-request.json"); + let messages_helper_path = repo.path().join("fake-session-messages-helper.sh"); + write_json_response_capture_shell_helper( + &messages_helper_path, + &messages_response_path, + 
&messages_request_path, + ); + + let hydrate = run_libra_command( + &[ + "claude-sdk", + "hydrate-session", + "--provider-session-id", + "session-a", + "--limit", + "20", + "--offset", + "3", + "--helper-path", + messages_helper_path + .to_str() + .expect("messages helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success(&hydrate, "claude-sdk hydrate-session should succeed"); + let hydrate_json = parse_stdout_json(&hydrate, "claude-sdk hydrate-session"); + assert_eq!(hydrate_json["ok"], json!(true)); + assert_eq!(hydrate_json["mode"], json!("hydrate-session")); + assert_eq!(hydrate_json["providerSessionId"], json!("session-a")); + assert_eq!(hydrate_json["messageCount"], json!(5)); + + let helper_request = read_json_file(&messages_request_path); + assert_eq!(helper_request["mode"], json!("getSessionMessages")); + assert_eq!(helper_request["providerSessionId"], json!("session-a")); + assert_eq!(helper_request["limit"], json!(20)); + assert_eq!(helper_request["offset"], json!(3)); + + let artifact_path = PathBuf::from( + hydrate_json["artifactPath"] + .as_str() + .expect("artifactPath should be present"), + ); + let snapshot = read_json_file(&artifact_path); + assert_eq!(snapshot["schema"], json!("libra.provider_session.v3")); + assert_eq!(snapshot["messageSync"]["messageCount"], json!(5)); + assert_eq!( + snapshot["messageSync"]["kindCounts"]["system:init"], + json!(1) + ); + assert_eq!( + snapshot["messageSync"]["kindCounts"]["result:success"], + json!(1) + ); + assert_eq!( + snapshot["messageSync"]["firstMessageKind"], + json!("system:init") + ); + assert_eq!( + snapshot["messageSync"]["lastMessageKind"], + json!("result:success") + ); + + let messages_artifact_path = PathBuf::from( + hydrate_json["messagesArtifactPath"] + .as_str() + .expect("messagesArtifactPath should be present"), + ); + let messages_artifact = read_json_file(&messages_artifact_path); + assert_eq!( + messages_artifact["schema"], + 
json!("libra.provider_session_messages.v1") + ); + assert_eq!(messages_artifact["providerSessionId"], json!("session-a")); + assert_eq!( + messages_artifact["messages"].as_array().map(Vec::len), + Some(5) + ); + + let ai_pretty = run_libra_command( + &["cat-file", "--ai", "claude_provider_session__session-a"], + repo.path(), + ); + assert_cli_success( + &ai_pretty, + "cat-file --ai should succeed for hydrated provider_session object", + ); + let ai_pretty_stdout = String::from_utf8_lossy(&ai_pretty.stdout); + assert!(ai_pretty_stdout.contains("message_count: 5")); + assert!(ai_pretty_stdout.contains("first_message_kind: system:init")); + assert!(ai_pretty_stdout.contains("last_message_kind: result:success")); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_build_evidence_input_from_provider_session_messages() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let catalog_response_path = repo.path().join("session-catalog.json"); + fs::write( + &catalog_response_path, + serde_json::to_vec_pretty(&json!([ + { + "sessionId": "session-a", + "summary": "Claude session A", + "lastModified": 1742025600000i64, + "cwd": repo.path().to_string_lossy().to_string() + } + ])) + .expect("serialize session catalog"), + ) + .expect("write session catalog response"); + let catalog_request_path = repo.path().join("session-catalog-request.json"); + let catalog_helper_path = repo.path().join("fake-session-catalog-helper.sh"); + write_json_response_capture_shell_helper( + &catalog_helper_path, + &catalog_response_path, + &catalog_request_path, + ); + + let sync = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--helper-path", + catalog_helper_path + .to_str() + .expect("catalog helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &sync, + "claude-sdk sync-sessions should succeed before evidence-input build", + ); + + let messages_response_path = 
repo.path().join("session-messages.json"); + fs::write( + &messages_response_path, + serde_json::to_vec_pretty(&json!([ + { + "type": "system", + "subtype": "init", + "session_id": "session-a", + "uuid": "msg-1" + }, + { + "type": "user", + "session_id": "session-a", + "message": { + "content": [ + { + "type": "text", + "text": "Inspect src/lib.rs and summarize the bridge state." + } + ] + } + }, + { + "type": "assistant", + "session_id": "session-a", + "uuid": "msg-2", + "message": { + "content": [ + { + "type": "text", + "text": "I will inspect src/lib.rs and then summarize the current bridge shape." + }, + { + "type": "tool_use", + "name": "Read", + "input": { + "file_path": "src/lib.rs" + } + } + ] + } + }, + { + "type": "system", + "subtype": "task_progress", + "session_id": "session-a", + "uuid": "msg-task-1", + "description": "Reading provider runtime facts" + }, + { + "type": "tool_progress", + "tool_use_id": "tool-1", + "tool_name": "Read", + "elapsed_time_seconds": 1, + "session_id": "session-a", + "uuid": "msg-3" + }, + { + "type": "result", + "subtype": "success", + "session_id": "session-a", + "uuid": "msg-4", + "duration_ms": 10, + "duration_api_ms": 8, + "is_error": false, + "num_turns": 1, + "result": "ok", + "stop_reason": "end_turn", + "total_cost_usd": 0.01, + "permission_denials": [ + { + "tool_name": "Edit" + } + ], + "structured_output": { + "summary": "Separate runtime facts from semantic candidates" + }, + "usage": {} + } + ])) + .expect("serialize session messages"), + ) + .expect("write session messages response"); + let messages_request_path = repo.path().join("session-messages-request.json"); + let messages_helper_path = repo.path().join("fake-session-messages-helper.sh"); + write_json_response_capture_shell_helper( + &messages_helper_path, + &messages_response_path, + &messages_request_path, + ); + + let hydrate = run_libra_command( + &[ + "claude-sdk", + "hydrate-session", + "--provider-session-id", + "session-a", + "--helper-path", + 
messages_helper_path + .to_str() + .expect("messages helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &hydrate, + "claude-sdk hydrate-session should succeed before evidence-input build", + ); + + let build = run_libra_command( + &[ + "claude-sdk", + "build-evidence-input", + "--provider-session-id", + "session-a", + ], + repo.path(), + ); + assert_cli_success( + &build, + "claude-sdk build-evidence-input should succeed for a hydrated provider session", + ); + let build_json = parse_stdout_json(&build, "claude-sdk build-evidence-input"); + assert_eq!(build_json["ok"], json!(true)); + assert_eq!(build_json["mode"], json!("build-evidence-input")); + assert_eq!(build_json["providerSessionId"], json!("session-a")); + assert_eq!( + build_json["providerSessionObjectId"], + json!("claude_provider_session__session-a") + ); + assert_eq!( + build_json["objectId"], + json!("claude_evidence_input__session-a") + ); + assert_eq!(build_json["messageCount"], json!(6)); + + let evidence_path = PathBuf::from( + build_json["artifactPath"] + .as_str() + .expect("artifactPath should be present"), + ); + let evidence = read_json_file(&evidence_path); + assert_eq!(evidence["schema"], json!("libra.evidence_input.v1")); + assert_eq!(evidence["object_type"], json!("evidence_input")); + assert_eq!(evidence["provider"], json!("claude")); + assert_eq!(evidence["providerSessionId"], json!("session-a")); + assert_eq!( + evidence["providerSessionObjectId"], + json!("claude_provider_session__session-a") + ); + assert_eq!(evidence["messageOverview"]["messageCount"], json!(6)); + assert_eq!( + evidence["contentOverview"]["assistantMessageCount"], + json!(1) + ); + assert_eq!(evidence["contentOverview"]["userMessageCount"], json!(1)); + assert_eq!( + evidence["contentOverview"]["observedTools"]["Read"], + json!(2) + ); + assert_eq!( + evidence["contentOverview"]["observedPaths"][0], + json!("src/lib.rs") + ); + 
assert_eq!(evidence["runtimeSignals"]["toolRuntimeCount"], json!(1)); + assert_eq!(evidence["runtimeSignals"]["taskRuntimeCount"], json!(1)); + assert_eq!(evidence["runtimeSignals"]["resultMessageCount"], json!(1)); + assert_eq!( + evidence["runtimeSignals"]["hasStructuredOutput"], + json!(true) + ); + assert_eq!( + evidence["runtimeSignals"]["hasPermissionDenials"], + json!(true) + ); + assert_eq!(evidence["latestResult"]["stopReason"], json!("end_turn")); + assert_eq!(evidence["latestResult"]["permissionDenialCount"], json!(1)); + + let (_, history) = load_intent_history(repo.path()).await; + let evidence_inputs = history + .list_objects("evidence_input") + .await + .expect("should list evidence_input objects"); + assert_eq!( + evidence_inputs.len(), + 1, + "should persist evidence_input objects" + ); + + let ai_pretty = run_libra_command( + &["cat-file", "--ai", "claude_evidence_input__session-a"], + repo.path(), + ); + assert_cli_success( + &ai_pretty, + "cat-file --ai should succeed for evidence_input object", + ); + let ai_pretty_stdout = String::from_utf8_lossy(&ai_pretty.stdout); + assert!(ai_pretty_stdout.contains("type: evidence_input")); + assert!(ai_pretty_stdout.contains("schema: libra.evidence_input.v1")); + assert!(ai_pretty_stdout.contains("message_count: 6")); + assert!(ai_pretty_stdout.contains("has_structured_output: true")); + + let helper_request = read_json_file(&messages_request_path); + assert_eq!(helper_request["mode"], json!("getSessionMessages")); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_build_evidence_input_skips_history_append_when_artifact_is_unchanged() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let catalog_response_path = repo.path().join("session-catalog.json"); + fs::write( + &catalog_response_path, + serde_json::to_vec_pretty(&json!([ + { + "sessionId": "session-a", + "summary": "Claude session A", + "lastModified": 1742025600000i64, + 
"cwd": repo.path().to_string_lossy().to_string() + } + ])) + .expect("serialize session catalog"), + ) + .expect("write session catalog response"); + let catalog_request_path = repo.path().join("session-catalog-request.json"); + let catalog_helper_path = repo.path().join("fake-session-catalog-helper.sh"); + write_json_response_capture_shell_helper( + &catalog_helper_path, + &catalog_response_path, + &catalog_request_path, + ); + + let sync = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--helper-path", + catalog_helper_path + .to_str() + .expect("catalog helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &sync, + "sync-sessions should succeed before evidence-input build", + ); + + let messages_response_path = repo.path().join("session-messages.json"); + fs::write( + &messages_response_path, + serde_json::to_vec_pretty(&json!([ + { + "type": "system", + "subtype": "init", + "session_id": "session-a", + "uuid": "msg-1" + }, + { + "type": "assistant", + "session_id": "session-a", + "uuid": "msg-2", + "message": { + "content": [ + { + "type": "tool_use", + "name": "Read", + "input": { + "file_path": "src/lib.rs" + } + } + ] + } + }, + { + "type": "result", + "subtype": "success", + "session_id": "session-a", + "uuid": "msg-3", + "stop_reason": "end_turn", + "structured_output": { + "summary": "Bridge runtime facts" + } + } + ])) + .expect("serialize session messages"), + ) + .expect("write session messages response"); + let messages_request_path = repo.path().join("session-messages-request.json"); + let messages_helper_path = repo.path().join("fake-session-messages-helper.sh"); + write_json_response_capture_shell_helper( + &messages_helper_path, + &messages_response_path, + &messages_request_path, + ); + + let hydrate = run_libra_command( + &[ + "claude-sdk", + "hydrate-session", + "--provider-session-id", + "session-a", + "--helper-path", + messages_helper_path + .to_str() + .expect("messages helper 
path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &hydrate, + "hydrate-session should succeed before evidence-input build", + ); + + let first = run_libra_command( + &[ + "claude-sdk", + "build-evidence-input", + "--provider-session-id", + "session-a", + ], + repo.path(), + ); + assert_cli_success(&first, "initial build-evidence-input should succeed"); + + let (_, history) = load_intent_history(repo.path()).await; + let first_head = read_history_head(repo.path(), &history).await; + + let second = run_libra_command( + &[ + "claude-sdk", + "build-evidence-input", + "--provider-session-id", + "session-a", + ], + repo.path(), + ); + assert_cli_success(&second, "repeat build-evidence-input should succeed"); + + let second_head = read_history_head(repo.path(), &history).await; + assert_eq!( + second_head, first_head, + "unchanged evidence-input builds should not append a new AI history commit" + ); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_sync_sessions_rejects_invalid_provider_session_id() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let response_path = repo.path().join("session-catalog.json"); + fs::write( + &response_path, + serde_json::to_vec_pretty(&json!([ + { + "sessionId": "../session-a", + "summary": "Claude session A", + "lastModified": 1742025600000i64, + "cwd": repo.path().to_string_lossy().to_string() + } + ])) + .expect("serialize session catalog"), + ) + .expect("write session catalog response"); + + let request_path = repo.path().join("session-catalog-request.json"); + let helper_path = repo.path().join("fake-session-helper.sh"); + write_json_response_capture_shell_helper(&helper_path, &response_path, &request_path); + + let sync = run_libra_command( + &[ + "claude-sdk", + "sync-sessions", + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + 
assert!( + !sync.status.success(), + "sync-sessions should reject invalid provider session ids" + ); + assert!( + String::from_utf8_lossy(&sync.stderr).contains("invalid provider session id"), + "expected invalid provider session id error, got: {}", + String::from_utf8_lossy(&sync.stderr) + ); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_run_persists_partial_artifact_when_helper_times_out() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let touched_file = repo.path().join("src").join("lib.rs"); + fs::create_dir_all(touched_file.parent().expect("source file parent")).expect("mkdir src"); + fs::write(&touched_file, "pub fn managed_bridge() {}\n").expect("write source file"); + + let artifact_path = repo.path().join("timed-out-artifact.json"); + fs::write( + &artifact_path, + serde_json::to_vec_pretty(&timed_out_partial_artifact(repo.path(), &touched_file)) + .expect("serialize timeout artifact"), + ) + .expect("write timeout artifact"); + + let helper_path = repo.path().join("fake-timeout-helper.sh"); + write_shell_helper(&helper_path, &artifact_path); + + let run = run_libra_command( + &[ + "claude-sdk", + "run", + "--prompt", + DEFAULT_MANAGED_PROMPT, + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &run, + "claude-sdk run should persist partial artifacts when the helper times out", + ); + let run_json = parse_stdout_json(&run, "claude-sdk run timeout artifact"); + assert!( + run_json["intentExtractionPath"].is_null(), + "partial timeout artifact should not produce a formal intent extraction" + ); + + let audit_bundle_path = PathBuf::from( + run_json["auditBundlePath"] + .as_str() + .expect("auditBundlePath should be present"), + ); + let audit_bundle = read_json_file(&audit_bundle_path); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["runEvent"]["status"], + 
json!("timed_out") + ); + assert_eq!( + audit_bundle["bridge"]["sessionState"]["metadata"]["managed_helper_timed_out"], + json!(true) + ); + assert_eq!( + audit_bundle["bridge"]["sessionState"]["metadata"]["managed_helper_error"], + json!("Claude SDK helper timed out") + ); + assert!( + audit_bundle["bridge"]["objectCandidates"]["decisionRuntimeEvents"] + .as_array() + .is_some_and(|events| !events.is_empty()), + "partial timeout artifact should still preserve decision runtime facts" + ); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_run_plan_prompt_fixture_persists_task_runtime_scenario() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let touched_file = repo.path().join("samples").join("managed.rs"); + fs::create_dir_all(touched_file.parent().expect("sample file parent")).expect("mkdir samples"); + fs::write(&touched_file, "pub fn provider_runtime() {}\n").expect("write sample file"); + + let artifact_path = repo.path().join("managed-plan-artifact.json"); + fs::write( + &artifact_path, + serde_json::to_vec_pretty(&plan_task_only_artifact(repo.path(), &touched_file)) + .expect("serialize plan scenario artifact"), + ) + .expect("write plan scenario artifact"); + + let helper_path = repo.path().join("fake-managed-helper.sh"); + write_shell_helper(&helper_path, &artifact_path); + + let prompt_path = repo.path().join("plan-prompt.txt"); + fs::write(&prompt_path, PLAN_PROMPT).expect("write plan prompt fixture"); + + let run = run_libra_command( + &[ + "claude-sdk", + "run", + "--prompt-file", + prompt_path.to_str().expect("prompt path utf-8"), + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &run, + "claude-sdk run should succeed for plan prompt fixture", + ); + let run_json = parse_stdout_json(&run, "claude-sdk run with plan prompt fixture"); + + let audit_bundle_path = PathBuf::from( 
+ run_json["auditBundlePath"] + .as_str() + .expect("auditBundlePath should be present"), + ); + let intent_extraction_path = PathBuf::from( + run_json["intentExtractionPath"] + .as_str() + .expect("intentExtractionPath should be present"), + ); + + let audit_bundle = read_json_file(&audit_bundle_path); + let task_runtime_events = audit_bundle["bridge"]["objectCandidates"]["taskRuntimeEvents"] + .as_array() + .expect("taskRuntimeEvents should be an array"); + let decision_runtime_events = + audit_bundle["bridge"]["objectCandidates"]["decisionRuntimeEvents"] + .as_array() + .expect("decisionRuntimeEvents should be an array"); + let context_runtime_events = audit_bundle["bridge"]["objectCandidates"]["contextRuntimeEvents"] + .as_array() + .expect("contextRuntimeEvents should be an array"); + + assert_eq!( + audit_bundle["rawArtifact"]["prompt"], + json!(PLAN_PROMPT), + "raw artifact should preserve the persisted prompt fixture" + ); + assert_eq!( + audit_bundle["bridge"]["intentExtraction"]["status"], + json!("accepted") + ); + assert_eq!(task_runtime_events.len(), 6); + assert!(decision_runtime_events.is_empty()); + assert!(context_runtime_events.is_empty()); + assert_eq!( + audit_bundle["bridge"]["objectCandidates"]["providerInitSnapshot"]["agents"], + json!(["general-purpose", "statusline-setup", "Explore", "Plan"]) + ); + assert!( + audit_bundle["rawArtifact"]["messages"] + .as_array() + .is_some_and(|messages| messages.iter().any(|message| { + message["message"]["content"] + .as_array() + .is_some_and(|items| { + items.iter().any(|item| { + item["text"] + .as_str() + .is_some_and(|text| text.contains("3-Step Plan")) + }) + }) + })), + "plan scenario should preserve the assistant plan text in raw messages" + ); + + let task_kinds = task_runtime_events + .iter() + .map(|event| event["kind"].as_str().expect("task runtime kind")) + .collect::>(); + assert_eq!( + task_kinds, + vec![ + "task_started", + "task_progress", + "task_progress", + "task_notification", + 
"SubagentStart", + "SubagentStop" + ] + ); + + let intent_extraction = read_json_file(&intent_extraction_path); + assert_eq!( + intent_extraction["extraction"]["intent"]["summary"], + json!( + "Refactor Claude SDK bridge to separate provider-native runtime facts from semantic-layer candidates" + ) + ); + assert_eq!( + intent_extraction["extraction"]["risk"]["level"], + json!("medium") + ); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_resolve_extraction_materializes_intentspec_preview() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let touched_file = repo.path().join("src").join("lib.rs"); + fs::create_dir_all(touched_file.parent().expect("source file parent")).expect("mkdir src"); + fs::write(&touched_file, "pub fn managed_bridge() {}\n").expect("write source file"); + + let artifact_path = repo.path().join("managed-run-artifact.json"); + fs::write( + &artifact_path, + serde_json::to_vec_pretty(&semantic_full_artifact(repo.path(), &touched_file)) + .expect("serialize test artifact"), + ) + .expect("write test artifact"); + + let helper_path = repo.path().join("fake-managed-helper.sh"); + write_shell_helper(&helper_path, &artifact_path); + + let run = run_libra_command( + &[ + "claude-sdk", + "run", + "--prompt", + DEFAULT_MANAGED_PROMPT, + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success( + &run, + "claude-sdk run should succeed before resolve-extraction", + ); + let run_json = parse_stdout_json(&run, "claude-sdk run before resolve-extraction"); + let ai_session_id = run_json["aiSessionId"] + .as_str() + .expect("aiSessionId should be present"); + + let resolve = run_libra_command( + &[ + "claude-sdk", + "resolve-extraction", + "--ai-session-id", + ai_session_id, + ], + repo.path(), + ); + assert_cli_success(&resolve, "claude-sdk resolve-extraction should succeed"); + let resolve_json 
= parse_stdout_json(&resolve, "claude-sdk resolve-extraction"); + let expected_extraction_path = run_json["intentExtractionPath"] + .as_str() + .expect("run should emit intentExtractionPath"); + + assert_eq!(resolve_json["ok"], json!(true)); + assert_eq!(resolve_json["mode"], json!("resolve-extraction")); + assert_eq!(resolve_json["aiSessionId"], json!(ai_session_id)); + assert_eq!( + resolve_json["extractionPath"], + json!(expected_extraction_path) + ); + assert_eq!(resolve_json["riskLevel"], json!("low")); + assert!( + resolve_json["summary"] + .as_str() + .is_some_and(|summary| summary.contains("IntentSpec generated.")), + "resolve-extraction summary should be derived from the IntentSpec preview" + ); + + let resolved_spec_path = PathBuf::from( + resolve_json["resolvedSpecPath"] + .as_str() + .expect("resolvedSpecPath should be present"), + ); + assert!( + resolved_spec_path.exists(), + "resolved IntentSpec artifact should be materialized" + ); + + let resolved_artifact = read_json_file(&resolved_spec_path); + assert_eq!( + resolved_artifact["schema"], + json!("libra.intent_resolution.v1") + ); + assert_eq!(resolved_artifact["aiSessionId"], json!(ai_session_id)); + assert_eq!(resolved_artifact["riskLevel"], json!("low")); + assert_eq!( + resolved_artifact["extractionSource"], + json!("claude_agent_sdk_managed.structured_output") + ); + assert_eq!(resolved_artifact["intentspec"]["kind"], json!("IntentSpec")); + assert_eq!( + resolved_artifact["intentspec"]["apiVersion"], + json!("intentspec.io/v1alpha1") + ); + assert_eq!( + resolved_artifact["intentspec"]["intent"]["summary"], + json!("Persist the Claude SDK managed bridge") + ); + assert_eq!( + resolved_artifact["intentspec"]["risk"]["level"], + json!("low") + ); +} + +#[tokio::test] +#[serial] +async fn test_claude_sdk_persist_intent_writes_formal_intent_and_binding_artifact() { + let repo = tempdir().expect("failed to create repo root"); + test::setup_with_new_libra_in(repo.path()).await; + + let touched_file 
= repo.path().join("src").join("lib.rs"); + fs::create_dir_all(touched_file.parent().expect("source file parent")).expect("mkdir src"); + fs::write(&touched_file, "pub fn managed_bridge() {}\n").expect("write source file"); + + let artifact_path = repo.path().join("managed-run-artifact.json"); + fs::write( + &artifact_path, + serde_json::to_vec_pretty(&semantic_full_artifact(repo.path(), &touched_file)) + .expect("serialize test artifact"), + ) + .expect("write test artifact"); + + let helper_path = repo.path().join("fake-managed-helper.sh"); + write_shell_helper(&helper_path, &artifact_path); + + let run = run_libra_command( + &[ + "claude-sdk", + "run", + "--prompt", + DEFAULT_MANAGED_PROMPT, + "--helper-path", + helper_path.to_str().expect("helper path utf-8"), + "--node-binary", + "/bin/sh", + ], + repo.path(), + ); + assert_cli_success(&run, "claude-sdk run should succeed before persist-intent"); + let run_json = parse_stdout_json(&run, "claude-sdk run before persist-intent"); + let ai_session_id = run_json["aiSessionId"] + .as_str() + .expect("aiSessionId should be present"); + + let resolve = run_libra_command( + &[ + "claude-sdk", + "resolve-extraction", + "--ai-session-id", + ai_session_id, + ], + repo.path(), + ); + assert_cli_success( + &resolve, + "claude-sdk resolve-extraction should succeed before persist-intent", + ); + + let persist = run_libra_command( + &[ + "claude-sdk", + "persist-intent", + "--ai-session-id", + ai_session_id, + ], + repo.path(), + ); + assert_cli_success(&persist, "claude-sdk persist-intent should succeed"); + let persist_json = parse_stdout_json(&persist, "claude-sdk persist-intent"); + let expected_extraction_path = run_json["intentExtractionPath"] + .as_str() + .expect("run should emit intentExtractionPath"); + + assert_eq!(persist_json["ok"], json!(true)); + assert_eq!(persist_json["mode"], json!("persist-intent")); + assert_eq!(persist_json["aiSessionId"], json!(ai_session_id)); + + let intent_id = persist_json["intentId"] 
+ .as_str() + .expect("intentId should be present") + .to_string(); + let binding_path = PathBuf::from( + persist_json["bindingPath"] + .as_str() + .expect("bindingPath should be present"), + ); + assert!( + binding_path.exists(), + "persist-intent should materialize a binding artifact" + ); + + let binding_artifact = read_json_file(&binding_path); + assert_eq!( + binding_artifact["schema"], + json!("libra.intent_input_binding.v1") + ); + assert_eq!( + binding_artifact["extractionPath"], + json!(expected_extraction_path) + ); + assert_eq!(binding_artifact["aiSessionId"], json!(ai_session_id)); + assert_eq!(binding_artifact["intentId"], json!(intent_id)); + assert!( + binding_artifact["summary"] + .as_str() + .is_some_and(|summary| summary.contains("IntentSpec generated.")), + "binding artifact should retain the resolved IntentSpec summary" + ); + + let (storage, history) = load_intent_history(repo.path()).await; + let intents = history + .list_objects("intent") + .await + .expect("should list intent objects"); + assert_eq!(intents.len(), 1, "should persist exactly one formal intent"); + assert_eq!( + intents[0].0, intent_id, + "history should contain the persisted intent ID" + ); + + let stored_intent: Intent = storage + .get_json(&intents[0].1) + .await + .expect("should load persisted intent object"); + assert_eq!( + stored_intent.prompt(), + "Persist the Claude SDK managed bridge" + ); + assert!( + stored_intent.spec().is_some(), + "persisted formal intent should retain the canonical IntentSpec" + ); +} diff --git a/tests/command/hooks_test.rs b/tests/command/hooks_test.rs deleted file mode 100644 index 90bf1769..00000000 --- a/tests/command/hooks_test.rs +++ /dev/null @@ -1,431 +0,0 @@ -use std::{fs, path::Path, process::Command}; - -use libra::utils::test; -use serde_json::json; -use serial_test::serial; -use tempfile::tempdir; - -fn run_hooks(temp: &tempfile::TempDir, provider: &str, args: &[&str]) -> std::process::Output { - 
Command::new(env!("CARGO_BIN_EXE_libra")) - .current_dir(temp.path()) - .arg("hooks") - .arg(provider) - .args(args) - .output() - .expect("failed to run hooks command") -} - -fn installed_libra_binary() -> String { - let path = std::fs::canonicalize(env!("CARGO_BIN_EXE_libra")) - .expect("failed to canonicalize test libra binary"); - quote_command_path(&path) -} - -fn installed_libra_binary_for(path: &Path) -> String { - let path = std::fs::canonicalize(path).expect("failed to canonicalize hook binary"); - quote_command_path(&path) -} - -fn quote_command_path(path: &Path) -> String { - let rendered = path.to_string_lossy(); - - #[cfg(windows)] - { - if rendered.contains([' ', '\t', '"']) { - return format!("\"{}\"", rendered.replace('"', "\\\"")); - } - rendered.into_owned() - } - - #[cfg(not(windows))] - { - if rendered - .chars() - .all(|ch| ch.is_ascii_alphanumeric() || matches!(ch, '/' | '.' | '_' | '-' | ':')) - { - return rendered.into_owned(); - } - format!("'{}'", rendered.replace('\'', r#"'\''"#)) - } -} - -fn claude_settings_file(repo_root: &Path) -> std::path::PathBuf { - repo_root.join(".claude").join("settings.json") -} - -fn gemini_settings_file(repo_root: &Path) -> std::path::PathBuf { - repo_root.join(".gemini").join("settings.json") -} - -#[tokio::test] -#[serial] -async fn test_hooks_claude_install_preserves_existing_and_is_idempotent() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let settings_path = claude_settings_file(temp.path()); - fs::create_dir_all(settings_path.parent().expect("parent should exist")).unwrap(); - fs::write( - &settings_path, - json!({ - "hooks": { - "SessionStart": [ - { - "matcher": "startup", - "hooks": [ - { - "type": "command", - "command": "echo keep" - } - ] - } - ] - }, - "enabledPlugins": { - "example": true - } - }) - .to_string(), - ) - .unwrap(); - - let first = run_hooks(&temp, "claude", &["install"]); - assert!( - first.status.success(), - "hooks claude install 
failed: {}", - String::from_utf8_lossy(&first.stderr) - ); - let second = run_hooks(&temp, "claude", &["install"]); - assert!( - second.status.success(), - "second hooks claude install failed: {}", - String::from_utf8_lossy(&second.stderr) - ); - - let settings_json = fs::read_to_string(&settings_path).unwrap(); - let settings: serde_json::Value = serde_json::from_str(&settings_json).unwrap(); - let expected_command = format!("{} hooks claude session-start", installed_libra_binary()); - assert_eq!(settings["enabledPlugins"]["example"], json!(true)); - - let session_start_entries = settings["hooks"]["SessionStart"].as_array().unwrap(); - let startup_count = session_start_entries - .iter() - .filter(|item| item["matcher"] == json!("startup")) - .count(); - let managed_count = session_start_entries - .iter() - .filter(|item| { - item.get("matcher").is_none() && item["hooks"][0]["command"] == json!(expected_command) - }) - .count(); - assert_eq!(startup_count, 1); - assert_eq!(managed_count, 1); -} - -#[tokio::test] -#[serial] -async fn test_hooks_claude_install_rewrites_legacy_entries_and_uninstall_roundtrip() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - let old_binary = temp.path().join("old").join("libra"); - let new_binary = temp.path().join("new").join("libra"); - fs::create_dir_all(old_binary.parent().unwrap()).unwrap(); - fs::create_dir_all(new_binary.parent().unwrap()).unwrap(); - fs::write(&old_binary, "").unwrap(); - fs::write(&new_binary, "").unwrap(); - - let settings_path = claude_settings_file(temp.path()); - fs::create_dir_all(settings_path.parent().expect("parent should exist")).unwrap(); - fs::write( - &settings_path, - json!({ - "hooks": { - "SessionStart": [ - { - "matcher": "libra", - "hooks": [ - { - "type": "command", - "command": format!( - "{} hooks claude session-start", - installed_libra_binary_for(&old_binary) - ), - "timeout": 10 - }, - { - "type": "command", - "command": "echo keep" - } - ] - } - 
] - } - }) - .to_string(), - ) - .unwrap(); - - let install = run_hooks( - &temp, - "claude", - &[ - "install", - "--binary-path", - new_binary.to_str().unwrap(), - "--timeout", - "15", - ], - ); - assert!( - install.status.success(), - "hooks claude install failed: {}", - String::from_utf8_lossy(&install.stderr) - ); - - let installed = run_hooks(&temp, "claude", &["is-installed"]); - assert!(installed.status.success()); - assert_eq!(String::from_utf8_lossy(&installed.stdout).trim(), "false"); - - let settings_json = fs::read_to_string(&settings_path).unwrap(); - let settings: serde_json::Value = serde_json::from_str(&settings_json).unwrap(); - let session_start_entries = settings["hooks"]["SessionStart"].as_array().unwrap(); - let expected_new_command = format!( - "{} hooks claude session-start", - installed_libra_binary_for(&new_binary) - ); - - assert_eq!( - session_start_entries - .iter() - .filter(|item| item["matcher"] == json!("libra")) - .count(), - 1 - ); - assert!(session_start_entries.iter().any(|item| { - item["matcher"] == json!("libra") - && item["hooks"] - .as_array() - .is_some_and(|hooks| hooks.len() == 1 && hooks[0]["command"] == json!("echo keep")) - })); - assert!(session_start_entries.iter().any(|item| { - item.get("matcher").is_none() - && item["hooks"][0]["command"] == json!(expected_new_command) - && item["hooks"][0]["timeout"] == json!(15) - })); - - let uninstall = run_hooks(&temp, "claude", &["uninstall"]); - assert!( - uninstall.status.success(), - "hooks claude uninstall failed: {}", - String::from_utf8_lossy(&uninstall.stderr) - ); - - let installed_after = run_hooks(&temp, "claude", &["is-installed"]); - assert!(installed_after.status.success()); - assert_eq!( - String::from_utf8_lossy(&installed_after.stdout).trim(), - "false" - ); -} - -#[tokio::test] -#[serial] -async fn test_hooks_gemini_install_is_installed_and_uninstall() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let install = 
run_hooks(&temp, "gemini", &["install"]); - assert!( - install.status.success(), - "hooks gemini install failed: {}", - String::from_utf8_lossy(&install.stderr) - ); - - let settings_path = gemini_settings_file(temp.path()); - let content = fs::read_to_string(&settings_path).expect("settings file should be created"); - let settings: serde_json::Value = serde_json::from_str(&content).expect("settings json"); - let expected_command = format!("{} hooks gemini session-start", installed_libra_binary()); - assert_eq!(settings["hooksConfig"]["enabled"], json!(true)); - assert_eq!( - settings["hooks"]["SessionStart"][0]["hooks"][0]["command"], - json!(expected_command) - ); - - let installed = run_hooks(&temp, "gemini", &["is-installed"]); - assert!(installed.status.success()); - assert_eq!(String::from_utf8_lossy(&installed.stdout).trim(), "true"); - - let uninstall = run_hooks(&temp, "gemini", &["uninstall"]); - assert!( - uninstall.status.success(), - "hooks gemini uninstall failed: {}", - String::from_utf8_lossy(&uninstall.stderr) - ); - - let installed_after = run_hooks(&temp, "gemini", &["is-installed"]); - assert!(installed_after.status.success()); - assert_eq!( - String::from_utf8_lossy(&installed_after.stdout).trim(), - "false" - ); -} - -#[tokio::test] -#[serial] -async fn test_hooks_gemini_install_replaces_previous_managed_binary_path() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - let old_binary = temp.path().join("old-libra"); - let new_binary = temp.path().join("new-libra"); - fs::write(&old_binary, "").unwrap(); - fs::write(&new_binary, "").unwrap(); - - let settings_path = gemini_settings_file(temp.path()); - fs::create_dir_all(settings_path.parent().expect("parent should exist")).unwrap(); - fs::write( - &settings_path, - json!({ - "hooksConfig": { "enabled": true }, - "hooks": { - "SessionStart": [ - { - "hooks": [ - { - "name": "libra-session-start", - "type": "command", - "command": format!( - "{} hooks gemini 
session-start", - installed_libra_binary_for(&old_binary) - ) - } - ] - }, - { - "matcher": "startup", - "hooks": [ - { - "name": "user-hook", - "type": "command", - "command": "echo keep" - } - ] - } - ] - } - }) - .to_string(), - ) - .unwrap(); - - let install = run_hooks( - &temp, - "gemini", - &["install", "--binary-path", new_binary.to_str().unwrap()], - ); - assert!( - install.status.success(), - "hooks gemini install failed: {}", - String::from_utf8_lossy(&install.stderr) - ); - - let content = fs::read_to_string(&settings_path).unwrap(); - let settings: serde_json::Value = serde_json::from_str(&content).unwrap(); - let expected_command = format!( - "{} hooks gemini session-start", - installed_libra_binary_for(&new_binary) - ); - let session_start_entries = settings["hooks"]["SessionStart"].as_array().unwrap(); - - assert_eq!( - session_start_entries - .iter() - .filter(|item| { - item.get("matcher").is_none() - && item["hooks"][0]["name"] == json!("libra-session-start") - }) - .count(), - 1 - ); - assert!(session_start_entries.iter().any(|item| { - item.get("matcher").is_none() && item["hooks"][0]["command"] == json!(expected_command) - })); - assert!( - session_start_entries - .iter() - .any(|item| item["matcher"] == json!("startup")) - ); -} - -#[tokio::test] -#[serial] -async fn test_hooks_gemini_is_installed_rejects_disabled_or_stale_command() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let settings_path = gemini_settings_file(temp.path()); - fs::create_dir_all(settings_path.parent().expect("parent should exist")).unwrap(); - fs::write( - &settings_path, - json!({ - "hooksConfig": { "enabled": false }, - "hooks": { - "SessionStart": [ - { - "hooks": [ - { - "name": "libra-session-start", - "type": "command", - "command": "stale-libra hooks gemini session-start" - } - ] - } - ] - } - }) - .to_string(), - ) - .unwrap(); - - let installed = run_hooks(&temp, "gemini", &["is-installed"]); - 
assert!(installed.status.success()); - assert_eq!(String::from_utf8_lossy(&installed.stdout).trim(), "false"); -} - -#[tokio::test] -#[serial] -async fn test_hooks_gemini_install_rejects_timeout() { - let temp = tempdir().unwrap(); - test::setup_with_new_libra_in(temp.path()).await; - - let install = run_hooks(&temp, "gemini", &["install", "--timeout", "10"]); - assert!( - !install.status.success(), - "gemini install with timeout should fail" - ); - assert!( - String::from_utf8_lossy(&install.stderr).contains("Gemini hooks do not support --timeout") - ); -} - -#[tokio::test] -#[serial] -async fn test_hooks_install_commands_require_libra_repo() { - let temp = tempdir().unwrap(); - - let install = run_hooks(&temp, "claude", &["install"]); - assert!(!install.status.success()); - assert!(String::from_utf8_lossy(&install.stderr).contains("inside a Libra repository")); - - let is_installed = run_hooks(&temp, "gemini", &["is-installed"]); - assert!(!is_installed.status.success()); - assert!(String::from_utf8_lossy(&is_installed.stderr).contains("inside a Libra repository")); -} - -#[test] -fn test_hooks_reject_unknown_provider() { - let temp = tempdir().unwrap(); - - let output = run_hooks(&temp, "unknown", &["session-start"]); - assert!(!output.status.success(), "unknown provider should fail"); - assert!(String::from_utf8_lossy(&output.stderr).contains("unsupported hook provider")); -} diff --git a/tests/command/mod.rs b/tests/command/mod.rs index 551dbfd1..612929fe 100644 --- a/tests/command/mod.rs +++ b/tests/command/mod.rs @@ -99,12 +99,13 @@ fn create_committed_repo_via_cli() -> tempfile::TempDir { mod add_cli_test; mod add_test; -mod ai_hook_test; mod blame_test; mod branch_test; mod cat_file_test; mod checkout_test; mod cherry_pick_test; +#[cfg(unix)] +mod claude_sdk_test; mod clean_test; mod cli_error_test; mod clone_cli_test; @@ -114,7 +115,6 @@ mod commit_test; mod config_test; mod diff_test; mod fetch_test; -mod hooks_test; mod index_pack_test; mod 
init_from_git_test; mod init_separate_libra_dir_test; diff --git a/tests/data/ai/claude_managed_plan_task_only_template.json b/tests/data/ai/claude_managed_plan_task_only_template.json new file mode 100644 index 00000000..2956567f --- /dev/null +++ b/tests/data/ai/claude_managed_plan_task_only_template.json @@ -0,0 +1,288 @@ +{ + "cwd": "__CWD__", + "prompt": "__PROMPT__", + "hookEvents": [ + { + "hook": "UserPromptSubmit", + "input": { + "session_id": "fixture-plan-session", + "transcript_path": "/tmp/libra-fixtures/fixture-plan-session.jsonl", + "cwd": "__CWD__", + "permission_mode": "default", + "hook_event_name": "UserPromptSubmit", + "prompt": "__PROMPT__" + } + }, + { + "hook": "PreToolUse", + "input": { + "session_id": "fixture-plan-session", + "transcript_path": "/tmp/libra-fixtures/fixture-plan-session.jsonl", + "cwd": "__CWD__", + "permission_mode": "default", + "hook_event_name": "PreToolUse", + "tool_name": "Read", + "tool_input": { + "file_path": "__TOUCHED_FILE__" + }, + "tool_use_id": "tool-read-fixture" + } + }, + { + "hook": "PostToolUse", + "input": { + "session_id": "fixture-plan-session", + "transcript_path": "/tmp/libra-fixtures/fixture-plan-session.jsonl", + "cwd": "__CWD__", + "permission_mode": "default", + "hook_event_name": "PostToolUse", + "tool_name": "Read", + "tool_input": { + "file_path": "__TOUCHED_FILE__" + }, + "tool_response": { + "file": { + "filePath": "__TOUCHED_FILE__" + } + }, + "tool_use_id": "tool-read-fixture" + } + }, + { + "hook": "PreToolUse", + "input": { + "session_id": "fixture-plan-session", + "transcript_path": "/tmp/libra-fixtures/fixture-plan-session.jsonl", + "cwd": "__CWD__", + "permission_mode": "default", + "hook_event_name": "PreToolUse", + "tool_name": "Grep", + "tool_input": { + "pattern": "provider_runtime", + "path": "__CWD__" + }, + "tool_use_id": "tool-grep-fixture" + } + }, + { + "hook": "PostToolUseFailure", + "input": { + "session_id": "fixture-plan-session", + "transcript_path": 
"/tmp/libra-fixtures/fixture-plan-session.jsonl", + "cwd": "__CWD__", + "permission_mode": "default", + "hook_event_name": "PostToolUseFailure", + "tool_name": "Grep", + "tool_input": { + "pattern": "provider_runtime", + "path": "__CWD__" + }, + "tool_response": { + "error": "No matches found" + }, + "tool_use_id": "tool-grep-fixture" + } + }, + { + "hook": "SubagentStart", + "input": { + "session_id": "fixture-plan-session", + "transcript_path": "/tmp/libra-fixtures/fixture-plan-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "SubagentStart", + "agent_id": "fixture-subagent-plan", + "agent_type": "Explore" + } + }, + { + "hook": "SubagentStop", + "input": { + "session_id": "fixture-plan-session", + "transcript_path": "/tmp/libra-fixtures/fixture-plan-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "SubagentStop", + "agent_id": "fixture-subagent-plan", + "agent_type": "Explore", + "agent_transcript_path": "/tmp/libra-fixtures/fixture-subagent-plan.jsonl", + "last_assistant_message": "Exploration complete. 
Provider runtime facts are separated from the structured intent output.", + "permission_mode": "default", + "stop_hook_active": false + } + }, + { + "hook": "Stop", + "input": { + "session_id": "fixture-plan-session", + "transcript_path": "/tmp/libra-fixtures/fixture-plan-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "Stop" + } + } + ], + "messages": [ + { + "type": "system", + "subtype": "init", + "cwd": "__CWD__", + "session_id": "fixture-plan-session", + "apiKeySource": "none", + "claude_code_version": "2.1.76", + "tools": [ + "Task", + "Grep", + "Read", + "StructuredOutput" + ], + "model": "claude-sonnet-4-5-20250929", + "permissionMode": "default", + "output_style": "default", + "agents": [ + "general-purpose", + "statusline-setup", + "Explore", + "Plan" + ], + "skills": [ + "context7", + "exa-search", + "find-skills" + ], + "slash_commands": [ + "context7", + "review", + "insights" + ], + "mcp_servers": [], + "plugins": [], + "fast_mode_state": "off" + }, + { + "type": "assistant", + "message": { + "role": "assistant", + "content": [ + { + "type": "text", + "text": "I'll audit this Claude SDK to Libra semantic-agent bridge experiment and produce a structured intent extraction. Here's my plan:\n\n**3-Step Plan:**\n1. Inspect the managed bridge and its tests to understand the provider/runtime split.\n2. Use task-level exploration to validate what runtime facts are exposed natively.\n3. Produce a structured extraction with risk and validation checks." 
+ } + ] + } + }, + { + "type": "system", + "subtype": "task_started", + "session_id": "fixture-plan-session", + "task_id": "fixture-subagent-plan", + "description": "Explore runtime event patterns", + "task_type": "local_agent", + "prompt": "Explore runtime event patterns" + }, + { + "type": "system", + "subtype": "task_progress", + "session_id": "fixture-plan-session", + "task_id": "fixture-subagent-plan", + "description": "Reading managed bridge source", + "last_tool_name": "Read", + "usage": { + "duration_ms": 4200, + "tool_uses": 1, + "total_tokens": 5800 + } + }, + { + "type": "system", + "subtype": "task_progress", + "session_id": "fixture-plan-session", + "task_id": "fixture-subagent-plan", + "description": "Searching for provider_runtime metadata handling", + "last_tool_name": "Grep", + "usage": { + "duration_ms": 8100, + "tool_uses": 2, + "total_tokens": 9100 + } + }, + { + "type": "system", + "subtype": "task_notification", + "session_id": "fixture-plan-session", + "task_id": "fixture-subagent-plan", + "status": "completed", + "output_file": "", + "summary": "Explore runtime event patterns" + } + ], + "resultMessage": { + "type": "result", + "subtype": "success", + "is_error": false, + "session_id": "fixture-plan-session", + "stop_reason": "end_turn", + "duration_ms": 142759, + "duration_api_ms": 139813, + "num_turns": 6, + "result": "ok", + "total_cost_usd": 0.7018920000000001, + "usage": { + "input_tokens": 79705, + "output_tokens": 3983 + }, + "structured_output": { + "summary": "Refactor Claude SDK bridge to separate provider-native runtime facts from semantic-layer candidates", + "problemStatement": "The current Claude SDK to Libra semantic-agent bridge implementation intermingles provider-native runtime facts with semantic-layer candidates, which makes the provider/runtime boundary harder to reason about and test.", + "changeType": "refactor", + "objectives": [ + "Preserve provider-native runtime facts as auditable artifacts", + "Keep semantic 
extraction focused on intent-understanding fields", + "Stabilize the Claude SDK path around plan-plus-task scenarios" + ], + "inScope": [ + "src/internal/ai/providers/claude_sdk/managed.rs", + "tests/command/claude_sdk_test.rs" + ], + "outOfScope": [ + "Legacy hook-only provider ingestion" + ], + "touchHints": { + "files": [ + "samples/managed.rs", + "tests/command/claude_sdk_test.rs" + ], + "symbols": [ + "ManagedProviderInitSnapshot", + "ManagedSemanticRuntimeEvent" + ], + "apis": [ + "persist_managed_artifact" + ] + }, + "successCriteria": [ + "Plan text remains visible in raw transcript evidence", + "Task runtime events are persisted without needing prompt injection", + "Structured output covers risk and implementation intent" + ], + "fastChecks": [ + { + "id": "claude-sdk-tests", + "kind": "command", + "command": "cargo test claude_sdk", + "required": true, + "artifactsProduced": [ + "test-output" + ] + } + ], + "integrationChecks": [], + "securityChecks": [], + "releaseChecks": [], + "riskRationale": "Medium risk because the bridge touches runtime event normalization, but the scenario remains observable through raw artifacts and audit bundles.", + "riskFactors": [ + "Task/runtime fact routing must remain stable", + "Structured output still needs to align with semantic extraction" + ], + "riskLevel": "medium" + }, + "fast_mode_state": "off" + } +} diff --git a/tests/data/ai/claude_managed_probe_like.json b/tests/data/ai/claude_managed_probe_like.json new file mode 100644 index 00000000..d3244bac --- /dev/null +++ b/tests/data/ai/claude_managed_probe_like.json @@ -0,0 +1,211 @@ +{ + "cwd": "/tmp/libra-fixtures/claude-agent-sdk-probe", + "prompt": "Inspect the current working directory and return a structured summary.\nRequirements:\n- Use read-only tools if needed.\n- Mention at least one real file observed in this directory.\n- Keep the response concise.", + "hookEvents": [ + { + "hook": "UserPromptSubmit", + "input": { + "session_id": "fixture-probe-session", 
+ "transcript_path": "/tmp/libra-fixtures/claude/projects/fixture-probe-session.jsonl", + "cwd": "/tmp/libra-fixtures/claude-agent-sdk-probe", + "permission_mode": "default", + "hook_event_name": "UserPromptSubmit", + "prompt": "Inspect the current working directory and return a structured summary.\nRequirements:\n- Use read-only tools if needed.\n- Mention at least one real file observed in this directory.\n- Keep the response concise." + } + }, + { + "hook": "PreToolUse", + "input": { + "session_id": "fixture-probe-session", + "transcript_path": "/tmp/libra-fixtures/claude/projects/fixture-probe-session.jsonl", + "cwd": "/tmp/libra-fixtures/claude-agent-sdk-probe", + "permission_mode": "default", + "hook_event_name": "PreToolUse", + "tool_name": "Glob", + "tool_input": { + "pattern": "*.*", + "path": "/tmp/libra-fixtures/claude-agent-sdk-probe" + }, + "tool_use_id": "tooluse_Gti4jp4kMPu0No5yVW7Iez" + } + }, + { + "hook": "PostToolUse", + "input": { + "session_id": "fixture-probe-session", + "transcript_path": "/tmp/libra-fixtures/claude/projects/fixture-probe-session.jsonl", + "cwd": "/tmp/libra-fixtures/claude-agent-sdk-probe", + "permission_mode": "default", + "hook_event_name": "PostToolUse", + "tool_name": "Glob", + "tool_input": { + "pattern": "*.*", + "path": "/tmp/libra-fixtures/claude-agent-sdk-probe" + }, + "tool_response": { + "filenames": [ + "/tmp/libra-fixtures/claude-agent-sdk-probe/package.json", + "/tmp/libra-fixtures/claude-agent-sdk-probe/query_tool_probe.js", + "/tmp/libra-fixtures/claude-agent-sdk-probe/session_probe.js" + ], + "durationMs": 10, + "numFiles": 3, + "truncated": false + }, + "tool_use_id": "tooluse_Gti4jp4kMPu0No5yVW7Iez" + } + }, + { + "hook": "PreToolUse", + "input": { + "session_id": "fixture-probe-session", + "transcript_path": "/tmp/libra-fixtures/claude/projects/fixture-probe-session.jsonl", + "cwd": "/tmp/libra-fixtures/claude-agent-sdk-probe", + "permission_mode": "default", + "hook_event_name": "PreToolUse", + 
"tool_name": "Read", + "tool_input": { + "file_path": "/tmp/libra-fixtures/claude-agent-sdk-probe/package.json" + }, + "tool_use_id": "tooluse_dw5ZECTKdk5Z8wuIdR10I8" + } + }, + { + "hook": "PostToolUse", + "input": { + "session_id": "fixture-probe-session", + "transcript_path": "/tmp/libra-fixtures/claude/projects/fixture-probe-session.jsonl", + "cwd": "/tmp/libra-fixtures/claude-agent-sdk-probe", + "permission_mode": "default", + "hook_event_name": "PostToolUse", + "tool_name": "Read", + "tool_input": { + "file_path": "/tmp/libra-fixtures/claude-agent-sdk-probe/package.json" + }, + "tool_response": { + "type": "text", + "file": { + "filePath": "/tmp/libra-fixtures/claude-agent-sdk-probe/package.json", + "content": "{\n \"name\": \"claude-agent-sdk-probe\",\n \"version\": \"1.0.0\"\n}\n", + "numLines": 3, + "startLine": 1, + "totalLines": 3 + } + }, + "tool_use_id": "tooluse_dw5ZECTKdk5Z8wuIdR10I8" + } + }, + { + "hook": "PreToolUse", + "input": { + "session_id": "fixture-probe-session", + "transcript_path": "/tmp/libra-fixtures/claude/projects/fixture-probe-session.jsonl", + "cwd": "/tmp/libra-fixtures/claude-agent-sdk-probe", + "permission_mode": "default", + "hook_event_name": "PreToolUse", + "tool_name": "StructuredOutput", + "tool_input": { + "summary": "pending" + }, + "tool_use_id": "tooluse_structured_output" + } + }, + { + "hook": "PostToolUse", + "input": { + "session_id": "fixture-probe-session", + "transcript_path": "/tmp/libra-fixtures/claude/projects/fixture-probe-session.jsonl", + "cwd": "/tmp/libra-fixtures/claude-agent-sdk-probe", + "permission_mode": "default", + "hook_event_name": "PostToolUse", + "tool_name": "StructuredOutput", + "tool_input": { + "summary": "pending" + }, + "tool_response": { + "ok": true + }, + "tool_use_id": "tooluse_structured_output" + } + }, + { + "hook": "Stop", + "input": { + "session_id": "fixture-probe-session", + "transcript_path": "/tmp/libra-fixtures/claude/projects/fixture-probe-session.jsonl", + "cwd": 
"/tmp/libra-fixtures/claude-agent-sdk-probe", + "hook_event_name": "Stop" + } + } + ], + "messages": [ + { + "type": "system", + "subtype": "init", + "cwd": "/tmp/libra-fixtures/claude-agent-sdk-probe", + "session_id": "fixture-probe-session", + "tools": [ + "Glob", + "Read", + "StructuredOutput" + ], + "model": "claude-sonnet-4-5-20250929", + "permissionMode": "default" + }, + { + "type": "assistant", + "message": { + "role": "assistant", + "content": [ + { + "type": "text", + "text": "I inspected the directory and found probe scripts plus a package.json file." + } + ] + } + } + ], + "resultMessage": { + "type": "result", + "subtype": "success", + "is_error": false, + "duration_ms": 3479, + "duration_api_ms": 2900, + "num_turns": 1, + "result": "ok", + "stop_reason": "end_turn", + "session_id": "fixture-probe-session", + "total_cost_usd": 0.01877775, + "usage": { + "input_tokens": 2010, + "cache_creation_input_tokens": 140, + "cache_read_input_tokens": 17859, + "output_tokens": 250, + "server_tool_use": { + "web_search_requests": 0 + }, + "service_tier": "standard" + }, + "modelUsage": { + "claude-sonnet-4-5-20250929": { + "inputTokens": 2010, + "outputTokens": 250, + "cacheReadInputTokens": 17859, + "cacheCreationInputTokens": 140 + } + }, + "permission_denials": [], + "structured_output": { + "summary": "This is a Node.js test project for the Claude Agent SDK (version 0.2.74). The directory contains probe scripts that test SDK functionality including query tools, session management, and managed mode. 
Test artifacts are stored in JSON format in the artifacts directory.", + "observedFiles": [ + "/tmp/libra-fixtures/claude-agent-sdk-probe/package.json", + "/tmp/libra-fixtures/claude-agent-sdk-probe/query_tool_probe.js", + "/tmp/libra-fixtures/claude-agent-sdk-probe/session_probe.js", + "/tmp/libra-fixtures/claude-agent-sdk-probe/managed_mode_probe.js" + ], + "riskRationale": "Low risk - this is a testing/development project with probe scripts that exercise Claude Agent SDK features. The code uses read-only tools (Read, Glob, Grep) and writes test artifacts to a local directory. No malicious patterns detected." + }, + "fast_mode_state": null, + "uuid": "result-actual-like" + } +} diff --git a/tests/data/ai/claude_managed_semantic_full_template.json b/tests/data/ai/claude_managed_semantic_full_template.json new file mode 100644 index 00000000..c7720dda --- /dev/null +++ b/tests/data/ai/claude_managed_semantic_full_template.json @@ -0,0 +1,383 @@ +{ + "cwd": "__CWD__", + "prompt": "__PROMPT__", + "hookEvents": [ + { + "hook": "UserPromptSubmit", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "UserPromptSubmit", + "prompt": "__PROMPT__" + } + }, + { + "hook": "PreToolUse", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "PreToolUse", + "tool_name": "Read", + "tool_input": { + "file_path": "__TOUCHED_FILE__" + }, + "tool_use_id": "tool-read-fixture" + } + }, + { + "hook": "PostToolUse", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "PostToolUse", + "tool_name": "Read", + "tool_input": { + "file_path": "__TOUCHED_FILE__" + }, + "tool_response": { + "file": { + "filePath": "__TOUCHED_FILE__" + } + }, + 
"tool_use_id": "tool-read-fixture" + } + }, + { + "hook": "PermissionRequest", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "PermissionRequest", + "tool_name": "Bash", + "tool_input": { + "command": "cargo test" + }, + "permission_suggestions": [] + } + }, + { + "hook": "Elicitation", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "Elicitation", + "mcp_server_name": "review-gate", + "message": "Approve release checks?", + "mode": "form", + "elicitation_id": "elic-1", + "requested_schema": { + "type": "object" + } + } + }, + { + "hook": "ElicitationResult", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "ElicitationResult", + "mcp_server_name": "review-gate", + "mode": "form", + "elicitation_id": "elic-1", + "action": "accept", + "content": { + "approved": true + } + } + }, + { + "hook": "InstructionsLoaded", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "InstructionsLoaded", + "file_path": ".claude/CLAUDE.md", + "memory_type": "Project", + "load_reason": "session_start" + } + }, + { + "hook": "ConfigChange", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "ConfigChange", + "source": "project_settings", + "file_path": ".claude/settings.json" + } + }, + { + "hook": "TaskCompleted", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": 
"TaskCompleted", + "task_id": "fixture-task-managed", + "task_subject": "inspect managed bridge" + } + }, + { + "hook": "Stop", + "input": { + "session_id": "fixture-managed-session", + "transcript_path": "/tmp/libra-fixtures/fixture-managed-session.jsonl", + "cwd": "__CWD__", + "hook_event_name": "Stop" + } + } + ], + "messages": [ + { + "type": "system", + "subtype": "init", + "cwd": "__CWD__", + "session_id": "fixture-managed-session", + "apiKeySource": "oauth", + "claude_code_version": "2.1.76", + "tools": [ + "Read", + "StructuredOutput" + ], + "model": "claude-sonnet-4-5-20250929", + "permissionMode": "default", + "output_style": "default", + "agents": [ + "general-purpose", + "Plan" + ], + "skills": [ + "context7", + "exa-search" + ], + "slash_commands": [ + "context7", + "review" + ], + "mcp_servers": [ + { + "name": "review-gate", + "status": "connected" + } + ], + "plugins": [ + { + "name": "fixture-team-plugin", + "path": "/opt/libra-fixtures/plugins/fixture-team-plugin" + } + ], + "fast_mode_state": "off" + }, + { + "type": "system", + "subtype": "task_started", + "session_id": "fixture-managed-session", + "task_id": "fixture-task-managed", + "description": "Inspect managed bridge", + "task_type": "subagent", + "prompt": "Inspect managed bridge" + }, + { + "type": "system", + "subtype": "task_progress", + "session_id": "fixture-managed-session", + "task_id": "fixture-task-managed", + "description": "Reading source files", + "usage": { + "total_tokens": 120, + "tool_uses": 1, + "duration_ms": 800 + }, + "summary": "Reading source files" + }, + { + "type": "system", + "subtype": "task_notification", + "session_id": "fixture-managed-session", + "task_id": "fixture-task-managed", + "status": "completed", + "output_file": "/tmp/libra-fixtures/fixture-task-managed.out", + "summary": "Inspection complete" + }, + { + "type": "system", + "subtype": "status", + "session_id": "fixture-managed-session", + "status": "compacting", + "permissionMode": "default" + }, + 
{ + "type": "system", + "subtype": "compact_boundary", + "session_id": "fixture-managed-session", + "compact_metadata": { + "trigger": "auto", + "pre_tokens": 4096 + } + }, + { + "type": "rate_limit_event", + "session_id": "fixture-managed-session", + "rate_limit_info": { + "status": "allowed_warning", + "utilization": 0.75 + } + }, + { + "type": "tool_progress", + "session_id": "fixture-managed-session", + "uuid": "tool-progress-1", + "tool_use_id": "tool-read-fixture", + "tool_name": "Read", + "parent_tool_use_id": null, + "elapsed_time_seconds": 1.2 + }, + { + "type": "tool_use_summary", + "session_id": "fixture-managed-session", + "uuid": "tool-summary-1", + "summary": "Read completed for source inspection", + "preceding_tool_use_ids": [ + "tool-read-fixture" + ] + }, + { + "type": "prompt_suggestion", + "session_id": "fixture-managed-session", + "suggestion": "Generate a resolver checklist" + }, + { + "type": "system", + "subtype": "files_persisted", + "session_id": "fixture-managed-session", + "files": [ + { + "filename": "src/lib.rs", + "file_id": "file-1" + } + ], + "failed": [], + "processed_at": "2026-03-15T00:00:00Z" + }, + { + "type": "stream_event", + "session_id": "fixture-managed-session", + "uuid": "stream-1", + "parent_tool_use_id": null, + "event": { + "type": "content_block_start", + "index": 0, + "content_block": { + "type": "text", + "text": "" + } + } + }, + { + "type": "stream_event", + "session_id": "fixture-managed-session", + "uuid": "stream-2", + "parent_tool_use_id": null, + "event": { + "type": "content_block_delta", + "index": 0, + "delta": { + "type": "text_delta", + "text": "I inspected the repository" + } + } + }, + { + "type": "assistant", + "message": { + "role": "assistant", + "content": [ + { + "type": "text", + "text": "I inspected the repository and prepared a structured draft." 
+ } + ] + } + } + ], + "resultMessage": { + "type": "result", + "subtype": "success", + "is_error": false, + "session_id": "fixture-managed-session", + "stop_reason": "end_turn", + "duration_ms": 1800, + "duration_api_ms": 1400, + "num_turns": 1, + "result": "ok", + "total_cost_usd": 0.015, + "usage": { + "input_tokens": 321, + "output_tokens": 123 + }, + "permission_denials": [ + { + "tool_name": "Bash", + "tool_use_id": "tool-bash-1", + "tool_input": { + "command": "cargo test" + } + } + ], + "structured_output": { + "summary": "Persist the Claude SDK managed bridge", + "problemStatement": "Libra needs a stable SDK-first ingestion path for Claude sessions.", + "changeType": "feature", + "objectives": [ + "Persist ai_session.v2", + "Emit raw artifact and audit bundle" + ], + "inScope": [ + "src/command/claude_sdk.rs", + "src/internal/ai/providers/claude_sdk/managed.rs" + ], + "outOfScope": [ + "scheduler objects" + ], + "touchHints": { + "files": [ + "src/lib.rs" + ], + "symbols": [], + "apis": [] + }, + "successCriteria": [ + "Import path persists ai_session", + "Run path materializes intent extraction" + ], + "fastChecks": [ + { + "id": "managed-import", + "kind": "command", + "command": "cargo test claude_sdk", + "required": true, + "artifactsProduced": [ + "test-log" + ] + } + ], + "integrationChecks": [], + "securityChecks": [], + "releaseChecks": [], + "riskRationale": "Low risk because the path is additive and emits auditable artifacts.", + "riskFactors": [ + "new SDK bridge" + ], + "riskLevel": "low" + }, + "fast_mode_state": "off" + } +} diff --git a/tests/data/ai/claude_sdk_plan_prompt.txt b/tests/data/ai/claude_sdk_plan_prompt.txt new file mode 100644 index 00000000..a800b6a0 --- /dev/null +++ b/tests/data/ai/claude_sdk_plan_prompt.txt @@ -0,0 +1,7 @@ +Audit the Claude SDK to Libra semantic-agent bridge and produce a structured intent extraction. + +Requirements: +- Start by giving a concise 3-step plan. 
+- Use task-style exploration to inspect provider-native runtime facts versus semantic candidates. +- Focus on how provider init, task runtime events, and structured extraction relate to each other. +- Keep the final extraction grounded in observable runtime evidence.