From ebb0a0e7df73b68052c4fdf77341d33e64033d9b Mon Sep 17 00:00:00 2001 From: taberah Date: Mon, 23 Feb 2026 23:28:18 +0100 Subject: [PATCH 1/5] Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes --- contracts/kani-poc/Cargo.toml | 2 +- contracts/vulnerable-contract/Cargo.toml | 2 +- .../sanctifier-cli/src/commands/analyze.rs | 262 ++++++++++++------ tooling/sanctifier-core/Cargo.toml | 2 +- tooling/sanctifier-core/src/lib.rs | 114 +++++++- 5 files changed, 283 insertions(+), 99 deletions(-) diff --git a/contracts/kani-poc/Cargo.toml b/contracts/kani-poc/Cargo.toml index b9defbb..f5adc62 100644 --- a/contracts/kani-poc/Cargo.toml +++ b/contracts/kani-poc/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] -soroban-sdk = "20.0.0" +soroban-sdk = "20.3.2" [lints.rust] unexpected_cfgs = { level = "warn", check-cfg = ["cfg(kani)"] } diff --git a/contracts/vulnerable-contract/Cargo.toml b/contracts/vulnerable-contract/Cargo.toml index e840221..daee910 100644 --- a/contracts/vulnerable-contract/Cargo.toml +++ b/contracts/vulnerable-contract/Cargo.toml @@ -4,7 +4,7 @@ version = "0.1.0" edition = "2021" [dependencies] -soroban-sdk = "20.0.0" +soroban-sdk = "20.3.2" [lib] crate-type = ["cdylib"] diff --git a/tooling/sanctifier-cli/src/commands/analyze.rs b/tooling/sanctifier-cli/src/commands/analyze.rs index c147079..6742c0e 100644 --- a/tooling/sanctifier-cli/src/commands/analyze.rs +++ b/tooling/sanctifier-cli/src/commands/analyze.rs @@ -2,7 +2,7 @@ use std::fs; use std::path::{Path, PathBuf}; use clap::Args; use colored::*; -use sanctifier_core::{Analyzer, SanctifyConfig}; +use sanctifier_core::{Analyzer, SanctifyConfig, SizeWarning, UnsafePattern, ArithmeticIssue, PanicIssue, SymbolIssue}; #[derive(Args, Debug)] pub struct AnalyzeArgs { @@ -22,138 +22,222 @@ pub struct AnalyzeArgs { pub fn exec(args: AnalyzeArgs) -> anyhow::Result<()> { let path = &args.path; let format = &args.format; - let limit = args.limit; let is_json = 
format == "json"; if !is_soroban_project(path) { eprintln!( - "{} Error: {:?} is not a valid Soroban project. (Missing Cargo.toml with 'soroban-sdk' dependency)", + "{} Error: {:?} is not a valid Soroban project.", "❌".red(), path ); std::process::exit(1); } - if is_json { - eprintln!("{} Sanctifier: Valid Soroban project found at {:?}", "✨".green(), path); - eprintln!("{} Analyzing contract at {:?}...", "πŸ”".blue(), path); - } else { + if !is_json { println!("{} Sanctifier: Valid Soroban project found at {:?}", "✨".green(), path); println!("{} Analyzing contract at {:?}...", "πŸ”".blue(), path); } - let mut analyzer = Analyzer::new(sanctifier_core::SanctifyConfig::default()); - - let config = SanctifyConfig::default(); + let mut config = SanctifyConfig::default(); + config.ledger_limit = args.limit; let analyzer = Analyzer::new(config); - let mut collisions = Vec::new(); + let mut all_size_warnings = Vec::new(); + let mut all_unsafe_patterns = Vec::new(); + let mut all_auth_gaps = Vec::new(); + let mut all_panic_issues = Vec::new(); + let mut all_arithmetic_issues = Vec::new(); + let mut all_storage_collisions = Vec::new(); + let mut all_symbol_issues = Vec::new(); if path.is_dir() { - walk_dir(path, &analyzer, &mut collisions)?; + analyze_directory( + path, &analyzer, &mut all_size_warnings, &mut all_unsafe_patterns, &mut all_auth_gaps, + &mut all_panic_issues, &mut all_arithmetic_issues, &mut all_storage_collisions, &mut all_symbol_issues + ); } else { if path.extension().and_then(|s| s.to_str()) == Some("rs") { - if let Ok(content) = fs::read_to_string(path) { - collisions.extend(analyzer.scan_storage_collisions(&content)); - } + analyze_file( + path, &analyzer, &mut all_size_warnings, &mut all_unsafe_patterns, &mut all_auth_gaps, + &mut all_panic_issues, &mut all_arithmetic_issues, &mut all_storage_collisions, &mut all_symbol_issues + ); } } - if collisions.is_empty() { - println!("\n{} No storage key collisions found.", "βœ…".green()); + if is_json { + let 
report = serde_json::json!({ + "size_warnings": all_size_warnings, + "unsafe_patterns": all_unsafe_patterns, + "auth_gaps": all_auth_gaps, + "panic_issues": all_panic_issues, + "arithmetic_issues": all_arithmetic_issues, + "storage_collisions": all_storage_collisions, + "symbol_issues": all_symbol_issues, + }); + println!("{}", serde_json::to_string_pretty(&report)?); } else { - println!("\n{} Found potential Storage Key Collisions!", "⚠️".yellow()); - for collision in collisions { - println!(" {} Value: {}", "->".red(), collision.key_value.bold()); - println!(" Type: {}", collision.key_type); - println!(" Location: {}", collision.location); - println!(" Message: {}", collision.message); - } + print_text_report( + &all_size_warnings, &all_unsafe_patterns, &all_auth_gaps, + &all_panic_issues, &all_arithmetic_issues, &all_storage_collisions, &all_symbol_issues + ); } Ok(()) } -fn walk_dir(dir: &Path, analyzer: &Analyzer, collisions: &mut Vec) -> anyhow::Result<()> { - for entry in fs::read_dir(dir)? 
{ - let entry = entry?; - let path = entry.path(); - if path.is_dir() { - walk_dir(&path, analyzer, collisions)?; - } else if path.extension().and_then(|s| s.to_str()) == Some("rs") { - if let Ok(content) = fs::read_to_string(&path) { - let mut issues = analyzer.scan_storage_collisions(&content); - // Prefix location with filename - let file_name = path.display().to_string(); - for issue in &mut issues { - issue.location = format!("{}:{}", file_name, issue.location); - } - collisions.extend(issues); - } +fn analyze_file( + path: &Path, + analyzer: &Analyzer, + size_warnings: &mut Vec, + unsafe_patterns: &mut Vec, + auth_gaps: &mut Vec, + panic_issues: &mut Vec, + arithmetic_issues: &mut Vec, + storage_collisions: &mut Vec, + symbol_issues: &mut Vec, +) { + if let Ok(content) = fs::read_to_string(path) { + let file_path = path.display().to_string(); + + // Ledger size + for mut w in analyzer.analyze_ledger_size(&content) { + w.struct_name = format!("{} in {}", w.struct_name, file_path); + size_warnings.push(w); } - } - Ok(()) -} -fn is_soroban_project(path: &Path) -> bool { - // Basic heuristics for tests. 
- if path.extension().and_then(|s| s.to_str()) == Some("rs") { - return true; + // Unsafe patterns + for mut p in analyzer.analyze_unsafe_patterns(&content) { + p.snippet = format!("{}: {}", file_path, p.snippet); + unsafe_patterns.push(p); + } + + // Auth gaps + for g in analyzer.scan_auth_gaps(&content) { + auth_gaps.push(format!("{}: {}", file_path, g)); + } + + // Panics + for mut p in analyzer.scan_panics(&content) { + p.location = format!("{}: {}", file_path, p.location); + panic_issues.push(p); + } + + // Arithmetic + for mut a in analyzer.scan_arithmetic_overflow(&content) { + a.location = format!("{}: {}", file_path, a.location); + arithmetic_issues.push(a); + } + + // Storage collisions + for mut s in analyzer.scan_storage_collisions(&content) { + s.location = format!("{}: {}", file_path, s.location); + storage_collisions.push(s); + } + + // Symbol issues (v20) + for mut s in analyzer.scan_symbols(&content) { + s.location = format!("{}: {}", file_path, s.location); + symbol_issues.push(s); + } } - let cargo_toml_path = if path.is_dir() { - path.join("Cargo.toml") - } else { - path.to_path_buf() - }; - cargo_toml_path.exists() } fn analyze_directory( dir: &Path, analyzer: &Analyzer, - all_size_warnings: &mut Vec, - all_unsafe_patterns: &mut Vec, - all_auth_gaps: &mut Vec, - all_panic_issues: &mut Vec, - all_arithmetic_issues: &mut Vec, + size_warnings: &mut Vec, + unsafe_patterns: &mut Vec, + auth_gaps: &mut Vec, + panic_issues: &mut Vec, + arithmetic_issues: &mut Vec, + storage_collisions: &mut Vec, + symbol_issues: &mut Vec, ) { if let Ok(entries) = fs::read_dir(dir) { for entry in entries.flatten() { let path = entry.path(); if path.is_dir() { - analyze_directory( - &path, analyzer, all_size_warnings, all_unsafe_patterns, all_auth_gaps, - all_panic_issues, all_arithmetic_issues, - ); - } else if path.extension().and_then(|s| s.to_str()) == Some("rs") { - if let Ok(content) = fs::read_to_string(&path) { - 
all_size_warnings.extend(analyzer.analyze_ledger_size(&content)); - - let patterns = analyzer.analyze_unsafe_patterns(&content); - for mut p in patterns { - p.snippet = format!("{}: {}", path.display(), p.snippet); - all_unsafe_patterns.push(p); - } - - let gaps = analyzer.scan_auth_gaps(&content); - for g in gaps { - all_auth_gaps.push(format!("{}: {}", path.display(), g)); - } - - let panics = analyzer.scan_panics(&content); - for p in panics { - let mut p_mod = p.clone(); - p_mod.location = format!("{}: {}", path.display(), p.location); - all_panic_issues.push(p_mod); - } - - let arith = analyzer.scan_arithmetic_overflow(&content); - for mut a in arith { - a.location = format!("{}: {}", path.display(), a.location); - all_arithmetic_issues.push(a); - } + if !path.ends_with("target") && !path.ends_with(".git") { + analyze_directory(&path, analyzer, size_warnings, unsafe_patterns, auth_gaps, panic_issues, arithmetic_issues, storage_collisions, symbol_issues); } + } else if path.extension().and_then(|s| s.to_str()) == Some("rs") { + analyze_file(&path, analyzer, size_warnings, unsafe_patterns, auth_gaps, panic_issues, arithmetic_issues, storage_collisions, symbol_issues); } } } } + +fn print_text_report( + size_warnings: &[SizeWarning], + unsafe_patterns: &[UnsafePattern], + auth_gaps: &[String], + panic_issues: &[PanicIssue], + arithmetic_issues: &[ArithmeticIssue], + storage_collisions: &[sanctifier_core::StorageCollisionIssue], + symbol_issues: &[SymbolIssue], +) { + println!("\n{}", "--- Analysis Results ---".bold()); + + if auth_gaps.is_empty() { + println!("{} No authentication gaps found.", "βœ…".green()); + } else { + println!("{} Found {} potential Authentication Gaps!", "⚠️".yellow(), auth_gaps.len()); + for gap in auth_gaps { + println!(" {} {}", "->".red(), gap); + } + } + + if symbol_issues.is_empty() { + println!("{} No symbol length issues found.", "βœ…".green()); + } else { + println!("{} Found {} Symbol length issues (Soroban v20 limit)!", 
"⚠️".yellow(), symbol_issues.len()); + for issue in symbol_issues { + println!(" {} {} ('{}') at {}", "->".red(), issue.issue_type.bold(), issue.value, issue.location); + } + } + + if panic_issues.is_empty() { + println!("{} No panic!/unwrap/expect found in contract impls.", "βœ…".green()); + } else { + println!("{} Found {} potential Panic issues!", "⚠️".yellow(), panic_issues.len()); + for issue in panic_issues { + println!(" {} {} in {}", "->".red(), issue.issue_type.bold(), issue.location); + } + } + + if arithmetic_issues.is_empty() { + println!("{} No unchecked arithmetic issues found.", "βœ…".green()); + } else { + println!("{} Found {} potential Arithmetic Overflow issues!", "⚠️".yellow(), arithmetic_issues.len()); + for issue in arithmetic_issues { + println!(" {} {} at {}", "->".red(), issue.operation.bold(), issue.location); + println!(" Suggestion: {}", issue.suggestion.italic()); + } + } + + if storage_collisions.is_empty() { + println!("{} No storage key collisions found.", "βœ…".green()); + } else { + println!("{} Found {} potential Storage Key Collisions!", "⚠️".yellow(), storage_collisions.len()); + for collision in storage_collisions { + println!(" {} Value: {}", "->".red(), collision.key_value.bold()); + println!(" Location: {}", collision.location); + } + } + + if size_warnings.is_empty() { + println!("{} No ledger size warnings.", "βœ…".green()); + } else { + println!("{} Found {} Ledger Size Warnings!", "⚠️".yellow(), size_warnings.len()); + for warning in size_warnings { + println!(" {} {}: {} bytes (limit: {})", "->".red(), warning.struct_name, warning.estimated_size, warning.limit); + } + } +} + +fn is_soroban_project(path: &Path) -> bool { + if path.is_file() { + return path.extension().and_then(|s| s.to_str()) == Some("rs") || path.ends_with("Cargo.toml"); + } + path.join("Cargo.toml").exists() +} diff --git a/tooling/sanctifier-core/Cargo.toml b/tooling/sanctifier-core/Cargo.toml index 360c1d7..5f45e28 100644 --- 
a/tooling/sanctifier-core/Cargo.toml +++ b/tooling/sanctifier-core/Cargo.toml @@ -6,7 +6,7 @@ description = "Core analysis logic for Sanctifier" license = "MIT" [dependencies] -soroban-sdk = "20.0.0" # Target latest Soroban SDK +soroban-sdk = "20.3.2" # Target latest Soroban SDK v20 syn = { version = "2.0", features = ["full", "extra-traits", "visit"] } quote = "1.0" proc-macro2 = { version = "1.0", features = ["span-locations"] } diff --git a/tooling/sanctifier-core/src/lib.rs b/tooling/sanctifier-core/src/lib.rs index 9fb5d1c..dbc002a 100644 --- a/tooling/sanctifier-core/src/lib.rs +++ b/tooling/sanctifier-core/src/lib.rs @@ -1,6 +1,14 @@ use serde::{Deserialize, Serialize}; pub mod gas_estimator; +<<<<<<< HEAD mod storage_collision; +======= +pub mod gas_report; +pub mod complexity; +pub mod reentrancy; +pub mod storage_collision; + +>>>>>>> 0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) use std::collections::HashSet; use std::panic::{catch_unwind, AssertUnwindSafe}; use syn::spanned::Spanned; @@ -96,6 +104,13 @@ pub struct UpgradeReport { pub suggestions: Vec, } +#[derive(Debug, Serialize, Clone)] +pub struct SymbolIssue { + pub value: String, + pub issue_type: String, // "symbol_short! 
> 10" or "Symbol::new > 32" + pub location: String, +} + impl UpgradeReport { pub fn empty() -> Self { Self { @@ -284,6 +299,11 @@ impl Analyzer { with_panic_guard(|| self.scan_auth_gaps_impl(source)) } + pub fn scan_complexity(&self, source: &str, path: &str) -> Option { + let file = parse_str::(source).ok()?; + Some(complexity::analyze_complexity(&file, path)) + } + pub fn scan_gas_estimation(&self, source: &str) -> Vec { with_panic_guard(|| self.scan_gas_estimation_impl(source)) } @@ -543,11 +563,6 @@ impl Analyzer { } fn analyze_ledger_size_impl(&self, source: &str) -> Vec { - let limit = self.config.ledger_limit; - let approaching = (limit as f64 * DEFAULT_APPROACHING_THRESHOLD) as usize; - let strict = self.config.strict_mode; - let strict_threshold = limit / 2; - let file = match parse_str::(source) { Ok(f) => f, Err(_) => return vec![], @@ -559,16 +574,23 @@ impl Analyzer { let strict = self.config.strict_mode; let strict_threshold = (limit as f64 * 0.5) as usize; +<<<<<<< HEAD let approaching_count = approaching; +======= +>>>>>>> 0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) for item in &file.items { match item { Item::Struct(s) => { if has_contracttype(&s.attrs) { let size = self.estimate_struct_size(s); +<<<<<<< HEAD if let Some(level) = classify_size(size, limit, approaching_count, strict, strict_threshold) { +======= + if let Some(level) = classify_size(size, limit, approaching, strict, strict_threshold) { +>>>>>>> 0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) warnings.push(SizeWarning { struct_name: s.ident.to_string(), estimated_size: size, @@ -581,9 +603,13 @@ impl Analyzer { Item::Enum(e) => { if has_contracttype(&e.attrs) { let size = self.estimate_enum_size(e); +<<<<<<< HEAD if let Some(level) = classify_size(size, limit, approaching_count, strict, strict_threshold) { +======= + if let Some(level) = classify_size(size, limit, approaching, strict, strict_threshold) { +>>>>>>> 
0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) warnings.push(SizeWarning { struct_name: e.ident.to_string(), estimated_size: size, @@ -593,7 +619,6 @@ impl Analyzer { } } } - Item::Impl(_) | Item::Macro(_) => {} _ => {} } } @@ -862,6 +887,21 @@ impl Analyzer { visitor.collisions } +<<<<<<< HEAD +======= + pub fn scan_symbols(&self, source: &str) -> Vec { + let file = match parse_str::(source) { + Ok(f) => f, + Err(_) => return vec![], + }; + + let mut visitor = SymbolVisitor { issues: Vec::new() }; + visitor.visit_file(&file); + visitor.issues + } + + +>>>>>>> 0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) // ── Size estimation helpers ─────────────────────────────────────────────── fn estimate_enum_size(&self, e: &syn::ItemEnum) -> usize { @@ -914,7 +954,21 @@ impl Analyzer { "u64" | "i64" => 8, "u128" | "i128" | "I128" | "U128" => 16, "Address" => 32, - "Bytes" | "BytesN" | "String" | "Symbol" => 64, + "BytesN" => { + if let syn::PathArguments::AngleBracketed(args) = &seg.arguments { + if let Some(syn::GenericArgument::Type(Type::Path(tp))) = args.args.first() { + // Sometimes N is a type-level literal or just a number + if let Some(s) = tp.path.segments.last() { + if let Ok(n) = s.ident.to_string().parse::() { + return n; + } + } + } + // Handle const generics if syn supports it easily here + } + 32 + } + "Bytes" | "String" | "Symbol" => 64, "Vec" => { if let syn::PathArguments::AngleBracketed(args) = &seg.arguments { if let Some(syn::GenericArgument::Type(inner)) = args.args.first() { @@ -1111,6 +1165,52 @@ impl<'ast> Visit<'ast> for ArithVisitor { } } +// ── SymbolVisitor ───────────────────────────────────────────────────────────── + +struct SymbolVisitor { + issues: Vec, +} + +impl<'ast> Visit<'ast> for SymbolVisitor { + fn visit_expr_call(&mut self, i: &'ast syn::ExprCall) { + if let syn::Expr::Path(p) = &*i.func { + if p.path.is_ident("Symbol") || p.path.segments.iter().any(|s| s.ident == 
"Symbol") { + if let Some(last) = p.path.segments.last() { + if last.ident == "new" && i.args.len() >= 2 { + if let syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Str(s), .. }) = &i.args[1] { + let val = s.value(); + if val.len() > 32 { + self.issues.push(SymbolIssue { + value: val, + issue_type: "Symbol::new > 32".to_string(), + location: format!("line {}", i.span().start().line), + }); + } + } + } + } + } + } + visit::visit_expr_call(self, i); + } + + fn visit_macro(&mut self, i: &'ast syn::Macro) { + if i.path.is_ident("symbol_short") { + let tokens = &i.tokens; + let token_str = quote::quote!(#tokens).to_string(); + let val = token_str.trim_matches('"').trim_matches(' ').to_string(); + if val.len() > 10 { + self.issues.push(SymbolIssue { + value: val, + issue_type: "symbol_short! > 10".to_string(), + location: format!("line {}", i.span().start().line), + }); + } + } + visit::visit_macro(self, i); + } +} + /// Returns `true` if the expression is a string literal β€” used to avoid /// false-positives on `+` for string concatenation (rare in no_std Soroban /// but included for correctness). From ce4f6d642ab4cf823b5a4882c02091fdc14b6f0b Mon Sep 17 00:00:00 2001 From: taberah Date: Mon, 23 Feb 2026 23:40:00 +0100 Subject: [PATCH 2/5] Cleanup: Uncomment tests, fix logic inconsistencies, and ensure Soroban SDK v20 alignment. 
--- tooling/sanctifier-core/src/lib.rs | 60 ++++++++++++++++++++++++++++++ 1 file changed, 60 insertions(+) diff --git a/tooling/sanctifier-core/src/lib.rs b/tooling/sanctifier-core/src/lib.rs index dbc002a..bbd2381 100644 --- a/tooling/sanctifier-core/src/lib.rs +++ b/tooling/sanctifier-core/src/lib.rs @@ -1278,6 +1278,7 @@ mod tests { assert_eq!(warnings[0].level, SizeWarningLevel::ExceedsLimit); } +<<<<<<< HEAD /* #[test] fn test_ledger_size_enum_and_approaching() { @@ -1305,6 +1306,33 @@ mod tests { assert!(warnings.iter().any(|w| w.level == SizeWarningLevel::ApproachingLimit)); } */ +======= + #[test] + fn test_ledger_size_enum_and_approaching() { + let mut config = SanctifyConfig::default(); + config.ledger_limit = 100; + config.approaching_threshold = 0.5; + let analyzer = Analyzer::new(config); + let source = r#" + #[contracttype] + pub enum DataKey { + Balance(Address), + Admin, + } + + #[contracttype] + pub struct NearLimit { + pub a: u128, + pub b: u128, + pub c: u128, + pub d: u128, + } + "#; + let warnings = analyzer.analyze_ledger_size(source); + assert!(warnings.iter().any(|w| w.struct_name == "NearLimit"), "NearLimit (64 bytes) should exceed 50% of 100"); + assert!(warnings.iter().any(|w| w.level == SizeWarningLevel::ApproachingLimit)); + } +>>>>>>> 43c1409 (Cleanup: Uncomment tests, fix logic inconsistencies, and ensure Soroban SDK v20 alignment.) #[test] fn test_complex_macro_no_panic() { @@ -1535,6 +1563,7 @@ mod tests { assert_eq!(issues[0].operation, "+"); } +<<<<<<< HEAD /* #[test] fn test_analyze_upgrade_patterns() { @@ -1542,6 +1571,14 @@ mod tests { let source = r#" #[contracttype] pub enum DataKey { Admin, Balance } +======= + #[test] + fn test_analyze_upgrade_patterns() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + #[contracttype] + pub enum DataKey { Admin, Balance } +>>>>>>> 43c1409 (Cleanup: Uncomment tests, fix logic inconsistencies, and ensure Soroban SDK v20 alignment.) 
#[contractimpl] impl Token { @@ -1552,6 +1589,7 @@ mod tests { env.storage().instance().set(&DataKey::Admin, &new_admin); } } +<<<<<<< HEAD "#; let report = analyzer.analyze_upgrade_patterns(source); assert_eq!(report.init_functions, vec!["initialize"]); @@ -1563,6 +1601,22 @@ mod tests { .any(|f| matches!(f.category, UpgradeCategory::Governance))); } */ +======= + pub fn set_admin(env: Env, new_admin: Address) { + env.storage().instance().set(&DataKey::Admin, &new_admin); + } + } + "#; + let report = analyzer.analyze_upgrade_patterns(source); + assert_eq!(report.init_functions, vec!["initialize"]); + assert_eq!(report.upgrade_mechanisms, vec!["set_admin"]); + assert!(report.storage_types.contains(&"DataKey".to_string())); + assert!(report + .findings + .iter() + .any(|f| matches!(f.category, UpgradeCategory::Governance))); + } +>>>>>>> 43c1409 (Cleanup: Uncomment tests, fix logic inconsistencies, and ensure Soroban SDK v20 alignment.) #[test] fn test_scan_arithmetic_overflow_suggestion_content() { @@ -1583,6 +1637,7 @@ mod tests { assert!(issues[0].location.starts_with("risky:")); } +<<<<<<< HEAD /* #[test] fn test_scan_storage_collisions() { @@ -1607,6 +1662,8 @@ mod tests { } */ +======= +>>>>>>> 43c1409 (Cleanup: Uncomment tests, fix logic inconsistencies, and ensure Soroban SDK v20 alignment.) 
#[test] fn test_scan_events_consistency() { let analyzer = Analyzer::new(SanctifyConfig::default()); @@ -1619,6 +1676,7 @@ mod tests { } } "#; +<<<<<<< HEAD let issues = analyzer.scan_events(source); assert!(!issues.is_empty()); assert!(issues.iter().any(|i| i.issue_type == "inconsistent_topics")); @@ -1638,4 +1696,6 @@ mod tests { let issues = analyzer.scan_events(source); assert!(issues.iter().any(|i| i.issue_type == "gas_optimization")); } + + } } From 7387bb6739828c66c98de1671b6d30abd2613d62 Mon Sep 17 00:00:00 2001 From: taberah Date: Tue, 24 Feb 2026 01:06:09 +0100 Subject: [PATCH 3/5] current changes --- tooling/sanctifier-core/src/gas_report.rs | 191 +++++ tooling/sanctifier-core/src/lib.rs | 737 ++---------------- tooling/sanctifier-core/src/reentrancy.rs | 161 ++++ .../src/tests/complexity_tests.rs | 2 +- .../src/tests/gas_estimator_tests.rs | 95 +++ .../sanctifier-core/src/tests/lib_tests.rs | 371 +++++++++ tooling/sanctifier-core/src/tests/mod.rs | 7 + .../src/tests/reentrancy_tests.rs | 85 ++ .../src/tests/storage_collision_tests.rs | 80 ++ 9 files changed, 1043 insertions(+), 686 deletions(-) create mode 100644 tooling/sanctifier-core/src/tests/lib_tests.rs create mode 100644 tooling/sanctifier-core/src/tests/mod.rs diff --git a/tooling/sanctifier-core/src/gas_report.rs b/tooling/sanctifier-core/src/gas_report.rs index 8b13789..6de3cf1 100644 --- a/tooling/sanctifier-core/src/gas_report.rs +++ b/tooling/sanctifier-core/src/gas_report.rs @@ -1 +1,192 @@ +<<<<<<< HEAD +======= +// tooling/sanctifier-core/src/gas_report.rs +// +// Aggregated gas/instruction report for the Sanctifier CLI. +// +// This module wraps ``gas_estimator`` output and provides human-readable +// text and JSON rendering used by `sanctifier gas` subcommand. + +use serde::Serialize; +use crate::gas_estimator::GasEstimationReport; + +// ── Report types ────────────────────────────────────────────────────────────── + +/// Severity tier based on estimated instruction count. 
+#[derive(Debug, Serialize, Clone, PartialEq)] +pub enum GasTier { + /// < 10 000 instructions β€” well within network limits. + Low, + /// 10 000 – 99 999 instructions β€” review recommended. + Medium, + /// β‰₯ 100 000 instructions β€” likely to hit resource limits. + High, +} + +impl GasTier { + pub fn from_instructions(n: usize) -> Self { + if n >= 100_000 { + GasTier::High + } else if n >= 10_000 { + GasTier::Medium + } else { + GasTier::Low + } + } + + pub fn label(&self) -> &'static str { + match self { + GasTier::Low => "LOW", + GasTier::Medium => "MEDIUM", + GasTier::High => "HIGH", + } + } + + pub fn emoji(&self) -> &'static str { + match self { + GasTier::Low => "βœ…", + GasTier::Medium => "⚠️ ", + GasTier::High => "πŸ”΄", + } + } +} + +/// A single annotated function entry in the gas report. +#[derive(Debug, Serialize, Clone)] +pub struct GasReportEntry { + pub function_name: String, + pub estimated_instructions: usize, + pub estimated_memory_bytes: usize, + pub tier: GasTier, +} + +impl From for GasReportEntry { + fn from(r: GasEstimationReport) -> Self { + let tier = GasTier::from_instructions(r.estimated_instructions); + GasReportEntry { + function_name: r.function_name, + estimated_instructions: r.estimated_instructions, + estimated_memory_bytes: r.estimated_memory_bytes, + tier, + } + } +} + +/// Full gas report for one or more files. 
+#[derive(Debug, Serialize, Clone)] +pub struct GasReport { + pub entries: Vec, + pub total_instructions: usize, + pub total_memory_bytes: usize, +} + +impl GasReport { + pub fn from_estimations(reports: Vec) -> Self { + let entries: Vec = reports.into_iter().map(Into::into).collect(); + let total_instructions = entries.iter().map(|e| e.estimated_instructions).sum(); + let total_memory_bytes = entries.iter().map(|e| e.estimated_memory_bytes).sum(); + GasReport { entries, total_instructions, total_memory_bytes } + } +} + +// ── Text rendering ──────────────────────────────────────────────────────────── + +/// Render a human-readable console report. +pub fn render_text(report: &GasReport) -> String { + let mut out = String::new(); + out.push_str("╔══════════════════════════════════════════════════════════════╗\n"); + out.push_str("β•‘ β›½ SANCTIFIER β€” GAS ESTIMATION REPORT β•‘\n"); + out.push_str("β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•\n\n"); + + if report.entries.is_empty() { + out.push_str(" No public contract functions found.\n"); + return out; + } + + out.push_str("β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”\n"); + out.push_str("β”‚ Function β”‚ Instructionsβ”‚ Memory (B) β”‚ Tier β”‚\n"); + out.push_str("β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€\n"); + + for e in &report.entries { + out.push_str(&format!( + "β”‚ {:<22} β”‚ {:>11} β”‚ {:>10} β”‚ {} {:<4} β”‚\n", + truncate(&e.function_name, 22), + e.estimated_instructions, + e.estimated_memory_bytes, + e.tier.emoji(), + e.tier.label(), + )); + } + 
out.push_str("β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”˜\n\n"); + out.push_str(&format!( + " Total instructions : {}\n Total memory : {} bytes\n", + report.total_instructions, report.total_memory_bytes + )); + out.push_str("\n Tiers: LOW < 10k | MEDIUM 10k–99k | HIGH β‰₯ 100k instructions\n"); + out +} + +/// Render as pretty-printed JSON. +pub fn render_json(report: &GasReport) -> String { + serde_json::to_string_pretty(report).unwrap_or_else(|_| "{}".to_string()) +} + +// ── Helpers ─────────────────────────────────────────────────────────────────── + +fn truncate(s: &str, max: usize) -> String { + if s.len() <= max { + s.to_string() + } else { + format!("{}…", &s[..max - 1]) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::gas_estimator::GasEstimationReport; + + #[test] + fn test_tier_classification() { + assert_eq!(GasTier::from_instructions(0), GasTier::Low); + assert_eq!(GasTier::from_instructions(9_999), GasTier::Low); + assert_eq!(GasTier::from_instructions(10_000), GasTier::Medium); + assert_eq!(GasTier::from_instructions(99_999), GasTier::Medium); + assert_eq!(GasTier::from_instructions(100_000), GasTier::High); + } + + #[test] + fn test_report_from_estimations() { + let raw = vec![ + GasEstimationReport { + function_name: "transfer".to_string(), + estimated_instructions: 1500, + estimated_memory_bytes: 256, + }, + GasEstimationReport { + function_name: "batch_transfer".to_string(), + estimated_instructions: 150_000, + estimated_memory_bytes: 4096, + }, + ]; + let report = GasReport::from_estimations(raw); + assert_eq!(report.entries.len(), 2); + assert_eq!(report.entries[0].tier, GasTier::Low); + assert_eq!(report.entries[1].tier, GasTier::High); + assert_eq!(report.total_instructions, 151_500); + } + + #[test] + fn test_render_text_nonempty() { + let report = 
GasReport::from_estimations(vec![GasEstimationReport { + function_name: "mint".to_string(), + estimated_instructions: 500, + estimated_memory_bytes: 64, + }]); + let text = render_text(&report); + assert!(text.contains("mint")); + assert!(text.contains("LOW")); + } +} +>>>>>>> 1a54220 (Update Soroban SDK to v20 and enhance security analysis tools.) diff --git a/tooling/sanctifier-core/src/lib.rs b/tooling/sanctifier-core/src/lib.rs index bbd2381..c6d62a4 100644 --- a/tooling/sanctifier-core/src/lib.rs +++ b/tooling/sanctifier-core/src/lib.rs @@ -1,22 +1,21 @@ use serde::{Deserialize, Serialize}; pub mod gas_estimator; -<<<<<<< HEAD -mod storage_collision; -======= pub mod gas_report; pub mod complexity; pub mod reentrancy; pub mod storage_collision; ->>>>>>> 0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) use std::collections::HashSet; use std::panic::{catch_unwind, AssertUnwindSafe}; use syn::spanned::Spanned; use syn::visit::{self, Visit}; use syn::{parse_str, Fields, File, Item, Meta, Type}; -use soroban_sdk::Env; use thiserror::Error; +// Note: soroban_sdk::Env is only used in the SanctifiedGuard trait below, +// which is gated to avoid conflicts in host-side / test builds. +#[cfg(target_arch = "wasm32")] +use soroban_sdk::Env; const DEFAULT_APPROACHING_THRESHOLD: f64 = 0.8; @@ -33,6 +32,7 @@ where // ── Existing types ──────────────────────────────────────────────────────────── + /// Severity of a ledger size warning. #[derive(Debug, Serialize, Clone, PartialEq)] pub enum SizeWarningLevel { @@ -178,16 +178,6 @@ pub struct StorageCollisionIssue { pub message: String, } -#[derive(Debug, Serialize, Clone)] -pub struct EventIssue { - pub event_name: String, - pub topic_count: usize, - pub location: String, - pub issue_type: String, - pub message: String, - pub suggestion: String, -} - // ── Configuration ───────────────────────────────────────────────────────────── /// User-defined regex-based rule. 
Defined in .sanctify.toml under [[custom_rules]]. @@ -284,6 +274,8 @@ fn classify_size( } } + + // ── Analyzer ────────────────────────────────────────────────────────────────── pub struct Analyzer { @@ -328,15 +320,9 @@ impl Analyzer { if let syn::Visibility::Public(_) = f.vis { let fn_name = f.sig.ident.to_string(); let mut has_mutation = false; - let mut has_read = false; let mut has_auth = false; - self.check_fn_body( - &f.block, - &mut has_mutation, - &mut has_read, - &mut has_auth, - ); - if has_mutation && !has_read && !has_auth { + self.check_fn_body(&f.block, &mut has_mutation, &mut has_auth); + if has_mutation && !has_auth { gaps.push(fn_name); } } @@ -451,25 +437,17 @@ impl Analyzer { // ── Mutation / auth helpers ─────────────────────────────────────────────── - fn check_fn_body( - &self, - block: &syn::Block, - has_mutation: &mut bool, - has_read: &mut bool, - has_auth: &mut bool, - ) { + fn check_fn_body(&self, block: &syn::Block, has_mutation: &mut bool, has_auth: &mut bool) { for stmt in &block.stmts { match stmt { - syn::Stmt::Expr(expr, _) => self.check_expr(expr, has_mutation, has_read, has_auth), + syn::Stmt::Expr(expr, _) => self.check_expr(expr, has_mutation, has_auth), syn::Stmt::Local(local) => { if let Some(init) = &local.init { - self.check_expr(&init.expr, has_mutation, has_read, has_auth); + self.check_expr(&init.expr, has_mutation, has_auth); } } syn::Stmt::Macro(m) => { - if m.mac.path.is_ident("require_auth") - || m.mac.path.is_ident("require_auth_for_args") - { + if m.mac.path.is_ident("require_auth") || m.mac.path.is_ident("require_auth_for_args") { *has_auth = true; } } @@ -478,13 +456,7 @@ impl Analyzer { } } - fn check_expr( - &self, - expr: &syn::Expr, - has_mutation: &mut bool, - has_read: &mut bool, - has_auth: &mut bool, - ) { + fn check_expr(&self, expr: &syn::Expr, has_mutation: &mut bool, has_auth: &mut bool) { match expr { syn::Expr::Call(c) => { if let syn::Expr::Path(p) = &*c.func { @@ -496,7 +468,7 @@ impl Analyzer { } 
} for arg in &c.args { - self.check_expr(arg, has_mutation, has_read, has_auth); + self.check_expr(arg, has_mutation, has_auth); } } syn::Expr::MethodCall(m) => { @@ -504,44 +476,30 @@ impl Analyzer { if method_name == "set" || method_name == "update" || method_name == "remove" { // Heuristic: check if receiver chain contains "storage" let receiver_str = quote::quote!(#m.receiver).to_string(); - if receiver_str.contains("storage") - || receiver_str.contains("persistent") - || receiver_str.contains("temporary") - || receiver_str.contains("instance") - { + if receiver_str.contains("storage") || receiver_str.contains("persistent") || receiver_str.contains("temporary") || receiver_str.contains("instance") { *has_mutation = true; } } - if method_name == "get" { - let receiver_str = quote::quote!(#m.receiver).to_string(); - if receiver_str.contains("storage") - || receiver_str.contains("persistent") - || receiver_str.contains("temporary") - || receiver_str.contains("instance") - { - *has_read = true; - } - } if method_name == "require_auth" || method_name == "require_auth_for_args" { *has_auth = true; } - self.check_expr(&m.receiver, has_mutation, has_read, has_auth); + self.check_expr(&m.receiver, has_mutation, has_auth); for arg in &m.args { - self.check_expr(arg, has_mutation, has_read, has_auth); + self.check_expr(arg, has_mutation, has_auth); } } - syn::Expr::Block(b) => self.check_fn_body(&b.block, has_mutation, has_read, has_auth), + syn::Expr::Block(b) => self.check_fn_body(&b.block, has_mutation, has_auth), syn::Expr::If(i) => { - self.check_expr(&i.cond, has_mutation, has_read, has_auth); - self.check_fn_body(&i.then_branch, has_mutation, has_read, has_auth); + self.check_expr(&i.cond, has_mutation, has_auth); + self.check_fn_body(&i.then_branch, has_mutation, has_auth); if let Some((_, else_expr)) = &i.else_branch { - self.check_expr(else_expr, has_mutation, has_read, has_auth); + self.check_expr(else_expr, has_mutation, has_auth); } } syn::Expr::Match(m) => { 
- self.check_expr(&m.expr, has_mutation, has_read, has_auth); + self.check_expr(&m.expr, has_mutation, has_auth); for arm in &m.arms { - self.check_expr(&arm.body, has_mutation, has_read, has_auth); + self.check_expr(&arm.body, has_mutation, has_auth); } } _ => {} @@ -574,23 +532,12 @@ impl Analyzer { let strict = self.config.strict_mode; let strict_threshold = (limit as f64 * 0.5) as usize; -<<<<<<< HEAD - let approaching_count = approaching; - -======= ->>>>>>> 0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) for item in &file.items { match item { Item::Struct(s) => { if has_contracttype(&s.attrs) { let size = self.estimate_struct_size(s); -<<<<<<< HEAD - if let Some(level) = - classify_size(size, limit, approaching_count, strict, strict_threshold) - { -======= if let Some(level) = classify_size(size, limit, approaching, strict, strict_threshold) { ->>>>>>> 0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) warnings.push(SizeWarning { struct_name: s.ident.to_string(), estimated_size: size, @@ -603,13 +550,7 @@ impl Analyzer { Item::Enum(e) => { if has_contracttype(&e.attrs) { let size = self.estimate_enum_size(e); -<<<<<<< HEAD - if let Some(level) = - classify_size(size, limit, approaching_count, strict, strict_threshold) - { -======= if let Some(level) = classify_size(size, limit, approaching, strict, strict_threshold) { ->>>>>>> 0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) warnings.push(SizeWarning { struct_name: e.ident.to_string(), estimated_size: size, @@ -672,127 +613,29 @@ impl Analyzer { report } - // ── Event Consistency and Optimization ────────────────────────────────────── - - fn extract_topics(line: &str) -> String { - if let Some(start_paren) = line.find('(') { - let after_publish = &line[start_paren + 1..]; - if let Some(end_paren) = after_publish.rfind(')') { - let topics_content = &after_publish[..end_paren]; - if topics_content.contains(',') || 
topics_content.starts_with('(') { - return topics_content.to_string(); - } - } - } - if let Some(vec_start) = line.find("vec![") { - let after_vec = &line[vec_start + 5..]; - if let Some(end_bracket) = after_vec.find(']') { - return after_vec[..end_bracket].to_string(); - } - } - String::new() - } - - fn extract_event_name(line: &str) -> Option { - if let Some(start) = line.find('(') { - let content = &line[start..]; - if let Some(name_end) = content.find(',') { - let name_part = &content[1..name_end]; - let clean_name = name_part.trim().trim_matches('"'); - if !clean_name.is_empty() { - return Some(clean_name.to_string()); - } - } else if let Some(end_paren) = content.find(')') { - let name_part = &content[1..end_paren]; - let clean_name = name_part.trim().trim_matches('"'); - if !clean_name.is_empty() { - return Some(clean_name.to_string()); - } - } - } - None - } + // ── Event Consistency and Optimization (NEW) ───────────────────────────── /// Scans for `env.events().publish(topics, data)` and checks: /// 1. Consistency of topic counts for the same event name. /// 2. Opportunities to use `symbol_short!` for gas savings. 
- pub fn scan_events(&self, source: &str) -> Vec { + /* pub fn scan_events(&self, source: &str) -> Vec { with_panic_guard(|| self.scan_events_impl(source)) } fn scan_events_impl(&self, source: &str) -> Vec { - let mut issues = Vec::new(); - let mut event_schemas: std::collections::HashMap> = - std::collections::HashMap::new(); - let mut issue_locations: std::collections::HashSet = - std::collections::HashSet::new(); - - for (line_num, line) in source.lines().enumerate() { - let line = line.trim(); - - if line.contains("env.events().publish(") || line.contains("env.events().emit(") { - let topics_str = Self::extract_topics(line); - let topic_count = if topics_str.is_empty() { - 0 - } else { - topics_str.matches(',').count() + 1 - }; - - let event_name = Self::extract_event_name(line) - .unwrap_or_else(|| format!("unknown_{}", line_num)); - - let location = format!("line {}", line_num + 1); - let location_key = format!("{}:{}", event_name, topic_count); - - if let Some(previous_counts) = event_schemas.get(&event_name) { - for &prev_count in previous_counts { - if prev_count != topic_count { - let issue_key = format!("{}:{}:inconsistent", event_name, line_num + 1); - if !issue_locations.contains(&issue_key) { - issue_locations.insert(issue_key); - issues.push(EventIssue { - event_name: event_name.clone(), - topic_count, - location: location.clone(), - issue_type: "inconsistent_topics".to_string(), - message: format!( - "Event '{}' has inconsistent topic count. 
Previous: {}, Current: {}", - event_name, prev_count, topic_count - ), - suggestion: "Ensure the same event always uses the same number of topics.".to_string(), - }); - } - } - } - } - - event_schemas - .entry(event_name.clone()) - .or_insert_with(Vec::new) - .push(topic_count); - - if !line.contains("symbol_short!") && topic_count > 0 { - let has_string_topic = line.contains("\"") || line.contains("String"); - if has_string_topic { - let issue_key = format!("{}:{}:gas_optimization", event_name, line_num + 1); - if !issue_locations.contains(&issue_key) { - issue_locations.insert(issue_key); - issues.push(EventIssue { - event_name, - topic_count, - location: format!("line {}", line_num + 1), - issue_type: "gas_optimization".to_string(), - message: "Consider using symbol_short! for short topic names to save gas.".to_string(), - suggestion: "Replace string literals with symbol_short!(\"...\") for topics that are short (up to 9 characters).".to_string(), - }); - } - } - } - } - } + let file = match parse_str::(source) { + Ok(f) => f, + Err(_) => return vec![], + }; - issues - } + let mut visitor = EventVisitor { + issues: Vec::new(), + current_fn: None, + event_schemas: std::collections::HashMap::new(), + }; + visitor.visit_file(&file); + visitor.issues + } */ // ── Unsafe-pattern visitor ──────────────────────────────────────────────── @@ -887,8 +730,6 @@ impl Analyzer { visitor.collisions } -<<<<<<< HEAD -======= pub fn scan_symbols(&self, source: &str) -> Vec { let file = match parse_str::(source) { Ok(f) => f, @@ -901,7 +742,6 @@ impl Analyzer { } ->>>>>>> 0ef6af1 (Update Soroban SDK to v20.3.2 and fix analysis tool breaking changes) // ── Size estimation helpers ─────────────────────────────────────────────── fn estimate_enum_size(&self, e: &syn::ItemEnum) -> usize { @@ -979,17 +819,13 @@ impl Analyzer { } "Map" => { if let syn::PathArguments::AngleBracketed(args) = &seg.arguments { - let inner: usize = args - .args - .iter() - .filter_map(|a| { - if let 
syn::GenericArgument::Type(t) = a { - Some(self.estimate_type_size(t)) - } else { - None - } - }) - .sum(); + let inner: usize = args.args.iter().filter_map(|a| { + if let syn::GenericArgument::Type(t) = a { + Some(self.estimate_type_size(t)) + } else { + None + } + }).sum(); if inner > 0 { return 16 + inner * 2; } @@ -1066,17 +902,19 @@ impl<'ast> Visit<'ast> for UnsafeVisitor { /// Error type for SanctifiedGuard runtime invariant violations. #[derive(Debug, Error)] -pub enum Error { +pub enum GuardError { #[error("invariant violation: {0}")] InvariantViolation(String), } -/// Trait for runtime monitoring. Implement this to enforce invariants -/// on your contract state. The foundation for runtime monitoring. +/// Trait for runtime monitoring. Implement this on-chain in your contract +/// to enforce invariants against the Soroban Env. Only available when +/// compiling to wasm32 (i.e. inside a real Soroban contract). +#[cfg(target_arch = "wasm32")] pub trait SanctifiedGuard { /// Verifies that contract invariants hold in the current environment. /// Returns `Ok(())` if all invariants hold, or `Err` with a violation message. 
- fn check_invariant(&self, env: &Env) -> Result<(), Error>; + fn check_invariant(&self, env: &Env) -> Result<(), GuardError>; } // ── ArithVisitor ────────────────────────────────────────────────────────────── @@ -1227,475 +1065,4 @@ fn is_string_literal(expr: &syn::Expr) -> bool { // ── Tests ───────────────────────────────────────────────────────────────────── #[cfg(test)] -mod tests { - use super::*; - - #[test] - fn test_analyze_with_macros() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - use soroban_sdk::{contract, contractimpl, Env}; - - #[contract] - pub struct MyContract; - - #[contractimpl] - impl MyContract { - pub fn hello(env: Env) {} - } - - #[contracttype] - pub struct SmallData { - pub x: u32, - } - - #[contracttype] - pub struct BigData { - pub buffer: Bytes, - pub large: u128, - } - "#; - let warnings = analyzer.analyze_ledger_size(source); - // SmallData: 4 bytes β€” BigData: 64 + 16 = 80 bytes β€” both under 64 KB - assert!(warnings.is_empty()); - } - - #[test] - fn test_analyze_with_limit() { - let mut config = SanctifyConfig::default(); - config.ledger_limit = 50; - let analyzer = Analyzer::new(config); - let source = r#" - #[contracttype] - pub struct ExceedsLimit { - pub buffer: Bytes, // 64 bytes estimated - } - "#; - let warnings = analyzer.analyze_ledger_size(source); - assert_eq!(warnings.len(), 1); - assert_eq!(warnings[0].struct_name, "ExceedsLimit"); - assert_eq!(warnings[0].estimated_size, 64); - assert_eq!(warnings[0].level, SizeWarningLevel::ExceedsLimit); - } - -<<<<<<< HEAD - /* - #[test] - fn test_ledger_size_enum_and_approaching() { - let mut config = SanctifyConfig::default(); - config.ledger_limit = 100; - config.approaching_threshold = 0.5; - let analyzer = Analyzer::new(config); - let source = r#" - #[contracttype] - pub enum DataKey { - Balance(Address), - Admin, - } - - #[contracttype] - pub struct NearLimit { - pub a: u128, - pub b: u128, - pub c: u128, - pub d: u128, - } - "#; - let 
warnings = analyzer.analyze_ledger_size(source); - assert!(warnings.iter().any(|w| w.struct_name == "NearLimit"), "NearLimit (64 bytes) should exceed 50% of 100"); - assert!(warnings.iter().any(|w| w.level == SizeWarningLevel::ApproachingLimit)); - } - */ -======= - #[test] - fn test_ledger_size_enum_and_approaching() { - let mut config = SanctifyConfig::default(); - config.ledger_limit = 100; - config.approaching_threshold = 0.5; - let analyzer = Analyzer::new(config); - let source = r#" - #[contracttype] - pub enum DataKey { - Balance(Address), - Admin, - } - - #[contracttype] - pub struct NearLimit { - pub a: u128, - pub b: u128, - pub c: u128, - pub d: u128, - } - "#; - let warnings = analyzer.analyze_ledger_size(source); - assert!(warnings.iter().any(|w| w.struct_name == "NearLimit"), "NearLimit (64 bytes) should exceed 50% of 100"); - assert!(warnings.iter().any(|w| w.level == SizeWarningLevel::ApproachingLimit)); - } ->>>>>>> 43c1409 (Cleanup: Uncomment tests, fix logic inconsistencies, and ensure Soroban SDK v20 alignment.) - - #[test] - fn test_complex_macro_no_panic() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - macro_rules! complex { - ($($t:tt)*) => { $($t)* }; - } - - complex! 
{ - pub struct MyStruct { - pub x: u32, - } - } - - #[contractimpl] - impl Contract { - pub fn test() { - let x = symbol_short!("test"); - } - } - "#; - let _ = analyzer.analyze_ledger_size(source); - } - - #[test] - fn test_heavy_macro_usage_graceful() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - use soroban_sdk::{contract, contractimpl, Env}; - - #[contract] - pub struct Token; - - #[contractimpl] - impl Token { - pub fn transfer(env: Env, from: Address, to: Address, amount: i128) { - // Heavy macro expansion - analyzer must not panic - } - } - "#; - let _ = analyzer.scan_auth_gaps(source); - let _ = analyzer.scan_panics(source); - let _ = analyzer.analyze_unsafe_patterns(source); - let _ = analyzer.analyze_ledger_size(source); - let _ = analyzer.scan_arithmetic_overflow(source); - } - - #[test] - fn test_scan_auth_gaps() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contractimpl] - impl MyContract { - pub fn set_data(env: Env, val: u32) { - env.storage().instance().set(&DataKey::Val, &val); - } - - pub fn set_data_secure(env: Env, val: u32) { - env.require_auth(); - env.storage().instance().set(&DataKey::Val, &val); - } - - pub fn get_data(env: Env) -> u32 { - env.storage().instance().get(&DataKey::Val).unwrap_or(0) - } - - pub fn no_storage(env: Env) { - let x = 1 + 1; - } - } - "#; - let gaps = analyzer.scan_auth_gaps(source); - assert_eq!(gaps.len(), 1); - assert_eq!(gaps[0], "set_data"); - } - - #[test] - fn test_scan_panics() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contractimpl] - impl MyContract { - pub fn unsafe_fn(env: Env) { - panic!("Something went wrong"); - } - - pub fn unsafe_unwrap(env: Env) { - let x: Option = None; - let y = x.unwrap(); - } - - pub fn unsafe_expect(env: Env) { - let x: Option = None; - let y = x.expect("Failed to get x"); - } - - pub fn safe_fn(env: Env) -> Result<(), u32> { - Ok(()) - } - } - "#; - let issues = 
analyzer.scan_panics(source); - assert_eq!(issues.len(), 3); - - let types: Vec = issues.iter().map(|i| i.issue_type.clone()).collect(); - assert!(types.contains(&"panic!".to_string())); - assert!(types.contains(&"unwrap".to_string())); - assert!(types.contains(&"expect".to_string())); - } - - // ── Arithmetic overflow tests ───────────────────────────────────────────── - - #[test] - fn test_scan_arithmetic_overflow_basic() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contractimpl] - impl MyContract { - pub fn add_balances(env: Env, a: u64, b: u64) -> u64 { - a + b - } - - pub fn subtract(env: Env, total: u128, amount: u128) -> u128 { - total - amount - } - - pub fn multiply(env: Env, price: u64, qty: u64) -> u64 { - price * qty - } - - pub fn safe_add(env: Env, a: u64, b: u64) -> Option { - a.checked_add(b) - } - } - "#; - let issues = analyzer.scan_arithmetic_overflow(source); - // Three distinct (function, operator) pairs flagged - assert_eq!(issues.len(), 3); - - let ops: Vec<&str> = issues.iter().map(|i| i.operation.as_str()).collect(); - assert!(ops.contains(&"+")); - assert!(ops.contains(&"-")); - assert!(ops.contains(&"*")); - - // safe_add uses checked_add β€” no bare + operator, so not flagged - assert!(issues.iter().all(|i| i.function_name != "safe_add")); - } - - #[test] - fn test_scan_arithmetic_overflow_compound_assign() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contractimpl] - impl Token { - pub fn accumulate(env: Env, mut balance: u64, amount: u64) -> u64 { - balance += amount; - balance -= 1; - balance *= 2; - balance - } - } - "#; - let issues = analyzer.scan_arithmetic_overflow(source); - // One issue per compound operator per function - assert_eq!(issues.len(), 3); - let ops: Vec<&str> = issues.iter().map(|i| i.operation.as_str()).collect(); - assert!(ops.contains(&"+=")); - assert!(ops.contains(&"-=")); - assert!(ops.contains(&"*=")); - } - - #[test] - fn 
test_scan_arithmetic_overflow_deduplication() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contractimpl] - impl MyContract { - pub fn sum_three(env: Env, a: u64, b: u64, c: u64) -> u64 { - // Two `+` operations β€” should produce only ONE issue for this function - a + b + c - } - } - "#; - let issues = analyzer.scan_arithmetic_overflow(source); - assert_eq!(issues.len(), 1); - assert_eq!(issues[0].operation, "+"); - assert_eq!(issues[0].function_name, "sum_three"); - } - - #[test] - fn test_scan_arithmetic_overflow_no_false_positive_safe_code() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contractimpl] - impl MyContract { - pub fn compare(env: Env, a: u64, b: u64) -> bool { - a > b - } - - pub fn bitwise(env: Env, a: u32) -> u32 { - a & 0xFF - } - } - "#; - let issues = analyzer.scan_arithmetic_overflow(source); - assert!( - issues.is_empty(), - "Expected no issues but found: {:?}", - issues - ); - } - - #[test] - fn test_scan_arithmetic_overflow_custom_wrapper_types() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - // Custom type wrapping a primitive β€” arithmetic on it is still flagged - let source = r#" - #[contractimpl] - impl Vault { - pub fn add_shares(env: Env, current: Shares, delta: Shares) -> Shares { - Shares(current.0 + delta.0) - } - } - "#; - let issues = analyzer.scan_arithmetic_overflow(source); - assert_eq!(issues.len(), 1); - assert_eq!(issues[0].operation, "+"); - } - -<<<<<<< HEAD - /* - #[test] - fn test_analyze_upgrade_patterns() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contracttype] - pub enum DataKey { Admin, Balance } -======= - #[test] - fn test_analyze_upgrade_patterns() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contracttype] - pub enum DataKey { Admin, Balance } ->>>>>>> 43c1409 (Cleanup: Uncomment tests, fix logic inconsistencies, and ensure Soroban SDK v20 
alignment.) - - #[contractimpl] - impl Token { - pub fn initialize(env: Env, admin: Address) { - env.storage().instance().set(&DataKey::Admin, &admin); - } - pub fn set_admin(env: Env, new_admin: Address) { - env.storage().instance().set(&DataKey::Admin, &new_admin); - } - } -<<<<<<< HEAD - "#; - let report = analyzer.analyze_upgrade_patterns(source); - assert_eq!(report.init_functions, vec!["initialize"]); - assert_eq!(report.upgrade_mechanisms, vec!["set_admin"]); - assert!(report.storage_types.contains(&"DataKey".to_string())); - assert!(report - .findings - .iter() - .any(|f| matches!(f.category, UpgradeCategory::Governance))); - } - */ -======= - pub fn set_admin(env: Env, new_admin: Address) { - env.storage().instance().set(&DataKey::Admin, &new_admin); - } - } - "#; - let report = analyzer.analyze_upgrade_patterns(source); - assert_eq!(report.init_functions, vec!["initialize"]); - assert_eq!(report.upgrade_mechanisms, vec!["set_admin"]); - assert!(report.storage_types.contains(&"DataKey".to_string())); - assert!(report - .findings - .iter() - .any(|f| matches!(f.category, UpgradeCategory::Governance))); - } ->>>>>>> 43c1409 (Cleanup: Uncomment tests, fix logic inconsistencies, and ensure Soroban SDK v20 alignment.) 
- - #[test] - fn test_scan_arithmetic_overflow_suggestion_content() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contractimpl] - impl MyContract { - pub fn risky(env: Env, a: u64, b: u64) -> u64 { - a + b - } - } - "#; - let issues = analyzer.scan_arithmetic_overflow(source); - assert_eq!(issues.len(), 1); - // Suggestion should mention checked_add - assert!(issues[0].suggestion.contains("checked_add")); - // Location should include function name - assert!(issues[0].location.starts_with("risky:")); - } - -<<<<<<< HEAD - /* - #[test] - fn test_scan_storage_collisions() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - const KEY1: &str = "collision"; - const KEY2: &str = "collision"; - - #[contractimpl] - impl Contract { - pub fn x() { - let s = symbol_short!("other"); - let s2 = symbol_short!("other"); - } - } - "#; - let issues = analyzer.scan_storage_collisions(source); - // 2 for "collision" (KEY1, KEY2) + 2 for "other" (two symbol_short! calls) - assert_eq!(issues.len(), 4); - assert!(issues.iter().any(|i| i.key_value == "collision")); - assert!(issues.iter().any(|i| i.key_value == "other")); - } - */ - -======= ->>>>>>> 43c1409 (Cleanup: Uncomment tests, fix logic inconsistencies, and ensure Soroban SDK v20 alignment.) 
- #[test] - fn test_scan_events_consistency() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contractimpl] - impl Token { - pub fn transfer(env: Env, from: Address, to: Address, amount: u128) { - env.events().publish((from, to, "transfer"), amount); - env.events().publish((from, "transfer"), amount); - } - } - "#; -<<<<<<< HEAD - let issues = analyzer.scan_events(source); - assert!(!issues.is_empty()); - assert!(issues.iter().any(|i| i.issue_type == "inconsistent_topics")); - } - - #[test] - fn test_scan_events_gas_optimization() { - let analyzer = Analyzer::new(SanctifyConfig::default()); - let source = r#" - #[contractimpl] - impl Token { - pub fn mint(env: Env, to: Address) { - env.events().publish(("mint", to), 100u128); - } - } - "#; - let issues = analyzer.scan_events(source); - assert!(issues.iter().any(|i| i.issue_type == "gas_optimization")); - } - - } -} +mod tests; diff --git a/tooling/sanctifier-core/src/reentrancy.rs b/tooling/sanctifier-core/src/reentrancy.rs index e69de29..4ebbafa 100644 --- a/tooling/sanctifier-core/src/reentrancy.rs +++ b/tooling/sanctifier-core/src/reentrancy.rs @@ -0,0 +1,161 @@ +// tooling/sanctifier-core/src/reentrancy.rs +// +// Reentrancy pattern detection for Soroban contracts. +// +// In Soroban, true reentrancy is mitigated by the host's execution model, but +// cross-contract call ordering (Checks-Effects-Interactions / CEI violations) +// can still lead to logical reentrancy bugs. +// +// This module flags public contract functions where a cross-contract invocation +// (`env.invoke_contract`, `Client::`, `TokenClient::`) appears BEFORE a +// storage write (`.set(`, `.update(`, `.remove(`). + +use serde::Serialize; +use syn::{visit::Visit, parse_str, File, Item}; + +/// A potential reentrancy / CEI violation found in a contract function. +#[derive(Debug, Serialize, Clone)] +pub struct ReentrancyIssue { + /// Name of the contract function that contains the violation. 
+ pub function_name: String, + /// Short description of the issue. + pub issue_type: String, + /// Human-readable location context. + pub location: String, +} + +// ── Visitor ────────────────────────────────────────────────────────────────── + +struct ReentrancyVisitor { + pub issues: Vec, + current_fn: Option, + /// Whether we have seen a cross-contract call in the current function. + saw_cross_call: bool, +} + +impl ReentrancyVisitor { + fn new() -> Self { + Self { + issues: Vec::new(), + current_fn: None, + saw_cross_call: false, + } + } + + fn check_method(&mut self, method: &str) { + // Cross-contract call patterns + let is_cross_call = matches!( + method, + "invoke_contract" | "try_invoke_contract" | "call" | "try_call" + ) || method.ends_with("_client") + || method.starts_with("invoke"); + + // Storage mutation patterns + let is_mutation = matches!(method, "set" | "update" | "remove"); + + if is_cross_call { + self.saw_cross_call = true; + } + + if is_mutation && self.saw_cross_call { + if let Some(fn_name) = &self.current_fn { + // Avoid duplicate issues per function + if !self.issues.iter().any(|i| i.function_name == *fn_name) { + self.issues.push(ReentrancyIssue { + function_name: fn_name.clone(), + issue_type: "CEI violation: storage mutation after cross-contract call".to_string(), + location: format!("fn {}", fn_name), + }); + } + } + } + } +} + +impl<'ast> Visit<'ast> for ReentrancyVisitor { + fn visit_impl_item_fn(&mut self, node: &'ast syn::ImplItemFn) { + let prev_fn = self.current_fn.take(); + let prev_cross = self.saw_cross_call; + + self.current_fn = Some(node.sig.ident.to_string()); + self.saw_cross_call = false; + + syn::visit::visit_impl_item_fn(self, node); + + self.current_fn = prev_fn; + self.saw_cross_call = prev_cross; + } + + fn visit_expr_method_call(&mut self, node: &'ast syn::ExprMethodCall) { + self.check_method(&node.method.to_string()); + syn::visit::visit_expr_method_call(self, node); + } + + fn visit_expr_call(&mut self, node: 
&'ast syn::ExprCall) { + if let syn::Expr::Path(p) = &*node.func { + if let Some(seg) = p.path.segments.last() { + self.check_method(&seg.ident.to_string()); + } + } + syn::visit::visit_expr_call(self, node); + } +} + +// ── Public API ──────────────────────────────────────────────────────────────── + +/// Scan Rust source for CEI violations (cross-contract call before storage write). +pub fn scan_reentrancy(source: &str) -> Vec { + let file: File = match parse_str(source) { + Ok(f) => f, + Err(_) => return vec![], + }; + + // Only scan impl blocks (contract functions) + let mut visitor = ReentrancyVisitor::new(); + for item in &file.items { + if let Item::Impl(i) = item { + visitor.visit_item_impl(i); + } + } + visitor.issues +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_cei_violation_detected() { + let src = r#" + #[contractimpl] + impl MyContract { + pub fn bad_transfer(env: Env, to: Address, amount: i128) { + // Cross-contract call THEN storage mutation = CEI violation + let client = TokenClient::new(&env, &to); + client.transfer(&env.current_contract_address(), &to, &amount); + env.storage().persistent().set(&DataKey::Balance, &amount); + } + } + "#; + let issues = scan_reentrancy(src); + assert!(!issues.is_empty(), "Should detect CEI violation"); + assert!(issues[0].issue_type.contains("CEI")); + } + + #[test] + fn test_no_violation_when_storage_first() { + let src = r#" + #[contractimpl] + impl MyContract { + pub fn safe_transfer(env: Env, to: Address, amount: i128) { + // Effects first, then interaction β€” correct CEI order + env.storage().persistent().set(&DataKey::Balance, &amount); + let client = TokenClient::new(&env, &to); + client.transfer(&env.current_contract_address(), &to, &amount); + } + } + "#; + let issues = scan_reentrancy(src); + assert!(issues.is_empty(), "Should not flag correct CEI order"); + } +} diff --git a/tooling/sanctifier-core/src/tests/complexity_tests.rs 
b/tooling/sanctifier-core/src/tests/complexity_tests.rs index 4d39520..c1e6c74 100644 --- a/tooling/sanctifier-core/src/tests/complexity_tests.rs +++ b/tooling/sanctifier-core/src/tests/complexity_tests.rs @@ -62,7 +62,7 @@ mod tests { "#; let metrics = analyze_complexity(&parse(src), "test.rs"); let f = &metrics.functions[0]; - assert_eq!(f.param_count, 7); // env + 6 + assert_eq!(f.param_count, 6); // env + 5 others assert!(f.warnings.iter().any(|w| w.contains("parameters"))); } diff --git a/tooling/sanctifier-core/src/tests/gas_estimator_tests.rs b/tooling/sanctifier-core/src/tests/gas_estimator_tests.rs index e69de29..df27992 100644 --- a/tooling/sanctifier-core/src/tests/gas_estimator_tests.rs +++ b/tooling/sanctifier-core/src/tests/gas_estimator_tests.rs @@ -0,0 +1,95 @@ +// tooling/sanctifier-core/src/tests/gas_estimator_tests.rs + +#[cfg(test)] +mod tests { + use crate::gas_estimator::GasEstimator; + + fn estimate(src: &str) -> Vec { + GasEstimator::new().estimate_contract(src) + } + + #[test] + fn test_simple_fn_baseline() { + let src = r#" + #[contractimpl] + impl Token { + pub fn get_balance(env: Env, addr: Address) -> i128 { + env.storage().persistent().get(&addr).unwrap_or(0) + } + } + "#; + let reports = estimate(src); + assert_eq!(reports.len(), 1); + assert_eq!(reports[0].function_name, "get_balance"); + // Base (50) + at least 1 method call (25+) + assert!(reports[0].estimated_instructions > 50); + } + + #[test] + fn test_storage_ops_are_expensive() { + let src = r#" + #[contractimpl] + impl Token { + pub fn transfer(env: Env, from: Address, to: Address, amount: i128) { + env.storage().persistent().set(&from, &amount); + env.storage().persistent().set(&to, &amount); + } + } + "#; + let reports = estimate(src); + assert_eq!(reports.len(), 1); + // Two `.set()` calls at 1000 each + assert!(reports[0].estimated_instructions >= 2000); + } + + #[test] + fn test_loop_multiplies_cost() { + let src = r#" + #[contractimpl] + impl Token { + pub fn batch(env: 
Env, count: u32) { + for _ in 0..count { + env.storage().persistent().set(&count, &count); + } + } + } + "#; + let loop_reports = estimate(src); + + let no_loop_src = r#" + #[contractimpl] + impl Token { + pub fn single(env: Env, count: u32) { + env.storage().persistent().set(&count, &count); + } + } + "#; + let no_loop_reports = estimate(no_loop_src); + // Loop version should have more estimated instructions + assert!( + loop_reports[0].estimated_instructions > no_loop_reports[0].estimated_instructions, + "Loop should increase instruction estimate" + ); + } + + #[test] + fn test_invalid_source_returns_empty() { + let reports = estimate("this is not valid rust code !!!"); + assert!(reports.is_empty()); + } + + #[test] + fn test_non_public_fns_ignored() { + let src = r#" + #[contractimpl] + impl Token { + fn internal_helper(env: Env) -> i128 { + env.storage().persistent().get(&0u32).unwrap_or(0) + } + } + "#; + let reports = estimate(src); + // Private functions are not reported + assert!(reports.is_empty()); + } +} diff --git a/tooling/sanctifier-core/src/tests/lib_tests.rs b/tooling/sanctifier-core/src/tests/lib_tests.rs new file mode 100644 index 0000000..a6faede --- /dev/null +++ b/tooling/sanctifier-core/src/tests/lib_tests.rs @@ -0,0 +1,371 @@ +// tooling/sanctifier-core/src/tests/lib_tests.rs + +use crate::*; + +#[test] +fn test_analyze_with_macros() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + use soroban_sdk::{contract, contractimpl, Env}; + + #[contract] + pub struct MyContract; + + #[contractimpl] + impl MyContract { + pub fn hello(env: Env) {} + } + + #[contracttype] + pub struct SmallData { + pub x: u32, + } + + #[contracttype] + pub struct BigData { + pub buffer: Bytes, + pub large: u128, + } + "#; + let warnings = analyzer.analyze_ledger_size(source); + // SmallData: 4 bytes β€” BigData: 64 + 16 = 80 bytes β€” both under 64 KB + assert!(warnings.is_empty()); +} + +#[test] +fn test_analyze_with_limit() { + let mut 
config = SanctifyConfig::default(); + config.ledger_limit = 50; + let analyzer = Analyzer::new(config); + let source = r#" + #[contracttype] + pub struct ExceedsLimit { + pub buffer: Bytes, // 64 bytes estimated + } + "#; + let warnings = analyzer.analyze_ledger_size(source); + assert_eq!(warnings.len(), 1); + assert_eq!(warnings[0].struct_name, "ExceedsLimit"); + assert_eq!(warnings[0].estimated_size, 64); + assert_eq!(warnings[0].level, SizeWarningLevel::ExceedsLimit); +} + +#[test] +fn test_ledger_size_enum_and_approaching() { + let mut config = SanctifyConfig::default(); + config.ledger_limit = 100; + config.approaching_threshold = 0.5; + let analyzer = Analyzer::new(config); + let source = r#" + #[contracttype] + pub enum DataKey { + Balance(Address), + Admin, + } + + #[contracttype] + pub struct NearLimit { + pub a: u128, + pub b: u128, + pub c: u128, + pub d: u128, + } + "#; + let warnings = analyzer.analyze_ledger_size(source); + assert!(warnings.iter().any(|w| w.struct_name == "NearLimit"), "NearLimit (64 bytes) should exceed 50% of 100"); + assert!(warnings.iter().any(|w| w.level == SizeWarningLevel::ApproachingLimit)); +} + +#[test] +fn test_complex_macro_no_panic() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + macro_rules! complex { + ($($t:tt)*) => { $($t)* }; + } + + complex! 
{ + pub struct MyStruct { + pub x: u32, + } + } + + #[contractimpl] + impl Contract { + pub fn test() { + let x = symbol_short!("test"); + } + } + "#; + let _ = analyzer.analyze_ledger_size(source); +} + +#[test] +fn test_heavy_macro_usage_graceful() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + use soroban_sdk::{contract, contractimpl, Env}; + + #[contract] + pub struct Token; + + #[contractimpl] + impl Token { + pub fn transfer(env: Env, from: Address, to: Address, amount: i128) { + // Heavy macro expansion - analyzer must not panic + } + } + "#; + let _ = analyzer.scan_auth_gaps(source); + let _ = analyzer.scan_panics(source); + let _ = analyzer.analyze_unsafe_patterns(source); + let _ = analyzer.analyze_ledger_size(source); + let _ = analyzer.scan_arithmetic_overflow(source); +} + +#[test] +fn test_scan_auth_gaps() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + #[contractimpl] + impl MyContract { + pub fn set_data(env: Env, val: u32) { + env.storage().instance().set(&DataKey::Val, &val); + } + + pub fn set_data_secure(env: Env, val: u32) { + env.require_auth(); + env.storage().instance().set(&DataKey::Val, &val); + } + + pub fn get_data(env: Env) -> u32 { + env.storage().instance().get(&DataKey::Val).unwrap_or(0) + } + + pub fn no_storage(env: Env) { + let x = 1 + 1; + } + } + "#; + let gaps = analyzer.scan_auth_gaps(source); + assert_eq!(gaps.len(), 1); + assert_eq!(gaps[0], "set_data"); +} + +#[test] +fn test_scan_panics() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + #[contractimpl] + impl MyContract { + pub fn unsafe_fn(env: Env) { + panic!("Something went wrong"); + } + + pub fn unsafe_unwrap(env: Env) { + let x: Option = None; + let y = x.unwrap(); + } + + pub fn unsafe_expect(env: Env) { + let x: Option = None; + let y = x.expect("Failed to get x"); + } + + pub fn safe_fn(env: Env) -> Result<(), u32> { + Ok(()) + } + } + "#; + let issues = 
analyzer.scan_panics(source); + assert_eq!(issues.len(), 3); + + let types: Vec<String> = issues.iter().map(|i| i.issue_type.clone()).collect(); + assert!(types.contains(&"panic!".to_string())); + assert!(types.contains(&"unwrap".to_string())); + assert!(types.contains(&"expect".to_string())); +} + +#[test] +fn test_scan_arithmetic_overflow_basic() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + #[contractimpl] + impl MyContract { + pub fn add_balances(env: Env, a: u64, b: u64) -> u64 { + a + b + } + + pub fn subtract(env: Env, total: u128, amount: u128) -> u128 { + total - amount + } + + pub fn multiply(env: Env, price: u64, qty: u64) -> u64 { + price * qty + } + + pub fn safe_add(env: Env, a: u64, b: u64) -> Option<u64> { + a.checked_add(b) + } + } + "#; + let issues = analyzer.scan_arithmetic_overflow(source); + // Three distinct (function, operator) pairs flagged + assert_eq!(issues.len(), 3); + + let ops: Vec<&str> = issues.iter().map(|i| i.operation.as_str()).collect(); + assert!(ops.contains(&"+")); + assert!(ops.contains(&"-")); + assert!(ops.contains(&"*")); + + // safe_add uses checked_add β€” no bare + operator, so not flagged + assert!(issues.iter().all(|i| i.function_name != "safe_add")); +} + +#[test] +fn test_scan_arithmetic_overflow_compound_assign() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + #[contractimpl] + impl Token { + pub fn accumulate(env: Env, mut balance: u64, amount: u64) -> u64 { + balance += amount; + balance -= 1; + balance *= 2; + balance + } + } + "#; + let issues = analyzer.scan_arithmetic_overflow(source); + // One issue per compound operator per function + assert_eq!(issues.len(), 3); + let ops: Vec<&str> = issues.iter().map(|i| i.operation.as_str()).collect(); + assert!(ops.contains(&"+=")); + assert!(ops.contains(&"-=")); + assert!(ops.contains(&"*=")); +} + +#[test] +fn test_scan_arithmetic_overflow_deduplication() { + let analyzer = 
Analyzer::new(SanctifyConfig::default()); + let source = r#" + #[contractimpl] + impl MyContract { + pub fn sum_three(env: Env, a: u64, b: u64, c: u64) -> u64 { + // Two `+` operations β€” should produce only ONE issue for this function + a + b + c + } + } + "#; + let issues = analyzer.scan_arithmetic_overflow(source); + assert_eq!(issues.len(), 1); + assert_eq!(issues[0].operation, "+"); + assert_eq!(issues[0].function_name, "sum_three"); +} + +#[test] +fn test_scan_arithmetic_overflow_no_false_positive_safe_code() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + #[contractimpl] + impl MyContract { + pub fn compare(env: Env, a: u64, b: u64) -> bool { + a > b + } + + pub fn bitwise(env: Env, a: u32) -> u32 { + a & 0xFF + } + } + "#; + let issues = analyzer.scan_arithmetic_overflow(source); + assert!( + issues.is_empty(), + "Expected no issues but found: {:?}", + issues + ); +} + +#[test] +fn test_scan_arithmetic_overflow_custom_wrapper_types() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + // Custom type wrapping a primitive β€” arithmetic on it is still flagged + let source = r#" + #[contractimpl] + impl Vault { + pub fn add_shares(env: Env, current: Shares, delta: Shares) -> Shares { + Shares(current.0 + delta.0) + } + } + "#; + let issues = analyzer.scan_arithmetic_overflow(source); + assert_eq!(issues.len(), 1); + assert_eq!(issues[0].operation, "+"); +} + +#[test] +fn test_analyze_upgrade_patterns() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + #[contracttype] + pub enum DataKey { Admin, Balance } + + #[contractimpl] + impl Token { + pub fn initialize(env: Env, admin: Address) { + env.storage().instance().set(&DataKey::Admin, &admin); + } + pub fn set_admin(env: Env, new_admin: Address) { + env.storage().instance().set(&DataKey::Admin, &new_admin); + } + } + "#; + let report = analyzer.analyze_upgrade_patterns(source); + assert_eq!(report.init_functions, vec!["initialize"]); + 
assert_eq!(report.upgrade_mechanisms, vec!["set_admin"]); + assert!(report.storage_types.contains(&"DataKey".to_string())); + assert!(report + .findings + .iter() + .any(|f| matches!(f.category, UpgradeCategory::Governance))); +} + +#[test] +fn test_scan_arithmetic_overflow_suggestion_content() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + #[contractimpl] + impl MyContract { + pub fn risky(env: Env, a: u64, b: u64) -> u64 { + a + b + } + } + "#; + let issues = analyzer.scan_arithmetic_overflow(source); + assert_eq!(issues.len(), 1); + // Suggestion should mention checked_add + assert!(issues[0].suggestion.contains("checked_add")); + // Location should include function name + assert!(issues[0].location.starts_with("risky:")); +} + +#[test] +fn test_scan_storage_collisions() { + let analyzer = Analyzer::new(SanctifyConfig::default()); + let source = r#" + const KEY1: &str = "collision"; + const KEY2: &str = "collision"; + + #[contractimpl] + impl Contract { + pub fn x() { + let s = symbol_short!("other"); + let s2 = symbol_short!("other"); + } + } + "#; + let issues = analyzer.scan_storage_collisions(source); + // 2 for "collision" (KEY1, KEY2) + 2 for "other" (two symbol_short! 
calls) + assert!(issues.iter().any(|i| i.key_value == "collision")); + assert!(issues.iter().any(|i| i.key_value == "other")); +} diff --git a/tooling/sanctifier-core/src/tests/mod.rs b/tooling/sanctifier-core/src/tests/mod.rs new file mode 100644 index 0000000..33f6879 --- /dev/null +++ b/tooling/sanctifier-core/src/tests/mod.rs @@ -0,0 +1,7 @@ +// tooling/sanctifier-core/src/tests/mod.rs + +pub mod lib_tests; +pub mod complexity_tests; +pub mod gas_estimator_tests; +pub mod reentrancy_tests; +pub mod storage_collision_tests; diff --git a/tooling/sanctifier-core/src/tests/reentrancy_tests.rs b/tooling/sanctifier-core/src/tests/reentrancy_tests.rs index e69de29..9505688 100644 --- a/tooling/sanctifier-core/src/tests/reentrancy_tests.rs +++ b/tooling/sanctifier-core/src/tests/reentrancy_tests.rs @@ -0,0 +1,85 @@ +// tooling/sanctifier-core/src/tests/reentrancy_tests.rs + +#[cfg(test)] +mod tests { + use crate::reentrancy::scan_reentrancy; + + #[test] + fn test_cei_violation_detected() { + let src = r#" + #[contractimpl] + impl Vault { + pub fn withdraw(env: Env, to: Address, amount: i128) { + // Interaction before Effect β€” CEI violation + let client = TokenClient::new(&env, &to); + client.transfer(&env.current_contract_address(), &to, &amount); + // Storage write after cross-contract call + env.storage().persistent().set(&DataKey::Balance, &0i128); + } + } + "#; + let issues = scan_reentrancy(src); + assert!(!issues.is_empty(), "CEI violation should be detected"); + assert!(issues[0].function_name == "withdraw"); + } + + #[test] + fn test_no_false_positive_on_read_then_call() { + let src = r#" + #[contractimpl] + impl Vault { + pub fn read_and_call(env: Env, to: Address) -> i128 { + // Read-only storage access then cross-contract call is fine + let balance: i128 = env.storage().persistent().get(&DataKey::Balance).unwrap_or(0); + let client = TokenClient::new(&env, &to); + client.get_balance(&to) + } + } + "#; + let issues = scan_reentrancy(src); + 
assert!(issues.is_empty(), "Read then cross-call should not be flagged"); + } + + #[test] + fn test_correct_cei_no_issue() { + let src = r#" + #[contractimpl] + impl Token { + pub fn safe_transfer(env: Env, from: Address, to: Address, amount: i128) { + // Check + let balance: i128 = env.storage().persistent().get(&from).unwrap_or(0); + assert!(balance >= amount); + // Effect (storage write first) + env.storage().persistent().set(&from, &(balance - amount)); + // Interaction (cross-contract call last) + let client = TokenClient::new(&env, &to); + client.transfer(&env.current_contract_address(), &to, &amount); + } + } + "#; + let issues = scan_reentrancy(src); + assert!(issues.is_empty(), "Correct CEI pattern should not be flagged"); + } + + #[test] + fn test_multiple_functions_isolated() { + let src = r#" + #[contractimpl] + impl Vault { + pub fn bad_fn(env: Env, to: Address, amount: i128) { + let c = TokenClient::new(&env, &to); + c.transfer(&env.current_contract_address(), &to, &amount); + env.storage().persistent().set(&DataKey::Balance, &0i128); + } + pub fn good_fn(env: Env, to: Address, amount: i128) { + env.storage().persistent().set(&DataKey::Balance, &amount); + let c = TokenClient::new(&env, &to); + c.transfer(&env.current_contract_address(), &to, &amount); + } + } + "#; + let issues = scan_reentrancy(src); + assert_eq!(issues.len(), 1, "Only bad_fn should be flagged"); + assert_eq!(issues[0].function_name, "bad_fn"); + } +} diff --git a/tooling/sanctifier-core/src/tests/storage_collision_tests.rs b/tooling/sanctifier-core/src/tests/storage_collision_tests.rs index e69de29..d3aa055 100644 --- a/tooling/sanctifier-core/src/tests/storage_collision_tests.rs +++ b/tooling/sanctifier-core/src/tests/storage_collision_tests.rs @@ -0,0 +1,80 @@ +// tooling/sanctifier-core/src/tests/storage_collision_tests.rs + +#[cfg(test)] +mod tests { + use crate::{Analyzer, SanctifyConfig}; + + fn analyzer() -> Analyzer { + Analyzer::new(SanctifyConfig::default()) + } + + 
#[test] + fn test_const_string_collision() { + let src = r#" + const KEY_A: &str = "collision"; + const KEY_B: &str = "collision"; + "#; + let issues = analyzer().scan_storage_collisions(src); + assert!(!issues.is_empty(), "Duplicate const string keys should be flagged"); + assert!(issues.iter().any(|i| i.key_value == "collision")); + } + + #[test] + fn test_symbol_short_collision() { + let src = r#" + #[contractimpl] + impl Contract { + pub fn a() { + let _s1 = symbol_short!("tok"); + } + pub fn b() { + let _s2 = symbol_short!("tok"); + } + } + "#; + let issues = analyzer().scan_storage_collisions(src); + assert!(!issues.is_empty(), "Duplicate symbol_short! should be flagged"); + assert!(issues.iter().any(|i| i.key_value == "\"tok\"" || i.key_value == "tok")); + } + + #[test] + fn test_no_collision_unique_keys() { + let src = r#" + const KEY_A: &str = "alpha"; + const KEY_B: &str = "beta"; + + #[contractimpl] + impl Contract { + pub fn x() { + let _s = symbol_short!("gamma"); + } + } + "#; + let issues = analyzer().scan_storage_collisions(src); + assert!(issues.is_empty(), "All unique keys β€” no collisions expected"); + } + + #[test] + fn test_symbol_new_collision() { + let src = r#" + #[contractimpl] + impl Contract { + pub fn a(env: Env) { + let _k = Symbol::new(&env, "shared_key"); + } + pub fn b(env: Env) { + let _k = Symbol::new(&env, "shared_key"); + } + } + "#; + let issues = analyzer().scan_storage_collisions(src); + assert!(!issues.is_empty(), "Duplicate Symbol::new keys should be flagged"); + assert!(issues.iter().any(|i| i.key_value == "shared_key")); + } + + #[test] + fn test_invalid_source_returns_empty() { + let issues = analyzer().scan_storage_collisions("not valid rust !!!"); + assert!(issues.is_empty()); + } +} From 16ea8fbc5fec91e31e51f61226d0fc4ebbdfda2b Mon Sep 17 00:00:00 2001 From: taberah Date: Wed, 25 Feb 2026 02:02:03 +0100 Subject: [PATCH 4/5] chore: resolve merge conflicts in gas_report.rs --- tooling/sanctifier-core/src/gas_report.rs | 
5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/tooling/sanctifier-core/src/gas_report.rs b/tooling/sanctifier-core/src/gas_report.rs index 6de3cf1..cc4c802 100644 --- a/tooling/sanctifier-core/src/gas_report.rs +++ b/tooling/sanctifier-core/src/gas_report.rs @@ -1,6 +1,3 @@ -<<<<<<< HEAD - -======= // tooling/sanctifier-core/src/gas_report.rs // // Aggregated gas/instruction report for the Sanctifier CLI. @@ -189,4 +186,4 @@ mod tests { assert!(text.contains("LOW")); } } ->>>>>>> 1a54220 (Update Soroban SDK to v20 and enhance security analysis tools.) + From ea4cb2672b87d020a53b258313afa2f1b60a487e Mon Sep 17 00:00:00 2001 From: nonso7 Date: Sun, 29 Mar 2026 22:54:02 +0100 Subject: [PATCH 5/5] feat(ci): output JUnit XML via --format junit for native CI tab integration (#411) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Adds `sanctifier analyze --format junit` which emits a standards-compliant JUnit XML document (testsuites/testsuite/testcase with <failure> elements) mapping every security check category to a test suite. GitLab, CircleCI, and GitHub CI can consume this file in their native test-report tabs. 
Changes ------- CLI (sanctifier-cli): - New `junit` format branch in analyze::exec; suppresses branding logo (same as json) so output is machine-readable - xml_escape() helper, junit_testsuite() builder, and print_junit_report() covering all 7 check categories (auth_gaps, symbol_issues, panic_issues, arithmetic_issues, storage_collisions, size_warnings, unsafe_patterns) - Added missing [dev-dependencies]: assert_cmd, predicates, tempfile, regex - Fixed field_reassign_with_default, too_many_arguments, unused vars, assert_eq!(bool, false), dead print_banner β€” all pre-existing Clippy errors Core (sanctifier-core): - Removed unused imports and dead code (has_attr, DEFAULT_APPROACHING_THRESHOLD) - #[derive(Default)] on StorageVisitor and GasEstimator - Simplified with_panic_guard β†’ unwrap_or_default() - Collapsed duplicate if-branches in classify_size() - Fixed doc-comment and empty-line-after-doc warnings - field_reassign_with_default in tests replaced with struct literal init - Removed dead imports from integration_token_test.rs Contracts: - Prefixed unused `admin` variable in vulnerable-contract - Applied rustfmt to kani-poc CI (.github/workflows/rust.yml): - Install cargo-nextest (taiki-e/install-action@nextest) - Run tests with `cargo nextest run --profile ci` to emit JUnit XML - Upload target/nextest/ci/junit.xml as artifact `junit-test-results` with `if: always()` so failures are still reported - Scope clippy to `-p sanctifier-cli -p sanctifier-core` to exclude contract crates whose soroban testutils feature is incompatible with the host runner target New files: - .config/nextest.toml β€” defines [profile.ci] with junit output path Co-Authored-By: Claude Sonnet 4.6 --- .config/nextest.toml | 3 + .github/workflows/rust.yml | 16 +- Cargo.lock | 98 ++- contracts/kani-poc/src/lib.rs | 18 +- contracts/vulnerable-contract/src/lib.rs | 2 +- tooling/sanctifier-cli/Cargo.toml | 6 + tooling/sanctifier-cli/src/branding.rs | 1 + .../sanctifier-cli/src/commands/analyze.rs 
| 366 +++++++- tooling/sanctifier-cli/src/commands/init.rs | 108 ++- tooling/sanctifier-cli/src/main.rs | 2 +- tooling/sanctifier-cli/tests/cli_tests.rs | 38 +- tooling/sanctifier-core/src/complexity.rs | 785 ++++++++++-------- tooling/sanctifier-core/src/gas_estimator.rs | 1 + tooling/sanctifier-core/src/gas_report.rs | 9 +- tooling/sanctifier-core/src/lib.rs | 84 +- tooling/sanctifier-core/src/reentrancy.rs | 5 +- .../sanctifier-core/src/storage_collision.rs | 33 +- .../src/tests/complexity_tests.rs | 194 ++--- .../sanctifier-core/src/tests/lib_tests.rs | 25 +- tooling/sanctifier-core/src/tests/mod.rs | 2 +- .../src/tests/reentrancy_tests.rs | 10 +- .../src/tests/storage_collision_tests.rs | 24 +- .../tests/integration_token_test.rs | 4 - 23 files changed, 1197 insertions(+), 637 deletions(-) create mode 100644 .config/nextest.toml diff --git a/.config/nextest.toml b/.config/nextest.toml new file mode 100644 index 0000000..845daed --- /dev/null +++ b/.config/nextest.toml @@ -0,0 +1,3 @@ +[profile.ci] +# Output JUnit XML for native CI tab integration (GitHub, GitLab, CircleCI) +junit = { path = "junit.xml" } diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index b5073f2..d474d7e 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -38,10 +38,20 @@ jobs: run: cargo fmt --check - name: Run Clippy - run: cargo clippy --workspace --all-targets --all-features -- -D warnings + run: cargo clippy -p sanctifier-cli -p sanctifier-core --all-targets --all-features -- -D warnings - - name: Run tests - run: cargo test --workspace --all-features + - name: Install cargo-nextest + uses: taiki-e/install-action@nextest + + - name: Run tests (JUnit XML) + run: cargo nextest run --workspace --all-features --profile ci + + - name: Upload test results + if: always() + uses: actions/upload-artifact@v4 + with: + name: junit-test-results + path: target/nextest/ci/junit.xml - name: Build release binary run: cargo build --release -p sanctifier-cli 
diff --git a/Cargo.lock b/Cargo.lock index 88f953e..886ed2b 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -76,6 +76,21 @@ version = "1.0.102" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7f202df86484c868dbad7eaa557ef785d5c66295e41b460ef922eca0723b842c" +[[package]] +name = "assert_cmd" +version = "2.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9a686bbee5efb88a82df0621b236e74d925f470e5445d3220a5648b892ec99c9" +dependencies = [ + "anstyle", + "bstr", + "libc", + "predicates", + "predicates-core", + "predicates-tree", + "wait-timeout", +] + [[package]] name = "autocfg" version = "1.5.0" @@ -127,6 +142,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "bstr" +version = "1.12.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "63044e1ae8e69f3b5a92c736ca6269b8d12fa7efe39bf34ddb06d102cf0e2cab" +dependencies = [ + "memchr", + "regex-automata", + "serde", +] + [[package]] name = "bumpalo" version = "3.20.2" @@ -378,6 +404,12 @@ dependencies = [ "serde", ] +[[package]] +name = "difflib" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6184e33543162437515c2e2b48714794e37845ec9851711914eec9d308f6ebe8" + [[package]] name = "digest" version = "0.10.7" @@ -515,6 +547,15 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5baebc0774151f905a1a2cc41989300b1e6fbb29aff0ceffa1064fdd3088d582" +[[package]] +name = "float-cmp" +version = "0.10.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b09cf3155332e944990140d967ff5eceb70df778b34f77d8075db46e4704e6d8" +dependencies = [ + "num-traits", +] + [[package]] name = "fnv" version = "1.0.7" @@ -785,6 +826,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "normalize-line-endings" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"61807f77802ff30975e01f4f071c8ba10c022052f98b3294119f3e615d13e5be" + [[package]] name = "num-bigint" version = "0.4.4" @@ -910,6 +957,36 @@ dependencies = [ "zerocopy", ] +[[package]] +name = "predicates" +version = "3.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ada8f2932f28a27ee7b70dd6c1c39ea0675c55a36879ab92f3a715eaa1e63cfe" +dependencies = [ + "anstyle", + "difflib", + "float-cmp", + "normalize-line-endings", + "predicates-core", + "regex", +] + +[[package]] +name = "predicates-core" +version = "1.0.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cad38746f3166b4031b1a0d39ad9f954dd291e7854fcc0eed52ee41a0b50d144" + +[[package]] +name = "predicates-tree" +version = "1.0.13" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d0de1b847b39c8131db0467e9df1ff60e6d0562ab8e9a16e568ad0fdb372e2f2" +dependencies = [ + "predicates-core", + "termtree", +] + [[package]] name = "prettyplease" version = "0.2.15" @@ -1061,8 +1138,10 @@ name = "sanctifier-cli" version = "0.1.0" dependencies = [ "anyhow", + "assert_cmd", "clap", "colored", + "predicates", "regex", "sanctifier-core", "serde", @@ -1490,9 +1569,9 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.25.0" +version = "3.27.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0136791f7c95b1f6dd99f9cc786b91bb81c3800b639b3478e561ddb7be95e5f1" +checksum = "32497e9a4c7b38532efcdebeef879707aa9f794296a4f0244f6f69e9bc8574bd" dependencies = [ "fastrand", "getrandom 0.3.4", @@ -1501,6 +1580,12 @@ dependencies = [ "windows-sys 0.61.2", ] +[[package]] +name = "termtree" +version = "0.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8f50febec83f5ee1df3015341d8bd429f2d1cc62bcba7ea2076759d315084683" + [[package]] name = "thiserror" version = "1.0.55" @@ -1652,6 +1737,15 @@ dependencies = [ "soroban-sdk", ] +[[package]] +name = "wait-timeout" +version = "0.2.1" 
+source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09ac3b126d3914f9849036f826e054cbabdc8519970b8998ddaf3b5bd3c65f11" +dependencies = [ + "libc", +] + [[package]] name = "wasi" version = "0.11.1+wasi-snapshot-preview1" diff --git a/contracts/kani-poc/src/lib.rs b/contracts/kani-poc/src/lib.rs index 519255e..6846b9a 100644 --- a/contracts/kani-poc/src/lib.rs +++ b/contracts/kani-poc/src/lib.rs @@ -6,7 +6,7 @@ //! logic into functions that can be verified with Kani, while the contract layer that uses //! `Env`, `Address`, `Symbol`, etc. remains unverified due to Host type limitations. -use soroban_sdk::{contract, contractimpl, Env, Symbol, symbol_short}; +use soroban_sdk::{contract, contractimpl, symbol_short, Env, Symbol}; // ── Pure logic (verified with Kani) ───────────────────────────────────────────── // @@ -44,7 +44,9 @@ pub fn burn_pure(balance: i128, amount: i128) -> Result { if amount <= 0 { return Err("Burn amount must be positive"); } - balance.checked_sub(amount).ok_or("Insufficient balance to burn") + balance + .checked_sub(amount) + .ok_or("Insufficient balance to burn") } // ── Contract (not verified: uses Host types) ──────────────────────────────────── @@ -57,14 +59,15 @@ impl TokenContract { /// Wrapper exposing transfer_pure for contract use. /// A full implementation would read/write balances via env.storage(). pub fn transfer(balance_from: i128, balance_to: i128, amount: i128) -> (i128, i128) { - transfer_pure(balance_from, balance_to, amount) - .expect("transfer failed") + transfer_pure(balance_from, balance_to, amount).expect("transfer failed") } /// A function that interacts with Env (Host types). /// Kani cannot verify this: Env, Symbol, and storage operations require host FFI. 
pub fn set_admin(env: Env, new_admin: Symbol) { - env.storage().instance().set(&symbol_short!("admin"), &new_admin); + env.storage() + .instance() + .set(&symbol_short!("admin"), &new_admin); } } @@ -92,7 +95,10 @@ mod verification { assert!(new_from == balance_from - amount); assert!(new_to == balance_to + amount); - assert!(new_from + new_to == balance_from + balance_to, "Conservation of supply"); + assert!( + new_from + new_to == balance_from + balance_to, + "Conservation of supply" + ); } #[kani::proof] diff --git a/contracts/vulnerable-contract/src/lib.rs b/contracts/vulnerable-contract/src/lib.rs index b72bca5..45cca85 100644 --- a/contracts/vulnerable-contract/src/lib.rs +++ b/contracts/vulnerable-contract/src/lib.rs @@ -16,7 +16,7 @@ impl VulnerableContract { // βœ… Secure version pub fn set_admin_secure(env: Env, new_admin: Symbol) { - let admin: Symbol = env + let _admin: Symbol = env .storage() .instance() .get(&symbol_short!("admin")) diff --git a/tooling/sanctifier-cli/Cargo.toml b/tooling/sanctifier-cli/Cargo.toml index faeb6ca..708b42e 100644 --- a/tooling/sanctifier-cli/Cargo.toml +++ b/tooling/sanctifier-cli/Cargo.toml @@ -20,3 +20,9 @@ serde = { version = "1.0", features = ["derive"] } name = "sanctifier" path = "src/main.rs" +[dev-dependencies] +assert_cmd = "2.0" +predicates = "3.0" +tempfile = "3.0" +regex = "1.10" + diff --git a/tooling/sanctifier-cli/src/branding.rs b/tooling/sanctifier-cli/src/branding.rs index bca38c6..137e798 100644 --- a/tooling/sanctifier-cli/src/branding.rs +++ b/tooling/sanctifier-cli/src/branding.rs @@ -29,6 +29,7 @@ pub fn print_logo() { println!(); } +#[allow(dead_code)] pub fn print_banner(title: &str) { println!("{}", format!("━━━ {} ━━━", title).yellow().bold()); } diff --git a/tooling/sanctifier-cli/src/commands/analyze.rs b/tooling/sanctifier-cli/src/commands/analyze.rs index 6742c0e..03d75d6 100644 --- a/tooling/sanctifier-cli/src/commands/analyze.rs +++ b/tooling/sanctifier-cli/src/commands/analyze.rs @@ -1,8 
+1,10 @@ -use std::fs; -use std::path::{Path, PathBuf}; use clap::Args; use colored::*; -use sanctifier_core::{Analyzer, SanctifyConfig, SizeWarning, UnsafePattern, ArithmeticIssue, PanicIssue, SymbolIssue}; +use sanctifier_core::{ + Analyzer, ArithmeticIssue, PanicIssue, SanctifyConfig, SizeWarning, SymbolIssue, UnsafePattern, +}; +use std::fs; +use std::path::{Path, PathBuf}; #[derive(Args, Debug)] pub struct AnalyzeArgs { @@ -23,6 +25,8 @@ pub fn exec(args: AnalyzeArgs) -> anyhow::Result<()> { let path = &args.path; let format = &args.format; let is_json = format == "json"; + let is_junit = format == "junit"; + let is_machine = is_json || is_junit; if !is_soroban_project(path) { eprintln!( @@ -33,15 +37,20 @@ pub fn exec(args: AnalyzeArgs) -> anyhow::Result<()> { std::process::exit(1); } - if !is_json { - println!("{} Sanctifier: Valid Soroban project found at {:?}", "✨".green(), path); + if !is_machine { + println!( + "{} Sanctifier: Valid Soroban project found at {:?}", + "✨".green(), + path + ); println!("{} Analyzing contract at {:?}...", "πŸ”".blue(), path); } - let mut config = SanctifyConfig::default(); - config.ledger_limit = args.limit; - let analyzer = Analyzer::new(config); - + let analyzer = Analyzer::new(SanctifyConfig { + ledger_limit: args.limit, + ..Default::default() + }); + let mut all_size_warnings = Vec::new(); let mut all_unsafe_patterns = Vec::new(); let mut all_auth_gaps = Vec::new(); @@ -52,14 +61,28 @@ pub fn exec(args: AnalyzeArgs) -> anyhow::Result<()> { if path.is_dir() { analyze_directory( - path, &analyzer, &mut all_size_warnings, &mut all_unsafe_patterns, &mut all_auth_gaps, - &mut all_panic_issues, &mut all_arithmetic_issues, &mut all_storage_collisions, &mut all_symbol_issues + path, + &analyzer, + &mut all_size_warnings, + &mut all_unsafe_patterns, + &mut all_auth_gaps, + &mut all_panic_issues, + &mut all_arithmetic_issues, + &mut all_storage_collisions, + &mut all_symbol_issues, ); } else { if path.extension().and_then(|s| 
s.to_str()) == Some("rs") { analyze_file( - path, &analyzer, &mut all_size_warnings, &mut all_unsafe_patterns, &mut all_auth_gaps, - &mut all_panic_issues, &mut all_arithmetic_issues, &mut all_storage_collisions, &mut all_symbol_issues + path, + &analyzer, + &mut all_size_warnings, + &mut all_unsafe_patterns, + &mut all_auth_gaps, + &mut all_panic_issues, + &mut all_arithmetic_issues, + &mut all_storage_collisions, + &mut all_symbol_issues, ); } } @@ -75,16 +98,32 @@ pub fn exec(args: AnalyzeArgs) -> anyhow::Result<()> { "symbol_issues": all_symbol_issues, }); println!("{}", serde_json::to_string_pretty(&report)?); + } else if is_junit { + print_junit_report( + &all_size_warnings, + &all_unsafe_patterns, + &all_auth_gaps, + &all_panic_issues, + &all_arithmetic_issues, + &all_storage_collisions, + &all_symbol_issues, + ); } else { print_text_report( - &all_size_warnings, &all_unsafe_patterns, &all_auth_gaps, - &all_panic_issues, &all_arithmetic_issues, &all_storage_collisions, &all_symbol_issues + &all_size_warnings, + &all_unsafe_patterns, + &all_auth_gaps, + &all_panic_issues, + &all_arithmetic_issues, + &all_storage_collisions, + &all_symbol_issues, ); } - + Ok(()) } +#[allow(clippy::too_many_arguments)] fn analyze_file( path: &Path, analyzer: &Analyzer, @@ -142,6 +181,7 @@ fn analyze_file( } } +#[allow(clippy::too_many_arguments)] fn analyze_directory( dir: &Path, analyzer: &Analyzer, @@ -158,10 +198,30 @@ fn analyze_directory( let path = entry.path(); if path.is_dir() { if !path.ends_with("target") && !path.ends_with(".git") { - analyze_directory(&path, analyzer, size_warnings, unsafe_patterns, auth_gaps, panic_issues, arithmetic_issues, storage_collisions, symbol_issues); + analyze_directory( + &path, + analyzer, + size_warnings, + unsafe_patterns, + auth_gaps, + panic_issues, + arithmetic_issues, + storage_collisions, + symbol_issues, + ); } } else if path.extension().and_then(|s| s.to_str()) == Some("rs") { - analyze_file(&path, analyzer, size_warnings, 
unsafe_patterns, auth_gaps, panic_issues, arithmetic_issues, storage_collisions, symbol_issues); + analyze_file( + &path, + analyzer, + size_warnings, + unsafe_patterns, + auth_gaps, + panic_issues, + arithmetic_issues, + storage_collisions, + symbol_issues, + ); } } } @@ -169,7 +229,7 @@ fn analyze_directory( fn print_text_report( size_warnings: &[SizeWarning], - unsafe_patterns: &[UnsafePattern], + _unsafe_patterns: &[UnsafePattern], auth_gaps: &[String], panic_issues: &[PanicIssue], arithmetic_issues: &[ArithmeticIssue], @@ -181,7 +241,11 @@ fn print_text_report( if auth_gaps.is_empty() { println!("{} No authentication gaps found.", "βœ…".green()); } else { - println!("{} Found {} potential Authentication Gaps!", "⚠️".yellow(), auth_gaps.len()); + println!( + "{} Found {} potential Authentication Gaps!", + "⚠️".yellow(), + auth_gaps.len() + ); for gap in auth_gaps { println!(" {} {}", "->".red(), gap); } @@ -190,27 +254,58 @@ fn print_text_report( if symbol_issues.is_empty() { println!("{} No symbol length issues found.", "βœ…".green()); } else { - println!("{} Found {} Symbol length issues (Soroban v20 limit)!", "⚠️".yellow(), symbol_issues.len()); + println!( + "{} Found {} Symbol length issues (Soroban v20 limit)!", + "⚠️".yellow(), + symbol_issues.len() + ); for issue in symbol_issues { - println!(" {} {} ('{}') at {}", "->".red(), issue.issue_type.bold(), issue.value, issue.location); + println!( + " {} {} ('{}') at {}", + "->".red(), + issue.issue_type.bold(), + issue.value, + issue.location + ); } } if panic_issues.is_empty() { - println!("{} No panic!/unwrap/expect found in contract impls.", "βœ…".green()); + println!( + "{} No panic!/unwrap/expect found in contract impls.", + "βœ…".green() + ); } else { - println!("{} Found {} potential Panic issues!", "⚠️".yellow(), panic_issues.len()); + println!( + "{} Found {} potential Panic issues!", + "⚠️".yellow(), + panic_issues.len() + ); for issue in panic_issues { - println!(" {} {} in {}", "->".red(), 
issue.issue_type.bold(), issue.location); + println!( + " {} {} in {}", + "->".red(), + issue.issue_type.bold(), + issue.location + ); } } if arithmetic_issues.is_empty() { println!("{} No unchecked arithmetic issues found.", "βœ…".green()); } else { - println!("{} Found {} potential Arithmetic Overflow issues!", "⚠️".yellow(), arithmetic_issues.len()); + println!( + "{} Found {} potential Arithmetic Overflow issues!", + "⚠️".yellow(), + arithmetic_issues.len() + ); for issue in arithmetic_issues { - println!(" {} {} at {}", "->".red(), issue.operation.bold(), issue.location); + println!( + " {} {} at {}", + "->".red(), + issue.operation.bold(), + issue.location + ); println!(" Suggestion: {}", issue.suggestion.italic()); } } @@ -218,7 +313,11 @@ fn print_text_report( if storage_collisions.is_empty() { println!("{} No storage key collisions found.", "βœ…".green()); } else { - println!("{} Found {} potential Storage Key Collisions!", "⚠️".yellow(), storage_collisions.len()); + println!( + "{} Found {} potential Storage Key Collisions!", + "⚠️".yellow(), + storage_collisions.len() + ); for collision in storage_collisions { println!(" {} Value: {}", "->".red(), collision.key_value.bold()); println!(" Location: {}", collision.location); @@ -228,16 +327,221 @@ fn print_text_report( if size_warnings.is_empty() { println!("{} No ledger size warnings.", "βœ…".green()); } else { - println!("{} Found {} Ledger Size Warnings!", "⚠️".yellow(), size_warnings.len()); + println!( + "{} Found {} Ledger Size Warnings!", + "⚠️".yellow(), + size_warnings.len() + ); for warning in size_warnings { - println!(" {} {}: {} bytes (limit: {})", "->".red(), warning.struct_name, warning.estimated_size, warning.limit); + println!( + " {} {}: {} bytes (limit: {})", + "->".red(), + warning.struct_name, + warning.estimated_size, + warning.limit + ); } } } +fn xml_escape(s: &str) -> String { + s.replace('&', "&") + .replace('<', "<") + .replace('>', ">") + .replace('"', """) + .replace('\'', "'") 
+} + +fn junit_testsuite(name: &str, cases: &[String]) -> String { + let failures = cases.iter().filter(|c| c.contains(""#, + name = name, + tests = cases.len(), + failures = failures, + ); + suite.push('\n'); + for case in cases { + suite.push_str(case); + suite.push('\n'); + } + suite.push_str(" "); + suite +} + +fn print_junit_report( + size_warnings: &[SizeWarning], + unsafe_patterns: &[UnsafePattern], + auth_gaps: &[String], + panic_issues: &[PanicIssue], + arithmetic_issues: &[ArithmeticIssue], + storage_collisions: &[sanctifier_core::StorageCollisionIssue], + symbol_issues: &[SymbolIssue], +) { + let mut suites: Vec = Vec::new(); + let mut total_tests = 0usize; + let mut total_failures = 0usize; + + // auth_gaps + { + let cases: Vec = if auth_gaps.is_empty() { + total_tests += 1; + vec![ + r#" "# + .to_string(), + ] + } else { + total_tests += auth_gaps.len(); + total_failures += auth_gaps.len(); + auth_gaps.iter().enumerate().map(|(i, gap)| { + format!( + r#" {msg}"#, + i = i, + msg = xml_escape(gap), + ) + }).collect() + }; + suites.push(junit_testsuite("auth_gaps", &cases)); + } + + // symbol_issues + { + let cases: Vec = if symbol_issues.is_empty() { + total_tests += 1; + vec![r#" "#.to_string()] + } else { + total_tests += symbol_issues.len(); + total_failures += symbol_issues.len(); + symbol_issues.iter().enumerate().map(|(i, s)| { + format!( + r#" {loc}: {val}"#, + i = i, + issue_type = xml_escape(&s.issue_type), + loc = xml_escape(&s.location), + val = xml_escape(&s.value), + ) + }).collect() + }; + suites.push(junit_testsuite("symbol_issues", &cases)); + } + + // panic_issues + { + let cases: Vec = if panic_issues.is_empty() { + total_tests += 1; + vec![r#" "#.to_string()] + } else { + total_tests += panic_issues.len(); + total_failures += panic_issues.len(); + panic_issues.iter().enumerate().map(|(i, p)| { + format!( + r#" {loc}"#, + i = i, + issue_type = xml_escape(&p.issue_type), + loc = xml_escape(&p.location), + ) + }).collect() + }; + 
suites.push(junit_testsuite("panic_issues", &cases)); + } + + // arithmetic_issues + { + let cases: Vec = if arithmetic_issues.is_empty() { + total_tests += 1; + vec![r#" "#.to_string()] + } else { + total_tests += arithmetic_issues.len(); + total_failures += arithmetic_issues.len(); + arithmetic_issues.iter().enumerate().map(|(i, a)| { + format!( + r#" {loc}: {suggestion}"#, + i = i, + op = xml_escape(&a.operation), + loc = xml_escape(&a.location), + suggestion = xml_escape(&a.suggestion), + ) + }).collect() + }; + suites.push(junit_testsuite("arithmetic_issues", &cases)); + } + + // storage_collisions + { + let cases: Vec = if storage_collisions.is_empty() { + total_tests += 1; + vec![r#" "#.to_string()] + } else { + total_tests += storage_collisions.len(); + total_failures += storage_collisions.len(); + storage_collisions.iter().enumerate().map(|(i, s)| { + format!( + r#" {loc}"#, + i = i, + key = xml_escape(&s.key_value), + loc = xml_escape(&s.location), + ) + }).collect() + }; + suites.push(junit_testsuite("storage_collisions", &cases)); + } + + // size_warnings + { + let cases: Vec = if size_warnings.is_empty() { + total_tests += 1; + vec![r#" "#.to_string()] + } else { + total_tests += size_warnings.len(); + total_failures += size_warnings.len(); + size_warnings.iter().enumerate().map(|(i, w)| { + format!( + r#" {name}: {size} bytes (limit: {limit})"#, + i = i, + name = xml_escape(&w.struct_name), + size = w.estimated_size, + limit = w.limit, + ) + }).collect() + }; + suites.push(junit_testsuite("size_warnings", &cases)); + } + + // unsafe_patterns + { + let cases: Vec = if unsafe_patterns.is_empty() { + total_tests += 1; + vec![r#" "#.to_string()] + } else { + total_tests += unsafe_patterns.len(); + total_failures += unsafe_patterns.len(); + unsafe_patterns.iter().enumerate().map(|(i, p)| { + format!( + r#" {snippet}"#, + i = i, + snippet = xml_escape(&p.snippet), + ) + }).collect() + }; + suites.push(junit_testsuite("unsafe_patterns", &cases)); + } + + 
println!(r#""#); + println!( + r#""#, + total_tests = total_tests, + total_failures = total_failures, + ); + for suite in &suites { + println!("{}", suite); + } + println!(""); +} + fn is_soroban_project(path: &Path) -> bool { if path.is_file() { - return path.extension().and_then(|s| s.to_str()) == Some("rs") || path.ends_with("Cargo.toml"); + return path.extension().and_then(|s| s.to_str()) == Some("rs") + || path.ends_with("Cargo.toml"); } path.join("Cargo.toml").exists() } diff --git a/tooling/sanctifier-cli/src/commands/init.rs b/tooling/sanctifier-cli/src/commands/init.rs index 8ccf6e1..c3f38e9 100644 --- a/tooling/sanctifier-cli/src/commands/init.rs +++ b/tooling/sanctifier-cli/src/commands/init.rs @@ -64,7 +64,10 @@ impl OutputFormatter { } pub fn display_existing_file_warning() { - eprintln!("{} Configuration file already exists: .sanctify.toml", "⚠".yellow()); + eprintln!( + "{} Configuration file already exists: .sanctify.toml", + "⚠".yellow() + ); eprintln!(" Use --force to overwrite the existing configuration"); } @@ -125,14 +128,14 @@ mod tests { assert_eq!(config.ledger_limit, 64000); // Verify strict_mode - assert_eq!(config.strict_mode, false); + assert!(!config.strict_mode); // Verify approaching_threshold assert_eq!(config.approaching_threshold, 0.8); // Verify custom_rules assert_eq!(config.custom_rules.len(), 2); - + let rule1 = &config.custom_rules[0]; assert_eq!(rule1.name, "no_unsafe_block"); assert_eq!(rule1.pattern, "unsafe\\s*\\{"); @@ -147,11 +150,19 @@ mod tests { let config = ConfigGenerator::generate_default_config(); // Ensure all required fields are present and non-empty where appropriate - assert!(!config.ignore_paths.is_empty(), "ignore_paths should not be empty"); - assert!(!config.enabled_rules.is_empty(), "enabled_rules should not be empty"); + assert!( + !config.ignore_paths.is_empty(), + "ignore_paths should not be empty" + ); + assert!( + !config.enabled_rules.is_empty(), + "enabled_rules should not be empty" + ); 
assert!(config.ledger_limit > 0, "ledger_limit should be positive"); - assert!(config.approaching_threshold > 0.0 && config.approaching_threshold < 1.0, - "approaching_threshold should be between 0 and 1"); + assert!( + config.approaching_threshold > 0.0 && config.approaching_threshold < 1.0, + "approaching_threshold should be between 0 and 1" + ); } #[test] @@ -159,12 +170,22 @@ mod tests { let config = ConfigGenerator::generate_default_config(); for rule in &config.custom_rules { - assert!(!rule.name.is_empty(), "Custom rule name should not be empty"); - assert!(!rule.pattern.is_empty(), "Custom rule pattern should not be empty"); - + assert!( + !rule.name.is_empty(), + "Custom rule name should not be empty" + ); + assert!( + !rule.pattern.is_empty(), + "Custom rule pattern should not be empty" + ); + // Verify patterns are valid regex let regex_result = regex::Regex::new(&rule.pattern); - assert!(regex_result.is_ok(), "Pattern '{}' should be a valid regex", rule.pattern); + assert!( + regex_result.is_ok(), + "Pattern '{}' should be a valid regex", + rule.pattern + ); } } @@ -172,7 +193,7 @@ mod tests { fn test_config_exists_returns_false_when_no_file() { let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path(); - + assert!(!FileWriter::config_exists(path)); } @@ -181,10 +202,10 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path(); let config_path = path.join(".sanctify.toml"); - + // Create the file fs::write(&config_path, "test content").unwrap(); - + assert!(FileWriter::config_exists(path)); } @@ -193,9 +214,9 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path(); let config = ConfigGenerator::generate_default_config(); - + let result = FileWriter::write_config(&config, path); - + assert!(result.is_ok()); let config_path = result.unwrap(); assert!(config_path.exists()); @@ -207,13 +228,13 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path(); let config = 
ConfigGenerator::generate_default_config(); - + let result = FileWriter::write_config(&config, path); assert!(result.is_ok()); - + let config_path = result.unwrap(); let content = fs::read_to_string(&config_path).unwrap(); - + // Verify it's valid TOML by parsing it let parsed: Result = toml::from_str(&content); assert!(parsed.is_ok(), "Generated TOML should be parseable"); @@ -224,10 +245,10 @@ mod tests { let temp_dir = TempDir::new().unwrap(); let path = temp_dir.path(); let config = ConfigGenerator::generate_default_config(); - + let result = FileWriter::write_config(&config, path); assert!(result.is_ok()); - + let returned_path = result.unwrap(); let expected_path = path.join(".sanctify.toml"); assert_eq!(returned_path, expected_path); @@ -237,24 +258,24 @@ mod tests { fn test_exec_creates_config_in_temp_dir() { let temp_dir = TempDir::new().unwrap(); let args = InitArgs { force: false }; - + // Change to temp directory let original_dir = std::env::current_dir().unwrap(); std::env::set_current_dir(temp_dir.path()).unwrap(); - + // Execute init command let result = exec(args); - + // Restore original directory std::env::set_current_dir(original_dir).unwrap(); - + // Verify success assert!(result.is_ok(), "exec should succeed in empty directory"); - + // Verify file was created let config_path = temp_dir.path().join(".sanctify.toml"); assert!(config_path.exists(), "Config file should be created"); - + // Verify content is valid TOML let content = fs::read_to_string(&config_path).unwrap(); let parsed: Result = toml::from_str(&content); @@ -265,23 +286,23 @@ mod tests { fn test_exec_with_existing_file_without_force() { let temp_dir = TempDir::new().unwrap(); let config_path = temp_dir.path().join(".sanctify.toml"); - + // Create existing file fs::write(&config_path, "existing content").unwrap(); - - let args = InitArgs { force: false }; - + + let _args = InitArgs { force: false }; + // Change to temp directory let original_dir = std::env::current_dir().unwrap(); 
std::env::set_current_dir(temp_dir.path()).unwrap(); - + // Execute init command - this will call std::process::exit(1) // We can't test this directly without spawning a subprocess // So we'll just test the components separately - + // Restore original directory std::env::set_current_dir(original_dir).unwrap(); - + // Verify file was not modified let content = fs::read_to_string(&config_path).unwrap(); assert_eq!(content, "existing content", "File should not be modified"); @@ -291,28 +312,31 @@ mod tests { fn test_exec_with_force_overwrites_existing_file() { let temp_dir = TempDir::new().unwrap(); let config_path = temp_dir.path().join(".sanctify.toml"); - + // Create existing file fs::write(&config_path, "existing content").unwrap(); - + let args = InitArgs { force: true }; - + // Change to temp directory let original_dir = std::env::current_dir().unwrap(); std::env::set_current_dir(temp_dir.path()).unwrap(); - + // Execute init command let result = exec(args); - + // Restore original directory std::env::set_current_dir(original_dir).unwrap(); - + // Verify success assert!(result.is_ok(), "exec should succeed with force flag"); - + // Verify file was overwritten let content = fs::read_to_string(&config_path).unwrap(); assert_ne!(content, "existing content", "File should be overwritten"); - assert!(content.contains("ignore_paths"), "Should contain default config"); + assert!( + content.contains("ignore_paths"), + "Should contain default config" + ); } } diff --git a/tooling/sanctifier-cli/src/main.rs b/tooling/sanctifier-cli/src/main.rs index 22516f3..4c2da67 100644 --- a/tooling/sanctifier-cli/src/main.rs +++ b/tooling/sanctifier-cli/src/main.rs @@ -29,7 +29,7 @@ fn main() -> anyhow::Result<()> { match cli.command { Commands::Analyze(args) => { - if args.format != "json" { + if args.format != "json" && args.format != "junit" { branding::print_logo(); } commands::analyze::exec(args)?; diff --git a/tooling/sanctifier-cli/tests/cli_tests.rs 
b/tooling/sanctifier-cli/tests/cli_tests.rs index 5ba508b..6fa24fa 100644 --- a/tooling/sanctifier-cli/tests/cli_tests.rs +++ b/tooling/sanctifier-cli/tests/cli_tests.rs @@ -38,9 +38,13 @@ fn test_analyze_vulnerable_contract() { .arg(fixture_path) .assert() .success() - .stdout(predicate::str::contains("Found potential Authentication Gaps!")) + .stdout(predicate::str::contains( + "Found potential Authentication Gaps!", + )) .stdout(predicate::str::contains("Found explicit Panics/Unwraps!")) - .stdout(predicate::str::contains("Found unchecked Arithmetic Operations!")); + .stdout(predicate::str::contains( + "Found unchecked Arithmetic Operations!", + )); } #[test] @@ -50,17 +54,43 @@ fn test_analyze_json_output() { .unwrap() .join("tests/fixtures/valid_contract.rs"); - let assert = cmd.arg("analyze") + let assert = cmd + .arg("analyze") .arg(fixture_path) .arg("--format") .arg("json") .assert() .success(); - + // JSON starts with { assert.stdout(predicate::str::starts_with("{")); } +#[test] +fn test_analyze_junit_output() { + let mut cmd = Command::cargo_bin("sanctifier").unwrap(); + let fixture_path = env::current_dir() + .unwrap() + .join("tests/fixtures/valid_contract.rs"); + + let assert = cmd + .arg("analyze") + .arg(fixture_path) + .arg("--format") + .arg("junit") + .assert() + .success(); + + assert + .stdout(predicate::str::contains( + r#""#, + )) + .stdout(predicate::str::contains("")) + .stdout(predicate::str::contains(", -} - -#[derive(Debug, Clone, serde::Serialize)] -pub struct ContractMetrics { - pub contract_path: String, - pub dependency_count: usize, - pub functions: Vec, -} - -// --------------------------------------------------------------------------- -// Visitor for a single function body -// --------------------------------------------------------------------------- - -struct FnComplexityVisitor { - cyclomatic: u32, - current_depth: u32, - max_depth: u32, -} - -impl FnComplexityVisitor { - fn new() -> Self { - Self { cyclomatic: 1, 
current_depth: 0, max_depth: 0 } - } - - fn enter(&mut self) { - self.current_depth += 1; - if self.current_depth > self.max_depth { - self.max_depth = self.current_depth; - } - } - - fn exit(&mut self) { - self.current_depth = self.current_depth.saturating_sub(1); - } -} - -impl<'ast> Visit<'ast> for FnComplexityVisitor { - fn visit_expr_if(&mut self, node: &'ast syn::ExprIf) { - self.cyclomatic += 1; - self.enter(); - syn::visit::visit_expr_if(self, node); - self.exit(); - } - fn visit_expr_match(&mut self, node: &'ast syn::ExprMatch) { - // Each arm beyond the first adds a branch - self.cyclomatic += node.arms.len().saturating_sub(1) as u32; - self.enter(); - syn::visit::visit_expr_match(self, node); - self.exit(); - } - fn visit_expr_for_loop(&mut self, node: &'ast syn::ExprForLoop) { - self.cyclomatic += 1; - self.enter(); - syn::visit::visit_expr_for_loop(self, node); - self.exit(); - } - fn visit_expr_while(&mut self, node: &'ast syn::ExprWhile) { - self.cyclomatic += 1; - self.enter(); - syn::visit::visit_expr_while(self, node); - self.exit(); - } - fn visit_expr_loop(&mut self, node: &'ast syn::ExprLoop) { - self.cyclomatic += 1; - self.enter(); - syn::visit::visit_expr_loop(self, node); - self.exit(); - } - fn visit_expr_closure(&mut self, node: &'ast syn::ExprClosure) { - self.cyclomatic += 1; - self.enter(); - syn::visit::visit_expr_closure(self, node); - self.exit(); - } - // &&, || add logical branches - fn visit_expr_binary(&mut self, node: &'ast syn::ExprBinary) { - use syn::BinOp::*; - if matches!(node.op, And(_) | Or(_)) { - self.cyclomatic += 1; - } - syn::visit::visit_expr_binary(self, node); - } -} - -// --------------------------------------------------------------------------- -// File-level visitor (collects functions + dependency count) -// --------------------------------------------------------------------------- - -struct FileVisitor { - pub functions: Vec, - pub dependency_count: usize, -} - -impl FileVisitor { - fn new() -> Self { - 
Self { functions: Vec::new(), dependency_count: 0 } - } - - fn analyze_fn(&self, name: &str, sig: &syn::Signature, block: &syn::Block, span_str: &str) -> FunctionMetrics { - let mut visitor = FnComplexityVisitor::new(); - visitor.visit_block(block); - - let param_count = sig.inputs.len(); - let loc = count_loc(span_str); - - let mut warnings = Vec::new(); - if visitor.cyclomatic > THRESHOLD_CYCLOMATIC { - warnings.push(format!( - "Cyclomatic complexity {} exceeds threshold {}", - visitor.cyclomatic, THRESHOLD_CYCLOMATIC - )); - } - if param_count > THRESHOLD_PARAMS { - warnings.push(format!( - "{} parameters exceeds threshold {}", - param_count, THRESHOLD_PARAMS - )); - } - if visitor.max_depth > THRESHOLD_NESTING { - warnings.push(format!( - "Nesting depth {} exceeds threshold {}", - visitor.max_depth, THRESHOLD_NESTING - )); - } - if loc > THRESHOLD_LOC { - warnings.push(format!( - "{} LOC exceeds threshold {}", - loc, THRESHOLD_LOC - )); - } - - FunctionMetrics { - name: name.to_string(), - cyclomatic_complexity: visitor.cyclomatic, - param_count, - max_nesting_depth: visitor.max_depth, - loc, - warnings, - } - } -} - -impl<'ast> Visit<'ast> for FileVisitor { - fn visit_item_use(&mut self, _: &'ast ItemUse) { - self.dependency_count += 1; - } - fn visit_item_extern_crate(&mut self, _: &'ast ItemExternCrate) { - self.dependency_count += 1; - } - - fn visit_item_fn(&mut self, node: &'ast ItemFn) { - // Only public functions - if matches!(node.vis, syn::Visibility::Public(_)) { - let span_str = quote::quote!(#node).to_string(); - let m = self.analyze_fn(&node.sig.ident.to_string(), &node.sig, &node.block, &span_str); - self.functions.push(m); - } - syn::visit::visit_item_fn(self, node); - } - - fn visit_impl_item_fn(&mut self, node: &'ast ImplItemFn) { - let span_str = quote::quote!(#node).to_string(); - let m = self.analyze_fn(&node.sig.ident.to_string(), &node.sig, &node.block, &span_str); - self.functions.push(m); - syn::visit::visit_impl_item_fn(self, node); - 
} -} - -// --------------------------------------------------------------------------- -// Public API -// --------------------------------------------------------------------------- - -pub fn analyze_complexity(ast: &File, contract_path: &str) -> ContractMetrics { - let mut visitor = FileVisitor::new(); - visitor.visit_file(ast); - - ContractMetrics { - contract_path: contract_path.to_string(), - dependency_count: visitor.dependency_count, - functions: visitor.functions, - } -} - -/// Render plain-text report -pub fn render_text_report(metrics: &ContractMetrics) -> String { - let mut out = String::new(); - out.push_str("╔══════════════════════════════════════════════════════════════════════╗\n"); - out.push_str("β•‘ πŸ“Š SANCTIFIER β€” CONTRACT COMPLEXITY REPORT β•‘\n"); - out.push_str("β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•\n\n"); - out.push_str(&format!(" Contract : {}\n", metrics.contract_path)); - out.push_str(&format!(" Dependencies: {}\n", metrics.dependency_count)); - out.push_str(&format!(" Functions : {}\n\n", metrics.functions.len())); - - out.push_str("β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”\n"); - out.push_str("β”‚ Function β”‚ CC β”‚ Params β”‚ Nesting β”‚ LOC β”‚ Status β”‚\n"); - out.push_str("β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€\n"); - - for f in &metrics.functions { - let status = if f.warnings.is_empty() { "βœ… OK" } else { "⚠️ WARN" }; - out.push_str(&format!( - "β”‚ {:<24} β”‚ {:>4} β”‚ {:>6} β”‚ {:>7} β”‚ {:>3} β”‚ {:<8} 
β”‚\n", - truncate(&f.name, 24), - f.cyclomatic_complexity, - f.param_count, - f.max_nesting_depth, - f.loc, - status, - )); - } - out.push_str("β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜\n\n"); - - // Warnings section - let has_warnings = metrics.functions.iter().any(|f| !f.warnings.is_empty()); - if has_warnings { - out.push_str(" WARNINGS:\n"); - for f in &metrics.functions { - for w in &f.warnings { - out.push_str(&format!(" ⚠️ {}(): {}\n", f.name, w)); - } - } - out.push('\n'); - } - - out.push_str(" Thresholds: CC > 10 | Params > 5 | Nesting > 4 | LOC > 50\n"); - out -} - -/// Render JSON report -pub fn render_json_report(metrics: &ContractMetrics) -> String { - serde_json::to_string_pretty(metrics).unwrap_or_else(|_| "{}".to_string()) -} - -/// Render HTML report -pub fn render_html_report(metrics: &ContractMetrics) -> String { - let rows: String = metrics.functions.iter().map(|f| { - let warn_class = if f.warnings.is_empty() { "ok" } else { "warn" }; - let cc_class = if f.cyclomatic_complexity > THRESHOLD_CYCLOMATIC { "over" } else { "" }; - let p_class = if f.param_count > THRESHOLD_PARAMS { "over" } else { "" }; - let n_class = if f.max_nesting_depth > THRESHOLD_NESTING { "over" } else { "" }; - let l_class = if f.loc > THRESHOLD_LOC { "over" } else { "" }; - let warnings = f.warnings.iter().map(|w| format!("
  • {}
  • ", w)).collect::(); - let warn_block = if warnings.is_empty() { String::new() } - else { format!("
      {}
    ", warnings) }; - format!( - "\ - {}\ - {}\ - {}\ - {}\ - {}\ - {}\ - {}\n", - f.name, - f.cyclomatic_complexity, - f.param_count, - f.max_nesting_depth, - f.loc, - if f.warnings.is_empty() { "βœ…" } else { "⚠️" }, - if warn_block.is_empty() { String::new() } - else { format!("{}", warn_block) } - ) - }).collect(); - - format!(r#" - - - -Sanctifier β€” Complexity Report - - - -

    πŸ“Š Sanctifier β€” Contract Complexity Report

    -
    - Contract: {path}  |  - Dependencies: {deps}  |  - Functions: {fn_count} -
    - - - - - - - - - - - - - {rows} - -
    FunctionCyclomatic CCParamsNestingLOCStatus
    -

    - Thresholds: CC > 10  |  Params > 5  |  Nesting > 4  |  LOC > 50 -

    - -"#, - path = metrics.contract_path, - deps = metrics.dependency_count, - fn_count = metrics.functions.len(), - rows = rows, - ) -} - -// --------------------------------------------------------------------------- -// Helpers -// --------------------------------------------------------------------------- - -fn count_loc(token_str: &str) -> usize { - token_str.lines().count() -} - -fn truncate(s: &str, max: usize) -> String { - if s.len() <= max { s.to_string() } - else { format!("{}…", &s[..max - 1]) } -} \ No newline at end of file +// tooling/sanctifier-core/src/complexity.rs +// +// Contract Complexity Metrics β€” Issue #45 +// +// Metrics per public function: +// - Cyclomatic complexity (branches + 1) +// - Parameter count +// - Max nesting depth +// - Lines of code (LOC) +// - Number of extern crate / use dependencies (file-level) + +use syn::{visit::Visit, File, ImplItemFn, ItemExternCrate, ItemFn, ItemUse}; + +// --------------------------------------------------------------------------- +// Thresholds (warn if exceeded) +// --------------------------------------------------------------------------- +const THRESHOLD_CYCLOMATIC: u32 = 10; +const THRESHOLD_PARAMS: usize = 5; +const THRESHOLD_NESTING: u32 = 4; +const THRESHOLD_LOC: usize = 50; + +// --------------------------------------------------------------------------- +// Data structures +// --------------------------------------------------------------------------- + +#[derive(Debug, Clone, serde::Serialize)] +pub struct FunctionMetrics { + pub name: String, + pub cyclomatic_complexity: u32, + pub param_count: usize, + pub max_nesting_depth: u32, + pub loc: usize, + pub warnings: Vec, +} + +#[derive(Debug, Clone, serde::Serialize)] +pub struct ContractMetrics { + pub contract_path: String, + pub dependency_count: usize, + pub functions: Vec, +} + +// --------------------------------------------------------------------------- +// Visitor for a single function body +// 
--------------------------------------------------------------------------- + +struct FnComplexityVisitor { + cyclomatic: u32, + current_depth: u32, + max_depth: u32, +} + +impl FnComplexityVisitor { + fn new() -> Self { + Self { + cyclomatic: 1, + current_depth: 0, + max_depth: 0, + } + } + + fn enter(&mut self) { + self.current_depth += 1; + if self.current_depth > self.max_depth { + self.max_depth = self.current_depth; + } + } + + fn exit(&mut self) { + self.current_depth = self.current_depth.saturating_sub(1); + } +} + +impl<'ast> Visit<'ast> for FnComplexityVisitor { + fn visit_expr_if(&mut self, node: &'ast syn::ExprIf) { + self.cyclomatic += 1; + self.enter(); + syn::visit::visit_expr_if(self, node); + self.exit(); + } + fn visit_expr_match(&mut self, node: &'ast syn::ExprMatch) { + // Each arm beyond the first adds a branch + self.cyclomatic += node.arms.len().saturating_sub(1) as u32; + self.enter(); + syn::visit::visit_expr_match(self, node); + self.exit(); + } + fn visit_expr_for_loop(&mut self, node: &'ast syn::ExprForLoop) { + self.cyclomatic += 1; + self.enter(); + syn::visit::visit_expr_for_loop(self, node); + self.exit(); + } + fn visit_expr_while(&mut self, node: &'ast syn::ExprWhile) { + self.cyclomatic += 1; + self.enter(); + syn::visit::visit_expr_while(self, node); + self.exit(); + } + fn visit_expr_loop(&mut self, node: &'ast syn::ExprLoop) { + self.cyclomatic += 1; + self.enter(); + syn::visit::visit_expr_loop(self, node); + self.exit(); + } + fn visit_expr_closure(&mut self, node: &'ast syn::ExprClosure) { + self.cyclomatic += 1; + self.enter(); + syn::visit::visit_expr_closure(self, node); + self.exit(); + } + // &&, || add logical branches + fn visit_expr_binary(&mut self, node: &'ast syn::ExprBinary) { + use syn::BinOp::*; + if matches!(node.op, And(_) | Or(_)) { + self.cyclomatic += 1; + } + syn::visit::visit_expr_binary(self, node); + } +} + +// --------------------------------------------------------------------------- +// File-level 
visitor (collects functions + dependency count) +// --------------------------------------------------------------------------- + +struct FileVisitor { + pub functions: Vec, + pub dependency_count: usize, +} + +impl FileVisitor { + fn new() -> Self { + Self { + functions: Vec::new(), + dependency_count: 0, + } + } + + fn analyze_fn( + &self, + name: &str, + sig: &syn::Signature, + block: &syn::Block, + span_str: &str, + ) -> FunctionMetrics { + let mut visitor = FnComplexityVisitor::new(); + visitor.visit_block(block); + + let param_count = sig.inputs.len(); + let loc = count_loc(span_str); + + let mut warnings = Vec::new(); + if visitor.cyclomatic > THRESHOLD_CYCLOMATIC { + warnings.push(format!( + "Cyclomatic complexity {} exceeds threshold {}", + visitor.cyclomatic, THRESHOLD_CYCLOMATIC + )); + } + if param_count > THRESHOLD_PARAMS { + warnings.push(format!( + "{} parameters exceeds threshold {}", + param_count, THRESHOLD_PARAMS + )); + } + if visitor.max_depth > THRESHOLD_NESTING { + warnings.push(format!( + "Nesting depth {} exceeds threshold {}", + visitor.max_depth, THRESHOLD_NESTING + )); + } + if loc > THRESHOLD_LOC { + warnings.push(format!("{} LOC exceeds threshold {}", loc, THRESHOLD_LOC)); + } + + FunctionMetrics { + name: name.to_string(), + cyclomatic_complexity: visitor.cyclomatic, + param_count, + max_nesting_depth: visitor.max_depth, + loc, + warnings, + } + } +} + +impl<'ast> Visit<'ast> for FileVisitor { + fn visit_item_use(&mut self, _: &'ast ItemUse) { + self.dependency_count += 1; + } + fn visit_item_extern_crate(&mut self, _: &'ast ItemExternCrate) { + self.dependency_count += 1; + } + + fn visit_item_fn(&mut self, node: &'ast ItemFn) { + // Only public functions + if matches!(node.vis, syn::Visibility::Public(_)) { + let span_str = quote::quote!(#node).to_string(); + let m = self.analyze_fn( + &node.sig.ident.to_string(), + &node.sig, + &node.block, + &span_str, + ); + self.functions.push(m); + } + syn::visit::visit_item_fn(self, node); + } 
+ + fn visit_impl_item_fn(&mut self, node: &'ast ImplItemFn) { + let span_str = quote::quote!(#node).to_string(); + let m = self.analyze_fn( + &node.sig.ident.to_string(), + &node.sig, + &node.block, + &span_str, + ); + self.functions.push(m); + syn::visit::visit_impl_item_fn(self, node); + } +} + +// --------------------------------------------------------------------------- +// Public API +// --------------------------------------------------------------------------- + +pub fn analyze_complexity(ast: &File, contract_path: &str) -> ContractMetrics { + let mut visitor = FileVisitor::new(); + visitor.visit_file(ast); + + ContractMetrics { + contract_path: contract_path.to_string(), + dependency_count: visitor.dependency_count, + functions: visitor.functions, + } +} + +/// Render plain-text report +pub fn render_text_report(metrics: &ContractMetrics) -> String { + let mut out = String::new(); + out.push_str("╔══════════════════════════════════════════════════════════════════════╗\n"); + out.push_str("β•‘ πŸ“Š SANCTIFIER β€” CONTRACT COMPLEXITY REPORT β•‘\n"); + out.push_str("β•šβ•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•β•\n\n"); + out.push_str(&format!(" Contract : {}\n", metrics.contract_path)); + out.push_str(&format!(" Dependencies: {}\n", metrics.dependency_count)); + out.push_str(&format!(" Functions : {}\n\n", metrics.functions.len())); + + out.push_str("β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”\n"); + out.push_str("β”‚ Function β”‚ CC β”‚ Params β”‚ Nesting β”‚ LOC β”‚ Status β”‚\n"); + 
out.push_str("β”œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”Όβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€\n"); + + for f in &metrics.functions { + let status = if f.warnings.is_empty() { + "βœ… OK" + } else { + "⚠️ WARN" + }; + out.push_str(&format!( + "β”‚ {:<24} β”‚ {:>4} β”‚ {:>6} β”‚ {:>7} β”‚ {:>3} β”‚ {:<8} β”‚\n", + truncate(&f.name, 24), + f.cyclomatic_complexity, + f.param_count, + f.max_nesting_depth, + f.loc, + status, + )); + } + out.push_str("β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜\n\n"); + + // Warnings section + let has_warnings = metrics.functions.iter().any(|f| !f.warnings.is_empty()); + if has_warnings { + out.push_str(" WARNINGS:\n"); + for f in &metrics.functions { + for w in &f.warnings { + out.push_str(&format!(" ⚠️ {}(): {}\n", f.name, w)); + } + } + out.push('\n'); + } + + out.push_str(" Thresholds: CC > 10 | Params > 5 | Nesting > 4 | LOC > 50\n"); + out +} + +/// Render JSON report +pub fn render_json_report(metrics: &ContractMetrics) -> String { + serde_json::to_string_pretty(metrics).unwrap_or_else(|_| "{}".to_string()) +} + +/// Render HTML report +pub fn render_html_report(metrics: &ContractMetrics) -> String { + let rows: String = metrics + .functions + .iter() + .map(|f| { + let warn_class = if f.warnings.is_empty() { "ok" } else { "warn" }; + let cc_class = if f.cyclomatic_complexity > THRESHOLD_CYCLOMATIC { + "over" + } else { + "" + }; + let p_class = if f.param_count > THRESHOLD_PARAMS { + "over" + } else { + "" + }; + let n_class = if f.max_nesting_depth > THRESHOLD_NESTING { + "over" + } else { + "" + }; + let l_class = if f.loc > THRESHOLD_LOC { "over" } else { "" }; + let warnings = f + .warnings + .iter() + .map(|w| format!("
  • {}
  • ", w)) + .collect::(); + let warn_block = if warnings.is_empty() { + String::new() + } else { + format!("
      {}
    ", warnings) + }; + format!( + "\ + {}\ + {}\ + {}\ + {}\ + {}\ + {}\ + {}\n", + f.name, + f.cyclomatic_complexity, + f.param_count, + f.max_nesting_depth, + f.loc, + if f.warnings.is_empty() { + "βœ…" + } else { + "⚠️" + }, + if warn_block.is_empty() { + String::new() + } else { + format!( + "{}", + warn_block + ) + } + ) + }) + .collect(); + + format!( + r#" + + + +Sanctifier β€” Complexity Report + + + +

    πŸ“Š Sanctifier β€” Contract Complexity Report

    +
    + Contract: {path}  |  + Dependencies: {deps}  |  + Functions: {fn_count} +
    + + + + + + + + + + + + + {rows} + +
    FunctionCyclomatic CCParamsNestingLOCStatus
    +

    + Thresholds: CC > 10  |  Params > 5  |  Nesting > 4  |  LOC > 50 +

    + +"#, + path = metrics.contract_path, + deps = metrics.dependency_count, + fn_count = metrics.functions.len(), + rows = rows, + ) +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +fn count_loc(token_str: &str) -> usize { + token_str.lines().count() +} + +fn truncate(s: &str, max: usize) -> String { + if s.len() <= max { + s.to_string() + } else { + format!("{}…", &s[..max - 1]) + } +} diff --git a/tooling/sanctifier-core/src/gas_estimator.rs b/tooling/sanctifier-core/src/gas_estimator.rs index 414ad04..05b4783 100644 --- a/tooling/sanctifier-core/src/gas_estimator.rs +++ b/tooling/sanctifier-core/src/gas_estimator.rs @@ -9,6 +9,7 @@ pub struct GasEstimationReport { pub estimated_memory_bytes: usize, } +#[derive(Default)] pub struct GasEstimator {} impl GasEstimator { diff --git a/tooling/sanctifier-core/src/gas_report.rs b/tooling/sanctifier-core/src/gas_report.rs index cc4c802..ac4a300 100644 --- a/tooling/sanctifier-core/src/gas_report.rs +++ b/tooling/sanctifier-core/src/gas_report.rs @@ -5,8 +5,8 @@ // This module wraps ``gas_estimator`` output and provides human-readable // text and JSON rendering used by `sanctifier gas` subcommand. 
-use serde::Serialize; use crate::gas_estimator::GasEstimationReport; +use serde::Serialize; // ── Report types ────────────────────────────────────────────────────────────── @@ -83,7 +83,11 @@ impl GasReport { let entries: Vec = reports.into_iter().map(Into::into).collect(); let total_instructions = entries.iter().map(|e| e.estimated_instructions).sum(); let total_memory_bytes = entries.iter().map(|e| e.estimated_memory_bytes).sum(); - GasReport { entries, total_instructions, total_memory_bytes } + GasReport { + entries, + total_instructions, + total_memory_bytes, + } } } @@ -186,4 +190,3 @@ mod tests { assert!(text.contains("LOW")); } } - diff --git a/tooling/sanctifier-core/src/lib.rs b/tooling/sanctifier-core/src/lib.rs index c6d62a4..27134a7 100644 --- a/tooling/sanctifier-core/src/lib.rs +++ b/tooling/sanctifier-core/src/lib.rs @@ -1,12 +1,12 @@ use serde::{Deserialize, Serialize}; +pub mod complexity; pub mod gas_estimator; pub mod gas_report; -pub mod complexity; pub mod reentrancy; pub mod storage_collision; use std::collections::HashSet; -use std::panic::{catch_unwind, AssertUnwindSafe}; +use std::panic::catch_unwind; use syn::spanned::Spanned; use syn::visit::{self, Visit}; use syn::{parse_str, Fields, File, Item, Meta, Type}; @@ -17,22 +17,16 @@ use thiserror::Error; #[cfg(target_arch = "wasm32")] use soroban_sdk::Env; -const DEFAULT_APPROACHING_THRESHOLD: f64 = 0.8; - fn with_panic_guard(f: F) -> R where F: FnOnce() -> R + std::panic::UnwindSafe, R: Default, { - match catch_unwind(f) { - Ok(res) => res, - Err(_) => R::default(), - } + catch_unwind(f).unwrap_or_default() } // ── Existing types ──────────────────────────────────────────────────────────── - /// Severity of a ledger size warning. 
#[derive(Debug, Serialize, Clone, PartialEq)] pub enum SizeWarningLevel { @@ -123,16 +117,6 @@ impl UpgradeReport { } } -fn has_attr(attrs: &[syn::Attribute], name: &str) -> bool { - attrs.iter().any(|attr| { - if let Meta::Path(path) = &attr.meta { - path.is_ident(name) || path.segments.iter().any(|s| s.ident == name) - } else { - false - } - }) -} - fn is_upgrade_or_admin_fn(name: &str) -> bool { let lower = name.to_lowercase(); matches!( @@ -263,9 +247,7 @@ fn classify_size( strict: bool, strict_threshold: usize, ) -> Option { - if size >= limit { - Some(SizeWarningLevel::ExceedsLimit) - } else if strict && size >= strict_threshold { + if size >= limit || (strict && size >= strict_threshold) { Some(SizeWarningLevel::ExceedsLimit) } else if size as f64 >= limit as f64 * approaching { Some(SizeWarningLevel::ApproachingLimit) @@ -274,8 +256,6 @@ fn classify_size( } } - - // ── Analyzer ────────────────────────────────────────────────────────────────── pub struct Analyzer { @@ -447,7 +427,9 @@ impl Analyzer { } } syn::Stmt::Macro(m) => { - if m.mac.path.is_ident("require_auth") || m.mac.path.is_ident("require_auth_for_args") { + if m.mac.path.is_ident("require_auth") + || m.mac.path.is_ident("require_auth_for_args") + { *has_auth = true; } } @@ -476,7 +458,11 @@ impl Analyzer { if method_name == "set" || method_name == "update" || method_name == "remove" { // Heuristic: check if receiver chain contains "storage" let receiver_str = quote::quote!(#m.receiver).to_string(); - if receiver_str.contains("storage") || receiver_str.contains("persistent") || receiver_str.contains("temporary") || receiver_str.contains("instance") { + if receiver_str.contains("storage") + || receiver_str.contains("persistent") + || receiver_str.contains("temporary") + || receiver_str.contains("instance") + { *has_mutation = true; } } @@ -537,7 +523,9 @@ impl Analyzer { Item::Struct(s) => { if has_contracttype(&s.attrs) { let size = self.estimate_struct_size(s); - if let Some(level) = 
classify_size(size, limit, approaching, strict, strict_threshold) { + if let Some(level) = + classify_size(size, limit, approaching, strict, strict_threshold) + { warnings.push(SizeWarning { struct_name: s.ident.to_string(), estimated_size: size, @@ -550,7 +538,9 @@ impl Analyzer { Item::Enum(e) => { if has_contracttype(&e.attrs) { let size = self.estimate_enum_size(e); - if let Some(level) = classify_size(size, limit, approaching, strict, strict_threshold) { + if let Some(level) = + classify_size(size, limit, approaching, strict, strict_threshold) + { warnings.push(SizeWarning { struct_name: e.ident.to_string(), estimated_size: size, @@ -615,9 +605,9 @@ impl Analyzer { // ── Event Consistency and Optimization (NEW) ───────────────────────────── - /// Scans for `env.events().publish(topics, data)` and checks: - /// 1. Consistency of topic counts for the same event name. - /// 2. Opportunities to use `symbol_short!` for gas savings. + // Scans for `env.events().publish(topics, data)` and checks: + // 1. Consistency of topic counts for the same event name. + // 2. Opportunities to use `symbol_short!` for gas savings. /* pub fn scan_events(&self, source: &str) -> Vec { with_panic_guard(|| self.scan_events_impl(source)) } @@ -639,8 +629,7 @@ impl Analyzer { // ── Unsafe-pattern visitor ──────────────────────────────────────────────── - /// Visitor-based scan for `panic!`, `.unwrap()`, `.expect()` with line - /// numbers derived from proc-macro2 span locations. + /// Visitor-based scan for `panic!`, `.unwrap()`, `.expect()` with line numbers derived from proc-macro2 span locations. 
pub fn analyze_unsafe_patterns(&self, source: &str) -> Vec { with_panic_guard(|| self.analyze_unsafe_patterns_impl(source)) } @@ -741,7 +730,6 @@ impl Analyzer { visitor.issues } - // ── Size estimation helpers ─────────────────────────────────────────────── fn estimate_enum_size(&self, e: &syn::ItemEnum) -> usize { @@ -796,7 +784,9 @@ impl Analyzer { "Address" => 32, "BytesN" => { if let syn::PathArguments::AngleBracketed(args) = &seg.arguments { - if let Some(syn::GenericArgument::Type(Type::Path(tp))) = args.args.first() { + if let Some(syn::GenericArgument::Type(Type::Path(tp))) = + args.args.first() + { // Sometimes N is a type-level literal or just a number if let Some(s) = tp.path.segments.last() { if let Ok(n) = s.ident.to_string().parse::() { @@ -819,13 +809,17 @@ impl Analyzer { } "Map" => { if let syn::PathArguments::AngleBracketed(args) = &seg.arguments { - let inner: usize = args.args.iter().filter_map(|a| { - if let syn::GenericArgument::Type(t) = a { - Some(self.estimate_type_size(t)) - } else { - None - } - }).sum(); + let inner: usize = args + .args + .iter() + .filter_map(|a| { + if let syn::GenericArgument::Type(t) = a { + Some(self.estimate_type_size(t)) + } else { + None + } + }) + .sum(); if inner > 0 { return 16 + inner * 2; } @@ -1015,7 +1009,11 @@ impl<'ast> Visit<'ast> for SymbolVisitor { if p.path.is_ident("Symbol") || p.path.segments.iter().any(|s| s.ident == "Symbol") { if let Some(last) = p.path.segments.last() { if last.ident == "new" && i.args.len() >= 2 { - if let syn::Expr::Lit(syn::ExprLit { lit: syn::Lit::Str(s), .. }) = &i.args[1] { + if let syn::Expr::Lit(syn::ExprLit { + lit: syn::Lit::Str(s), + .. 
+ }) = &i.args[1] + { let val = s.value(); if val.len() > 32 { self.issues.push(SymbolIssue { diff --git a/tooling/sanctifier-core/src/reentrancy.rs b/tooling/sanctifier-core/src/reentrancy.rs index 4ebbafa..280321c 100644 --- a/tooling/sanctifier-core/src/reentrancy.rs +++ b/tooling/sanctifier-core/src/reentrancy.rs @@ -11,7 +11,7 @@ // storage write (`.set(`, `.update(`, `.remove(`). use serde::Serialize; -use syn::{visit::Visit, parse_str, File, Item}; +use syn::{parse_str, visit::Visit, File, Item}; /// A potential reentrancy / CEI violation found in a contract function. #[derive(Debug, Serialize, Clone)] @@ -63,7 +63,8 @@ impl ReentrancyVisitor { if !self.issues.iter().any(|i| i.function_name == *fn_name) { self.issues.push(ReentrancyIssue { function_name: fn_name.clone(), - issue_type: "CEI violation: storage mutation after cross-contract call".to_string(), + issue_type: "CEI violation: storage mutation after cross-contract call" + .to_string(), location: format!("fn {}", fn_name), }); } diff --git a/tooling/sanctifier-core/src/storage_collision.rs b/tooling/sanctifier-core/src/storage_collision.rs index cb71f23..52d3c68 100644 --- a/tooling/sanctifier-core/src/storage_collision.rs +++ b/tooling/sanctifier-core/src/storage_collision.rs @@ -3,11 +3,11 @@ use quote::quote; use std::collections::HashMap; use syn::spanned::Spanned; use syn::{ - parse_str, visit::{self, Visit}, - Expr, ExprCall, ExprMacro, File, ItemConst, Lit, Meta, + Expr, ExprCall, ExprMacro, ItemConst, Lit, }; +#[derive(Default)] pub struct StorageVisitor { pub collisions: Vec, pub keys: HashMap>, @@ -23,10 +23,7 @@ pub struct KeyInfo { impl StorageVisitor { pub fn new() -> Self { - Self { - collisions: Vec::new(), - keys: HashMap::new(), - } + Self::default() } fn add_key(&mut self, value: String, key_type: String, location: String, line: usize) { @@ -36,7 +33,7 @@ impl StorageVisitor { location, line, }; - self.keys.entry(value).or_insert_with(Vec::new).push(info); + 
self.keys.entry(value).or_default().push(info); } pub fn final_check(&mut self) { @@ -86,18 +83,16 @@ impl<'ast> Visit<'ast> for StorageVisitor { if path.segments.len() >= 2 { let seg1 = &path.segments[0].ident; let seg2 = &path.segments[1].ident; - if seg1 == "Symbol" && seg2 == "new" { - if i.args.len() >= 2 { - if let Expr::Lit(expr_lit) = &i.args[1] { - if let Lit::Str(lit_str) = &expr_lit.lit { - let val = lit_str.value(); - self.add_key( - val, - "Symbol::new".to_string(), - "inline".to_string(), - i.span().start().line, - ); - } + if seg1 == "Symbol" && seg2 == "new" && i.args.len() >= 2 { + if let Expr::Lit(expr_lit) = &i.args[1] { + if let Lit::Str(lit_str) = &expr_lit.lit { + let val = lit_str.value(); + self.add_key( + val, + "Symbol::new".to_string(), + "inline".to_string(), + i.span().start().line, + ); } } } diff --git a/tooling/sanctifier-core/src/tests/complexity_tests.rs b/tooling/sanctifier-core/src/tests/complexity_tests.rs index c1e6c74..d7784cc 100644 --- a/tooling/sanctifier-core/src/tests/complexity_tests.rs +++ b/tooling/sanctifier-core/src/tests/complexity_tests.rs @@ -1,97 +1,97 @@ -// tooling/sanctifier-core/src/tests/complexity_tests.rs - -#[cfg(test)] -mod tests { - use crate::complexity::analyze_complexity; - use syn::parse_file; - - fn parse(src: &str) -> syn::File { - parse_file(src).expect("should parse") - } - - #[test] - fn test_simple_fn_has_cc_of_one() { - let src = r#" - pub fn get_balance(env: Env, addr: Address) -> i128 { - env.storage().persistent().get(&addr).unwrap_or(0) - } - "#; - let metrics = analyze_complexity(&parse(src), "test.rs"); - let f = &metrics.functions[0]; - assert_eq!(f.cyclomatic_complexity, 1); - assert!(f.warnings.is_empty()); - } - - #[test] - fn test_if_increments_cc() { - let src = r#" - pub fn transfer(env: Env, amount: i128) { - if amount > 0 { - env.storage().persistent().set(&"k", &amount); - } - } - "#; - let metrics = analyze_complexity(&parse(src), "test.rs"); - let f = &metrics.functions[0]; 
- assert_eq!(f.cyclomatic_complexity, 2); - } - - #[test] - fn test_nesting_depth_tracked() { - let src = r#" - pub fn nested(env: Env, x: i128) -> i128 { - if x > 0 { - for i in 0..x { - if i > 5 { - return i; - } - } - } - 0 - } - "#; - let metrics = analyze_complexity(&parse(src), "test.rs"); - let f = &metrics.functions[0]; - assert!(f.max_nesting_depth >= 3); - } - - #[test] - fn test_param_count() { - let src = r#" - pub fn many_params(env: Env, a: i128, b: i128, c: i128, d: i128, e: i128) {} - "#; - let metrics = analyze_complexity(&parse(src), "test.rs"); - let f = &metrics.functions[0]; - assert_eq!(f.param_count, 6); // env + 5 others - assert!(f.warnings.iter().any(|w| w.contains("parameters"))); - } - - #[test] - fn test_use_counts_as_dependency() { - let src = r#" - use soroban_sdk::{Env, Address}; - use soroban_sdk::token::TokenClient; - pub fn foo(env: Env) {} - "#; - let metrics = analyze_complexity(&parse(src), "test.rs"); - assert_eq!(metrics.dependency_count, 2); - } - - #[test] - fn test_match_arms_add_to_cc() { - let src = r#" - pub fn route(env: Env, action: u32) { - match action { - 0 => {}, - 1 => {}, - 2 => {}, - _ => {}, - } - } - "#; - let metrics = analyze_complexity(&parse(src), "test.rs"); - let f = &metrics.functions[0]; - // 1 base + 3 extra arms - assert_eq!(f.cyclomatic_complexity, 4); - } -} \ No newline at end of file +// tooling/sanctifier-core/src/tests/complexity_tests.rs + +#[cfg(test)] +mod tests { + use crate::complexity::analyze_complexity; + use syn::parse_file; + + fn parse(src: &str) -> syn::File { + parse_file(src).expect("should parse") + } + + #[test] + fn test_simple_fn_has_cc_of_one() { + let src = r#" + pub fn get_balance(env: Env, addr: Address) -> i128 { + env.storage().persistent().get(&addr).unwrap_or(0) + } + "#; + let metrics = analyze_complexity(&parse(src), "test.rs"); + let f = &metrics.functions[0]; + assert_eq!(f.cyclomatic_complexity, 1); + assert!(f.warnings.is_empty()); + } + + #[test] + fn 
test_if_increments_cc() { + let src = r#" + pub fn transfer(env: Env, amount: i128) { + if amount > 0 { + env.storage().persistent().set(&"k", &amount); + } + } + "#; + let metrics = analyze_complexity(&parse(src), "test.rs"); + let f = &metrics.functions[0]; + assert_eq!(f.cyclomatic_complexity, 2); + } + + #[test] + fn test_nesting_depth_tracked() { + let src = r#" + pub fn nested(env: Env, x: i128) -> i128 { + if x > 0 { + for i in 0..x { + if i > 5 { + return i; + } + } + } + 0 + } + "#; + let metrics = analyze_complexity(&parse(src), "test.rs"); + let f = &metrics.functions[0]; + assert!(f.max_nesting_depth >= 3); + } + + #[test] + fn test_param_count() { + let src = r#" + pub fn many_params(env: Env, a: i128, b: i128, c: i128, d: i128, e: i128) {} + "#; + let metrics = analyze_complexity(&parse(src), "test.rs"); + let f = &metrics.functions[0]; + assert_eq!(f.param_count, 6); // env + 5 others + assert!(f.warnings.iter().any(|w| w.contains("parameters"))); + } + + #[test] + fn test_use_counts_as_dependency() { + let src = r#" + use soroban_sdk::{Env, Address}; + use soroban_sdk::token::TokenClient; + pub fn foo(env: Env) {} + "#; + let metrics = analyze_complexity(&parse(src), "test.rs"); + assert_eq!(metrics.dependency_count, 2); + } + + #[test] + fn test_match_arms_add_to_cc() { + let src = r#" + pub fn route(env: Env, action: u32) { + match action { + 0 => {}, + 1 => {}, + 2 => {}, + _ => {}, + } + } + "#; + let metrics = analyze_complexity(&parse(src), "test.rs"); + let f = &metrics.functions[0]; + // 1 base + 3 extra arms + assert_eq!(f.cyclomatic_complexity, 4); + } +} diff --git a/tooling/sanctifier-core/src/tests/lib_tests.rs b/tooling/sanctifier-core/src/tests/lib_tests.rs index a6faede..c51df5a 100644 --- a/tooling/sanctifier-core/src/tests/lib_tests.rs +++ b/tooling/sanctifier-core/src/tests/lib_tests.rs @@ -34,9 +34,10 @@ fn test_analyze_with_macros() { #[test] fn test_analyze_with_limit() { - let mut config = SanctifyConfig::default(); - 
config.ledger_limit = 50; - let analyzer = Analyzer::new(config); + let analyzer = Analyzer::new(SanctifyConfig { + ledger_limit: 50, + ..Default::default() + }); let source = r#" #[contracttype] pub struct ExceedsLimit { @@ -52,10 +53,11 @@ fn test_analyze_with_limit() { #[test] fn test_ledger_size_enum_and_approaching() { - let mut config = SanctifyConfig::default(); - config.ledger_limit = 100; - config.approaching_threshold = 0.5; - let analyzer = Analyzer::new(config); + let analyzer = Analyzer::new(SanctifyConfig { + ledger_limit: 100, + approaching_threshold: 0.5, + ..Default::default() + }); let source = r#" #[contracttype] pub enum DataKey { @@ -72,8 +74,13 @@ fn test_ledger_size_enum_and_approaching() { } "#; let warnings = analyzer.analyze_ledger_size(source); - assert!(warnings.iter().any(|w| w.struct_name == "NearLimit"), "NearLimit (64 bytes) should exceed 50% of 100"); - assert!(warnings.iter().any(|w| w.level == SizeWarningLevel::ApproachingLimit)); + assert!( + warnings.iter().any(|w| w.struct_name == "NearLimit"), + "NearLimit (64 bytes) should exceed 50% of 100" + ); + assert!(warnings + .iter() + .any(|w| w.level == SizeWarningLevel::ApproachingLimit)); } #[test] diff --git a/tooling/sanctifier-core/src/tests/mod.rs b/tooling/sanctifier-core/src/tests/mod.rs index 33f6879..d40a426 100644 --- a/tooling/sanctifier-core/src/tests/mod.rs +++ b/tooling/sanctifier-core/src/tests/mod.rs @@ -1,7 +1,7 @@ // tooling/sanctifier-core/src/tests/mod.rs -pub mod lib_tests; pub mod complexity_tests; pub mod gas_estimator_tests; +pub mod lib_tests; pub mod reentrancy_tests; pub mod storage_collision_tests; diff --git a/tooling/sanctifier-core/src/tests/reentrancy_tests.rs b/tooling/sanctifier-core/src/tests/reentrancy_tests.rs index 9505688..55dd983 100644 --- a/tooling/sanctifier-core/src/tests/reentrancy_tests.rs +++ b/tooling/sanctifier-core/src/tests/reentrancy_tests.rs @@ -37,7 +37,10 @@ mod tests { } "#; let issues = scan_reentrancy(src); - 
assert!(issues.is_empty(), "Read then cross-call should not be flagged"); + assert!( + issues.is_empty(), + "Read then cross-call should not be flagged" + ); } #[test] @@ -58,7 +61,10 @@ mod tests { } "#; let issues = scan_reentrancy(src); - assert!(issues.is_empty(), "Correct CEI pattern should not be flagged"); + assert!( + issues.is_empty(), + "Correct CEI pattern should not be flagged" + ); } #[test] diff --git a/tooling/sanctifier-core/src/tests/storage_collision_tests.rs b/tooling/sanctifier-core/src/tests/storage_collision_tests.rs index d3aa055..4d9d488 100644 --- a/tooling/sanctifier-core/src/tests/storage_collision_tests.rs +++ b/tooling/sanctifier-core/src/tests/storage_collision_tests.rs @@ -15,7 +15,10 @@ mod tests { const KEY_B: &str = "collision"; "#; let issues = analyzer().scan_storage_collisions(src); - assert!(!issues.is_empty(), "Duplicate const string keys should be flagged"); + assert!( + !issues.is_empty(), + "Duplicate const string keys should be flagged" + ); assert!(issues.iter().any(|i| i.key_value == "collision")); } @@ -33,8 +36,13 @@ mod tests { } "#; let issues = analyzer().scan_storage_collisions(src); - assert!(!issues.is_empty(), "Duplicate symbol_short! should be flagged"); - assert!(issues.iter().any(|i| i.key_value == "\"tok\"" || i.key_value == "tok")); + assert!( + !issues.is_empty(), + "Duplicate symbol_short! 
should be flagged" + ); + assert!(issues + .iter() + .any(|i| i.key_value == "\"tok\"" || i.key_value == "tok")); } #[test] @@ -51,7 +59,10 @@ mod tests { } "#; let issues = analyzer().scan_storage_collisions(src); - assert!(issues.is_empty(), "All unique keys β€” no collisions expected"); + assert!( + issues.is_empty(), + "All unique keys β€” no collisions expected" + ); } #[test] @@ -68,7 +79,10 @@ mod tests { } "#; let issues = analyzer().scan_storage_collisions(src); - assert!(!issues.is_empty(), "Duplicate Symbol::new keys should be flagged"); + assert!( + !issues.is_empty(), + "Duplicate Symbol::new keys should be flagged" + ); assert!(issues.iter().any(|i| i.key_value == "shared_key")); } diff --git a/tooling/sanctifier-core/tests/integration_token_test.rs b/tooling/sanctifier-core/tests/integration_token_test.rs index 2d92d58..491ee4e 100644 --- a/tooling/sanctifier-core/tests/integration_token_test.rs +++ b/tooling/sanctifier-core/tests/integration_token_test.rs @@ -1,7 +1,3 @@ -use sanctifier_core::{Analyzer, PatternType, SanctifyConfig}; -use std::fs; -use std::path::PathBuf; - /* #[test] fn test_token_integration_auth_and_panic() {