diff --git a/applications/tests/test_fatfs_consistency/Cargo.toml b/applications/tests/test_fatfs_consistency/Cargo.toml new file mode 100644 index 000000000..6d927befd --- /dev/null +++ b/applications/tests/test_fatfs_consistency/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "test_fatfs_consistency" +version = "0.1.0" +edition = "2024" + +[dependencies] +log = "0.4" + +[dependencies.awkernel_async_lib] +path = "../../../awkernel_async_lib" +default-features = false + +[dependencies.awkernel_lib] +path = "../../../awkernel_lib" +default-features = false + +[dependencies.awkernel_services] +path = "../../awkernel_services" \ No newline at end of file diff --git a/applications/tests/test_fatfs_consistency/src/lib.rs b/applications/tests/test_fatfs_consistency/src/lib.rs new file mode 100644 index 000000000..1444d6758 --- /dev/null +++ b/applications/tests/test_fatfs_consistency/src/lib.rs @@ -0,0 +1,553 @@ +#![no_std] + +extern crate alloc; + +use alloc::borrow::Cow; +use alloc::format; +use alloc::vec; +use alloc::vec::Vec; +use awkernel_async_lib::file::path::AsyncVfsPath; +use awkernel_async_lib::scheduler::SchedulerType; +use awkernel_lib::file::fatfs::init_memory_fatfs; +use core::sync::atomic::{AtomicBool, AtomicU32, Ordering}; +use log::info; + +static TEST_PASSED: AtomicBool = AtomicBool::new(false); +static WRITE_COUNT: AtomicU32 = AtomicU32::new(0); +static EXPECTED_LINES: AtomicU32 = AtomicU32::new(0); + +const TEST_FILE_PATH: &str = "test_consistency.txt"; +const NUM_WRITERS: usize = 3; +const WRITES_PER_TASK: usize = 5; + +pub async fn run() { + // Initialize memory FatFS if not already done + match init_memory_fatfs() { + Ok(_) => info!("In-memory FAT filesystem initialized for consistency test."), + Err(e) => { + info!("Failed to initialize in-memory FAT filesystem: {:?}", e); + return; + } + } + + awkernel_async_lib::spawn( + "fatfs consistency test".into(), + consistency_test(), + SchedulerType::FIFO, + ) + .await + .join() + .await; + + // Run metadata cache cleanup test + awkernel_async_lib::spawn( + "metadata cache cleanup test".into(), + metadata_cache_cleanup_test(), + SchedulerType::FIFO, + ) + .await + .join() + .await; +} + +async fn writer_task(id: usize) -> usize { + info!("Writer {} starting", id); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join(TEST_FILE_PATH).unwrap(); + + // Keep file handle open for all writes + let mut file = match file_path.create_file().await { + Ok(f) => { + info!("Writer {} created/opened file", id); + f + } + Err(e) => { + info!("Writer {} failed to create file: {:?}", id, e); + return 0; + } + }; + + let mut lines_written = 0; + + for i in 0..WRITES_PER_TASK { + // Seek to end to append + use awkernel_lib::file::io::SeekFrom; + match file.seek(SeekFrom::End(0)).await { + Ok(pos) => { + info!("Writer {} at position {} for iteration {}", id, pos, i); + } + Err(e) => { + info!("Writer {} failed to seek: {:?}", id, e); + continue; + } + } + + // Write data unique to this writer + let data = format!("Writer {} iteration {}\n", id, i); + match file.write(data.as_bytes()).await { + Ok(_) => { + lines_written += 1; + WRITE_COUNT.fetch_add(1, Ordering::Relaxed); + info!("Writer {} wrote iteration {}", id, i); + } + Err(e) => { + info!("Writer {} failed to write: {:?}", id, e); + continue; + } + } + + // Yield to allow other tasks to run + for _ in 0..3 { + awkernel_async_lib::r#yield().await; + } + } + + // Explicitly drop the file + drop(file); + info!("Writer {} finished, wrote {} lines", id, 
lines_written); + + lines_written +} + +async fn monitor_task() { + info!("Monitor starting"); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join(TEST_FILE_PATH).unwrap(); + + // Wait a bit for writers to start + for _ in 0..20 { + awkernel_async_lib::r#yield().await; + } + + let mut checks = 0; + loop { + match file_path.open_file().await { + Ok(mut file) => { + let mut buffer = vec![0u8; 2048]; + match file.read(&mut buffer).await { + Ok(bytes_read) => { + if let Ok(content) = core::str::from_utf8(&buffer[..bytes_read]) { + let lines: Vec<&str> = + content.lines().filter(|l| !l.is_empty()).collect(); + info!("Monitor: File has {} lines", lines.len()); + } + } + Err(e) => { + info!("Monitor: Failed to read: {:?}", e); + } + } + } + Err(_) => { + info!("Monitor: File not found yet"); + } + } + + checks += 1; + if checks > 20 { + break; + } + + // Check periodically + for _ in 0..10 { + awkernel_async_lib::r#yield().await; + } + } + + info!("Monitor finished"); +} + +async fn consistency_test() { + info!("Starting FatFS consistency test"); + + // First, ensure we can create a file + info!("Pre-test: Creating initial file"); + { + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join(TEST_FILE_PATH).unwrap(); + match file_path.create_file().await { + Ok(mut f) => { + f.write(b"Initial line\n") + .await + .expect("Initial write failed"); + EXPECTED_LINES.store(1, Ordering::Relaxed); + info!("Pre-test: Initial file created successfully"); + } + Err(e) => { + info!("Pre-test: Failed to create initial file: {:?}", e); + return; + } + } + } + + // Spawn writer tasks + let mut writer_handles = Vec::new(); + for i in 0..NUM_WRITERS { + let handle = awkernel_async_lib::spawn( + Cow::Owned(format!("writer_{}", i)), + writer_task(i), + SchedulerType::FIFO, + ) + .await; + writer_handles.push(handle); + } + + // Spawn monitor task + let monitor_handle = + awkernel_async_lib::spawn("monitor".into(), monitor_task(), SchedulerType::FIFO).await; + + // Wait for all writers to complete + let mut total_lines_written = 0; + for handle in writer_handles { + match handle.join().await { + Ok(lines) => total_lines_written += lines, + Err(_) => info!("Writer task was cancelled"), + } + } + + info!( + "All writers completed, total lines written: {}", + total_lines_written + ); + EXPECTED_LINES.fetch_add(total_lines_written as u32, Ordering::Relaxed); + + // Wait for monitor to finish + let _ = monitor_handle.join().await; + + // Give some time for file system to settle + for _ in 0..10 { + awkernel_async_lib::r#yield().await; + } + + // Final check - read the file + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join(TEST_FILE_PATH).unwrap(); + + match file_path.open_file().await { + Ok(mut file) => { + let mut buffer = vec![0u8; 4096]; + let bytes_read = file.read(&mut buffer).await.unwrap_or(0); + + if let Ok(content) = core::str::from_utf8(&buffer[..bytes_read]) { + let lines: Vec<&str> = content.lines().filter(|l| !l.is_empty()).collect(); + let expected = EXPECTED_LINES.load(Ordering::Relaxed) as usize; + + info!( + "Final check: Found {} lines, expected {}", + lines.len(), + expected + ); + + // Print first few lines + for (i, line) in lines.iter().take(5).enumerate() { + info!(" Line {}: {}", i, line); + } + + if lines.len() == expected { + info!("SUCCESS: File consistency maintained!"); + TEST_PASSED.store(true, Ordering::Relaxed); + } else { + info!("FAILURE: Line count mismatch"); + } + } + } + Err(e) => 
{ + info!("Final check: Failed to open file: {:?}", e); + } + } + + if TEST_PASSED.load(Ordering::Relaxed) { + info!("FatFS consistency test PASSED!"); + } else { + info!("FatFS consistency test FAILED!"); + } + + // Run concurrent truncation test + concurrent_truncation_test().await; +} + +async fn truncation_writer_task(id: usize) -> usize { + info!("Truncation writer {} starting", id); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("truncation_test.txt").unwrap(); + + let mut lines_written = 0; + + for i in 0..5 { + // Open file for each iteration to get fresh handle + let mut file = match file_path.create_file().await { + Ok(f) => f, + Err(e) => { + info!("Truncation writer {} failed to open file: {:?}", id, e); + continue; + } + }; + + // Write some data + let data = format!( + "Truncation writer {} iteration {} - this is a longer line to test cluster allocation\n", + id, i + ); + + // Seek to end to append + use awkernel_lib::file::io::SeekFrom; + if let Err(e) = file.seek(SeekFrom::End(0)).await { + info!("Truncation writer {} failed to seek: {:?}", id, e); + continue; + } + + match file.write(data.as_bytes()).await { + Ok(_) => { + lines_written += 1; + info!("Truncation writer {} wrote iteration {}", id, i); + } + Err(e) => { + info!("Truncation writer {} failed to write: {:?}", id, e); + } + } + + // Yield to allow truncation + for _ in 0..5 { + awkernel_async_lib::r#yield().await; + } + } + + info!( + "Truncation writer {} finished, wrote {} lines", + id, lines_written + ); + lines_written +} + +async fn truncation_task() { + info!("Truncation task starting"); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("truncation_test.txt").unwrap(); + + // Wait for writers to write some data + for _ in 0..10 { + awkernel_async_lib::r#yield().await; + } + + for i in 0..3 { + match file_path.open_file().await { + Ok(mut file) => { + // Read current size + use awkernel_lib::file::io::SeekFrom; + let size = match file.seek(SeekFrom::End(0)).await { + Ok(pos) => pos as u32, + Err(e) => { + info!("Truncation task: Failed to seek: {:?}", e); + continue; + } + }; + + if size > 100 { + // Truncate to smaller size + let new_size = size / 2; + if let Err(e) = file.seek(SeekFrom::Start(new_size as u64)).await { + info!("Truncation task: Failed to seek for truncate: {:?}", e); + continue; + } + + // Truncate is not available on async file interface, simulate by writing + // For now just log that we would truncate + info!( + "Truncation task: Would truncate file from {} to {} bytes", + size, new_size + ); + } + } + Err(e) => { + info!("Truncation task: Failed to open file: {:?}", e); + } + } + + // Wait before next truncation + for _ in 0..20 { + awkernel_async_lib::r#yield().await; + } + } + + info!("Truncation task finished"); +} + +async fn reader_with_position_task(id: usize) { + info!("Position reader {} starting", id); + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("truncation_test.txt").unwrap(); + + // Wait for file to be created + for _ in 0..5 { + awkernel_async_lib::r#yield().await; + } + + // Open file once and keep reading + let mut file = match file_path.open_file().await { + Ok(f) => f, + Err(e) => { + info!("Position reader {} failed to open file: {:?}", id, e); + return; + } + }; + + let mut last_pos = 0u64; + + for i in 0..10 { + // Try to read from current position + let mut buffer = vec![0u8; 256]; + match file.read(&mut buffer).await { + Ok(bytes_read) => { + 
use awkernel_lib::file::io::SeekFrom; + let current_pos = file.seek(SeekFrom::Current(0)).await.unwrap_or(0); + info!( + "Position reader {}: iteration {}, read {} bytes, position {} -> {}", + id, i, bytes_read, last_pos, current_pos + ); + last_pos = current_pos; + } + Err(e) => { + info!( + "Position reader {}: Failed to read at iteration {}: {:?}", + id, i, e + ); + // Try to recover by seeking to start + use awkernel_lib::file::io::SeekFrom; + if let Ok(pos) = file.seek(SeekFrom::Start(0)).await { + info!( + "Position reader {}: Recovered by seeking to start ({})", + id, pos + ); + last_pos = pos; + } + } + } + + // Yield to allow truncation/writing + for _ in 0..3 { + awkernel_async_lib::r#yield().await; + } + } + + info!("Position reader {} finished", id); +} + +async fn concurrent_truncation_test() { + info!("Starting concurrent truncation test"); + + // Create initial file + { + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("truncation_test.txt").unwrap(); + match file_path.create_file().await { + Ok(mut f) => { + f.write(b"Initial content for truncation test\n") + .await + .expect("Initial write failed"); + info!("Truncation test: Initial file created"); + } + Err(e) => { + info!("Truncation test: Failed to create initial file: {:?}", e); + return; + } + } + } + + // Spawn writers, truncator, and readers + let mut writer_handles = Vec::new(); + let mut reader_handles = Vec::new(); + + // Spawn writers + for i in 0..2 { + let handle = awkernel_async_lib::spawn( + Cow::Owned(format!("truncation_writer_{}", i)), + truncation_writer_task(i), + SchedulerType::FIFO, + ) + .await; + writer_handles.push(handle); + } + + // Spawn truncation task + let truncation_handle = awkernel_async_lib::spawn( + "truncation_task".into(), + truncation_task(), + SchedulerType::FIFO, + ) + .await; + + // Spawn readers + for i in 0..2 { + let handle = awkernel_async_lib::spawn( + Cow::Owned(format!("position_reader_{}", i)), + reader_with_position_task(i), + SchedulerType::FIFO, + ) + .await; + reader_handles.push(handle); + } + + // Wait for all writer tasks to complete + for handle in writer_handles { + let _ = handle.join().await; + } + + // Wait for all reader tasks to complete + for handle in reader_handles { + let _ = handle.join().await; + } + let _ = truncation_handle.join().await; + + info!("Concurrent truncation test completed"); +} + +// Test metadata cache cleanup +async fn metadata_cache_cleanup_test() { + info!("Starting metadata cache cleanup test"); + + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + let file_path = root_path.join("cache_test.txt").unwrap(); + + // Create and drop multiple file handles to test cache cleanup + for i in 0..5 { + // Create a file + let mut file = match file_path.create_file().await { + Ok(f) => f, + Err(e) => { + info!("Failed to create file in iteration {}: {:?}", i, e); + continue; + } + }; + + // Write some data + let data = format!("Test data for iteration {}\n", i); + if let Err(e) = file.write(data.as_bytes()).await { + info!("Failed to write in iteration {}: {:?}", i, e); + } + + // Open the same file multiple times to test shared metadata + let _file2 = file_path.open_file().await.ok(); + let _file3 = file_path.open_file().await.ok(); + + // All handles will be dropped here, should clean up cache + } + + // Open the file again to verify it still works + match file_path.open_file().await { + Ok(mut file) => { + let mut buffer = vec![0u8; 100]; + match file.read(&mut buffer).await { + Ok(bytes_read) => { + 
info!("Successfully read {} bytes after cache cleanup", bytes_read); + } + Err(e) => { + info!("Failed to read after cache cleanup: {:?}", e); + } + } + } + Err(e) => { + info!("Failed to open file after cache cleanup: {:?}", e); + } + } + + info!("Metadata cache cleanup test completed"); +} diff --git a/applications/tests/test_fatfs_simple_consistency/Cargo.toml b/applications/tests/test_fatfs_simple_consistency/Cargo.toml new file mode 100644 index 000000000..69ce11776 --- /dev/null +++ b/applications/tests/test_fatfs_simple_consistency/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "test_fatfs_simple_consistency" +version = "0.1.0" +edition = "2024" + +[dependencies] +log = "0.4" + +[dependencies.awkernel_async_lib] +path = "../../../awkernel_async_lib" +default-features = false + +[dependencies.awkernel_lib] +path = "../../../awkernel_lib" +default-features = false \ No newline at end of file diff --git a/applications/tests/test_fatfs_simple_consistency/src/lib.rs b/applications/tests/test_fatfs_simple_consistency/src/lib.rs new file mode 100644 index 000000000..5d63670eb --- /dev/null +++ b/applications/tests/test_fatfs_simple_consistency/src/lib.rs @@ -0,0 +1,141 @@ +#![no_std] + +extern crate alloc; + +use alloc::vec; +use awkernel_async_lib::file::path::AsyncVfsPath; +use awkernel_async_lib::scheduler::SchedulerType; +use awkernel_lib::file::fatfs::init_memory_fatfs; +use log::info; + +pub async fn run() { + // Initialize memory FatFS if not already done + match init_memory_fatfs() { + Ok(_) => info!("In-memory FAT filesystem initialized for simple consistency test."), + Err(e) => { + info!("Failed to initialize in-memory FAT filesystem: {e:?}"); + return; + } + } + + awkernel_async_lib::spawn( + "fatfs simple consistency test".into(), + simple_consistency_test(), + SchedulerType::FIFO, + ) + .await; +} + +async fn simple_consistency_test() { + info!("Starting simple FatFS consistency test"); + + let root_path = AsyncVfsPath::new_in_memory_fatfs(); + + // Test 1: Simple create, write, close, read (known to work) + info!("Test 1: Simple sequential test"); + { + let file_path = root_path.join("test1.txt").unwrap(); + + // Create and write + let mut file = file_path + .create_file() + .await + .expect("Failed to create test1"); + file.write(b"Test 1 data") + .await + .expect("Failed to write test1"); + drop(file); + + // Read back + let mut file = file_path.open_file().await.expect("Failed to open test1"); + let mut buf = vec![0u8; 100]; + let read = file.read(&mut buf).await.expect("Failed to read test1"); + info!("Test 1: Read {} bytes", read); + } + + // Test 2: Create file with one handle, then create another handle before closing first + info!("Test 2: Two handles, close in order"); + { + let file_path = root_path.join("test2.txt").unwrap(); + + // Create first handle and write + let mut file1 = file_path + .create_file() + .await + .expect("Failed to create test2 handle 1"); + file1 + .write(b"Handle 1 data") + .await + .expect("Failed to write with handle 1"); + + // Create second handle while first is still open + let file2 = file_path + .create_file() + .await + .expect("Failed to create test2 handle 2"); + + // Close both + drop(file1); + drop(file2); + + // Try to read + match file_path.open_file().await { + Ok(mut file) => { + let mut buf = vec![0u8; 100]; + let read = file.read(&mut buf).await.expect("Failed to read test2"); + info!("Test 2: SUCCESS - Read {} bytes", read); + } + Err(e) => { + info!("Test 2: FAILED - Could not open file: {:?}", e); + } + } + } + + // Test 
3: Create handle, close it, then create another handle + info!("Test 3: Sequential handles"); + { + let file_path = root_path.join("test3.txt").unwrap(); + + // Create first handle, write and close + let mut file1 = file_path + .create_file() + .await + .expect("Failed to create test3 handle 1"); + file1 + .write(b"First write") + .await + .expect("Failed to write first"); + drop(file1); + + // Create second handle, write and close + let mut file2 = file_path + .create_file() + .await + .expect("Failed to create test3 handle 2"); + use awkernel_lib::file::io::SeekFrom; + file2.seek(SeekFrom::End(0)).await.expect("Failed to seek"); + file2 + .write(b" Second write") + .await + .expect("Failed to write second"); + drop(file2); + + // Try to read + match file_path.open_file().await { + Ok(mut file) => { + let mut buf = vec![0u8; 100]; + let read = file.read(&mut buf).await.expect("Failed to read test3"); + info!( + "Test 3: Read {} bytes: {:?}", + read, + core::str::from_utf8(&buf[..read]) + ); + } + Err(e) => { + info!("Test 3: Could not open file: {:?}", e); + } + } + } + + info!("Simple FatFS consistency test completed"); +} diff --git a/awkernel_lib/src/file/fatfs/dir_entry.rs b/awkernel_lib/src/file/fatfs/dir_entry.rs index c3047d1f0..4718e05e3 100644 --- a/awkernel_lib/src/file/fatfs/dir_entry.rs +++ b/awkernel_lib/src/file/fatfs/dir_entry.rs @@ -131,16 +131,16 @@ impl ShortName { #[derive(Clone, Debug, Default)] pub(crate) struct DirFileEntryData { name: [u8; SFN_SIZE], - attrs: FileAttributes, + pub attrs: FileAttributes, reserved_0: u8, create_time_0: u8, create_time_1: u16, create_date: u16, access_date: u16, - first_cluster_hi: u16, + pub first_cluster_hi: u16, modify_time: u16, modify_date: u16, - first_cluster_lo: u16, + pub first_cluster_lo: u16, size: u32, } @@ -153,6 +153,15 @@ impl DirFileEntryData { } } + pub(crate) fn new_for_rootdir(first_cluster: u32) -> Self { + Self { + attrs: FileAttributes::DIRECTORY, + first_cluster_hi: (first_cluster >> 16) as u16, + first_cluster_lo: (first_cluster & 0xFFFF) as u16, + ..Self::default() + } + } + pub(crate) fn renamed(&self, new_name: [u8; SFN_SIZE]) -> Self { let mut sfn_entry = self.clone(); sfn_entry.name = new_name; @@ -204,7 +213,7 @@ impl DirFileEntryData { } } - fn set_size(&mut self, size: u32) { + pub(crate) fn set_size(&mut self, size: u32) { self.size = size; } @@ -224,15 +233,15 @@ impl DirFileEntryData { self.reserved_0 & (1 << 4) != 0 } - fn created(&self) -> DateTime { + pub(crate) fn created(&self) -> DateTime { DateTime::decode(self.create_date, self.create_time_1, self.create_time_0) } - fn accessed(&self) -> Date { + pub(crate) fn accessed(&self) -> Date { Date::decode(self.access_date) } - fn modified(&self) -> DateTime { + pub(crate) fn modified(&self) -> DateTime { DateTime::decode(self.modify_date, self.modify_time, 0) } @@ -468,12 +477,12 @@ impl DirEntryData { #[derive(Clone, Debug)] pub(crate) struct DirEntryEditor { data: DirFileEntryData, - pos: u64, + pub(crate) pos: u64, dirty: bool, } impl DirEntryEditor { - fn new(data: DirFileEntryData, pos: u64) -> Self { + pub(crate) fn new(data: DirFileEntryData, pos: u64) -> Self { Self { data, pos, @@ -628,10 +637,6 @@ impl DirEntry DirEntryEditor { - DirEntryEditor::new(self.data.clone(), self.entry_pos) - } - pub(crate) fn is_same_entry(&self, other: &DirEntry) -> bool { self.entry_pos == other.entry_pos } @@ -644,7 +649,10 @@ impl DirEntry File { assert!(!self.is_dir(), "Not a file entry"); - File::new(self.first_cluster(), Some(self.editor()), 
self.fs.clone()) + let metadata = + self.fs + .get_or_create_metadata(self.entry_pos, self.data.clone(), self.first_cluster()); + File::new(Some(metadata), self.fs.clone()) } /// Returns `Dir` struct for this entry. @@ -656,8 +664,13 @@ impl DirEntry Dir { assert!(self.is_dir(), "Not a directory entry"); match self.first_cluster() { - Some(n) => { - let file = File::new(Some(n), Some(self.editor()), self.fs.clone()); + Some(_) => { + let metadata = self.fs.get_or_create_metadata( + self.entry_pos, + self.data.clone(), + self.first_cluster(), + ); + let file = File::new(Some(metadata), self.fs.clone()); Dir::new(DirRawStream::File(file), self.fs.clone()) } None => FileSystem::root_dir(&self.fs), diff --git a/awkernel_lib/src/file/fatfs/file.rs b/awkernel_lib/src/file/fatfs/file.rs index 22f9b8396..ff5a37beb 100644 --- a/awkernel_lib/src/file/fatfs/file.rs +++ b/awkernel_lib/src/file/fatfs/file.rs @@ -8,7 +8,7 @@ use super::error::Error; use super::fs::{FileSystem, ReadWriteSeek}; use super::time::{Date, DateTime, TimeProvider}; -use awkernel_sync::mcs::MCSNode; +use awkernel_sync::{mcs::MCSNode, mutex::Mutex}; const MAX_FILE_SIZE: u32 = u32::MAX; @@ -16,14 +16,10 @@ const MAX_FILE_SIZE: u32 = u32::MAX; /// /// This struct is created by the `open_file` or `create_file` methods on `Dir`. pub struct File { - // Note first_cluster is None if file is empty - first_cluster: Option, - // Note: if offset points between clusters current_cluster is the previous cluster - current_cluster: Option, + // Shared metadata for this file (None for root dir) + metadata: Option>>, // current position in this file offset: u32, - // file dir entry editor - None for root dir - entry: Option, // file-system reference fs: Arc>, } @@ -41,15 +37,12 @@ pub struct Extent { impl File { pub(crate) fn new( - first_cluster: Option, - entry: Option, + metadata: Option>>, fs: Arc>, ) -> Self { File { - first_cluster, - entry, + metadata, fs, - current_cluster: None, // cluster before first one offset: 0, } } @@ -59,32 +52,34 @@ impl File { /// # Errors /// /// `Error::Io` will be returned if the underlying storage object returned an I/O error. - /// - /// # Panics - /// - /// Will panic if this is the root directory. 
pub fn truncate(&mut self) -> Result<(), Error> { log::trace!("File::truncate"); - if let Some(ref mut e) = self.entry { - e.set_size(self.offset); + if let Some(ref metadata_arc) = self.metadata { + // Lock ordering: metadata → disk + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + + metadata.set_size(self.offset); if self.offset == 0 { - e.set_first_cluster(None, self.fs.fat_type()); + metadata.set_first_cluster(None, self.fs.fat_type()); } - } else { - // Note: we cannot handle this case because there is no size field - panic!("Trying to truncate a file without an entry"); - } - if let Some(current_cluster) = self.current_cluster { - // current cluster is none only if offset is 0 - debug_assert!(self.offset > 0); - FileSystem::truncate_cluster_chain(&self.fs, current_cluster) - } else { - debug_assert!(self.offset == 0); - if let Some(n) = self.first_cluster { - FileSystem::free_cluster_chain(&self.fs, n)?; - self.first_cluster = None; + + let first_cluster = metadata.inner().first_cluster(self.fs.fat_type()); + let current_cluster_opt = self.get_current_cluster_with_metadata(&metadata)?; + + if let Some(current_cluster) = current_cluster_opt { + // current cluster is none only if offset is 0 + debug_assert!(self.offset > 0); + FileSystem::truncate_cluster_chain(&self.fs, current_cluster) + } else { + debug_assert!(self.offset == 0); + if let Some(n) = first_cluster { + FileSystem::free_cluster_chain(&self.fs, n)?; + } + Ok(()) } - Ok(()) + } else { + unreachable!("File must have metadata before it is dropped"); } } @@ -98,7 +93,14 @@ impl File { let Some(mut bytes_left) = self.size() else { return None.into_iter().flatten(); }; - let Some(first) = self.first_cluster else { + let first_cluster = if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + metadata.inner().first_cluster(self.fs.fat_type()) + } else { + None + }; + let Some(first) = first_cluster else { return None.into_iter().flatten(); }; @@ -124,7 +126,7 @@ impl File { pub(crate) fn abs_pos(&self) -> Option { // Returns current position relative to filesystem start // Note: when between clusters it returns position after previous cluster - match self.current_cluster { + match self.get_current_cluster().ok()? { Some(n) => { let cluster_size = self.fs.cluster_size(); let offset_mod_cluster_size = self.offset % cluster_size; @@ -143,8 +145,10 @@ impl File { } fn flush_dir_entry(&mut self) -> Result<(), Error> { - if let Some(ref mut e) = self.entry { - e.flush(&self.fs)?; + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + metadata.flush(&self.fs)?; } Ok(()) } @@ -154,8 +158,10 @@ impl File { /// Note: it is set to a value from the `TimeProvider` when creating a file. #[deprecated] pub fn set_created(&mut self, date_time: DateTime) { - if let Some(ref mut e) = self.entry { - e.set_created(date_time); + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + metadata.set_created(date_time); } } @@ -164,8 +170,10 @@ impl File { /// Note: it is overwritten by a value from the `TimeProvider` on every file read operation. 
#[deprecated] pub fn set_accessed(&mut self, date: Date) { - if let Some(ref mut e) = self.entry { - e.set_accessed(date); + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + metadata.set_accessed(date); } } @@ -174,39 +182,90 @@ impl File { /// Note: it is overwritten by a value from the `TimeProvider` on every file write operation. #[deprecated] pub fn set_modified(&mut self, date_time: DateTime) { - if let Some(ref mut e) = self.entry { - e.set_modified(date_time); + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + metadata.set_modified(date_time); } } fn size(&self) -> Option { - match self.entry { - Some(ref e) => e.inner().size(), - None => None, + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + metadata.inner().size() + } else { + None } } fn is_dir(&self) -> bool { - match self.entry { - Some(ref e) => e.inner().is_dir(), - None => true, // root directory + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + metadata.inner().is_dir() + } else { + true // root directory } } - fn bytes_left_in_file(&self) -> Option { - // Note: seeking beyond end of file is not allowed so overflow is impossible - self.size().map(|s| (s - self.offset) as usize) + pub(crate) fn first_cluster(&self) -> Option { + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + metadata.inner().first_cluster(self.fs.fat_type()) + } else { + None + } } - fn set_first_cluster(&mut self, cluster: u32) { - self.first_cluster = Some(cluster); - if let Some(ref mut e) = self.entry { - e.set_first_cluster(self.first_cluster, self.fs.fat_type()); + /// Calculate current cluster with an existing metadata lock held. + fn get_current_cluster_with_metadata( + &self, + metadata: &impl core::ops::Deref, + ) -> Result, Error> { + if self.offset == 0 { + return Ok(None); + } + + let Some(first_cluster) = metadata.inner().first_cluster(self.fs.fat_type()) else { + return Err(Error::CorruptedFileSystem); + }; + + let offset_in_clusters = self.fs.clusters_from_bytes(u64::from(self.offset)); + debug_assert!(offset_in_clusters > 0); + + let clusters_to_skip = offset_in_clusters - 1; + let mut cluster = first_cluster; + let mut iter = FileSystem::cluster_iter(&self.fs, first_cluster); + + for _i in 0..clusters_to_skip { + cluster = if let Some(r) = iter.next() { + r? + } else { + return Err(Error::CorruptedFileSystem); + }; } + + Ok(Some(cluster)) } - pub(crate) fn first_cluster(&self) -> Option { - self.first_cluster + /// Get the cluster that contains the current file position. + /// + /// NOTE: when offset is at a cluster boundary (offset % cluster_size == 0), + /// this returns the PREVIOUS cluster, not the next one. 
+ fn get_current_cluster(&self) -> Result, Error> { + if self.offset == 0 { + return Ok(None); + } + + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + self.get_current_cluster_with_metadata(&metadata) + } else { + Ok(None) + } } fn flush(&mut self) -> Result<(), Error> { @@ -218,18 +277,20 @@ impl File { } pub(crate) fn is_root_dir(&self) -> bool { - self.entry.is_none() + self.metadata.is_none() } } impl File { fn update_dir_entry_after_write(&mut self) { let offset = self.offset; - if let Some(ref mut e) = self.entry { + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); let now = self.fs.options.time_provider.get_current_date_time(); - e.set_modified(now); - if e.inner().size().is_some_and(|s| offset > s) { - e.set_size(offset); + metadata.set_modified(now); + if metadata.inner().size().is_some_and(|s| offset > s) { + metadata.set_size(offset); } } } @@ -240,6 +301,15 @@ impl Drop for File { if let Err(err) = self.flush() { log::error!("flush failed {err:?}"); } + + if let Some(metadata_arc) = self.metadata.take() { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + let entry_pos = metadata.pos; + drop(metadata); + drop(metadata_arc); + self.fs.cleanup_metadata_if_unused(entry_pos); + } } } @@ -247,10 +317,8 @@ impl Drop for File { impl Clone for File { fn clone(&self) -> Self { File { - first_cluster: self.first_cluster, - current_cluster: self.current_cluster, + metadata: self.metadata.clone(), offset: self.offset, - entry: self.entry.clone(), fs: Arc::clone(&self.fs), } } @@ -264,54 +332,74 @@ impl Read for File Result { log::trace!("File::read"); let cluster_size = self.fs.cluster_size(); - let current_cluster_opt = if self.offset % cluster_size == 0 { - // next cluster - match self.current_cluster { - None => self.first_cluster, - Some(n) => { - let r = FileSystem::cluster_iter(&self.fs, n).next(); - match r { - Some(Err(err)) => return Err(err), - Some(Ok(n)) => Some(n), - None => None, + + // Hold metadata lock during entire read operation to prevent truncate races + // Lock ordering: metadata → disk + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + + let current_cluster_opt = if self.offset % cluster_size == 0 { + // next cluster + match self.get_current_cluster_with_metadata(&metadata)? { + None => { + // only if offset is 0 + metadata.inner().first_cluster(self.fs.fat_type()) + } + Some(n) => { + let r = FileSystem::cluster_iter(&self.fs, n).next(); + match r { + Some(Err(err)) => return Err(err), + Some(Ok(n)) => Some(n), + None => None, + } } } + } else { + self.get_current_cluster_with_metadata(&metadata)? 
+ }; + + let Some(current_cluster) = current_cluster_opt else { + return Ok(0); + }; + let offset_in_cluster = self.offset % cluster_size; + let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; + let size = metadata.inner().size(); + if let Some(s) = size { + if self.offset >= s { + return Ok(0); + } } - } else { - self.current_cluster - }; - let Some(current_cluster) = current_cluster_opt else { - return Ok(0); - }; - let offset_in_cluster = self.offset % cluster_size; - let bytes_left_in_cluster = (cluster_size - offset_in_cluster) as usize; - let bytes_left_in_file = self.bytes_left_in_file().unwrap_or(bytes_left_in_cluster); - let read_size = buf.len().min(bytes_left_in_cluster).min(bytes_left_in_file); - if read_size == 0 { - return Ok(0); - } - log::trace!("read {read_size} bytes in cluster {current_cluster}"); - let offset_in_fs = - self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); - let read_bytes = { - let mut node = MCSNode::new(); - let mut disk_guard = self.fs.disk.lock(&mut node); - disk_guard.seek(SeekFrom::Start(offset_in_fs))?; - disk_guard.read(&mut buf[..read_size])? - }; - if read_bytes == 0 { - return Ok(0); - } - self.offset += read_bytes as u32; - self.current_cluster = Some(current_cluster); + let bytes_left_in_file = size + .map(|s| (s - self.offset) as usize) + .unwrap_or(bytes_left_in_cluster); + let read_size = buf.len().min(bytes_left_in_cluster).min(bytes_left_in_file); + if read_size == 0 { + return Ok(0); + } + log::trace!("read {read_size} bytes in cluster {current_cluster}"); + let offset_in_fs = + self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); + // lock ordering: metadata → disk + let read_bytes = { + let mut node_disk = MCSNode::new(); + let mut disk_guard = self.fs.disk.lock(&mut node_disk); + disk_guard.seek(SeekFrom::Start(offset_in_fs))?; + disk_guard.read(&mut buf[..read_size])? 
+ }; + if read_bytes == 0 { + return Ok(0); + } + self.offset += read_bytes as u32; - if let Some(ref mut e) = self.entry { if self.fs.options.update_accessed_date { let now = self.fs.options.time_provider.get_current_date(); - e.set_accessed(now); + metadata.set_accessed(now); } + Ok(read_bytes) + } else { + unreachable!("File must have metadata before it is dropped"); } - Ok(read_bytes) } } @@ -332,39 +420,53 @@ impl Write for File self.first_cluster, - Some(n) => { - let r = FileSystem::cluster_iter(&self.fs, n).next(); - match r { - Some(Err(err)) => return Err(err), - Some(Ok(n)) => Some(n), - None => None, + // Lock ordering: metadata → (fs_info, disk) + let current_cluster = if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let mut metadata = metadata_arc.lock(&mut node); + + if self.offset % cluster_size == 0 { + let current_cluster_opt = self.get_current_cluster_with_metadata(&metadata)?; + + let next_cluster = match current_cluster_opt { + None => { + // Only if offset is 0 + metadata.inner().first_cluster(self.fs.fat_type()) + } + Some(n) => { + let r = FileSystem::cluster_iter(&self.fs, n).next(); + match r { + Some(Err(err)) => return Err(err), + Some(Ok(n)) => Some(n), + None => None, + } } + }; + + if let Some(n) = next_cluster { + n + } else { + // allocate new cluster + let new_cluster = + FileSystem::alloc_cluster(&self.fs, current_cluster_opt, self.is_dir())?; + log::trace!("allocated cluster {new_cluster}"); + let first_cluster_opt = metadata.inner().first_cluster(self.fs.fat_type()); + if first_cluster_opt.is_none() { + metadata.set_first_cluster(Some(new_cluster), self.fs.fat_type()); + } + new_cluster } - }; - if let Some(n) = next_cluster { - n } else { - // end of chain reached - allocate new cluster - let new_cluster = - FileSystem::alloc_cluster(&self.fs, self.current_cluster, self.is_dir())?; - log::trace!("allocated cluster {new_cluster}"); - if self.first_cluster.is_none() { - self.set_first_cluster(new_cluster); + // self.current_cluster should be a valid cluster + match self.get_current_cluster_with_metadata(&metadata)? 
{ + Some(n) => n, + None => return Err(Error::CorruptedFileSystem), } - new_cluster } } else { - // self.current_cluster should be a valid cluster - match self.current_cluster { - Some(n) => n, - None => panic!("Offset inside cluster but no cluster allocated"), - } + unreachable!("File must have metadata before it is dropped"); }; + log::trace!("write {write_size} bytes in cluster {current_cluster}"); let offset_in_fs = self.fs.offset_from_cluster(current_cluster) + u64::from(offset_in_cluster); @@ -379,7 +481,6 @@ impl Write for File Seek for File { new_offset = size; } } - log::trace!( - "file seek {} -> {} - entry {:?}", - self.offset, - new_offset, - self.entry - ); - if new_offset == self.offset { - // position is the same - nothing to do - return Ok(u64::from(self.offset)); - } - let new_offset_in_clusters = self.fs.clusters_from_bytes(u64::from(new_offset)); - let old_offset_in_clusters = self.fs.clusters_from_bytes(u64::from(self.offset)); - let new_cluster = if new_offset == 0 { - None - } else if new_offset_in_clusters == old_offset_in_clusters { - self.current_cluster - } else if let Some(first_cluster) = self.first_cluster { - // calculate number of clusters to skip - // return the previous cluster if the offset points to the cluster boundary - // Note: new_offset_in_clusters cannot be 0 here because new_offset is not 0 - debug_assert!(new_offset_in_clusters > 0); - let clusters_to_skip = new_offset_in_clusters - 1; - let mut cluster = first_cluster; - let mut iter = FileSystem::cluster_iter(&self.fs, first_cluster); - for i in 0..clusters_to_skip { - cluster = if let Some(r) = iter.next() { - r? + + if new_offset > 0 { + if let Some(ref metadata_arc) = self.metadata { + let mut node = MCSNode::new(); + let metadata = metadata_arc.lock(&mut node); + let first_cluster_opt = metadata.inner().first_cluster(self.fs.fat_type()); + + if let Some(first_cluster) = first_cluster_opt { + let new_offset_in_clusters = self.fs.clusters_from_bytes(u64::from(new_offset)); + if new_offset_in_clusters > 0 { + let clusters_to_skip = new_offset_in_clusters - 1; + let mut iter = FileSystem::cluster_iter(&self.fs, first_cluster); + for i in 0..clusters_to_skip { + if iter.next().is_none() { + // cluster chain ends before the new position - seek to the end of the last cluster + new_offset = self.fs.bytes_from_clusters(i + 1) as u32; + break; + }; + } + } } else { - // cluster chain ends before the new position - seek to the end of the last cluster - new_offset = self.fs.bytes_from_clusters(i + 1) as u32; - break; - }; + // empty file - always seek to 0 + new_offset = 0; + } } - Some(cluster) - } else { - // empty file - always seek to 0 - new_offset = 0; - None - }; + } + self.offset = new_offset; - self.current_cluster = new_cluster; Ok(u64::from(self.offset)) } } diff --git a/awkernel_lib/src/file/fatfs/fs.rs b/awkernel_lib/src/file/fatfs/fs.rs index 7ec65f942..8c0f8da75 100644 --- a/awkernel_lib/src/file/fatfs/fs.rs +++ b/awkernel_lib/src/file/fatfs/fs.rs @@ -1,3 +1,4 @@ +use alloc::collections::BTreeMap; use alloc::string::String; use alloc::sync::Arc; use core::borrow::BorrowMut; @@ -8,7 +9,7 @@ use core::marker::PhantomData; use super::super::io::{self, IoBase, Read, ReadLeExt, Seek, SeekFrom, Write, WriteLeExt}; use super::boot_sector::{format_boot_sector, BiosParameterBlock, BootSector}; use super::dir::{Dir, DirRawStream}; -use super::dir_entry::{DirFileEntryData, FileAttributes, SFN_PADDING, SFN_SIZE}; +use super::dir_entry::{DirEntryEditor, DirFileEntryData, FileAttributes, SFN_PADDING, 
SFN_SIZE}; use super::error::Error; use super::file::File; use super::table::{ @@ -22,6 +23,17 @@ use awkernel_sync::{mcs::MCSNode, mutex::Mutex}; // http://wiki.osdev.org/FAT // https://www.win.tue.nl/~aeb/linux/fs/fat/fat-1.html +// Lock Ordering Hierarchy +// +// To prevent deadlocks, locks must be acquired in the following order: +// 1. metadata lock → disk lock (via FAT operations) +// 2. metadata lock → fs_info lock (via cluster allocation) +// 3. fs_info lock → disk lock +// 4. metadata_cache lock (independent, no nesting) +// 5. current_status_flags lock (independent, no nesting) +// +// IMPORTANT: Never acquire metadata lock while holding disk or fs_info locks! + /// A type of FAT filesystem. /// /// `FatType` values are based on the size of File Allocation Table entry. @@ -342,6 +354,8 @@ pub struct FileSystem< total_clusters: u32, fs_info: Mutex, current_status_flags: Mutex, + // Metadata cache for open files - maps entry position to shared metadata + pub(crate) metadata_cache: Mutex>>>, } impl Debug for FileSystem @@ -361,6 +375,7 @@ where .field("total_clusters", &self.total_clusters) .field("fs_info", &" fs_info") .field("current_status_flags", &" status flags") + .field("metadata_cache", &" metadata cache") .finish() } } @@ -448,6 +463,7 @@ impl FileSystem { total_clusters, fs_info: Mutex::new(fs_info), current_status_flags: Mutex::new(status_flags), + metadata_cache: Mutex::new(BTreeMap::new()), }) } @@ -634,16 +650,16 @@ impl FileSystem { fn flush_fs_info(&self) -> Result<(), Error> { let mut node = MCSNode::new(); - let fs_info_guard = self.fs_info.lock(&mut node); + let mut fs_info_guard = self.fs_info.lock(&mut node); if self.fat_type == FatType::Fat32 && fs_info_guard.dirty { - drop(fs_info_guard); let fs_info_sector_offset = self.offset_from_sector(u32::from(self.bpb.fs_info_sector)); + + // This prevents race conditions where fs_info could be modified between checking + // dirty flag and writing the data + // Lock ordering: fs_info → disk let mut node_disk = MCSNode::new(); let mut disk_guard = self.disk.lock(&mut node_disk); disk_guard.seek(SeekFrom::Start(fs_info_sector_offset))?; - - let mut node = MCSNode::new(); - let mut fs_info_guard = self.fs_info.lock(&mut node); fs_info_guard.serialize(&mut *disk_guard)?; fs_info_guard.dirty = false; } @@ -699,11 +715,16 @@ impl FileSystem { &fs.bpb, FsIoAdapter { fs: Arc::clone(fs) }, )), - FatType::Fat32 => DirRawStream::File(File::new( - Some(fs.bpb.root_dir_first_cluster), - None, - Arc::clone(fs), - )), + FatType::Fat32 => { + let metadata = DirEntryEditor::new( + DirFileEntryData::new_for_rootdir(fs.bpb.root_dir_first_cluster), + 0, + ); + DirRawStream::File(File::new( + Some(Arc::new(Mutex::new(metadata))), + Arc::clone(fs), + )) + } } }; Dir::new(root_rdr, Arc::clone(fs)) @@ -725,6 +746,34 @@ impl FileSystem FileSystem { + pub(crate) fn get_or_create_metadata( + &self, + entry_pos: u64, + entry_data: DirFileEntryData, + _first_cluster: Option, + ) -> Arc> { + let mut node = MCSNode::new(); + let mut cache = self.metadata_cache.lock(&mut node); + + cache + .entry(entry_pos) + .or_insert_with(|| Arc::new(Mutex::new(DirEntryEditor::new(entry_data, entry_pos)))) + .clone() + } + + pub(crate) fn cleanup_metadata_if_unused(&self, entry_pos: u64) { + let mut node = MCSNode::new(); + let mut cache = self.metadata_cache.lock(&mut node); + + if let Some(metadata_arc) = cache.get(&entry_pos) { + if Arc::strong_count(metadata_arc) == 1 { + cache.remove(&entry_pos); + } + } + } +} + impl FileSystem { diff --git 
a/userland/Cargo.toml b/userland/Cargo.toml
index f418ef706..66457d7bb 100644
--- a/userland/Cargo.toml
+++ b/userland/Cargo.toml
@@ -86,8 +86,16 @@ optional = true
 path = "../applications/tests/test_memfatfs"
 optional = true
 
+[dependencies.test_fatfs_consistency]
+path = "../applications/tests/test_fatfs_consistency"
+optional = true
+
+[dependencies.test_fatfs_simple_consistency]
+path = "../applications/tests/test_fatfs_simple_consistency"
+optional = true
+
 [features]
-default = ["test_memfatfs"]
+default = ["test_fatfs_consistency"]
 perf = ["awkernel_services/perf"]
 
 # Evaluation applications
@@ -109,4 +117,6 @@ test_measure_channel_heavy = ["dep:test_measure_channel_heavy"]
 test_sched_preempt = ["dep:test_sched_preempt"]
 test_dag = ["dep:test_dag"]
 test_voluntary_preemption = ["dep:test_voluntary_preemption"]
-test_memfatfs = ["dep:test_memfatfs"]
\ No newline at end of file
+test_memfatfs = ["dep:test_memfatfs"]
+test_fatfs_consistency = ["dep:test_fatfs_consistency"]
+test_fatfs_simple_consistency = ["dep:test_fatfs_simple_consistency"]
diff --git a/userland/src/lib.rs b/userland/src/lib.rs
index 464fce029..74c90f4e4 100644
--- a/userland/src/lib.rs
+++ b/userland/src/lib.rs
@@ -61,5 +61,11 @@ pub async fn main() -> Result<(), Cow<'static, str>> {
     #[cfg(feature = "test_memfatfs")]
     test_memfatfs::run().await; // test for filesystem
 
+    #[cfg(feature = "test_fatfs_consistency")]
+    test_fatfs_consistency::run().await; // test for filesystem consistency
+
+    #[cfg(feature = "test_fatfs_simple_consistency")]
+    test_fatfs_simple_consistency::run().await; // test for simple filesystem consistency
+
     Ok(())
 }
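A note on the numbers the consistency test checks: the pre-test block writes one line and stores EXPECTED_LINES = 1, and each of the NUM_WRITERS = 3 writer tasks attempts WRITES_PER_TASK = 5 appends, so if every seek/write pair succeeds the final read should see 1 + 3 * 5 = 16 non-empty lines. Because each writer only adds the lines it actually managed to write to EXPECTED_LINES, a mismatch at the end points to a lost update (for example, two writers seeking to the same end-of-file position and overwriting each other) rather than to a failed write, assuming create_file on an existing path opens the file without truncating (the behaviour test_fatfs_simple_consistency's Test 3 also relies on).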
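The core of the fs.rs change is the new metadata_cache: DirEntry::to_file() and to_dir() now go through get_or_create_metadata, so every handle opened on the same directory entry shares one Arc<Mutex<DirEntryEditor>>, and File::drop calls cleanup_metadata_if_unused once its own reference is gone. The sketch below mirrors that get-or-create / last-reference-eviction pattern in plain std types so it compiles on its own; Meta and MetaCache are illustrative names (not from the patch), and the kernel code uses awkernel_sync::mutex::Mutex with an MCSNode per acquisition rather than std::sync::Mutex.

```rust
use std::collections::BTreeMap;
use std::sync::{Arc, Mutex};

/// Illustrative stand-in for DirEntryEditor: just one of the fields
/// (size, first cluster, timestamps) that the real editor tracks.
#[derive(Default)]
struct Meta {
    size: u32,
}

/// Illustrative stand-in for FileSystem::metadata_cache.
struct MetaCache {
    by_entry_pos: Mutex<BTreeMap<u64, Arc<Mutex<Meta>>>>,
}

impl MetaCache {
    /// Mirrors get_or_create_metadata: all handles opened on the same
    /// directory entry receive the same Arc, so a size update made through
    /// one handle is visible to every other handle.
    fn get_or_create(&self, entry_pos: u64) -> Arc<Mutex<Meta>> {
        let mut cache = self.by_entry_pos.lock().unwrap();
        cache
            .entry(entry_pos)
            .or_insert_with(|| Arc::new(Mutex::new(Meta::default())))
            .clone()
    }

    /// Mirrors cleanup_metadata_if_unused: called after a handle releases its
    /// Arc; the entry is evicted only when the cache holds the last reference.
    fn cleanup_if_unused(&self, entry_pos: u64) {
        let mut cache = self.by_entry_pos.lock().unwrap();
        if let Some(meta) = cache.get(&entry_pos) {
            if Arc::strong_count(meta) == 1 {
                cache.remove(&entry_pos);
            }
        }
    }
}

fn main() {
    let cache = MetaCache {
        by_entry_pos: Mutex::new(BTreeMap::new()),
    };

    let a = cache.get_or_create(42);
    let b = cache.get_or_create(42);
    assert!(Arc::ptr_eq(&a, &b)); // both "handles" share one metadata record

    a.lock().unwrap().size = 128;
    assert_eq!(b.lock().unwrap().size, 128);

    drop(a);
    drop(b);
    cache.cleanup_if_unused(42); // only the cache's own Arc is left -> evicted
    assert!(cache.by_entry_pos.lock().unwrap().is_empty());
}
```

The Drop impl in file.rs is careful about ordering for the same reason the sketch drops `a` and `b` before calling cleanup: it flushes, takes the Arc out of self.metadata, drops its guard and its Arc, and only then calls cleanup_metadata_if_unused, so the strong count the cache sees no longer includes the handle being destroyed.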
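The new "Lock Ordering Hierarchy" comment in fs.rs is load-bearing: Read::read now holds the per-file metadata lock for the whole operation, Write::write holds it while resolving or allocating the target cluster, and the disk and fs_info locks are only ever taken inside it, which is what closes the truncate-versus-read races the tests exercise. Below is a minimal, compilable analogue of that shape using std::sync::Mutex; Fs, FileMeta and read_at are illustrative stand-ins, not the patch's types, and the real code uses awkernel_sync's MCS mutex (MCSNode::new() plus lock(&mut node)) instead.

```rust
use std::sync::Mutex;

// Stand-ins for the two lock levels named in the fs.rs comment.
struct Fs {
    metadata: Mutex<FileMeta>, // level 1: per-file metadata
    disk: Mutex<Vec<u8>>,      // level 2: underlying storage
}

struct FileMeta {
    size: u32,
    accessed: u32,
}

impl Fs {
    /// Shape of the new File::read path: the metadata lock is held across the
    /// whole operation (so a concurrent truncate cannot move the size or the
    /// cluster chain under us) and the disk lock is taken strictly inside it.
    fn read_at(&self, offset: u32, buf: &mut [u8]) -> usize {
        let mut meta = self.metadata.lock().unwrap(); // level 1 first
        if offset >= meta.size {
            return 0;
        }
        let n = {
            let disk = self.disk.lock().unwrap(); // level 2 inside level 1
            let end = (offset as usize + buf.len()).min(meta.size as usize);
            let src = &disk[offset as usize..end];
            buf[..src.len()].copy_from_slice(src);
            src.len()
        }; // disk lock released here
        meta.accessed += 1; // still under the metadata lock
        n
    }
    // Taking self.metadata while already holding self.disk anywhere else
    // would invert the order and risk deadlock.
}

fn main() {
    let fs = Fs {
        metadata: Mutex::new(FileMeta { size: 5, accessed: 0 }),
        disk: Mutex::new(b"hello, fat".to_vec()),
    };
    let mut buf = [0u8; 8];
    let n = fs.read_at(0, &mut buf);
    assert_eq!(&buf[..n], b"hello");
    assert_eq!(fs.metadata.lock().unwrap().accessed, 1);
}
```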
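get_current_cluster_with_metadata replaces the old cached current_cluster field with an on-demand walk of the FAT chain, and the behaviour called out in its doc comment (an offset sitting exactly on a cluster boundary maps to the previous cluster) falls out of the clusters_to_skip = offset_in_clusters - 1 step. A toy model of that arithmetic, assuming clusters_from_bytes is a ceiling division and using made-up cluster numbers; unlike the real helper, which reports Error::CorruptedFileSystem when the chain is shorter than the offset implies, the toy simply returns None.

```rust
/// Toy model of get_current_cluster_with_metadata: `chain` plays the role of
/// one file's FAT cluster chain and `cluster_size` is illustrative.
fn current_cluster(chain: &[u32], cluster_size: u32, offset: u32) -> Option<u32> {
    if offset == 0 {
        return None; // same convention as the patch: no "current" cluster yet
    }
    // Assumed to match FileSystem::clusters_from_bytes: a ceiling division.
    let offset_in_clusters = offset.div_ceil(cluster_size);
    // Skip one less than that, so an offset exactly on a cluster boundary
    // resolves to the PREVIOUS cluster, as the new doc comment states.
    let clusters_to_skip = (offset_in_clusters - 1) as usize;
    chain.get(clusters_to_skip).copied()
}

fn main() {
    let chain = [10, 11, 12]; // first_cluster = 10, then 11, then 12
    let cs = 512;

    assert_eq!(current_cluster(&chain, cs, 0), None);
    assert_eq!(current_cluster(&chain, cs, 1), Some(10));
    assert_eq!(current_cluster(&chain, cs, 512), Some(10)); // boundary -> previous
    assert_eq!(current_cluster(&chain, cs, 513), Some(11));
    assert_eq!(current_cluster(&chain, cs, 1536), Some(12)); // end of last cluster
    assert_eq!(current_cluster(&chain, cs, 1537), None); // past the chain
}
```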