diff --git a/rcore-fs-cli/Cargo.toml b/rcore-fs-cli/Cargo.toml index b562461..44d493f 100644 --- a/rcore-fs-cli/Cargo.toml +++ b/rcore-fs-cli/Cargo.toml @@ -22,3 +22,5 @@ rcore-fs-sefs = { path = "../rcore-fs-sefs", features = ["std"] } rcore-fs-ramfs = { path = "../rcore-fs-ramfs" } rcore-fs-hostfs = { path = "../rcore-fs-hostfs" } rcore-fs-unionfs = { path = "../rcore-fs-unionfs" } +threadpool = "1.8" +num_cpus = "1.13" diff --git a/rcore-fs-cli/src/zip.rs b/rcore-fs-cli/src/zip.rs index b1f1d82..44ecc93 100644 --- a/rcore-fs-cli/src/zip.rs +++ b/rcore-fs-cli/src/zip.rs @@ -8,12 +8,118 @@ use std::os::unix::fs::PermissionsExt; use std::path::Path; use std::str; use std::sync::Arc; +use threadpool::ThreadPool; + + use rcore_fs::vfs::{FileType, INode, PATH_MAX}; const BUF_SIZE: usize = 0x10000; const S_IMASK: u32 = 0o777; +#[derive(Debug)] +struct ZipError(String); + +impl std::error::Error for ZipError {} + +impl std::fmt::Display for ZipError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + +/// This function creates a thread pool with 16 worker threads to process files and symlinks concurrently, +/// while recursively traversing the directory structure. Directory creation remains sequential +/// to maintain consistency, but file and symlink operations are dispatched to worker threads. 
+/// +/// # Parameters +/// * `path` - Path to the source directory in the host file system +/// * `inode` - Target directory INode in the destination file system +/// +/// # Returns +/// * `Result<(), Box<dyn Error>>` - Success or an error +pub fn zip_dir_parallel(path: &Path, inode: Arc<dyn INode>) -> Result<(), Box<dyn Error>> { + let pool = Arc::new(ThreadPool::new(16)); + let result = zip_dir_recursive(path, inode, &pool); + pool.join(); + result +} + +fn zip_dir_recursive(path: &Path, inode: Arc<dyn INode>, pool: &Arc<ThreadPool>) -> Result<(), Box<dyn Error>> { + let mut entries: Vec<fs::DirEntry> = fs::read_dir(path)?.map(|dir| dir.unwrap()).collect(); + entries.sort_by_key(|entry| entry.file_name()); + for entry in entries { + let name_ = entry.file_name(); + let name = name_.to_str().unwrap().to_string(); + let metadata = fs::symlink_metadata(entry.path())?; + let type_ = metadata.file_type(); + let mode = (metadata.permissions().mode() & S_IMASK) as u16; + let inode = inode.clone(); + let path = entry.path(); + if type_.is_dir() { + let dir_inode = inode.create(&name, FileType::Dir, mode)?; + zip_dir_recursive(&path, dir_inode, &pool)?; + } else { + //every file or symlink is handled in a separate thread + pool.execute(move || { + let inode = Arc::clone(&inode); + if type_.is_file() { + if let Err(e) = handle_file(&inode, &name, mode, &path) { + println!("failed!: {}", e); + } + } else if type_.is_symlink() { + if let Err(e) = handle_symlink(&inode, &name, mode, &path) { + println!("failed!: {}", e); + } + + } + }); + } + } + + Ok(()) +} + +fn handle_file( + inode: &Arc<dyn INode>, + name: &str, + mode: u16, + path: &Path, +)-> Result<(), Box<dyn Error>> { + + let file_inode = inode.create(name, FileType::File, mode)?; + + let mut file = fs::File::open(path)?; + let metadata = file.metadata()?; + file_inode.resize(metadata.len() as usize)?; + + let mut buf = vec![0u8; BUF_SIZE]; + let mut offset = 0usize; + let mut len = BUF_SIZE; + + while len == BUF_SIZE { + len = file.read(&mut buf)?; + file_inode.write_at(offset, &buf[..len])?; + offset += len; + } + 
Ok(()) +} + +fn handle_symlink( + inode: &Arc<dyn INode>, + name: &str, + mode: u16, + path: &Path, +) -> Result<(), Box<dyn Error>> { + let target = fs::read_link(path)?; + let symlink_inode = inode.create(name, FileType::SymLink, mode)?; + let data = target.as_os_str().as_bytes(); + symlink_inode.resize(data.len())?; + symlink_inode.write_at(0, data)?; + Ok(()) +} + +/// The old implementation of zip_dir function, which is not parallelized. pub fn zip_dir(path: &Path, inode: Arc<dyn INode>) -> Result<(), Box<dyn Error>> { let mut entries: Vec<fs::DirEntry> = fs::read_dir(path)?.map(|dir| dir.unwrap()).collect(); entries.sort_by_key(|entry| entry.file_name()); @@ -23,7 +129,6 @@ pub fn zip_dir(path: &Path, inode: Arc<dyn INode>) -> Result<(), Box<dyn Error>> let metadata = fs::symlink_metadata(entry.path())?; let type_ = metadata.file_type(); let mode = (metadata.permissions().mode() & S_IMASK) as u16; - //println!("zip: name: {:?}, mode: {:#o}", entry.path(), mode); if type_.is_file() { let inode = inode.create(name, FileType::File, mode)?; let mut file = fs::File::open(entry.path())?; @@ -38,7 +143,7 @@ pub fn zip_dir(path: &Path, inode: Arc<dyn INode>) -> Result<(), Box<dyn Error>> } } else if type_.is_dir() { let inode = inode.create(name, FileType::Dir, mode)?; - zip_dir(entry.path().as_path(), inode)?; + zip_dir(entry.path().as_path(), inode)?; } else if type_.is_symlink() { let target = fs::read_link(entry.path())?; let inode = inode.create(name, FileType::SymLink, mode)?; @@ -86,3 +191,78 @@ pub fn unzip_dir(path: &Path, inode: Arc<dyn INode>) -> Result<(), Box<dyn Error>>` - Success or an error +pub fn update_dir(dir: &Path, path: &Path, root_inode: Arc<dyn INode>) -> Result<(), Box<dyn Error>> { + let full_path = dir.join(path); + let components: Vec<_> = path.components() + .filter_map(|comp| comp.as_os_str().to_str()) + .collect(); + let mut current_inode = root_inode; + for (idx, component) in components.iter().enumerate() { + if idx == components.len() - 1 { + // last component + let metadata = fs::metadata(&full_path)?; + let mode = (metadata.permissions().mode() & S_IMASK) as u16; + if 
metadata.is_dir() { + match current_inode.find(component) { + Ok(_) => (), + Err(_) => { + current_inode.create(component, FileType::Dir, mode)?; + } + } + } + else if metadata.is_symlink() { + // symlink-type operation + if let Ok(_) = current_inode.find(component) { + current_inode.unlink(component)?; + } + let symlink_inode = current_inode.create(component, FileType::SymLink, mode)?; + let target_path = fs::read_link(&full_path)?; + let target_path_str = target_path.to_string_lossy().into_owned(); + let data = target_path_str.as_bytes(); + + symlink_inode.resize(data.len())?; + symlink_inode.write_at(0, data)?; + } + else if metadata.is_file(){ + // file-type operation + if let Ok(_) = current_inode.find(component) { + current_inode.unlink(component)?; + } + let file_inode = current_inode.create(component, FileType::File, mode)?; + + let mut file = fs::File::open(&full_path)?; + file_inode.resize(file.metadata()?.len() as usize)?; + + let mut buf = unsafe { Box::<[u8; BUF_SIZE]>::new_uninit().assume_init() }; + let mut offset = 0usize; + let mut len = BUF_SIZE; + while len == BUF_SIZE { + len = file.read(buf.as_mut())?; + file_inode.write_at(offset, &buf[..len])?; + offset += len; + } + } + } + else { + // mkdir + current_inode = match current_inode.find(component) { + Ok(inode) => inode, + Err(_) => current_inode.create(component, FileType::Dir, 0o755)? 
+ }; + } + } + Ok(()) +} diff --git a/rcore-fs-sefs/src/lib.rs b/rcore-fs-sefs/src/lib.rs index f250d29..97c9747 100644 --- a/rcore-fs-sefs/src/lib.rs +++ b/rcore-fs-sefs/src/lib.rs @@ -3,6 +3,8 @@ #[macro_use] extern crate alloc; +// + use alloc::{ boxed::Box, collections::BTreeMap, @@ -28,6 +30,7 @@ use self::structs::*; pub mod dev; mod structs; + /// Helper methods for `File` impl dyn File { fn read_block(&self, id: BlockId, buf: &mut [u8]) -> DevResult<()> { @@ -103,7 +106,7 @@ impl INodeImpl { fn get_entry_and_entry_id(&self, name: &str) -> vfs::Result<(DiskEntry, usize)> { let name = if name.is_empty() { "." } else { name }; for entry_id in 0..self.disk_inode.read().blocks as usize { - let entry = self.file.read_direntry(entry_id)?; + let entry = self.file.read_direntry(entry_id)?; if entry.name.as_ref() == name { return Ok((entry, entry_id)); } @@ -183,8 +186,7 @@ impl INodeImpl { pub fn update_mac(&self) -> vfs::Result<()> { if self.fs.device.protect_integrity() { self.disk_inode.write().inode_mac = self.file.get_file_mac().unwrap(); - //println!("file_mac {:?}", self.disk_inode.read().inode_mac); - self.sync_all()?; + self.sync_part()?; } Ok(()) } @@ -212,6 +214,33 @@ impl INodeImpl { self.fs.meta_file.flush()?; Ok(()) } + /// Write the Inode's info into metadata cache, seperate the code into two parts to avoid dead lock + #[cfg(feature = "create_image")] + fn sync_metadata_cache(&self) -> vfs::Result<()> { + let need_sync; + let buffer = { + let disk_inode = self.disk_inode.read(); + need_sync = disk_inode.dirty(); + if !need_sync { + return Ok(()); + } + let mut buffer = [0u8; 128]; + buffer[..disk_inode.as_buf().len()].copy_from_slice(&disk_inode.as_buf()); + buffer + }; + { + let mut meta_data_cache = self.fs.meta_data_cache.write(); + meta_data_cache[self.id].copy_from_slice(&buffer); + } + { + let mut disk_inode = self.disk_inode.write(); + if disk_inode.dirty() { + disk_inode.sync(); + } + } + + Ok(()) + } /// Write zeros for the specified range of 
file. fn zero_range(&self, range: &Range, keep_size: bool) -> vfs::Result<()> { @@ -336,6 +365,15 @@ impl INodeImpl { } Ok(()) } + /// Sync file data and metadata cache of the INode + #[cfg(feature = "create_image")] + fn sync_part(&self) -> vfs::Result<()> { + // Sync data + self.sync_data()?; + // Sync metadata + self.sync_metadata_cache()?; + Ok(()) + } } impl vfs::INode for INodeImpl { @@ -420,7 +458,7 @@ impl vfs::INode for INodeImpl { } fn sync_data(&self) -> vfs::Result<()> { - self.file.flush()?; + self.file.flush()?; Ok(()) } @@ -495,6 +533,7 @@ impl vfs::INode for INodeImpl { type_: vfs::FileType, mode: u16, ) -> vfs::Result> { + let type_ = match type_ { vfs::FileType::File => FileType::File, vfs::FileType::Dir => FileType::Dir, @@ -513,16 +552,20 @@ impl vfs::INode for INodeImpl { return Err(FsError::NameTooLong); } - // Ensure the name is not exist + + #[cfg(not(feature = "create_image"))]{ if self.get_file_inode_id(name).is_ok() { return Err(FsError::EntryExist); } - + } + // Create a new INode let inode = self.fs.new_inode(type_, mode)?; + if type_ == FileType::Dir { inode.dirent_init(self.id)?; } + // Insert it into dir entry let entry = DiskEntry { id: inode.id as u32, @@ -536,13 +579,13 @@ impl vfs::INode for INodeImpl { inode.nlinks_inc(); //for . self.nlinks_inc(); //for .. 
} - // Update metadata file to make the INode valid - self.fs.sync_metadata()?; - inode.sync_all()?; - // Sync the dirINode's info into file - // MUST sync the INode's info first, or the entry maybe invalid - self.sync_all()?; - + // When creating an image, synchronization from cache to disk is not needed for every operation + #[cfg(not(feature = "create_image"))]{ + self.fs.sync_metadata()?; + inode.sync_all()?; + self.sync_all()?; + } + Ok(inode) } @@ -806,10 +849,15 @@ impl vfs::INode for INodeImpl { impl Drop for INodeImpl { /// Auto sync when drop fn drop(&mut self) { - #[cfg(feature = "create_image")] - self.update_mac() + + #[cfg(feature = "create_image")]{ + self.update_mac() .expect("failed to update mac when dropping the SEFS Inode"); + self.sync_part() + .expect("failed to sync data when dropping the SEFS Inode"); + } + #[cfg(not(feature = "create_image"))] self.sync_all() .expect("failed to sync when dropping the SEFS Inode"); if self.disk_inode.read().nlinks == 0 { @@ -838,6 +886,8 @@ pub struct SEFS { super_block: RwLock>, /// blocks in use are marked 0 free_map: RwLock>>, + /// metadata cache + meta_data_cache: RwLock>>, /// inode list inodes: RwLock>>, /// device @@ -888,18 +938,23 @@ impl SEFS { &mut free_map.as_mut_slice()[BLKSIZE * i..BLKSIZE * (i + 1)], )?; } - - Ok(SEFS { + let blocks = super_block.blocks as usize; + + let sefs = SEFS { super_block: RwLock::new(Dirty::new(super_block)), free_map: RwLock::new(Dirty::new(free_map)), + meta_data_cache: RwLock::new(Dirty::new(vec![[0u8; BLKSIZE]; blocks])), inodes: RwLock::new(BTreeMap::new()), device, meta_file, time_provider, uuid_provider, self_ptr: Weak::default(), - } - .wrap()) + }.wrap(); + // When opening an image for incremental zip, the metadata cache is initialized + #[cfg(feature = "create_image")] + sefs.init_meta_data_cache(); + Ok(sefs) } /// Create a new SEFS @@ -928,10 +983,14 @@ impl SEFS { device.clear()?; let meta_file = device.create(METAFILE_NAME)?; meta_file.set_len(blocks * 
BLKSIZE)?; - + let zeros = vec![0u8; BLKSIZE]; + for i in 0..blocks { + meta_file.write_at(&zeros,i * BLKSIZE)?; + } let sefs = SEFS { super_block: RwLock::new(Dirty::new_dirty(super_block)), free_map: RwLock::new(Dirty::new_dirty(free_map)), + meta_data_cache: RwLock::new(Dirty::new_dirty(vec![[0u8; BLKSIZE]; blocks])), inodes: RwLock::new(BTreeMap::new()), device, meta_file, @@ -940,6 +999,9 @@ impl SEFS { self_ptr: Weak::default(), } .wrap(); + // When creating an image, the metadata cache is initialized + #[cfg(feature = "create_image")] + sefs.init_meta_data_cache(); // Init root INode let root = sefs.new_inode(FileType::Dir, 0o755)?; assert_eq!(root.id, BLKN_ROOT); @@ -953,6 +1015,23 @@ impl SEFS { Ok(sefs) } + #[cfg(feature = "create_image")] + /// Initialize/Copy the cache of metadata when opening/creating SEFS + pub fn init_meta_data_cache(&self) { + let mut meta_data_cache = self.meta_data_cache.write(); + let num_blocks = self.super_block.read().blocks as usize; + meta_data_cache.resize(num_blocks, [0u8; BLKSIZE]); + + for i in 0..num_blocks { + self.meta_file + .read_block(i, &mut meta_data_cache[i]) + .expect("failed to read block"); + } + meta_data_cache.sync(); + + + } + /// Wrap pure SEFS with Arc /// Used in constructors fn wrap(self) -> Arc { @@ -968,9 +1047,18 @@ impl SEFS { } /// Write back super block and free map if dirty - fn sync_metadata(&self) -> vfs::Result<()> { - let (mut free_map, mut super_block) = self.write_lock_free_map_and_super_block(); + pub fn sync_metadata(&self) -> vfs::Result<()> { + let (mut free_map, mut super_block, mut meta_data_cache) = self.write_lock_free_map_and_super_block(); // Sync super block + + if meta_data_cache.dirty() { + for i in 0..super_block.blocks as usize { + self.meta_file + .write_all_at(&meta_data_cache[i], BLKSIZE * i) + .expect("failed to write block"); + } + meta_data_cache.sync(); + } if super_block.dirty() { self.meta_file .write_all_at(super_block.as_buf(), BLKSIZE * BLKN_SUPER)?; @@ -992,7 
+1080,7 @@ impl SEFS { /// Allocate a block, return block id fn alloc_block(&self) -> Option { - let (mut free_map, mut super_block) = self.write_lock_free_map_and_super_block(); + let (mut free_map, mut super_block, mut meta_data_cache) = self.write_lock_free_map_and_super_block(); let id = free_map.alloc().or_else(|| { // Allocate a new group let new_group_id = super_block.groups as usize; @@ -1002,6 +1090,12 @@ impl SEFS { self.meta_file .set_len(super_block.groups as usize * BLKBITS * BLKSIZE) .expect("failed to extend meta file"); + #[cfg(feature = "create_image")]{ + for _ in 0..BLKBITS { + meta_data_cache.push([0u8; BLKSIZE]); + } + } + free_map.extend(core::iter::repeat(true).take(BLKBITS)); // Set the bit to false to avoid to allocate it as INode ID free_map.set(Self::get_freemap_block_id_of_group(new_group_id), false); @@ -1015,7 +1109,7 @@ impl SEFS { /// Release a block fn free_block(&self, block_id: usize) { - let (mut free_map, mut super_block) = self.write_lock_free_map_and_super_block(); + let (mut free_map, mut super_block, _meta_data_cache) = self.write_lock_free_map_and_super_block(); assert!(!free_map[block_id]); free_map.set(block_id, true); super_block.unused_blocks += 1; @@ -1028,10 +1122,12 @@ impl SEFS { ) -> ( RwLockWriteGuard>>, RwLockWriteGuard>, + RwLockWriteGuard>> ) { let free_map = self.free_map.write(); let super_block = self.super_block.write(); - (free_map, super_block) + let meta_data_cache = self.meta_data_cache.write(); + (free_map, super_block, meta_data_cache) } /// Create a new INode struct, then insert it to self.inodes @@ -1043,7 +1139,6 @@ impl SEFS { create: bool, ) -> vfs::Result> { let filename = disk_inode.disk_filename.to_string(); - let inode = Arc::new(INodeImpl { id, disk_inode: RwLock::new(disk_inode), @@ -1065,7 +1160,6 @@ impl SEFS { /// ** Must ensure it's a valid INode ** fn get_inode(&self, id: INodeId) -> vfs::Result> { assert!(!self.free_map.read()[id]); - // In the BTreeSet and not weak. 
if let Some(inode) = self.inodes.read().get(&id) { if let Some(inode) = inode.upgrade() { diff --git a/sefs-cli/app/src/main.rs b/sefs-cli/app/src/main.rs index 90d4a98..95f28cf 100644 --- a/sefs-cli/app/src/main.rs +++ b/sefs-cli/app/src/main.rs @@ -3,6 +3,7 @@ use std::ffi::CString; use std::io::{Error as IoError, ErrorKind, Read}; use std::os::unix::ffi::OsStrExt; use std::os::unix::fs::FileExt; +use std::io::{BufRead, BufReader}; use std::path::PathBuf; use std::process::exit; @@ -13,7 +14,7 @@ use structopt::StructOpt; use rcore_fs::dev::std_impl::StdTimeProvider; use rcore_fs::vfs::FileSystem; use rcore_fs_cli::fuse::VfsFuse; -use rcore_fs_cli::zip::{unzip_dir, zip_dir}; +use rcore_fs_cli::zip::{unzip_dir, zip_dir_parallel, update_dir}; use rcore_fs_sefs as sefs; use rcore_fs_sefs::dev::std_impl::StdUuidProvider; use rcore_fs_unionfs as unionfs; @@ -21,6 +22,17 @@ use rcore_fs_unionfs as unionfs; mod enclave; mod sgx_dev; +#[derive(Debug)] +struct MainError(String); + +impl std::error::Error for MainError {} + +impl std::fmt::Display for MainError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}", self.0) + } +} + #[derive(Debug, StructOpt)] struct Opt { /// Path of the enclave library @@ -65,6 +77,28 @@ enum Cmd { #[structopt(short, long, parse(from_os_str))] key: Option, }, + /// Update data from to + #[structopt(name = "update")] + Update { + /// Source SEFS image directory + #[structopt(parse(from_os_str))] + image: PathBuf, + /// Target directory + #[structopt(parse(from_os_str))] + dir: PathBuf, + // LogFile directory + #[structopt(parse(from_os_str))] + log: PathBuf, + /// Root MAC of the SEFS image + #[structopt(parse(from_os_str))] + mac: PathBuf, + /// Protect the integrity of FS + #[structopt(short, long)] + protect_integrity: bool, + /// Key for decryption + #[structopt(short, long, parse(from_os_str))] + key: Option, + }, /// Mount overlayed with to #[structopt(name = "mount")] Mount { @@ -158,7 +192,9 @@ 
fn main() -> Result<(), Box> { let device = sgx_dev::SgxStorage::new(enclave.geteid(), &image, mode); sefs::SEFS::create(Box::new(device), &StdTimeProvider, &StdUuidProvider)? }; - zip_dir(&dir, sefs_fs.root_inode())?; + //Use thread pool to speed up the zip process + zip_dir_parallel(&dir, sefs_fs.root_inode()) + .map_err(|e| Box::new(MainError(format!("failed to zip: {}", e))) as Box)?; sefs_fs.sync()?; let root_mac_str = { let mut s = String::from(""); @@ -190,6 +226,41 @@ fn main() -> Result<(), Box> { unzip_dir(&dir, sefs_fs.root_inode())?; println!("Decrypt the SEFS image successfully"); } + Cmd::Update { + image, + dir, + log, + mac, + protect_integrity, + key, + } => { + let sefs_fs = { + let key = parse_key(&key)?; + let mode = sgx_dev::EncryptMode::from_parameters(protect_integrity, &key)?; + let device = sgx_dev::SgxStorage::new(enclave.geteid(), &image, mode); + sefs::SEFS::open(Box::new(device), &StdTimeProvider, &StdUuidProvider)? + }; + let file = std::fs::File::open(&log)?; + let reader = BufReader::new(file); + for line in reader.lines() { + let path = PathBuf::from(line?.trim_end().to_string()); + update_dir(&dir, &path, sefs_fs.root_inode())?; + sefs_fs.sync()?; + } + let root_mac_str = { + let mut s = String::from(""); + for (i, byte) in sefs_fs.root_mac().iter().enumerate() { + if i != 0 { + s += "-"; + } + s += &format!("{:02x}", byte); + } + s + }; + let f = std::fs::File::create(mac)?; + f.write_all_at(root_mac_str.as_bytes(), 0)?; + println!("Update the SEFS image successfully"); + } } Ok(()) } diff --git a/sefs-cli/enclave/Enclave.config.xml b/sefs-cli/enclave/Enclave.config.xml index 109fcd2..1a7dca0 100644 --- a/sefs-cli/enclave/Enclave.config.xml +++ b/sefs-cli/enclave/Enclave.config.xml @@ -4,7 +4,7 @@ 0 0x100000 0x20000000 - 1 + 32 1 0 0 diff --git a/sefs-cli/lib/libsefs-cli.signed.so b/sefs-cli/lib/libsefs-cli.signed.so new file mode 100644 index 0000000..eb3223d Binary files /dev/null and b/sefs-cli/lib/libsefs-cli.signed.so differ 
diff --git a/sefs-cli/lib/libsefs-cli_sim.so b/sefs-cli/lib/libsefs-cli_sim.so new file mode 100755 index 0000000..8aaaaf8 Binary files /dev/null and b/sefs-cli/lib/libsefs-cli_sim.so differ