diff --git a/rcore-fs-cli/src/lib.rs b/rcore-fs-cli/src/lib.rs
index 52467eb..743e290 100644
--- a/rcore-fs-cli/src/lib.rs
+++ b/rcore-fs-cli/src/lib.rs
@@ -6,3 +6,4 @@ extern crate log;
 #[cfg(feature = "use_fuse")]
 pub mod fuse;
 pub mod zip;
+mod thread_pool;
\ No newline at end of file
diff --git a/rcore-fs-cli/src/thread_pool.rs b/rcore-fs-cli/src/thread_pool.rs
new file mode 100644
index 0000000..a2ac31f
--- /dev/null
+++ b/rcore-fs-cli/src/thread_pool.rs
@@ -0,0 +1,79 @@
+use std::sync::{Arc, Mutex};
+use std::thread::{self, JoinHandle};
+use std::sync::mpsc;
+
+type Job = Box<dyn FnOnce() + Send + 'static>;
+
+enum Message {
+    NewJob(Job),
+    Shutdown,
+}
+
+pub struct ThreadPool {
+    workers: Vec<Worker>,
+    sender: mpsc::Sender<Message>,
+}
+
+impl ThreadPool {
+    pub fn new(size: usize) -> ThreadPool {
+        assert!(size > 0, "Thread pool size must be greater than zero");
+
+        let (sender, receiver) = mpsc::channel();
+        let receiver = Arc::new(Mutex::new(receiver));
+
+        let mut workers = Vec::with_capacity(size);
+
+        for id in 0..size {
+            workers.push(Worker::new(id, Arc::clone(&receiver)));
+        }
+
+        ThreadPool { workers, sender }
+    }
+
+    pub fn execute<F>(&self, f: F)
+    where
+        F: FnOnce() + Send + 'static,
+    {
+        let job = Box::new(f);
+        self.sender.send(Message::NewJob(job)).unwrap();
+    }
+
+    pub fn shutdown(&mut self) {
+        for _ in &self.workers {
+            self.sender.send(Message::Shutdown).unwrap();
+        }
+
+        for worker in &mut self.workers {
+            worker.thread.take().unwrap().join().unwrap();
+        }
+
+        self.workers.clear();
+    }
+}
+
+struct Worker {
+    id: usize,
+    thread: Option<JoinHandle<()>>,
+}
+
+impl Worker {
+    fn new(id: usize, receiver: Arc<Mutex<mpsc::Receiver<Message>>>) -> Worker {
+        let thread = thread::spawn(move || loop {
+            let message = receiver.lock().unwrap().recv().unwrap();
+
+            match message {
+                Message::NewJob(job) => {
+                    job();
+                }
+                Message::Shutdown => {
+                    break;
+                }
+            }
+        });
+
+        Worker { id, thread: Some(thread) }
+    }
+}
+
+impl Drop for ThreadPool {
+    fn drop(&mut self) {
+        self.shutdown();
+    }
+}
diff --git a/rcore-fs-cli/src/zip.rs b/rcore-fs-cli/src/zip.rs
index b1f1d82..891471d 100644
--- a/rcore-fs-cli/src/zip.rs
+++ b/rcore-fs-cli/src/zip.rs
@@ -8,43 +8,86 @@ use std::os::unix::fs::PermissionsExt;
 use std::path::Path;
 use std::str;
 use std::sync::Arc;
+use std::env;
 
-use rcore_fs::vfs::{FileType, INode, PATH_MAX};
+use rcore_fs::vfs::{FileType, DirEntryData, INode, PATH_MAX};
+use rcore_fs::vfs::FsError;
+
+use crate::thread_pool::ThreadPool;
 
 const BUF_SIZE: usize = 0x10000;
 const S_IMASK: u32 = 0o777;
 
+struct FileTask {
+    inode: Arc<dyn INode>,
+    entry: fs::DirEntry,
+}
+
 pub fn zip_dir(path: &Path, inode: Arc<dyn INode>) -> Result<(), Box<dyn Error>> {
+    let pool = ThreadPool::new(30);
+    zip_dir_task(path, inode, &pool)?;
+    Ok(())
+}
+
+pub fn zip_dir_task(path: &Path, inode: Arc<dyn INode>, pool: &ThreadPool) -> Result<(), Box<dyn Error>> {
     let mut entries: Vec<fs::DirEntry> = fs::read_dir(path)?.map(|dir| dir.unwrap()).collect();
     entries.sort_by_key(|entry| entry.file_name());
+
+    let mut dir_entries: Vec<DirEntryData> = Vec::new();
+    let mut file_tasks: Vec<FileTask> = Vec::new();
+
     for entry in entries {
         let name_ = entry.file_name();
         let name = name_.to_str().unwrap();
         let metadata = fs::symlink_metadata(entry.path())?;
         let type_ = metadata.file_type();
         let mode = (metadata.permissions().mode() & S_IMASK) as u16;
-        //println!("zip: name: {:?}, mode: {:#o}", entry.path(), mode);
+
         if type_.is_file() {
-            let inode = inode.create(name, FileType::File, mode)?;
-            let mut file = fs::File::open(entry.path())?;
-            inode.resize(file.metadata()?.len() as usize)?;
-            let mut buf = unsafe { Box::<[u8; BUF_SIZE]>::new_uninit().assume_init() };
-            let mut offset = 0usize;
-            let mut len = BUF_SIZE;
-            while len == BUF_SIZE {
-                len = file.read(buf.as_mut())?;
-                inode.write_at(offset, &buf[..len])?;
-                offset += len;
-            }
-        } else if type_.is_dir() {
-            let inode = inode.create(name, FileType::Dir, mode)?;
-            zip_dir(entry.path().as_path(), inode)?;
+            let inode = inode.create_for_zip(name, FileType::File, mode)?;
+            dir_entries.push(DirEntryData { inode: Arc::clone(&inode), name: String::from(name), file_type: FileType::File });
+            file_tasks.push(FileTask { inode, entry });
         } else if type_.is_symlink() {
             let target = fs::read_link(entry.path())?;
-            let inode = inode.create(name, FileType::SymLink, mode)?;
+            let inode = inode.create_for_zip(name, FileType::SymLink, mode)?;
+            dir_entries.push(DirEntryData { inode: Arc::clone(&inode), name: String::from(name), file_type: FileType::SymLink });
             let data = target.as_os_str().as_bytes();
             inode.resize(data.len())?;
             inode.write_at(0, data)?;
+        } else if type_.is_dir() {
+            let inode = inode.create_for_zip(name, FileType::Dir, mode)?;
+            dir_entries.push(DirEntryData { inode: Arc::clone(&inode), name: String::from(name), file_type: FileType::Dir });
+            zip_dir_task(entry.path().as_path(), inode, &pool)?;
+        }
+    }
+
+    if dir_entries.len() > 0 {
+        process_sync(inode, dir_entries, file_tasks, &pool);
+    }
+    Ok(())
+}
+
+fn process_sync(dir_inode: Arc<dyn INode>, dir_entries: Vec<DirEntryData>, file_tasks: Vec<FileTask>, pool: &ThreadPool) {
+    pool.execute(move || {
+        if let Err(e) = dir_inode.write_all_direntry(dir_entries) {
+            eprintln!("Failed to write direntry: {}", e);
+        }
+        if let Err(e) = process_files_task(&file_tasks) {
+            eprintln!("Failed to process files: {}", e);
+        }
+    });
+}
+
+fn process_files_task(file_tasks: &[FileTask]) -> Result<(), FsError> {
+    for task in file_tasks {
+        let mut file = fs::File::open(task.entry.path())?;
+        let mut buf = unsafe { Box::<[u8; BUF_SIZE]>::new_uninit().assume_init() };
+        let mut offset = 0usize;
+        let mut len = BUF_SIZE;
+        while len == BUF_SIZE {
+            len = file.read(buf.as_mut())?;
+            task.inode.write_at(offset, &buf[..len])?;
+            offset += len;
         }
     }
     Ok(())
@@ -86,3 +129,99 @@ pub fn unzip_dir(path: &Path, inode: Arc<dyn INode>) -> Result<(), Box<dyn Error>> {
     }
     Ok(())
 }
+
+pub fn inc_zip_dir(root_path: &Path, inc_path: &Path, root_inode: Arc<dyn INode>) -> Result<(), Box<dyn Error>> {
+    // Convert to absolute paths
+    let abs_root = if root_path.is_absolute() {
+        root_path.to_path_buf()
+    } else {
+        env::current_dir()?.join(root_path)
+    }.canonicalize()?;
+
+    let abs_inc = if inc_path.is_absolute() {
+        inc_path.to_path_buf()
+    } else {
+        env::current_dir()?.join(inc_path)
+    }.canonicalize()?;
+
+    if !abs_inc.starts_with(&abs_root) {
+        return Err(format!("{} is not under root {}",
+            abs_inc.display(), abs_root.display()).into());
+    }
+
+    let relative_path = abs_inc.strip_prefix(&abs_root)?;
+    let mut components = Vec::new();
+
+    for c in relative_path.components() {
+        let os_str = c.as_os_str();
+        let s = os_str.to_str()
+            .ok_or_else(|| format!("Invalid UTF-8 component: {:?}", os_str))?;
+        components.push(s.to_string());
+    }
+
+    let mut current_inode = root_inode;
+    for (i, name) in components.iter().enumerate() {
+        let is_last = i == components.len() - 1;
+
+        // Process the intermediate directories of the path
+        if !is_last {
+            current_inode = match current_inode.lookup_follow(name, 1) {
+                Ok(inode) => {
+                    if inode.metadata()?.type_ != FileType::Dir {
+                        return Err(format!("'{}' exists but is not a directory", name).into());
+                    }
+                    inode
+                }
+                Err(_) => current_inode.create(name, FileType::Dir, 0o755)?,
+            };
+            continue;
+        }
+
+        // Process the final path component
+        if let Ok(_) = current_inode.lookup_follow(name, 1) {
+            if let Err(e) = current_inode.unlink_recursive(name) {
+                return Err(Box::new(e));
+            }
+        }
+
+        if !abs_inc.exists() { return Ok(()); }
+
+        let meta = fs::symlink_metadata(&abs_inc)?;
+        let mode = (meta.permissions().mode() & S_IMASK) as u16;
+
+        match meta.file_type() {
+            ft if ft.is_file() => {
+                let inode = current_inode.create(name, FileType::File, mode)?;
+                let mut file = fs::File::open(&abs_inc)?;
+                let size = file.metadata()?.len() as usize;
+                inode.resize(size)?;
+
+                let mut buf = vec![0u8; 4096];
+                let mut offset = 0;
+                while let Ok(len) = file.read(&mut buf) {
+                    if len == 0 { break; }
+                    inode.write_at(offset, &buf[..len])?;
+                    offset += len;
+                }
+            }
+            ft if ft.is_dir() => {
+                let inode = current_inode.create(name, FileType::Dir, mode)?;
+                for entry in fs::read_dir(&abs_inc)? {
+                    let entry = entry?;
+                    let path = entry.path();
+                    inc_zip_dir(&abs_root, &path, inode.clone())?;
+                }
+            }
+            ft if ft.is_symlink() => {
+                let target = fs::read_link(&abs_inc)?;
+                let inode = current_inode.create(name, FileType::SymLink, mode)?;
+                let data = target.as_os_str().as_bytes();
+                inode.resize(data.len())?;
+                inode.write_at(0, data)?;
+            }
+            _ => return Err("Unsupported file type".into()),
+        }
+    }
+
+    Ok(())
+}
\ No newline at end of file
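The new `thread_pool.rs` is the classic channel-plus-shared-receiver worker pool, and `zip_dir_task` uses it to push each directory's metadata and data writes onto a background job while the main thread keeps walking the tree. Below is a minimal standalone sketch of the same pattern using only `std`; the `Message` enum, the worker count of 4 and the eight jobs are illustrative, not part of the patch.

```rust
use std::sync::{mpsc, Arc, Mutex};
use std::thread;

enum Message {
    NewJob(Box<dyn FnOnce() + Send + 'static>),
    Shutdown,
}

fn main() {
    let (sender, receiver) = mpsc::channel::<Message>();
    // All workers share one receiver behind a mutex, exactly like Worker::new.
    let receiver = Arc::new(Mutex::new(receiver));

    let workers: Vec<_> = (0..4)
        .map(|id| {
            let receiver = Arc::clone(&receiver);
            thread::spawn(move || loop {
                // Lock only long enough to take one message off the queue.
                let message = receiver.lock().unwrap().recv().unwrap();
                match message {
                    Message::NewJob(job) => {
                        println!("worker {id} runs a job");
                        job();
                    }
                    Message::Shutdown => break,
                }
            })
        })
        .collect();

    // Enqueue work, then ask every worker to stop and wait for them.
    for n in 0..8 {
        sender
            .send(Message::NewJob(Box::new(move || println!("job {n} done"))))
            .unwrap();
    }
    for _ in &workers {
        sender.send(Message::Shutdown).unwrap();
    }
    for w in workers {
        w.join().unwrap();
    }
}
```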
diff --git a/rcore-fs-sefs/src/lib.rs b/rcore-fs-sefs/src/lib.rs
index f250d29..94295fc 100644
--- a/rcore-fs-sefs/src/lib.rs
+++ b/rcore-fs-sefs/src/lib.rs
@@ -18,14 +18,16 @@ use core::ops::Range;
 use bitvec::prelude::*;
 use rcore_fs::dev::{DevResult, TimeProvider};
 use rcore_fs::dirty::Dirty;
-use rcore_fs::vfs::{self, AllocFlags, DirentVisitor, FallocateMode, FileSystem, FsError, INode};
+use rcore_fs::vfs::{self, AllocFlags, DirentVisitor, FallocateMode, FileSystem, DirEntryData, FsError, INode};
 use spin::{RwLock, RwLockWriteGuard};
 
 use self::dev::*;
+use self::ram_file::*;
 pub use self::structs::SEFS_MAGIC;
 use self::structs::*;
 
 pub mod dev;
+mod ram_file;
 mod structs;
 
 /// Helper methods for `File`
@@ -546,6 +548,89 @@ impl vfs::INode for INodeImpl {
         Ok(inode)
     }
 
+    // Only used in the accelerated packaging process;
+    // the caller must invoke `write_all_direntry(...)` afterwards to
+    // write all directory entries.
+    fn create_for_zip(
+        &self,
+        name: &str,
+        type_: vfs::FileType,
+        mode: u16,
+    ) -> vfs::Result<Arc<dyn vfs::INode>> {
+        let type_ = match type_ {
+            vfs::FileType::File => FileType::File,
+            vfs::FileType::Dir => FileType::Dir,
+            vfs::FileType::SymLink => FileType::SymLink,
+            vfs::FileType::Socket => FileType::Socket,
+            _ => return Err(FsError::InvalidParam),
+        };
+        let info = self.metadata()?;
+        if info.type_ != vfs::FileType::Dir {
+            return Err(FsError::NotDir);
+        }
+        if info.nlinks == 0 {
+            return Err(FsError::DirRemoved);
+        }
+        if name.len() > MAX_FNAME_LEN {
+            return Err(FsError::NameTooLong);
+        }
+
+        // Ensure the name does not already exist
+        if self.get_file_inode_id(name).is_ok() {
+            return Err(FsError::EntryExist);
+        }
+
+        // Create a new INode
+        let inode = self.fs.new_inode(type_, mode)?;
+        if type_ == FileType::Dir {
+            inode.dirent_init(self.id)?;
+        }
+        // Append succeeded, increase nlinks
+        inode.nlinks_inc();
+        if type_ == FileType::Dir {
+            inode.nlinks_inc(); // for .
+            self.nlinks_inc(); // for ..
+        }
+        // Update the metadata file to make the INode valid
+        self.fs.sync_metadata()?;
+        inode.sync_all()?;
+        // Sync the dir INode's info into the file.
+        // MUST sync the INode's info first, or the entry may be invalid.
+        self.sync_all()?;
+
+        Ok(inode)
+    }
+
+    fn write_all_direntry(&self, dir_entries: Vec<DirEntryData>) -> vfs::Result<()> {
+        let mut inode = self.disk_inode.write();
+        let total = &mut inode.blocks;
+        let mut entry_id = *total as usize;
+
+        for entry in dir_entries {
+            let dir_inode = entry.inode
+                .downcast_ref::<INodeImpl>()
+                .ok_or(FsError::NotSameFs)?;
+            let type_ = match entry.file_type {
+                vfs::FileType::File => FileType::File,
+                vfs::FileType::Dir => FileType::Dir,
+                vfs::FileType::SymLink => FileType::SymLink,
+                vfs::FileType::Socket => FileType::Socket,
+                _ => return Err(FsError::InvalidParam),
+            };
+            let entry = DiskEntry {
+                id: dir_inode.id as u32,
+                name: Str256::from(entry.name.as_str()),
+                type_,
+            };
+            self.file.write_direntry(entry_id, &entry)?;
+            *total += 1;
+            entry_id += 1;
+        }
+
+        self.file.flush()?;
+        Ok(())
+    }
+
     fn unlink(&self, name: &str) -> vfs::Result<()> {
         let info = self.metadata()?;
         if info.type_ != vfs::FileType::Dir {
@@ -579,6 +664,45 @@
         Ok(())
     }
 
+    fn unlink_recursive(&self, name: &str) -> vfs::Result<()> {
+        let info = self.metadata()?;
+        if info.type_ != vfs::FileType::Dir {
+            return Err(FsError::NotDir);
+        }
+        if info.nlinks == 0 {
+            return Err(FsError::DirRemoved);
+        }
+        if name == "." || name == ".." || name.is_empty() {
+            return Err(FsError::IsDir);
+        }
+        if name.len() > MAX_FNAME_LEN {
+            return Err(FsError::NameTooLong);
+        }
+
+        let (inode_id, entry_id) = self.get_file_inode_and_entry_id(name)?;
+        let inode = self.fs.get_inode(inode_id)?;
+
+        if inode.disk_inode.read().type_ == FileType::Dir {
+            if inode.disk_inode.read().blocks > 2 {
+                let total = inode.disk_inode.read().blocks as usize;
+
+                // When a sub-entry is deleted, the directory's entry order is adjusted:
+                // the last entry is moved into the vacated slot (dirent_inode_remove).
+                // Therefore, delete from the back to the front.
+                for sub_id in (2..total).rev() {
+                    let sub_entry = inode.file.read_direntry(sub_id)?;
+                    let sub_entry_name = sub_entry.name.as_ref();
+                    inode.unlink_recursive(sub_entry_name)?;
+                }
+                inode.file.set_len(2 * DIRENT_SIZE)?;
+                inode.disk_inode.write().blocks = 2;
+            }
+        }
+        self.dirent_inode_remove(inode, entry_id)?;
+        self.sync_all()?;
+        Ok(())
+    }
+
     fn link(&self, name: &str, other: &Arc<dyn INode>) -> vfs::Result<()> {
         let info = self.metadata()?;
         if info.type_ != vfs::FileType::Dir {
@@ -868,12 +992,56 @@ impl SEFS {
         time_provider: &'static dyn TimeProvider,
         uuid_provider: &'static dyn UuidProvider,
     ) -> vfs::Result<Arc<Self>> {
-        let meta_file = device.open(METAFILE_NAME)?;
+        Self::_open(device, time_provider, uuid_provider, false)
+    }
 
-        // Load super block
-        let super_block = meta_file.load_struct::<SuperBlock>(BLKN_SUPER)?;
-        if !super_block.check() {
-            return Err(FsError::WrongFs);
+    pub fn open_for_zip(
+        device: Box<dyn Storage>,
+        time_provider: &'static dyn TimeProvider,
+        uuid_provider: &'static dyn UuidProvider,
+    ) -> vfs::Result<Arc<Self>> {
+        Self::_open(device, time_provider, uuid_provider, true)
+    }
+
+    fn _open(
+        device: Box<dyn Storage>,
+        time_provider: &'static dyn TimeProvider,
+        uuid_provider: &'static dyn UuidProvider,
+        for_zip: bool,
+    ) -> vfs::Result<Arc<Self>> {
+        let meta_file: Box<dyn File>;
+        let super_block: SuperBlock;
+        if for_zip {
+            // Read all metadata from disk into meta_file (in memory)
+            let meta_file_disk = device.open(METAFILE_NAME)?;
+            meta_file = Box::new(RamFile::new());
+            super_block = meta_file_disk.load_struct::<SuperBlock>(BLKN_SUPER)?;
+
+            if !super_block.check() {
+                return Err(FsError::WrongFs);
+            }
+
+            let mut offset = 0;
+            let mut buffer = vec![0u8; crate::BLKSIZE];
+            let mut remaining_len = super_block.blocks as usize * crate::BLKSIZE;
+
+            while remaining_len > 0 {
+                let bytes_to_read = remaining_len.min(crate::BLKSIZE);
+                let len = meta_file_disk.read_at(&mut buffer[..bytes_to_read], offset)?;
+                if len == 0 {
+                    break;
+                }
+                meta_file.write_all_at(&buffer[..len], offset)?;
+                offset += len;
+                remaining_len -= len;
+            }
+        } else {
+            meta_file = device.open(METAFILE_NAME)?;
+            super_block = meta_file.load_struct::<SuperBlock>(BLKN_SUPER)?;
+
+            if !super_block.check() {
+                return Err(FsError::WrongFs);
+            }
         }
 
         // Load free map
@@ -907,6 +1075,23 @@ impl SEFS {
         device: Box<dyn Storage>,
         time_provider: &'static dyn TimeProvider,
         uuid_provider: &'static dyn UuidProvider,
+    ) -> vfs::Result<Arc<Self>> {
+        Self::_create(device, time_provider, uuid_provider, false)
+    }
+
+    pub fn create_for_zip(
+        device: Box<dyn Storage>,
+        time_provider: &'static dyn TimeProvider,
+        uuid_provider: &'static dyn UuidProvider,
+    ) -> vfs::Result<Arc<Self>> {
+        Self::_create(device, time_provider, uuid_provider, true)
+    }
+
+    fn _create(
+        device: Box<dyn Storage>,
+        time_provider: &'static dyn TimeProvider,
+        uuid_provider: &'static dyn UuidProvider,
+        for_zip: bool,
     ) -> vfs::Result<Arc<Self>> {
         let blocks = BLKBITS;
 
@@ -926,7 +1111,12 @@ impl SEFS {
         };
         // Clear the existing files in storage
        device.clear()?;
-        let meta_file = device.create(METAFILE_NAME)?;
+        let meta_file: Box<dyn File>;
+        if for_zip {
+            meta_file = Box::new(RamFile::new());
+        } else {
+            meta_file = device.create(METAFILE_NAME)?;
+        }
         meta_file.set_len(blocks * BLKSIZE)?;
 
         let sefs = SEFS {
@@ -967,6 +1157,28 @@ impl SEFS {
         unsafe { Arc::from_raw(ptr) }
     }
 
+    pub fn write_metadata(&self) -> vfs::Result<()> {
+        let mut offset = 0;
+        let mut buffer = vec![0u8; crate::BLKSIZE];
+
+        let meta_file_disk = self.device.create(METAFILE_NAME)?;
+        let mut remaining_len = self.super_block.read().blocks as usize * crate::BLKSIZE;
+        meta_file_disk.set_len(remaining_len)?;
+
+        while remaining_len > 0 {
+            let bytes_to_read = remaining_len.min(crate::BLKSIZE);
+            let len = self.meta_file.read_at(&mut buffer[..bytes_to_read], offset)?;
+            if len == 0 {
+                break;
+            }
+            meta_file_disk.write_all_at(&buffer[..len], offset)?;
+            offset += len;
+            remaining_len -= len;
+        }
+        meta_file_disk.flush()?;
+        Ok(())
+    }
+
     /// Write back super block and free map if dirty
     fn sync_metadata(&self) -> vfs::Result<()> {
         let (mut free_map, mut super_block) = self.write_lock_free_map_and_super_block();
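The comment in `unlink_recursive` is the key to its loop direction: removal fills the vacated slot with the last entry, so a forward scan would step over the entry that was just swapped in. The standalone sketch below reproduces the effect with a plain `Vec` and `swap_remove` standing in for `dirent_inode_remove`; it is not the SEFS code itself.

```rust
fn main() {
    // Forward scan with swap-remove semantics: "c" is swapped into index 0,
    // then the scan moves past it and it is never removed.
    let mut forward = vec!["a", "b", "c"];
    let mut i = 0;
    while i < forward.len() {
        forward.swap_remove(i); // last element moves into slot i
        i += 1;                 // ...and the scan steps over it
    }
    assert_eq!(forward, vec!["c"]); // one entry incorrectly survives

    // Backward scan: every index is visited exactly once.
    let mut backward = vec!["a", "b", "c"];
    for i in (0..backward.len()).rev() {
        backward.swap_remove(i);
    }
    assert!(backward.is_empty());
}
```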
diff --git a/rcore-fs-sefs/src/ram_file.rs b/rcore-fs-sefs/src/ram_file.rs
new file mode 100644
index 0000000..85646fb
--- /dev/null
+++ b/rcore-fs-sefs/src/ram_file.rs
@@ -0,0 +1,70 @@
+use alloc::vec::Vec;
+use spin::Mutex;
+use rcore_fs::dev::{DevError, DevResult, EIO};
+use crate::dev::File;
+use crate::dev::SefsMac;
+
+pub struct RamFile {
+    data: Mutex<Vec<u8>>,
+}
+
+impl RamFile {
+    pub fn new() -> Self {
+        RamFile {
+            data: Mutex::new(Vec::new()),
+        }
+    }
+}
+
+impl File for RamFile {
+    fn read_at(&self, buf: &mut [u8], offset: usize) -> DevResult<usize> {
+        if buf.is_empty() {
+            return Ok(0);
+        }
+
+        let data = self.data.lock();
+        let data_len = data.len();
+
+        if offset >= data_len {
+            return Err(DevError(EIO));
+        }
+
+        let read_len = core::cmp::min(data_len - offset, buf.len());
+        buf[..read_len].copy_from_slice(&data[offset..offset + read_len]);
+        Ok(read_len)
+    }
+
+    fn write_at(&self, buf: &[u8], offset: usize) -> DevResult<usize> {
+        if buf.is_empty() {
+            return Ok(0);
+        }
+
+        let mut data = self.data.lock();
+        let data_len = data.len();
+        let write_end = match offset.checked_add(buf.len()) {
+            Some(end) => end,
+            None => return Err(DevError(EIO)),
+        };
+
+        if write_end > data_len {
+            return Err(DevError(EIO));
+        }
+
+        data[offset..write_end].copy_from_slice(buf);
+        Ok(buf.len())
+    }
+
+    fn set_len(&self, len: usize) -> DevResult<()> {
+        let mut data = self.data.lock();
+        data.resize(len, 0);
+        Ok(())
+    }
+
+    fn flush(&self) -> DevResult<()> {
+        Ok(())
+    }
+
+    fn get_file_mac(&self) -> DevResult<SefsMac> {
+        Ok(SefsMac::default())
+    }
+}
diff --git a/rcore-fs/src/vfs.rs b/rcore-fs/src/vfs.rs
index 02ba09a..d19527b 100644
--- a/rcore-fs/src/vfs.rs
+++ b/rcore-fs/src/vfs.rs
@@ -69,6 +69,17 @@ pub trait INode: Any + Sync + Send {
     ) -> Result<Arc<dyn INode>> {
         self.create(name, type_, mode)
     }
+
+    /// Create a new INode in the directory without writing its filename to the directory entries
+    fn create_for_zip(&self, _name: &str, _type: FileType, _mode: u16) -> Result<Arc<dyn INode>> {
+        Err(FsError::NotSupported)
+    }
+
+    /// Write all filenames to the directory entries.
+    /// `_dir_entries` carries the inode, name and file type of each new entry.
+    fn write_all_direntry(&self, _dir_entries: Vec<DirEntryData>) -> Result<()> {
+        Err(FsError::NotSupported)
+    }
 
     /// Create a hard link `name` to `other`
     fn link(&self, _name: &str, _other: &Arc<dyn INode>) -> Result<()> {
@@ -80,6 +91,11 @@ pub trait INode: Any + Sync + Send {
         Err(FsError::NotSupported)
     }
 
+    /// Recursively delete a hard link `name`
+    fn unlink_recursive(&self, _name: &str) -> Result<()> {
+        Err(FsError::NotSupported)
+    }
+
     /// Move INode `self/old_name` to `target/new_name`.
     /// If `target` equals `self`, do rename.
     fn move_(&self, _old_name: &str, _target: &Arc<dyn INode>, _new_name: &str) -> Result<()> {
@@ -340,6 +356,12 @@ pub enum FileType {
     Socket,
 }
 
+pub struct DirEntryData {
+    pub inode: Arc<dyn INode>,
+    pub name: String,
+    pub file_type: FileType,
+}
+
 pub const FS_MAC_SIZE: usize = 16;
 
 pub type FsMac = [u8; FS_MAC_SIZE];
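Note the contract `RamFile` imposes on its callers: `write_at` refuses writes past the current length instead of growing the buffer, so the metadata file must be pre-sized with `set_len` (which is exactly what `_create`, `_open` and `write_metadata` do). A simplified standalone mock of that contract is shown below; the `MemFile` name and the use of `std::sync::Mutex` instead of `spin::Mutex` are stand-ins, not the crate's code.

```rust
use std::sync::Mutex;

struct MemFile {
    data: Mutex<Vec<u8>>,
}

impl MemFile {
    fn new() -> Self {
        MemFile { data: Mutex::new(Vec::new()) }
    }
    fn set_len(&self, len: usize) {
        self.data.lock().unwrap().resize(len, 0);
    }
    // Like RamFile::write_at: never grows the buffer, errors past the end.
    fn write_at(&self, buf: &[u8], offset: usize) -> Result<usize, &'static str> {
        let mut data = self.data.lock().unwrap();
        let end = offset.checked_add(buf.len()).ok_or("offset overflow")?;
        if end > data.len() {
            return Err("write past end; call set_len first");
        }
        data[offset..end].copy_from_slice(buf);
        Ok(buf.len())
    }
}

fn main() {
    let file = MemFile::new();
    assert!(file.write_at(b"hello", 0).is_err()); // length is still 0
    file.set_len(16);                             // pre-size, as _create does
    assert_eq!(file.write_at(b"hello", 0).unwrap(), 5);
}
```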
diff --git a/sefs-cli/app/src/main.rs b/sefs-cli/app/src/main.rs
index 90d4a98..898fe10 100644
--- a/sefs-cli/app/src/main.rs
+++ b/sefs-cli/app/src/main.rs
@@ -1,6 +1,7 @@
 use std::error::Error;
 use std::ffi::CString;
-use std::io::{Error as IoError, ErrorKind, Read};
+use std::fs::File;
+use std::io::{Error as IoError, ErrorKind, Read, BufRead, BufReader};
 use std::os::unix::ffi::OsStrExt;
 use std::os::unix::fs::FileExt;
 use std::path::PathBuf;
@@ -13,7 +14,7 @@ use structopt::StructOpt;
 use rcore_fs::dev::std_impl::StdTimeProvider;
 use rcore_fs::vfs::FileSystem;
 use rcore_fs_cli::fuse::VfsFuse;
-use rcore_fs_cli::zip::{unzip_dir, zip_dir};
+use rcore_fs_cli::zip::{unzip_dir, zip_dir, inc_zip_dir};
 use rcore_fs_sefs as sefs;
 use rcore_fs_sefs::dev::std_impl::StdUuidProvider;
 use rcore_fs_unionfs as unionfs;
@@ -48,6 +49,12 @@ enum Cmd {
         /// Key for encryption
         #[structopt(short, long, parse(from_os_str))]
         key: Option<PathBuf>,
+        /// Enable incremental zip mode
+        #[structopt(long = "inc")]
+        inc: bool,
+        /// Path for incremental data [default: <dir>.path_log]
+        #[structopt(long = "inc-path", parse(from_os_str))]
+        inc_path: Option<PathBuf>,
     },
     /// Unzip data from given <image> to <dir>
     #[structopt(name = "unzip")]
@@ -150,16 +157,50 @@ fn main() -> Result<(), Box<dyn Error>> {
             image,
             mac,
             key,
+            inc,
+            inc_path,
         } => {
-            let sefs_fs = {
-                std::fs::create_dir(&image)?;
-                let key = parse_key(&key)?;
-                let mode = sgx_dev::EncryptMode::from_parameters(true, &key)?;
-                let device = sgx_dev::SgxStorage::new(enclave.geteid(), &image, mode);
-                sefs::SEFS::create(Box::new(device), &StdTimeProvider, &StdUuidProvider)?
+            let actual_inc_path = inc_path.unwrap_or_else(|| {
+                let lossy_str = dir.to_string_lossy();
+                let trimmed_image = lossy_str.trim_end_matches('/');
+                let trimmed_image_path = format!("{}.path_log", trimmed_image);
+                PathBuf::from(trimmed_image_path)
+            });
+            let mut inc_mode = inc;
+            if inc && !actual_inc_path.exists() {
+                println!("File \"{}\" was not found; cannot package incrementally, switching to normal zip mode", actual_inc_path.display());
+                inc_mode = false;
+            }
+            let sefs_fs = if inc_mode {
+                let sefs_fs_inc = {
+                    let key = parse_key(&key)?;
+                    let mode = sgx_dev::EncryptMode::from_parameters(true, &key)?;
+                    let device = sgx_dev::SgxStorage::new(enclave.geteid(), &image, mode);
+                    sefs::SEFS::open(Box::new(device), &StdTimeProvider, &StdUuidProvider)?
+                };
+
+                let inc_path_file = File::open(&actual_inc_path)?;
+                let reader = BufReader::new(inc_path_file);
+
+                // Iterate over each path in the file and call inc_zip_dir
+                for line in reader.lines() {
+                    let p = PathBuf::from(line?); // Each line is a new path
+                    inc_zip_dir(&dir, &p, sefs_fs_inc.root_inode())?;
+                }
+                sefs_fs_inc
+            } else {
+                let sefs_fs_zip = {
+                    std::fs::create_dir(&image)?;
+                    let key = parse_key(&key)?;
+                    let mode = sgx_dev::EncryptMode::from_parameters(true, &key)?;
+                    let device = sgx_dev::SgxStorage::new(enclave.geteid(), &image, mode);
+                    sefs::SEFS::create_for_zip(Box::new(device), &StdTimeProvider, &StdUuidProvider)?
+                };
+                zip_dir(&dir, sefs_fs_zip.root_inode())?;
+                sefs_fs_zip
             };
-            zip_dir(&dir, sefs_fs.root_inode())?;
             sefs_fs.sync()?;
+            sefs_fs.write_metadata()?;
             let root_mac_str = {
                 let mut s = String::from("");
                 for (i, byte) in sefs_fs.root_mac().iter().enumerate() {
@@ -172,6 +213,9 @@ fn main() -> Result<(), Box<dyn Error>> {
             };
             let f = std::fs::File::create(mac)?;
            f.write_all_at(root_mac_str.as_bytes(), 0)?;
+            if actual_inc_path.exists() {
+                std::fs::remove_file(&actual_inc_path)?;
+            }
             println!("Generate the SEFS image successfully");
         }
         Cmd::Unzip {
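The `--inc` mode reads a plain-text log, one changed path per line, defaulting to `<dir>.path_log`, replays each path through `inc_zip_dir`, and deletes the log after a successful run. The patch only consumes that log, so the writer below is a hedged sketch of what a producer might look like; the `log_changed_path` helper, the example paths and the append-only format are assumptions, only the one-path-per-line layout and the default file name come from the patch.

```rust
use std::fs::OpenOptions;
use std::io::Write;
use std::path::Path;

// Hypothetical producer for the path log consumed by `sefs-cli zip --inc`.
fn log_changed_path(log: &Path, changed: &Path) -> std::io::Result<()> {
    let mut f = OpenOptions::new().create(true).append(true).open(log)?;
    writeln!(f, "{}", changed.display())
}

fn main() -> std::io::Result<()> {
    // Mirrors the CLI default: "<dir>.path_log" next to the source directory.
    let dir = "image_root";
    let log = format!("{}.path_log", dir.trim_end_matches('/'));
    log_changed_path(Path::new(&log), Path::new("image_root/etc/hosts"))?;
    log_changed_path(Path::new(&log), Path::new("image_root/bin/busybox"))?;
    Ok(())
}
```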
diff --git a/sefs-cli/app/src/sgx_dev.rs b/sefs-cli/app/src/sgx_dev.rs
index fbdb979..0f88175 100644
--- a/sefs-cli/app/src/sgx_dev.rs
+++ b/sefs-cli/app/src/sgx_dev.rs
@@ -3,6 +3,7 @@ use rcore_fs_sefs::dev::SefsMac;
 use rcore_fs_sefs::dev::{File, Storage};
 use sgx_types::*;
 use std::fs::{read_dir, remove_file};
+use std::sync::{Arc, Mutex};
 use std::io;
 use std::mem;
 use std::path::*;
@@ -75,14 +76,14 @@ impl Storage for SgxStorage {
         let mut path = self.path.clone();
         path.push(file_id);
         let file = file_open(path.to_str().unwrap(), false, &self.mode)?;
-        Ok(Box::new(SgxFile { file }))
+        Ok(Box::new(SgxFile::new(file)))
     }
 
     fn create(&self, file_id: &str) -> DevResult<Box<dyn File>> {
         let mut path = self.path.clone();
         path.push(file_id);
         let file = file_open(path.to_str().unwrap(), true, &self.mode)?;
-        Ok(Box::new(SgxFile { file }))
+        Ok(Box::new(SgxFile::new(file)))
     }
 
     fn remove(&self, file_id: &str) -> DevResult<()> {
@@ -110,17 +111,27 @@ impl Storage for SgxStorage {
 }
 
 pub struct SgxFile {
-    file: usize,
+    file: Arc<Mutex<usize>>,
+}
+
+impl SgxFile {
+    pub fn new(file: usize) -> Self {
+        SgxFile {
+            file: Arc::new(Mutex::new(file)),
+        }
+    }
 }
 
 impl File for SgxFile {
     fn read_at(&self, buf: &mut [u8], offset: usize) -> DevResult<usize> {
-        let len = file_read_at(self.file, offset, buf);
+        let file = self.file.lock().unwrap();
+        let len = file_read_at(*file, offset, buf);
         Ok(len)
     }
 
     fn write_at(&self, buf: &[u8], offset: usize) -> DevResult<usize> {
-        let len = file_write_at(self.file, offset, buf);
+        let file = self.file.lock().unwrap();
+        let len = file_write_at(*file, offset, buf);
         if len != buf.len() {
             println!(
                 "write_at return len: {} not equal to buf_len: {}",
@@ -138,7 +149,8 @@ impl File for SgxFile {
     }
 
     fn flush(&self) -> DevResult<()> {
-        match file_flush(self.file) {
+        let file = self.file.lock().unwrap();
+        match file_flush(*file) {
             0 => Ok(()),
             e => {
                 println!("failed to flush");
@@ -148,9 +160,10 @@ impl File for SgxFile {
     }
 
     fn get_file_mac(&self) -> DevResult<SefsMac> {
+        let file = self.file.lock().unwrap();
         let mut mac: sgx_aes_gcm_128bit_tag_t = [0u8; 16];
 
-        file_get_mac(self.file, &mut mac);
+        file_get_mac(*file, &mut mac);
         let sefs_mac = SefsMac(mac);
         Ok(sefs_mac)
     }
@@ -158,7 +171,8 @@ impl File for SgxFile {
 
 impl Drop for SgxFile {
     fn drop(&mut self) {
-        let _ = file_close(self.file);
+        let file = self.file.lock().unwrap();
+        let _ = file_close(*file);
     }
 }
diff --git a/sefs-cli/enclave/Enclave.config.xml b/sefs-cli/enclave/Enclave.config.xml
index 109fcd2..1a7dca0 100644
--- a/sefs-cli/enclave/Enclave.config.xml
+++ b/sefs-cli/enclave/Enclave.config.xml
@@ -4,7 +4,7 @@
   <ISVSVN>0</ISVSVN>
   <StackMaxSize>0x100000</StackMaxSize>
   <HeapMaxSize>0x20000000</HeapMaxSize>
-  <TCSNum>1</TCSNum>
+  <TCSNum>32</TCSNum>
   <TCSPolicy>1</TCSPolicy>
   <DisableDebug>0</DisableDebug>
   <MiscSelect>0</MiscSelect>
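Wrapping the raw handle in `Arc<Mutex<usize>>` lets the pool's worker threads share a single open protected-FS handle while serializing every call that uses it, and raising `TCSNum` to 32 gives the enclave enough TCS slots for the 30-thread pool plus the main thread. The standalone sketch below shows the same share-a-handle-behind-a-mutex pattern; the integer handle value and the `fake_ocall_write` function are stand-ins for the real SGX OCALLs.

```rust
use std::sync::{Arc, Mutex};
use std::thread;

// Stand-in for an OCALL that operates on a raw file handle.
fn fake_ocall_write(handle: usize, len: usize) {
    println!("write {len} bytes via handle {handle}");
}

fn main() {
    let file = Arc::new(Mutex::new(42usize)); // 42 stands in for the raw handle

    let workers: Vec<_> = (0..4)
        .map(|i| {
            let file = Arc::clone(&file);
            thread::spawn(move || {
                let handle = file.lock().unwrap(); // serialize access to the handle
                fake_ocall_write(*handle, i * 1024);
            })
        })
        .collect();

    for w in workers {
        w.join().unwrap();
    }
}
```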