From 15eb028cf3a0c53fa3486f36b03d57fddcb105ff Mon Sep 17 00:00:00 2001 From: XanderC Date: Tue, 1 Apr 2025 16:09:37 +0800 Subject: [PATCH 1/3] feat: add multi-thread acceleration for ZIP ops --- rcore-fs-cli/src/lib.rs | 1 + rcore-fs-cli/src/main.rs | 10 +++- rcore-fs-cli/src/thread_pool.rs | 81 +++++++++++++++++++++++++++++ rcore-fs-cli/src/zip.rs | 29 ++++++----- rcore-fs-sefs/src/lib.rs | 28 +++++----- sefs-cli/app/src/main.rs | 7 ++- sefs-cli/enclave/Enclave.config.xml | 4 +- 7 files changed, 130 insertions(+), 30 deletions(-) create mode 100644 rcore-fs-cli/src/thread_pool.rs diff --git a/rcore-fs-cli/src/lib.rs b/rcore-fs-cli/src/lib.rs index 52467eb..cef2e94 100644 --- a/rcore-fs-cli/src/lib.rs +++ b/rcore-fs-cli/src/lib.rs @@ -6,3 +6,4 @@ extern crate log; #[cfg(feature = "use_fuse")] pub mod fuse; pub mod zip; +pub mod thread_pool; diff --git a/rcore-fs-cli/src/main.rs b/rcore-fs-cli/src/main.rs index c3af2fa..fa088d0 100644 --- a/rcore-fs-cli/src/main.rs +++ b/rcore-fs-cli/src/main.rs @@ -4,6 +4,7 @@ use std::fs::OpenOptions; use std::path::{Path, PathBuf}; use std::sync::{Arc, Mutex}; +use rcore_fs_cli::thread_pool::Pool; use structopt::StructOpt; use rcore_fs::dev::std_impl::StdTimeProvider; @@ -38,6 +39,10 @@ enum Opt { /// File system: [sfs | sefs | hostfs] #[structopt(short = "f", long = "fs", default_value = "sfs")] fs: String, + + /// Number of threads + #[structopt(short="j", long, default_value="4")] + thread_num: usize, }, /// Extract files from a fs image. @@ -86,9 +91,10 @@ fn main() { let opt = Opt::from_args(); match opt { - Opt::Zip { dir, image, fs } => { + Opt::Zip { dir, image, fs , thread_num} => { let fs = open_fs(&fs, &image, true); - zip_dir(&dir, fs.root_inode()).expect("failed to zip fs"); + let thread_pool = Pool::new(thread_num); + zip_dir(&dir, fs.root_inode(), &thread_pool).expect("failed to zip fs"); } Opt::Unzip { dir, image, fs } => { let fs = open_fs(&fs, &image, false); diff --git a/rcore-fs-cli/src/thread_pool.rs b/rcore-fs-cli/src/thread_pool.rs new file mode 100644 index 0000000..f3a90b1 --- /dev/null +++ b/rcore-fs-cli/src/thread_pool.rs @@ -0,0 +1,81 @@ +use std::{sync::{mpsc, Arc, Mutex}, thread::{JoinHandle, self}}; + +pub struct Pool { + workers: Vec, + max_workers: usize, + sender: mpsc::Sender +} + +impl Pool { + pub fn new(max_workers: usize) -> Pool { + if max_workers == 0 { + panic!("max_workers must not be zero!") + } + let (tx, rx) = mpsc::channel(); + + let mut workers = Vec::with_capacity(max_workers); + let receiver = Arc::new(Mutex::new(rx)); + for i in 0..max_workers { + workers.push(Worker::new(i, Arc::clone(&receiver))); + } + + Pool { workers: workers, max_workers: max_workers, sender: tx } + } + + pub fn execute(&self, f:F) where F: FnOnce() + 'static + Send + { + + let job = Message::NewJob(Box::new(f)); + self.sender.send(job).unwrap(); + } +} + +impl Drop for Pool { + fn drop(&mut self) { + for _ in 0..self.max_workers { + self.sender.send(Message::Close).unwrap(); + } + for w in self.workers.iter_mut() { + if let Some(t) = w.t.take() { + t.join().unwrap(); + } + } + } +} + +struct Worker +{ + _id: usize, + t: Option>, +} + +type Job = Box; +enum Message { + Close, + NewJob(Job), +} + +impl Worker +{ + fn new(id: usize, receiver: Arc::>>) -> Worker { + let t = thread::spawn( move || { + loop { + let message = receiver.lock().unwrap().recv().unwrap(); + match message { + Message::NewJob(job) => { + job(); + }, + Message::Close => { + // println!("Close from worker[{}]", id); + break + }, + } + } + }); + + Worker { + _id: 
id, + t: Some(t), + } + } +} \ No newline at end of file diff --git a/rcore-fs-cli/src/zip.rs b/rcore-fs-cli/src/zip.rs index b1f1d82..84e10b4 100644 --- a/rcore-fs-cli/src/zip.rs +++ b/rcore-fs-cli/src/zip.rs @@ -11,10 +11,12 @@ use std::sync::Arc; use rcore_fs::vfs::{FileType, INode, PATH_MAX}; +use crate::thread_pool; + const BUF_SIZE: usize = 0x10000; const S_IMASK: u32 = 0o777; -pub fn zip_dir(path: &Path, inode: Arc) -> Result<(), Box> { +pub fn zip_dir(path: &Path, inode: Arc, thread_pool: &thread_pool::Pool) -> Result<(), Box> { let mut entries: Vec = fs::read_dir(path)?.map(|dir| dir.unwrap()).collect(); entries.sort_by_key(|entry| entry.file_name()); for entry in entries { @@ -26,19 +28,22 @@ pub fn zip_dir(path: &Path, inode: Arc) -> Result<(), Box> //println!("zip: name: {:?}, mode: {:#o}", entry.path(), mode); if type_.is_file() { let inode = inode.create(name, FileType::File, mode)?; - let mut file = fs::File::open(entry.path())?; - inode.resize(file.metadata()?.len() as usize)?; - let mut buf = unsafe { Box::<[u8; BUF_SIZE]>::new_uninit().assume_init() }; - let mut offset = 0usize; - let mut len = BUF_SIZE; - while len == BUF_SIZE { - len = file.read(buf.as_mut())?; - inode.write_at(offset, &buf[..len])?; - offset += len; - } + // copy file content in another thread + thread_pool.execute(move ||{ + let mut file = fs::File::open(entry.path()).unwrap(); + inode.resize(file.metadata().unwrap().len() as usize).expect(format!("resize {} failed", entry.path().display()).as_str()); + let mut buf = unsafe { Box::<[u8; BUF_SIZE]>::new_uninit().assume_init() }; + let mut offset = 0usize; + let mut len = BUF_SIZE; + while len == BUF_SIZE { + len = file.read(buf.as_mut()).unwrap(); + inode.write_at(offset, &buf[..len]).expect(format!("write {} failed", entry.path().display()).as_str()); + offset += len; + }; + }); } else if type_.is_dir() { let inode = inode.create(name, FileType::Dir, mode)?; - zip_dir(entry.path().as_path(), inode)?; + zip_dir(entry.path().as_path(), inode, thread_pool)?; } else if type_.is_symlink() { let target = fs::read_link(entry.path())?; let inode = inode.create(name, FileType::SymLink, mode)?; diff --git a/rcore-fs-sefs/src/lib.rs b/rcore-fs-sefs/src/lib.rs index f250d29..6df826b 100644 --- a/rcore-fs-sefs/src/lib.rs +++ b/rcore-fs-sefs/src/lib.rs @@ -205,11 +205,11 @@ impl INodeImpl { let mut disk_inode = self.disk_inode.write(); if disk_inode.dirty() { self.fs - .meta_file + .meta_file.write() .write_block(self.id, disk_inode.as_buf())?; disk_inode.sync(); } - self.fs.meta_file.flush()?; + self.fs.meta_file.write().flush()?; Ok(()) } @@ -843,7 +843,7 @@ pub struct SEFS { /// device device: Box, /// metadata file - meta_file: Box, + meta_file: RwLock>, /// Time provider time_provider: &'static dyn TimeProvider, /// uuid provider @@ -868,10 +868,10 @@ impl SEFS { time_provider: &'static dyn TimeProvider, uuid_provider: &'static dyn UuidProvider, ) -> vfs::Result> { - let meta_file = device.open(METAFILE_NAME)?; + let meta_file = RwLock::new(device.open(METAFILE_NAME)?); // Load super block - let super_block = meta_file.load_struct::(BLKN_SUPER)?; + let super_block = meta_file.read().load_struct::(BLKN_SUPER)?; if !super_block.check() { return Err(FsError::WrongFs); } @@ -883,7 +883,7 @@ impl SEFS { } for i in 0..super_block.groups as usize { let block_id = Self::get_freemap_block_id_of_group(i); - meta_file.read_block( + meta_file.read().read_block( block_id, &mut free_map.as_mut_slice()[BLKSIZE * i..BLKSIZE * (i + 1)], )?; @@ -926,8 +926,8 @@ impl SEFS { }; 
// Clear the existing files in storage device.clear()?; - let meta_file = device.create(METAFILE_NAME)?; - meta_file.set_len(blocks * BLKSIZE)?; + let meta_file = RwLock::new(device.create(METAFILE_NAME)?); + meta_file.write().set_len(blocks * BLKSIZE)?; let sefs = SEFS { super_block: RwLock::new(Dirty::new_dirty(super_block)), @@ -972,7 +972,7 @@ impl SEFS { let (mut free_map, mut super_block) = self.write_lock_free_map_and_super_block(); // Sync super block if super_block.dirty() { - self.meta_file + self.meta_file.write() .write_all_at(super_block.as_buf(), BLKSIZE * BLKN_SUPER)?; super_block.sync(); } @@ -980,13 +980,13 @@ impl SEFS { if free_map.dirty() { for i in 0..super_block.groups as usize { let slice = &free_map.as_slice()[BLKSIZE * i..BLKSIZE * (i + 1)]; - self.meta_file + self.meta_file.write() .write_all_at(slice, BLKSIZE * Self::get_freemap_block_id_of_group(i))?; } free_map.sync(); } // Flush - self.meta_file.flush()?; + self.meta_file.write().flush()?; Ok(()) } @@ -999,7 +999,7 @@ impl SEFS { super_block.groups += 1; super_block.blocks += BLKBITS as u32; super_block.unused_blocks += BLKBITS as u32 - 1; - self.meta_file + self.meta_file.write() .set_len(super_block.groups as usize * BLKBITS * BLKSIZE) .expect("failed to extend meta file"); free_map.extend(core::iter::repeat(true).take(BLKBITS)); @@ -1073,7 +1073,7 @@ impl SEFS { } } // Load if not in set, or is weak ref. - let disk_inode = Dirty::new(self.meta_file.load_struct::(id)?); + let disk_inode = Dirty::new(self.meta_file.read().load_struct::(id)?); self._new_inode(id, disk_inode, false) } @@ -1143,7 +1143,7 @@ impl vfs::FileSystem for SEFS { } fn root_mac(&self) -> vfs::FsMac { - self.meta_file.get_file_mac().unwrap().0 + self.meta_file.read().get_file_mac().unwrap().0 } fn info(&self) -> vfs::FsInfo { diff --git a/sefs-cli/app/src/main.rs b/sefs-cli/app/src/main.rs index 90d4a98..2b68a27 100644 --- a/sefs-cli/app/src/main.rs +++ b/sefs-cli/app/src/main.rs @@ -14,6 +14,7 @@ use rcore_fs::dev::std_impl::StdTimeProvider; use rcore_fs::vfs::FileSystem; use rcore_fs_cli::fuse::VfsFuse; use rcore_fs_cli::zip::{unzip_dir, zip_dir}; +use rcore_fs_cli::thread_pool::Pool; use rcore_fs_sefs as sefs; use rcore_fs_sefs::dev::std_impl::StdUuidProvider; use rcore_fs_unionfs as unionfs; @@ -26,6 +27,9 @@ struct Opt { /// Path of the enclave library #[structopt(short, long, parse(from_os_str))] enclave: PathBuf, + /// Number of threads + #[structopt(short="j", long, default_value="4")] + thread_num: usize, /// Command #[structopt(subcommand)] cmd: Cmd, @@ -158,7 +162,8 @@ fn main() -> Result<(), Box> { let device = sgx_dev::SgxStorage::new(enclave.geteid(), &image, mode); sefs::SEFS::create(Box::new(device), &StdTimeProvider, &StdUuidProvider)? 
}; - zip_dir(&dir, sefs_fs.root_inode())?; + let thread_pool = Pool::new(opt.thread_num); + zip_dir(&dir, sefs_fs.root_inode(), &thread_pool)?; sefs_fs.sync()?; let root_mac_str = { let mut s = String::from(""); diff --git a/sefs-cli/enclave/Enclave.config.xml b/sefs-cli/enclave/Enclave.config.xml index 109fcd2..b1a289c 100644 --- a/sefs-cli/enclave/Enclave.config.xml +++ b/sefs-cli/enclave/Enclave.config.xml @@ -4,7 +4,9 @@ 0 0x100000 0x20000000 - 1 + 256 + 256 + 256 1 0 0 From e7e0afa4304052698f74c021f932e8e4050d79d1 Mon Sep 17 00:00:00 2001 From: XanderC Date: Tue, 1 Apr 2025 16:11:58 +0800 Subject: [PATCH 2/3] feat(sefs-cli): cache metadata in memory --- sefs-cli/app/src/cache_dev.rs | 176 ++++++++++++++++++++++++++++++++++ sefs-cli/app/src/main.rs | 19 ++-- 2 files changed, 188 insertions(+), 7 deletions(-) create mode 100644 sefs-cli/app/src/cache_dev.rs diff --git a/sefs-cli/app/src/cache_dev.rs b/sefs-cli/app/src/cache_dev.rs new file mode 100644 index 0000000..a43d1c7 --- /dev/null +++ b/sefs-cli/app/src/cache_dev.rs @@ -0,0 +1,176 @@ +use std::sync::{Arc, Mutex}; + +use rcore_fs::dev::{DevResult, DevError, EINVAL}; +use rcore_fs_sefs::dev::{File, SefsMac, Storage}; + +enum CacheFileData { + InMemory{cache: Vec}, + WritenToInner{inner_file: Box} +} + +/// Cache data in memory before it is written to the inner storage +#[derive(Clone)] +pub struct CacheFile { + data: Arc>, +} + +impl CacheFile { + fn get_file_size(&self) -> DevResult { + match &*self.data.lock().unwrap() { + CacheFileData::InMemory { cache } => { + return Ok(cache.len()) + }, + CacheFileData::WritenToInner { inner_file: _ } => { + return Err(DevError(EINVAL)) + } + } + } +} + +impl File for CacheFile { + fn read_at(&self, buf: &mut [u8], offset: usize) -> DevResult { + match & *self.data.lock().unwrap() { + CacheFileData::InMemory { cache } => { + let data_len = cache.len(); + let len = if offset + buf.len() > data_len { + data_len - offset + } else { + buf.len() + }; + buf[..len].copy_from_slice(&cache[offset..offset + len]); + return Ok(len) + }, + CacheFileData::WritenToInner { inner_file } => { + return inner_file.read_at(buf, offset); + } + } + } + fn write_at(&self, buf: &[u8], offset: usize) -> DevResult { + match & mut *self.data.lock().unwrap() { + CacheFileData::InMemory { cache } => { + let data_len = cache.len(); + let len = if offset + buf.len() > data_len { + data_len - offset + } else { + buf.len() + }; + cache[offset..offset + len].copy_from_slice(&buf[..len]); + return Ok(len); + }, + CacheFileData::WritenToInner { inner_file } => { + return inner_file.write_at(buf, offset); + } + } + } + fn set_len(&self, len: usize) -> DevResult<()> { + match & mut *self.data.lock().unwrap() { + CacheFileData::InMemory { cache } => { + cache.resize(len, 0); + return Ok(()); + }, + CacheFileData::WritenToInner { inner_file } => { + return inner_file.set_len(len); + } + } + } + fn flush(&self) -> DevResult<()> { + match & mut *self.data.lock().unwrap() { + CacheFileData::InMemory { cache: _ } => { + return Ok(()); + }, + CacheFileData::WritenToInner { inner_file } => { + return inner_file.flush(); + } + } + } + fn get_file_mac(&self) -> DevResult { + match &*self.data.lock().unwrap() { + CacheFileData::InMemory { cache: _ } => { + return Err(DevError(EINVAL)); + }, + CacheFileData::WritenToInner { inner_file } => { + return inner_file.get_file_mac(); + } + } + } + +} + +impl CacheFile { + pub fn new() -> Self { + Self { + data: Arc::new(Mutex::new(CacheFileData::InMemory { cache: Vec::new() })), + } + } +} + 
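+// How the pieces fit together: a `CacheFile` starts in the `InMemory` state and
+// serves `read_at`/`write_at`/`set_len` from the in-memory buffer; once the cached
+// bytes are copied out to a real file (see `CacheStorage::write_cache_to_inner`
+// below), the state flips to `WritenToInner` and all later calls are forwarded to
+// that inner file. `CacheStorage` routes only the "metadata" file through this
+// cache, while every other file goes straight to the wrapped inner storage.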
+#[derive(Clone)] +pub struct CacheStorage { + cached_metadata: CacheFile, + inner_storage: Arc>, +} + +/// CacheStorage wrap the trait `Storage`, and cache the metadata in memory +/// before it is written to the inner storage +impl CacheStorage { + pub fn new(inner_storage: Arc>) -> Self { + Self { + cached_metadata: CacheFile::new(), + inner_storage, + } + } + + pub fn write_cache_to_inner(&self) -> DevResult<()> { + let _ = self.inner_storage.remove("metadata"); + let metafile = self.inner_storage.create("metadata")?; + let metafile_size = self.cached_metadata.get_file_size()?; + metafile.set_len(metafile_size)?; + + let mut offset = 0; + let mut buf = vec![0; 1024]; + while offset < metafile_size { + let read_size = self.cached_metadata.read_at(&mut buf, offset)?; + metafile.write_at(&buf[..read_size], offset)?; + offset += read_size; + } + // update the inner file status + let mut inner_file = self.cached_metadata.data.lock().unwrap(); + *inner_file = CacheFileData::WritenToInner { inner_file: metafile }; + Ok(()) + } +} + +impl Storage for CacheStorage { + fn open(&self, file_id: &str) -> DevResult> { + let file = if file_id.eq("metadata") { + return Err(DevError(114)); + } else { + self.inner_storage.open(file_id)? + }; + Ok(file) + } + + fn create(&self, file_id: &str) -> DevResult> { + let file = if file_id.eq("metadata") { + Box::new(self.cached_metadata.clone()) + } else { + self.inner_storage.create(file_id)? + }; + Ok(file) + } + + fn remove(&self, file_id: &str) -> DevResult<()> { + if file_id.eq("metadata") { + return Ok(()); + } + self.inner_storage.remove(file_id) + } + + fn protect_integrity(&self) -> bool { + self.inner_storage.protect_integrity() + } + + fn clear(&self) -> DevResult<()> { + self.inner_storage.clear() + } +} \ No newline at end of file diff --git a/sefs-cli/app/src/main.rs b/sefs-cli/app/src/main.rs index 2b68a27..b471dfa 100644 --- a/sefs-cli/app/src/main.rs +++ b/sefs-cli/app/src/main.rs @@ -5,6 +5,7 @@ use std::os::unix::ffi::OsStrExt; use std::os::unix::fs::FileExt; use std::path::PathBuf; use std::process::exit; +use std::sync::Arc; use ctrlc; use libc; @@ -21,6 +22,7 @@ use rcore_fs_unionfs as unionfs; mod enclave; mod sgx_dev; +mod cache_dev; #[derive(Debug, StructOpt)] struct Opt { @@ -155,16 +157,19 @@ fn main() -> Result<(), Box> { mac, key, } => { - let sefs_fs = { - std::fs::create_dir(&image)?; - let key = parse_key(&key)?; - let mode = sgx_dev::EncryptMode::from_parameters(true, &key)?; - let device = sgx_dev::SgxStorage::new(enclave.geteid(), &image, mode); - sefs::SEFS::create(Box::new(device), &StdTimeProvider, &StdUuidProvider)? 
- }; + std::fs::create_dir(&image)?; + let key = parse_key(&key)?; + let mode = sgx_dev::EncryptMode::from_parameters(true, &key)?; + let device = sgx_dev::SgxStorage::new(enclave.geteid(), &image, mode); + // Wrap the inner storage with CacheStorage, cache the metadata in + // memory before the zipping process is completed + let cache_device = cache_dev::CacheStorage::new(Arc::new(Box::new(device))); + let sefs_fs = sefs::SEFS::create(Box::new(cache_device.clone()), &StdTimeProvider, &StdUuidProvider)?; + let thread_pool = Pool::new(opt.thread_num); zip_dir(&dir, sefs_fs.root_inode(), &thread_pool)?; sefs_fs.sync()?; + cache_device.write_cache_to_inner().unwrap(); let root_mac_str = { let mut s = String::from(""); for (i, byte) in sefs_fs.root_mac().iter().enumerate() { From 75f5284bbaacd0624635e9adf5370bf9f3ef295c Mon Sep 17 00:00:00 2001 From: XanderC Date: Mon, 3 Mar 2025 16:28:55 +0800 Subject: [PATCH 3/3] feat: incremental zip --- rcore-fs-cli/src/zip.rs | 57 ++++++++++++++++++++++++++++++++--- sefs-cli/app/src/cache_dev.rs | 7 +---- sefs-cli/app/src/main.rs | 26 +++++++++++++--- 3 files changed, 75 insertions(+), 15 deletions(-) diff --git a/rcore-fs-cli/src/zip.rs b/rcore-fs-cli/src/zip.rs index 84e10b4..acc3aba 100644 --- a/rcore-fs-cli/src/zip.rs +++ b/rcore-fs-cli/src/zip.rs @@ -16,9 +16,17 @@ use crate::thread_pool; const BUF_SIZE: usize = 0x10000; const S_IMASK: u32 = 0o777; -pub fn zip_dir(path: &Path, inode: Arc, thread_pool: &thread_pool::Pool) -> Result<(), Box> { +pub fn zip_dir(path: &Path, inode: Arc, thread_pool: &thread_pool::Pool, image_time: Option) -> Result<(), Box> { let mut entries: Vec = fs::read_dir(path)?.map(|dir| dir.unwrap()).collect(); entries.sort_by_key(|entry| entry.file_name()); + let is_incremental = image_time.is_some(); + let mut deleted_files: Vec = Vec::new(); + if is_incremental { + // at first, we record all the files in the image, + // existing files would be remove from the lsit later + deleted_files = inode.list()?; + let _ = deleted_files.drain(0..2); + } for entry in entries { let name_ = entry.file_name(); let name = name_.to_str().unwrap(); @@ -26,8 +34,34 @@ pub fn zip_dir(path: &Path, inode: Arc, thread_pool: &thread_pool::Po let type_ = metadata.file_type(); let mode = (metadata.permissions().mode() & S_IMASK) as u16; //println!("zip: name: {:?}, mode: {:#o}", entry.path(), mode); + if is_incremental { + // if a file still exists, remove it from deleted_files + // we use a linear search here, because `inode.list()` should have + // same order with `entries`. we break at the first match, + // it would not cause a large overhead. + for (index, image_node_name) in deleted_files.iter().enumerate() { + if image_node_name == name { + deleted_files.remove(index); + break; + } + } + // skip the file not modified after image is created + if !type_.is_dir() { + if let Some(last_modify) = image_time { + use std::os::linux::fs::MetadataExt; + if metadata.st_ctime() < last_modify { + continue; + } + println!("{} needs to be updated", name); + } + } + } if type_.is_file() { - let inode = inode.create(name, FileType::File, mode)?; + let inode = if !is_incremental { + inode.create(name, FileType::File, mode)? + } else { + inode.find(name).or(inode.create(name, FileType::File, mode))? 
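+                // (incremental mode) the file may already be present in the image,
+                // so reuse its existing inode instead of creating a new one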
+ }; // copy file content in another thread thread_pool.execute(move ||{ let mut file = fs::File::open(entry.path()).unwrap(); @@ -42,16 +76,29 @@ pub fn zip_dir(path: &Path, inode: Arc, thread_pool: &thread_pool::Po }; }); } else if type_.is_dir() { - let inode = inode.create(name, FileType::Dir, mode)?; - zip_dir(entry.path().as_path(), inode, thread_pool)?; + let inode = if !is_incremental { + inode.create(name, FileType::Dir, mode)? + } else { + inode.find(name).or(inode.create(name, FileType::Dir, mode))? + }; + zip_dir(entry.path().as_path(), inode, thread_pool, image_time)?; } else if type_.is_symlink() { let target = fs::read_link(entry.path())?; - let inode = inode.create(name, FileType::SymLink, mode)?; + let inode = if !is_incremental { + inode.create(name, FileType::SymLink, mode)? + } else { + inode.find(name).or(inode.create(name, FileType::SymLink, mode))? + }; let data = target.as_os_str().as_bytes(); inode.resize(data.len())?; inode.write_at(0, data)?; } } + // Delete files that are not in the source directory + for file_name in deleted_files { + inode.unlink(&file_name).unwrap(); + println!("{} deleted", file_name); + } Ok(()) } diff --git a/sefs-cli/app/src/cache_dev.rs b/sefs-cli/app/src/cache_dev.rs index a43d1c7..321e4cf 100644 --- a/sefs-cli/app/src/cache_dev.rs +++ b/sefs-cli/app/src/cache_dev.rs @@ -142,12 +142,7 @@ impl CacheStorage { impl Storage for CacheStorage { fn open(&self, file_id: &str) -> DevResult> { - let file = if file_id.eq("metadata") { - return Err(DevError(114)); - } else { - self.inner_storage.open(file_id)? - }; - Ok(file) + Ok(self.inner_storage.open(file_id)?) } fn create(&self, file_id: &str) -> DevResult> { diff --git a/sefs-cli/app/src/main.rs b/sefs-cli/app/src/main.rs index b471dfa..667b1f9 100644 --- a/sefs-cli/app/src/main.rs +++ b/sefs-cli/app/src/main.rs @@ -54,6 +54,9 @@ enum Cmd { /// Key for encryption #[structopt(short, long, parse(from_os_str))] key: Option, + /// Incremental Zip + #[structopt(short="i", long)] + incremental: bool, }, /// Unzip data from given to #[structopt(name = "unzip")] @@ -156,18 +159,33 @@ fn main() -> Result<(), Box> { image, mac, key, + incremental } => { - std::fs::create_dir(&image)?; + if !incremental { + std::fs::create_dir(&image)?; + } let key = parse_key(&key)?; let mode = sgx_dev::EncryptMode::from_parameters(true, &key)?; let device = sgx_dev::SgxStorage::new(enclave.geteid(), &image, mode); // Wrap the inner storage with CacheStorage, cache the metadata in // memory before the zipping process is completed let cache_device = cache_dev::CacheStorage::new(Arc::new(Box::new(device))); - let sefs_fs = sefs::SEFS::create(Box::new(cache_device.clone()), &StdTimeProvider, &StdUuidProvider)?; - + let sefs_fs = if incremental { + sefs::SEFS::open(Box::new(cache_device.clone()), &StdTimeProvider, &StdUuidProvider)? + } else { + sefs::SEFS::create(Box::new(cache_device.clone()), &StdTimeProvider, &StdUuidProvider)? + }; + let image_last_modified_time = if incremental { + let mut metadata_path: PathBuf = PathBuf::from(image); + metadata_path.push("metadata"); + let metadata = std::fs::metadata(metadata_path)?; + use std::os::linux::fs::MetadataExt; + Some(metadata.st_ctime()) + } else { + None + }; let thread_pool = Pool::new(opt.thread_num); - zip_dir(&dir, sefs_fs.root_inode(), &thread_pool)?; + zip_dir(&dir, sefs_fs.root_inode(), &thread_pool, image_last_modified_time)?; sefs_fs.sync()?; cache_device.write_cache_to_inner().unwrap(); let root_mac_str = {