diff --git a/.github/install-dep/action.yml b/.github/install-dep/action.yml
new file mode 100644
index 00000000..6e8eb23a
--- /dev/null
+++ b/.github/install-dep/action.yml
@@ -0,0 +1,136 @@
+name: 'Install Dependencies'
+description: 'Install dependencies with caching support'
+
+inputs:
+  cache-key:
+    description: 'Cache key for dependencies'
+    required: true
+  platform:
+    description: 'Platform (ubuntu-latest, windows-latest, macos-latest)'
+    required: true
+  use-gtk:
+    description: 'Whether to download and set up GTK libs (true, false)'
+    required: false
+    default: 'true'
+
+runs:
+  using: "composite"
+  steps:
+    # ================= Universal Initialization =================
+    # On Windows, the cache action suffers from extremely slow zstd compression and decompression:
+    # https://github.com/actions/toolkit/issues/1578#issuecomment-2253355054
+    # So we have to manually restore and save the cache.
+    - name: Restore dependencies
+      id: cache-restore
+      uses: actions/cache/restore@v4
+      with:
+        path: |
+          /var/cache/apt/
+          ~/Library/Caches/Homebrew
+          C:\ProgramData\chocolatey\lib
+          ./.choco-cache
+        key: ${{ inputs.platform }}-sysdeps-${{ inputs.cache-key }}
+
+    - name: Setup Rust Toolchain
+      uses: actions-rust-lang/setup-rust-toolchain@v1
+      with:
+        components: clippy rustfmt
+        cache-workspaces: |
+          ./
+          ./scorpio
+          ./monobean
+        cache-directories: |
+          ./target
+          ./scorpio/target
+          ./monobean/target
+
+    # ================ Platform-specific Initialization (Linux) ================
+    - name: Install dependencies (Ubuntu)
+      if: inputs.platform == 'ubuntu-latest'
+      shell: bash
+      run: |
+        echo "deb http://gb.archive.ubuntu.com/ubuntu jammy main" | sudo tee -a /etc/apt/sources.list
+        sudo apt update
+        sudo apt install -y \
+          git-lfs \
+          libwebkit2gtk-4.1-dev \
+          build-essential \
+          curl \
+          wget \
+          file \
+          libssl-dev \
+          libgtk-3-dev \
+          libayatana-appindicator3-dev \
+          librsvg2-dev \
+          libgtk-4-dev \
+          libadwaita-1-0 \
+          libadwaita-1-dev \
+          fuse3 \
+          libfuse3-dev
+
+    # ================ Platform-specific Initialization (macOS) ================
+    - name: Set up Homebrew
+      if: inputs.platform == 'macos-latest'
+      id: set-up-homebrew
+      uses: Homebrew/actions/setup-homebrew@master
+
+    # FUSE is not available on macOS
+    - name: Install dependencies (macOS)
+      if: inputs.platform == 'macos-latest'
+      shell: bash
+      run: |
+        export HOMEBREW_NO_AUTO_UPDATE=1
+        brew install \
+          gtk4 \
+          gtk+3 \
+          libadwaita \
+          openssl@3 \
+          librsvg
+
+    # =============== Platform-specific Initialization (Windows) ===============
+    # According to https://github.com/actions/runner-images/blob/main/images/windows/toolsets/toolset-2025.json,
+    # OpenSSL, cmake and some other build tools have been embedded into the windows-latest image.
+    - name: Install dependencies (Windows)
+      if: inputs.platform == 'windows-latest'
+      shell: pwsh
+      run: |
+        choco config set cacheLocation ./.choco-cache
+        choco install --no-progress git-lfs pkgconfiglite ninja nasm
+        pip install requests tqdm
+
+    # Required by monobean; performing this step takes about 5s.
+    # We don't cache it, because compression and decompression are MUCH slower than downloading it directly.
+    - name: Load GTK and libadwaita
+      if: inputs.platform == 'windows-latest' && inputs.use-gtk == 'true'
+      shell: pwsh
+      run: python ./monobean/setup.py
+
+    - name: Manually set env vars and export
+      if: inputs.platform == 'windows-latest'
+      shell: pwsh
+      run: |
+        $env:Path = "$env:Path;${{ github.workspace }}\monobean\resources\lib\bin;C:\Program Files\NASM;C:\Program Files\CMake\bin"
+        $env:OPENSSL_DIR = "C:\Program Files\OpenSSL\"
+        $env:PKG_CONFIG_PATH = "$env:PKG_CONFIG_PATH;${{ github.workspace }}\monobean\resources\lib\lib\pkgconfig;C:\Program Files\PkgConfig\lib\pkgconfig"
+        $env:LIB = "$env:LIB;${{ github.workspace }}\monobean\resources\lib\lib"
+        $env:INCLUDE = "$env:INCLUDE;${{ github.workspace }}\monobean\resources\lib\include;${{ github.workspace }}\monobean\resources\lib\include\cairo;${{ github.workspace }}\monobean\resources\lib\include\glib-2.0;${{ github.workspace }}\monobean\resources\lib\include\gobject-introspection-1.0;${{ github.workspace }}\monobean\resources\lib\lib\glib-2.0\include"
+        $env:AWS_LC_SYS_PREBUILT_NASM = 1
+        $env:AWS_LC_SYS_C_STD = 11
+        $exportVariables = @("Path", "OPENSSL_DIR", "OPENSSL_LIB_DIR", "PKG_CONFIG_PATH", "LIB", "INCLUDE", "AWS_LC_SYS_C_STD", "AWS_LC_SYS_PREBUILT_NASM")
+        foreach ($var in $exportVariables) {
+          if (Test-Path "Env:\$var") {
+            "$var=$((Get-Item "Env:\$var").Value)" | Out-File -FilePath $env:GITHUB_ENV -Append
+          }
+        }
+
+    # =============== Save cache to speed up later runs ===============
+    - name: Cache dependencies
+      if: inputs.platform != 'windows-latest' || steps.cache-restore.outputs.cache-hit != 'true'
+      uses: actions/cache/save@v4
+      with:
+        path: |
+          /var/cache/apt/
+          ~/Library/Caches/Homebrew
+          C:\ProgramData\chocolatey\lib
+          ./.choco-cache
+        key: ${{ inputs.platform }}-sysdeps-${{ inputs.cache-key }}
diff --git a/.github/workflows/base.yml b/.github/workflows/base.yml
index 276b2929..c8271287 100644
--- a/.github/workflows/base.yml
+++ b/.github/workflows/base.yml
@@ -3,7 +3,7 @@
 # History:
 # 1. 2023-02-14: Created at 2023-02-14T16:00:00Z by Quanyi Ma
 # 2. 2024-05-07: Update the `fuse` job to install `fuse3` and `libfuse3-dev` at 2024-05-07T16:00:00Z by Xiaoyang Han
-#
+# 3. 2025-02-27: Reconstruct the workflow and add the `install-dep` action to support tests on multiple platforms by Neon
 #
 
 on: [push, pull_request]
@@ -11,125 +11,154 @@ on: [push, pull_request]
 
 name: Base GitHub Action for Check, Test and Lints
 
 jobs:
+  # Cache files before all jobs
   setup:
-    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+
+    name: Setup and cache ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+
+    env:
+      CARGO_TERM_COLOR: always
+
     steps:
-      - uses: actions/checkout@v3
+      - name: Checkout repository
+        uses: actions/checkout@v4
         with:
           submodules: recursive
-      - uses: actions-rs/toolchain@v1
+      - name: Setup Rust Toolchain
+        uses: actions-rust-lang/setup-rust-toolchain@v1
         with:
-          profile: minimal
-          toolchain: stable
-          override: true
-      - uses: Swatinem/rust-cache@v2
+          components: clippy rustfmt
+          cache-workspaces: |
+            ./
+            ./scorpio
+            ./monobean
+          cache-directories: |
+            ./target
+            ./scorpio/target
+            ./monobean/target
+
   #
-  check:
-    name: Check
+  format:
+    name: Rustfmt Check
     runs-on: ubuntu-latest
-    needs: setup
     steps:
-      - name: Install system dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y libgtk-4-dev libadwaita-1-0 libadwaita-1-dev librust-gtk4-sys-dev
-      - uses: actions/checkout@v3
+      - name: Checkout repository
+        uses: actions/checkout@v4
         with:
           submodules: recursive
-      - uses: actions-rs/cargo@v1
-        with:
-          command: check
+      - run: cargo fmt --all -- --check
 
   #
   clippy:
-    name: Clippy
-    runs-on: ubuntu-latest
+    name: Clippy for ${{ matrix.os }}
+    strategy:
+      fail-fast: true
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+
+    runs-on: ${{ matrix.os }}
     needs: setup
+    env:
+      CARGO_TERM_COLOR: always
+
     steps:
-      - uses: actions/checkout@v3
+      - name: Checkout repository
+        uses: actions/checkout@v4
         with:
           submodules: recursive
-      - run: |
-          echo "deb http://gb.archive.ubuntu.com/ubuntu jammy main" | sudo tee -a /etc/apt/sources.list
-          sudo apt update
-          sudo apt install -y libwebkit2gtk-4.1-dev \
-            build-essential \
-            curl \
-            wget \
-            file \
-            libssl-dev \
-            libgtk-3-dev \
-            libayatana-appindicator3-dev \
-            librsvg2-dev \
-            libgtk-4-dev \
-            libadwaita-1-0 \
-            libadwaita-1-dev \
-            librust-gtk4-sys-dev
-      - uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --bin mega --bin libra
-      - run: rustup component add clippy
-      - uses: actions-rs/cargo@v1
+
+      - name: Install system dependencies
+        uses: ./.github/install-dep
         with:
-          command: clippy
-          args: --workspace --all-targets --all-features -- -D warnings
+          cache-key: sysdeps
+          platform: ${{ matrix.os }}
+
+      - name: Run cargo clippy
+        run: |
+          cargo build --bin mega --bin libra
+          cargo clippy --workspace --all-targets --all-features -- -D warnings
 
   #
   test:
-    name: Tests
-    runs-on: ubuntu-latest
+    name: Test for ${{ matrix.os }}
+    strategy:
+      fail-fast: true
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+
+    runs-on: ${{ matrix.os }}
     needs: setup
+    env:
+      CARGO_TERM_COLOR: always
+
     steps:
-      - uses: actions/checkout@v3
+      - name: Checkout repository
+        uses: actions/checkout@v4
         with:
           submodules: recursive
-      - run: |
-          sudo apt update
-          sudo apt-get install -y git-lfs
-          sudo apt install libgtk-4-dev libadwaita-1-0 libadwaita-1-dev librust-gtk4-sys-dev
+
+      - name: Install system dependencies
+        uses: ./.github/install-dep
+        with:
+          cache-key: sysdeps
+          platform: ${{ matrix.os }}
+
+      - name: Set up git lfs
+        run: |
           git lfs install
           git config --global user.email "mega@github.com"
          git config --global user.name "Mega"
           git config --global lfs.url http://localhost:8000
-      - uses: actions-rs/cargo@v1
-        with:
-          command: build
-          args: --bin mega --bin libra
-      - uses: actions-rs/cargo@v1
-        with:
-          command: test
-          args: --workspace --test '*' -- --nocapture
+
+      - name: Run cargo test
+        run: |
+          cargo build --bin mega --bin libra
+          cargo test --workspace --all-features --all --no-fail-fast
 
   #
-  doc:
-    name: Doc
-    runs-on: ubuntu-latest
+  monobean:
+    name: Test Monobean for ${{ matrix.os }}
+    strategy:
+      fail-fast: true
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+
+    runs-on: ${{ matrix.os }}
     needs: setup
+    env:
+      CARGO_TERM_COLOR: always
+
     steps:
-      - name: Install system dependencies
-        run: |
-          sudo apt-get update
-          sudo apt-get install -y libgtk-4-dev libadwaita-1-0 libadwaita-1-dev librust-gtk4-sys-dev
-      - uses: actions/checkout@v3
+      - name: Checkout repository
+        uses: actions/checkout@v4
         with:
           submodules: recursive
-      - uses: actions-rs/cargo@v1
+
+      - name: Install system dependencies
+        uses: ./.github/install-dep
         with:
-          command: doc
+          cache-key: sysdeps
+          platform: ${{ matrix.os }}
+
+      - name: Run lints and tests
+        run: |
+          cargo clippy --manifest-path monobean/Cargo.toml --all-targets --all-features -- -D warnings
+          cargo test --manifest-path monobean/Cargo.toml --all-features --all --no-fail-fast -- --nocapture
 
   #
   fuse:
     name: Fuse Lints
     runs-on: ubuntu-latest
-    needs: setup
     steps:
-      - uses: actions/checkout@v3
-      - run: |
-          git submodule update --init --recursive
-          sudo apt-get update && sudo apt-get install -y fuse3 libfuse3-dev
-          cd ./scorpio && cargo clippy --all-targets --all-features -- -D warnings
+      - uses: actions/checkout@v4
+      - uses: actions-rust-lang/setup-rust-toolchain@v1
+      - run: cargo clippy --manifest-path scorpio/Cargo.toml --all-targets --all --all-features -- -D warnings
 
   moon-lint-and-build:
     name: MOON Lint & Build
diff --git a/aries/src/build.rs b/aries/src/build.rs
index 4c3e433a..a7d615bb 100644
--- a/aries/src/build.rs
+++ b/aries/src/build.rs
@@ -3,4 +3,4 @@ fn main() {
     println!("cargo:rustc-link-arg=-Wl,-rpath,$ORIGIN");
     #[cfg(target_os = "macos")]
     println!("cargo:rustc-link-arg=-Wl,-rpath,@executable_path");
-}
\ No newline at end of file
+}
diff --git a/aries/src/service/api/mod.rs b/aries/src/service/api/mod.rs
index e854934c..51eec23b 100644
--- a/aries/src/service/api/mod.rs
+++ b/aries/src/service/api/mod.rs
@@ -1,5 +1,5 @@
-pub mod nostr_router;
 pub mod ca_router;
+pub mod nostr_router;
 
 #[cfg(test)]
 mod tests {}
diff --git a/ceres/src/api_service/import_api_service.rs b/ceres/src/api_service/import_api_service.rs
index 231ac9a6..b6390ad3 100644
--- a/ceres/src/api_service/import_api_service.rs
+++ b/ceres/src/api_service/import_api_service.rs
@@ -34,7 +34,6 @@ impl ApiHandler for ImportApiService {
         ));
     }
 
-
     fn strip_relative(&self, path: &Path) -> Result {
         if let Ok(relative_path) = path.strip_prefix(self.repo.repo_path.clone()) {
             Ok(relative_path.to_path_buf())
diff --git a/ceres/src/api_service/mod.rs b/ceres/src/api_service/mod.rs
index 3416cc77..2a06da2a 100644
--- a/ceres/src/api_service/mod.rs
+++ b/ceres/src/api_service/mod.rs
@@ -156,10 +156,8 @@ pub trait ApiHandler: Send + Sync {
             .get_commits_by_hashes(commit_ids.into_iter().collect())
             .await
             .unwrap();
-        let commit_map: HashMap = commits
-            .into_iter()
-            .map(|x| (x.id.to_string(), x))
-            .collect();
+        let commit_map: HashMap =
+            commits.into_iter().map(|x| (x.id.to_string(), x)).collect();
 
         let root_commit: Option = None;
         for item in tree.tree_items {
diff --git a/ceres/src/lfs/handler.rs b/ceres/src/lfs/handler.rs
index 1ab1cded..3056f157
100644 --- a/ceres/src/lfs/handler.rs +++ b/ceres/src/lfs/handler.rs @@ -295,8 +295,7 @@ pub async fn lfs_upload_object( let storage = context.services.lfs_db_storage.clone(); let lfs_storage = context.services.lfs_storage.clone(); - let meta = lfs_get_meta(storage.clone(), &request_vars.oid) - .await?; + let meta = lfs_get_meta(storage.clone(), &request_vars.oid).await?; tracing::debug!("upload lfs object {} size: {}", meta.oid, meta.size); let split_size = match PackConfig::get_size_from_str(&config.split_size, || Ok(0)) { Ok(split_size) => split_size, diff --git a/ceres/src/lfs/lfs_structs.rs b/ceres/src/lfs/lfs_structs.rs index 4618dc34..70bf2dd7 100644 --- a/ceres/src/lfs/lfs_structs.rs +++ b/ceres/src/lfs/lfs_structs.rs @@ -100,7 +100,7 @@ pub struct BatchResponse { #[derive(Serialize, Deserialize)] pub struct FetchchunkResponse { pub oid: String, - pub size : i64, + pub size: i64, pub chunks: Vec, } diff --git a/ceres/src/lib.rs b/ceres/src/lib.rs index e0c3bda5..858c974a 100644 --- a/ceres/src/lib.rs +++ b/ceres/src/lib.rs @@ -1,5 +1,5 @@ pub mod api_service; pub mod lfs; +pub mod model; pub mod pack; pub mod protocol; -pub mod model; diff --git a/ceres/src/pack/monorepo.rs b/ceres/src/pack/monorepo.rs index 85b86372..a086af15 100644 --- a/ceres/src/pack/monorepo.rs +++ b/ceres/src/pack/monorepo.rs @@ -227,8 +227,7 @@ impl PackHandler for MonoRepo { encoder.encode_async(entry_rx).await.unwrap(); let mut send_exist = HashSet::new(); for tree in trees { - self.traverse(tree, &mut send_exist, Some(&entry_tx)) - .await; + self.traverse(tree, &mut send_exist, Some(&entry_tx)).await; } entry_tx.send(commit.into()).await.unwrap(); drop(entry_tx); @@ -275,10 +274,7 @@ impl PackHandler for MonoRepo { } } - let want_tree_ids = want_commits - .iter() - .map(|c| c.tree_id.to_string()) - .collect(); + let want_tree_ids = want_commits.iter().map(|c| c.tree_id.to_string()).collect(); let want_trees: HashMap = storage .get_trees_by_hashes(want_tree_ids) .await diff --git a/ceres/src/protocol/import_refs.rs b/ceres/src/protocol/import_refs.rs index 523cb2d1..74afbf76 100644 --- a/ceres/src/protocol/import_refs.rs +++ b/ceres/src/protocol/import_refs.rs @@ -25,7 +25,6 @@ impl From for Refs { } } - impl From for Refs { fn from(value: mega_refs::Model) -> Self { Self { diff --git a/ceres/src/protocol/mod.rs b/ceres/src/protocol/mod.rs index 58052b16..4b49b881 100644 --- a/ceres/src/protocol/mod.rs +++ b/ceres/src/protocol/mod.rs @@ -10,12 +10,12 @@ use import_refs::RefCommand; use jupiter::context::Context; use repo::Repo; -use crate::pack::{PackHandler, import_repo::ImportRepo, monorepo::MonoRepo}; +use crate::pack::{import_repo::ImportRepo, monorepo::MonoRepo, PackHandler}; -pub mod smart; -pub mod repo; pub mod import_refs; pub mod mr; +pub mod repo; +pub mod smart; #[derive(Clone)] pub struct SmartProtocol { diff --git a/ceres/src/protocol/mr.rs b/ceres/src/protocol/mr.rs index 4edefa33..a1bfd91f 100644 --- a/ceres/src/protocol/mr.rs +++ b/ceres/src/protocol/mr.rs @@ -72,4 +72,3 @@ impl From for MergeRequest { } } } - diff --git a/common/src/config.rs b/common/src/config.rs index e0ee308a..18f3c91a 100644 --- a/common/src/config.rs +++ b/common/src/config.rs @@ -41,7 +41,11 @@ impl Config { pub fn load_str(content: &str) -> Result { let builder = c::Config::builder() .add_source(c::File::from_str(content, FileFormat::Toml)) - .add_source(c::Environment::with_prefix("mega").prefix_separator("_").separator("__")); + .add_source( + c::Environment::with_prefix("mega") + .prefix_separator("_") + 
.separator("__"), + ); let config = variable_placeholder_substitute(builder); @@ -456,10 +460,7 @@ mod test { PackConfig::get_size_from_str("4G", || Ok(4 * 1024 * 1024 * 1024)).unwrap(), 4 * 1024 * 1024 * 1024 ); - assert_eq!( - PackConfig::get_size_from_str("1%", || Ok(100)).unwrap(), - 1 - ); + assert_eq!(PackConfig::get_size_from_str("1%", || Ok(100)).unwrap(), 1); assert_eq!( PackConfig::get_size_from_str("50%", || Ok(100)).unwrap(), 50 @@ -476,6 +477,5 @@ mod test { PackConfig::get_size_from_str("1", || Ok(100)).unwrap(), 1024 * 1024 * 1024 ); - } } diff --git a/common/src/errors.rs b/common/src/errors.rs index 4a9f2e83..093da1c1 100644 --- a/common/src/errors.rs +++ b/common/src/errors.rs @@ -103,9 +103,7 @@ impl IntoResponse for ProtocolError { // This error is caused by bad user input so don't log it (StatusCode::UNAUTHORIZED, err) } - ProtocolError::TooLarge(err) => { - (StatusCode::PAYLOAD_TOO_LARGE, err) - } + ProtocolError::TooLarge(err) => (StatusCode::PAYLOAD_TOO_LARGE, err), ProtocolError::NotFound(err) => { // Because `TraceLayer` wraps each request in a span that contains the request // method, uri, etc we don't need to include those details here diff --git a/gateway/src/api/github_router.rs b/gateway/src/api/github_router.rs index 0b97b4dc..4649605c 100644 --- a/gateway/src/api/github_router.rs +++ b/gateway/src/api/github_router.rs @@ -1,12 +1,12 @@ -use axum::{Json, Router}; +use crate::api::MegaApiServiceState; use axum::http::{HeaderMap, StatusCode}; use axum::response::IntoResponse; use axum::routing::post; +use axum::{Json, Router}; use lazy_static::lazy_static; use reqwest::Client; use serde_json::Value; use taurus::event::github_webhook::{GithubWebhookEvent, WebhookType}; -use crate::api::MegaApiServiceState; lazy_static! { static ref CLIENT: Client = Client::builder() @@ -16,15 +16,14 @@ lazy_static! { } pub fn routers() -> Router { - Router::new() - .route("/github/webhook", post(webhook)) + Router::new().route("/github/webhook", post(webhook)) } /// Handle the GitHub webhook event.
/// For more details, see https://docs.github.com/zh/webhooks/webhook-events-and-payloads. async fn webhook( headers: HeaderMap, - Json(mut payload): Json + Json(mut payload): Json, ) -> Result { let event_type = headers .get("X-GitHub-Event") @@ -38,14 +37,16 @@ async fn webhook( let action = payload["action"].as_str().unwrap(); tracing::debug!("PR action: {}", action); - if ["opened", "reopened", "synchronize"].contains(&action) { // contents changed + if ["opened", "reopened", "synchronize"].contains(&action) { + // contents changed let url = payload["pull_request"]["url"].as_str().unwrap(); let files = get_pr_files(url).await; let commits = get_pr_commits(url).await; // Add details to the payload payload["files"] = files; payload["commits"] = commits; - } else if action == "edited" { // PR title or body edited + } else if action == "edited" { + // PR title or body edited let _ = payload["pull_request"]["title"].as_str().unwrap(); let _ = payload["pull_request"]["body"].as_str().unwrap(); } @@ -79,4 +80,4 @@ pub async fn get_pr_commits(pr_url: &str) -> Value { async fn get_request(url: &str) -> Value { let resp = CLIENT.get(url).send().await.unwrap(); resp.json().await.unwrap() -} \ No newline at end of file +} diff --git a/gemini/src/nostr/tag.rs b/gemini/src/nostr/tag.rs index 39f23a0c..7f3722a5 100644 --- a/gemini/src/nostr/tag.rs +++ b/gemini/src/nostr/tag.rs @@ -87,7 +87,6 @@ impl From for Vec { impl fmt::Display for TagKind { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - match self { Self::P => write!(f, "p"), Self::Peer => write!(f, "peer"), diff --git a/jupiter/callisto/src/lib.rs b/jupiter/callisto/src/lib.rs index d1aa8a4a..12fa8a1e 100644 --- a/jupiter/callisto/src/lib.rs +++ b/jupiter/callisto/src/lib.rs @@ -17,9 +17,9 @@ pub mod lfs_objects; pub mod lfs_split_relations; pub mod mega_blob; pub mod mega_commit; +pub mod mega_conversation; pub mod mega_issue; pub mod mega_mr; -pub mod mega_conversation; pub mod mega_refs; pub mod mega_tag; pub mod mega_tree; diff --git a/jupiter/callisto/src/prelude.rs b/jupiter/callisto/src/prelude.rs index 88065886..0d7740a1 100644 --- a/jupiter/callisto/src/prelude.rs +++ b/jupiter/callisto/src/prelude.rs @@ -14,9 +14,9 @@ pub use crate::lfs_objects::Entity as LfsObjects; pub use crate::lfs_split_relations::Entity as LfsSplitRelations; pub use crate::mega_blob::Entity as MegaBlob; pub use crate::mega_commit::Entity as MegaCommit; +pub use crate::mega_conversation::Entity as MegaMrConv; pub use crate::mega_issue::Entity as MegaIssue; pub use crate::mega_mr::Entity as MegaMr; -pub use crate::mega_conversation::Entity as MegaMrConv; pub use crate::mega_refs::Entity as MegaRefs; pub use crate::mega_tag::Entity as MegaTag; pub use crate::mega_tree::Entity as MegaTree; diff --git a/jupiter/src/lfs_storage/local_storage.rs b/jupiter/src/lfs_storage/local_storage.rs index 3919b017..43ff28ca 100644 --- a/jupiter/src/lfs_storage/local_storage.rs +++ b/jupiter/src/lfs_storage/local_storage.rs @@ -23,22 +23,20 @@ impl LocalStorage { #[async_trait] impl LfsStorage for LocalStorage { - async fn get_ref(&self, repo_id: i64, ref_name: &str) -> Result { - let path = Path::new(&self.base_path).join(repo_id.to_string()).join(ref_name); + let path = Path::new(&self.base_path) + .join(repo_id.to_string()) + .join(ref_name); let mut file = fs::File::open(path)?; let mut buf = String::new(); file.read_to_string(&mut buf)?; Ok(buf) } - async fn put_ref( - &self, - repo_id: i64, - ref_name: &str, - ref_hash: &str, - ) -> Result<(), MegaError> { - let 
path = Path::new(&self.base_path).join(repo_id.to_string()).join(ref_name); + async fn put_ref(&self, repo_id: i64, ref_name: &str, ref_hash: &str) -> Result<(), MegaError> { + let path = Path::new(&self.base_path) + .join(repo_id.to_string()) + .join(ref_name); let parent = path.parent().unwrap(); fs::create_dir_all(parent)?; let mut file = fs::File::create(path)?; @@ -47,7 +45,9 @@ impl LfsStorage for LocalStorage { } async fn delete_ref(&self, repo_id: i64, ref_name: &str) -> Result<(), MegaError> { - let path = Path::new(&self.base_path).join(repo_id.to_string()).join(ref_name); + let path = Path::new(&self.base_path) + .join(repo_id.to_string()) + .join(ref_name); Ok(fs::remove_file(path)?) } @@ -57,7 +57,9 @@ impl LfsStorage for LocalStorage { ref_name: &str, ref_hash: &str, ) -> Result<(), MegaError> { - let path = Path::new(&self.base_path).join(repo_id.to_string()).join(ref_name); + let path = Path::new(&self.base_path) + .join(repo_id.to_string()) + .join(ref_name); let mut file = OpenOptions::new().write(true).open(path).unwrap(); file.write_all(ref_hash.as_bytes()).unwrap(); Ok(()) @@ -74,11 +76,7 @@ impl LfsStorage for LocalStorage { Ok(Bytes::from(buffer)) } - async fn put_object( - &self, - object_id: &str, - body_content: &[u8], - ) -> Result { + async fn put_object(&self, object_id: &str, body_content: &[u8]) -> Result { let path = Path::new(&self.base_path) .join("objects") .join(self.transform_path(object_id)); diff --git a/jupiter/src/storage/init.rs b/jupiter/src/storage/init.rs index 6628bbbe..6e90ab15 100644 --- a/jupiter/src/storage/init.rs +++ b/jupiter/src/storage/init.rs @@ -1,6 +1,9 @@ +use common::errors::MegaError; +use sea_orm::{ + ConnectOptions, ConnectionTrait, Database, DatabaseConnection, DbErr, Statement, + TransactionError, TransactionTrait, +}; use std::{path::Path, time::Duration}; -use std::error::Error; -use sea_orm::{ConnectOptions, ConnectionTrait, Database, DatabaseConnection, DbErr, Statement, TransactionError, TransactionTrait}; use tracing::log; use common::config::DbConfig; @@ -18,7 +21,9 @@ pub async fn database_connection(db_config: &DbConfig) -> DatabaseConnection { Err(e) => { log::error!("Failed to connect to postgres: {}", e); log::info!("Falling back to sqlite"); - sqlite_connection(db_config).await.expect("Cannot connect to any database") + sqlite_connection(db_config) + .await + .expect("Cannot connect to any database") } } } else { @@ -26,15 +31,15 @@ pub async fn database_connection(db_config: &DbConfig) -> DatabaseConnection { } } -async fn postgres_connection(db_config: &DbConfig) -> Result { +async fn postgres_connection(db_config: &DbConfig) -> Result { let db_url = db_config.db_url.to_owned(); log::info!("Connecting to database: {}", db_url); let opt = setup_option(db_url); - Database::connect(opt).await + Database::connect(opt).await.map_err(|e| e.into()) } -async fn sqlite_connection(db_config: &DbConfig) -> Result> { +async fn sqlite_connection(db_config: &DbConfig) -> Result { if !Path::new(&db_config.db_path).exists() { eprintln!("Creating new sqlite database: {}", db_config.db_path); std::fs::create_dir_all(Path::new(&db_config.db_path).parent().unwrap())?; @@ -45,11 +50,13 @@ async fn sqlite_connection(db_config: &DbConfig) -> Result Result<(), TransactionError ConnectOptions { .sqlx_logging(true) .sqlx_logging_level(log::LevelFilter::Debug); opt -} \ No newline at end of file +} diff --git a/jupiter/src/storage/mono_storage.rs b/jupiter/src/storage/mono_storage.rs index 4a143163..4f024a19 100644 --- 
a/jupiter/src/storage/mono_storage.rs +++ b/jupiter/src/storage/mono_storage.rs @@ -2,7 +2,8 @@ use std::sync::{Arc, Mutex}; use futures::{stream, StreamExt}; use sea_orm::{ - ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel, QueryFilter, QueryOrder, QuerySelect + ActiveModelTrait, ColumnTrait, DatabaseConnection, EntityTrait, IntoActiveModel, QueryFilter, + QueryOrder, QuerySelect, }; use callisto::{mega_blob, mega_commit, mega_refs, mega_tag, mega_tree, raw_blob}; @@ -92,10 +93,7 @@ impl MonoStorage { Ok(result) } - pub async fn get_ref( - &self, - path: &str, - ) -> Result, MegaError> { + pub async fn get_ref(&self, path: &str) -> Result, MegaError> { let result = mega_refs::Entity::find() .filter(mega_refs::Column::Path.eq(path)) .filter(mega_refs::Column::RefName.eq(MEGA_BRANCH_NAME.to_owned())) diff --git a/jupiter/src/storage/mq_storage.rs b/jupiter/src/storage/mq_storage.rs index 0bbbed5d..42491eda 100644 --- a/jupiter/src/storage/mq_storage.rs +++ b/jupiter/src/storage/mq_storage.rs @@ -5,7 +5,6 @@ use sea_orm::{DatabaseConnection, EntityTrait, QueryOrder, QuerySelect}; use super::batch_save_model; - #[derive(Clone)] pub struct MQStorage { pub connection: Arc, diff --git a/libra/src/cli.rs b/libra/src/cli.rs index 04c8b116..f98ecdcd 100644 --- a/libra/src/cli.rs +++ b/libra/src/cli.rs @@ -2,14 +2,17 @@ //! It includes the definition of the CLI and the main function. //! //! -use clap::{Parser, Subcommand}; -use mercury::errors::GitError; use crate::command; use crate::utils; +use clap::{Parser, Subcommand}; +use mercury::errors::GitError; // The Cli struct represents the root of the command line interface. #[derive(Parser, Debug)] -#[command(about = "Libra: A partial Git implemented in Rust", version = "0.1.0-pre")] +#[command( + about = "Libra: A partial Git implemented in Rust", + version = "0.1.0-pre" +)] struct Cli { #[command(subcommand)] command: Commands, @@ -84,7 +87,9 @@ pub async fn parse(args: Option<&[&str]>) -> Result<(), GitError> { /// `async` version of the [parse] function pub async fn parse_async(args: Option<&[&str]>) -> Result<(), GitError> { let args = match args { - Some(args) => Cli::try_parse_from(args).map_err(|e| GitError::InvalidArgument(e.to_string()))?, + Some(args) => { + Cli::try_parse_from(args).map_err(|e| GitError::InvalidArgument(e.to_string()))? 
+ } None => Cli::parse(), }; // TODO: try check repo before parsing diff --git a/libra/src/command/branch.rs b/libra/src/command/branch.rs index cbe970ad..0831e30f 100644 --- a/libra/src/command/branch.rs +++ b/libra/src/command/branch.rs @@ -1,5 +1,6 @@ use crate::{ - command::get_target_commit, internal::{branch::Branch, config::Config, head::Head} + command::get_target_commit, + internal::{branch::Branch, config::Config, head::Head}, }; use clap::Parser; use colored::Colorize; @@ -72,10 +73,18 @@ pub async fn set_upstream(branch: &str, upstream: &str) { }; Config::insert("branch", Some(branch), "remote", remote).await; // set upstream branch (tracking branch) - Config::insert("branch", Some(branch), "merge", - &format!("refs/heads/{}", remote_branch)).await; + Config::insert( + "branch", + Some(branch), + "merge", + &format!("refs/heads/{}", remote_branch), + ) + .await; } - println!("Branch '{}' set up to track remote branch '{}'", branch, upstream); + println!( + "Branch '{}' set up to track remote branch '{}'", + branch, upstream + ); } pub async fn create_branch(new_branch: String, branch_or_commit: Option) { @@ -185,7 +194,6 @@ async fn list_branches(remotes: bool) { } } - pub fn is_valid_git_branch_name(name: &str) -> bool { // Validate branch name // Not contain spaces, control characters or special characters diff --git a/libra/src/command/clone.rs b/libra/src/command/clone.rs index b37451b3..2b46adbc 100644 --- a/libra/src/command/clone.rs +++ b/libra/src/command/clone.rs @@ -1,16 +1,16 @@ -use std::path::PathBuf; -use std::{env, fs}; -use std::cell::Cell; use crate::command; use crate::command::restore::RestoreArgs; use crate::internal::branch::Branch; use crate::internal::config::{Config, RemoteConfig}; use crate::internal::head::Head; +use crate::utils::path_ext::PathExt; +use crate::utils::util; use clap::Parser; use colored::Colorize; use scopeguard::defer; -use crate::utils::path_ext::PathExt; -use crate::utils::util; +use std::cell::Cell; +use std::path::PathBuf; +use std::{env, fs}; use super::fetch::{self}; @@ -71,9 +71,14 @@ pub async fn execute(args: CloneArgs) { // CAUTION: change [current_dir] to the repo directory env::set_current_dir(&local_path).unwrap(); - let init_args = command::init::InitArgs { bare: false, initial_branch: None, repo_directory: local_path.to_str().unwrap().to_string(),quiet:false }; + let init_args = command::init::InitArgs { + bare: false, + initial_branch: None, + repo_directory: local_path.to_str().unwrap().to_string(), + quiet: false, + }; command::init::execute(init_args).await; - + /* fetch remote */ let remote_config = RemoteConfig { name: "origin".to_string(), diff --git a/libra/src/command/config.rs b/libra/src/command/config.rs index 342c3540..7110c943 100644 --- a/libra/src/command/config.rs +++ b/libra/src/command/config.rs @@ -33,32 +33,27 @@ pub struct ConfigArgs { pub struct Key { configuration: String, name: Option, - key: String + key: String, } pub async fn execute(args: ConfigArgs) { if args.list { list_config().await; - } - else { + } else { let origin_key = args.key.unwrap(); let key: Key = parse_key(origin_key).await; if args.add { add_config(&key, &args.valuepattern.unwrap()).await; - } - else if args.get { - get_config(&key ,args.valuepattern.as_deref()).await; - } - else if args.get_all { + } else if args.get { + get_config(&key, args.valuepattern.as_deref()).await; + } else if args.get_all { get_all_config(&key, args.valuepattern.as_deref()).await; - } - else if args.unset { + } else if args.unset { 
unset_config(&key, args.valuepattern.as_deref()).await; - } - else if args.unset_all { - unset_all_config(&key,args.valuepattern.as_deref()).await; - } - else { // If none of the above flags are present, then default to setting a config + } else if args.unset_all { + unset_all_config(&key, args.valuepattern.as_deref()).await; + } else { + // If none of the above flags are present, then default to setting a config set_config(&key, &args.valuepattern.unwrap()).await; } } @@ -73,67 +68,61 @@ async fn parse_key(mut origin_key: String) -> Key { let configuration: String; let name: Option; (configuration, origin_key) = match origin_key.split_once(".") { - Some((first_part, remainer)) => ( - first_part.to_string(), - remainer.to_string() - ), + Some((first_part, remainer)) => (first_part.to_string(), remainer.to_string()), None => { panic!("error: key does not contain a section: {}", origin_key); } }; (name, origin_key) = match origin_key.rsplit_once(".") { - Some((first_part, remainer)) => ( - Some(first_part.to_string()), - remainer.to_string() - ), - None => ( - None, - origin_key - ) + Some((first_part, remainer)) => (Some(first_part.to_string()), remainer.to_string()), + None => (None, origin_key), }; let key: String = origin_key; Key { - configuration, name, key + configuration, + name, + key, } } /// Add a configuration entry by the given key and value (create new one no matter old one is present or not) -async fn add_config(key: &Key, value: &str) { +async fn add_config(key: &Key, value: &str) { config::Config::insert(&key.configuration, key.name.as_deref(), &key.key, value).await; } /// Set a configuration entry by the given key and value (if old one is present, overwrites its value, otherwise create new one) async fn set_config(key: &Key, value: &str) { // First, check whether given key has multiple values - let values: Vec = config::Config::get_all(&key.configuration, key.name.as_deref(), &key.key).await; + let values: Vec = + config::Config::get_all(&key.configuration, key.name.as_deref(), &key.key).await; if values.len() >= 2 { - eprintln!("warning: {}.{} has multiple values", &key.configuration, + eprintln!( + "warning: {}.{} has multiple values", + &key.configuration, match &key.name { - Some(str) => str.to_string() + ".", - None => "".to_string() + Some(str) => str.to_string() + ".", + None => "".to_string(), } + &key.key ); eprintln!("error: cannot overwrite multiple values with a single value"); - } - else if values.len() == 1 { + } else if values.len() == 1 { config::Config::update(&key.configuration, key.name.as_deref(), &key.key, value).await; - } - else { + } else { config::Config::insert(&key.configuration, key.name.as_deref(), &key.key, value).await; } } /// Get the first configuration by the given key and value pattern async fn get_config(key: &Key, valuepattern: Option<&str>) { - let value: Option = config::Config::get(&key.configuration, key.name.as_deref(), &key.key).await; + let value: Option = + config::Config::get(&key.configuration, key.name.as_deref(), &key.key).await; if let Some(v) = value { - if let Some(vp) = valuepattern { + if let Some(vp) = valuepattern { // if value pattern is present, check it if v.contains(vp) { println!("{}", v); } - } - else { + } else { // if value pattern is not present, just print it println!("{}", v); } @@ -142,15 +131,15 @@ async fn get_config(key: &Key, valuepattern: Option<&str>) { /// Get all the configurations by the given key and value pattern async fn get_all_config(key: &Key, valuepattern: Option<&str>) { - let values: Vec 
= config::Config::get_all(&key.configuration, key.name.as_deref(), &key.key).await; + let values: Vec = + config::Config::get_all(&key.configuration, key.name.as_deref(), &key.key).await; for value in values { if let Some(vp) = valuepattern { // for each value, check if it matches the pattern if value.contains(vp) { println!("{}", value) } - } - else { + } else { // print all if value pattern is not present println!("{}", value) } @@ -159,12 +148,26 @@ async fn get_all_config(key: &Key, valuepattern: Option<&str>) { /// Remove one configuration by given key and value pattern async fn unset_config(key: &Key, valuepattern: Option<&str>) { - config::Config::remove_config(&key.configuration, key.name.as_deref(), &key.key, valuepattern, false).await; + config::Config::remove_config( + &key.configuration, + key.name.as_deref(), + &key.key, + valuepattern, + false, + ) + .await; } /// Remove all configurations by given key and value pattern async fn unset_all_config(key: &Key, valuepattern: Option<&str>) { - config::Config::remove_config(&key.configuration, key.name.as_deref(), &key.key, valuepattern, true).await; + config::Config::remove_config( + &key.configuration, + key.name.as_deref(), + &key.key, + valuepattern, + true, + ) + .await; } /// List all configurations @@ -173,4 +176,4 @@ async fn list_config() { for (key, value) in configurations { println!("{}={}", key, value); } -} \ No newline at end of file +} diff --git a/libra/src/command/fetch.rs b/libra/src/command/fetch.rs index 42a1dc58..c60ff0c8 100644 --- a/libra/src/command/fetch.rs +++ b/libra/src/command/fetch.rs @@ -1,17 +1,18 @@ -use std::io; -use std::vec; -use std::{collections::HashSet, fs, io::Write}; -use std::time::Instant; use ceres::protocol::ServiceType::UploadPack; use clap::Parser; use indicatif::ProgressBar; -use mercury::internal::object::commit::Commit; use mercury::hash::SHA1; +use mercury::internal::object::commit::Commit; +use std::io; +use std::time::Instant; +use std::vec; +use std::{collections::HashSet, fs, io::Write}; use tokio::io::{AsyncRead, AsyncReadExt}; use tokio_util::io::StreamReader; use url::Url; use crate::command::load_object; +use crate::utils::util; use crate::{ command::index_pack::{self, IndexPackArgs}, internal::{ @@ -22,7 +23,6 @@ use crate::{ }, utils::{self, path_ext::PathExt}, }; -use crate::utils::util; const DEFAULT_REMOTE: &str = "origin"; @@ -52,20 +52,26 @@ pub async fn execute(args: FetchArgs) { } else { let remote = match args.repository { Some(remote) => remote, - None => Config::get_current_remote().await.unwrap_or_else(|_| { - eprintln!("fatal: HEAD is detached"); - Some(DEFAULT_REMOTE.to_owned()) - }).unwrap_or_else(|| { - eprintln!("fatal: No remote configured for current branch"); - DEFAULT_REMOTE.to_owned() - }), + None => Config::get_current_remote() + .await + .unwrap_or_else(|_| { + eprintln!("fatal: HEAD is detached"); + Some(DEFAULT_REMOTE.to_owned()) + }) + .unwrap_or_else(|| { + eprintln!("fatal: No remote configured for current branch"); + DEFAULT_REMOTE.to_owned() + }), }; let remote_config = Config::remote_config(&remote).await; match remote_config { Some(remote_config) => fetch_repository(&remote_config, args.refspec).await, None => { tracing::error!("remote config '{}' not found", remote); - eprintln!("fatal: '{}' does not appear to be a libra repository", remote); + eprintln!( + "fatal: '{}' does not appear to be a libra repository", + remote + ); } } } @@ -74,12 +80,15 @@ pub async fn execute(args: FetchArgs) { /// Fetch from remote repository /// - `branch` 
is optional, if `None`, fetch all branches pub async fn fetch_repository(remote_config: &RemoteConfig, branch: Option) { - println!("fetching from {}{}", remote_config.name, - if let Some(branch) = &branch { - format!(" ({})", branch) - } else { - "".to_owned() - }); + println!( + "fetching from {}{}", + remote_config.name, + if let Some(branch) = &branch { + format!(" ({})", branch) + } else { + "".to_owned() + } + ); // fetch remote let url = match Url::parse(&remote_config.url) { @@ -128,10 +137,7 @@ pub async fn fetch_repository(remote_config: &RemoteConfig, branch: Option>(); let have = current_have().await; // TODO: return `DiscRef` rather than only hash, to compare `have` & `want` more accurately - let mut result_stream = http_client - .fetch_objects(&have, &want) - .await - .unwrap(); + let mut result_stream = http_client.fetch_objects(&have, &want).await.unwrap(); let mut reader = StreamReader::new(&mut result_stream); let mut pack_data = Vec::new(); @@ -147,7 +153,8 @@ pub async fn fetch_repository(remote_config: &RemoteConfig, branch: Option { // Data + 1 => { + // Data pack_data.extend(data); // TODO: decode meanwhile & calc progress } - 2 => { // Progress + 2 => { + // Progress print!("{}", String::from_utf8_lossy(data)); std::io::stdout().flush().unwrap(); } - 3 => { // Error + 3 => { + // Error eprintln!("{}", String::from_utf8_lossy(data)); } _ => { eprintln!("unknown side-band-64k code: {}", code); } } - } else if &data != b"NAK\n" { // 1.front info (server progress), ignore NAK (first line) + } else if &data != b"NAK\n" { + // 1.front info (server progress), ignore NAK (first line) print!("{}", String::from_utf8_lossy(&data)); // data contains '\r' & '\n' at end std::io::stdout().flush().unwrap(); } - }; + } bar.finish(); /* save pack file */ @@ -187,7 +198,8 @@ pub async fn fetch_repository(remote_config: &RemoteConfig, branch: Option 32 { // 12 header + 20 hash + if pack_data.len() > 32 { + // 12 header + 20 hash let pack_file = utils::path::objects() .join("pack") .join(format!("pack-{}.pack", checksum)); @@ -218,14 +230,17 @@ pub async fn fetch_repository(remote_config: &RemoteConfig, branch: Option { - let remote_head_ref = ref_heads - .iter() - .find(|r| r._hash == remote_head._hash); + let remote_head_ref = ref_heads.iter().find(|r| r._hash == remote_head._hash); match remote_head_ref { Some(remote_head_ref) => { - let remote_head_branch = remote_head_ref._ref.strip_prefix("refs/heads/").unwrap(); - Head::update(Head::Branch(remote_head_branch.to_owned()), Some(&remote_config.name)).await; + let remote_head_branch = + remote_head_ref._ref.strip_prefix("refs/heads/").unwrap(); + Head::update( + Head::Branch(remote_head_branch.to_owned()), + Some(&remote_config.name), + ) + .await; } None => { if branch.is_none() { @@ -310,4 +325,4 @@ async fn read_pkt_line(reader: &mut (impl AsyncRead + Unpin)) -> io::Result<(usi let mut data = vec![0u8; (len - 4) as usize]; reader.read_exact(&mut data).await?; Ok((len as usize, data)) -} \ No newline at end of file +} diff --git a/libra/src/command/index_pack.rs b/libra/src/command/index_pack.rs index 7a21009c..867e0852 100644 --- a/libra/src/command/index_pack.rs +++ b/libra/src/command/index_pack.rs @@ -7,8 +7,8 @@ use byteorder::{BigEndian, WriteBytesExt}; use clap::Parser; use sha1::{Digest, Sha1}; -use mercury::internal::pack::Pack; use mercury::errors::GitError; +use mercury::internal::pack::Pack; #[derive(Parser, Debug)] pub struct IndexPackArgs { @@ -64,7 +64,12 @@ pub fn build_index_v1(pack_file: &str, index_file: &str) -> 
Result<(), GitError> let mut pack_reader = std::io::BufReader::new(pack_file); let obj_map = Arc::new(Mutex::new(BTreeMap::new())); // sorted by hash let obj_map_c = obj_map.clone(); - let mut pack = Pack::new(Some(8), Some(1024 * 1024 * 1024), Some(tmp_path.to_path_buf()), true); + let mut pack = Pack::new( + Some(8), + Some(1024 * 1024 * 1024), + Some(tmp_path.to_path_buf()), + true, + ); pack.decode(&mut pack_reader, move |entry, offset| { obj_map_c.lock().unwrap().insert(entry.hash, offset); })?; @@ -80,9 +85,11 @@ pub fn build_index_v1(pack_file: &str, index_file: &str) -> Result<(), GitError> let mut cnt: u32 = 0; let mut fan_out = Vec::with_capacity(256 * 4); let obj_map = Arc::try_unwrap(obj_map).unwrap().into_inner().unwrap(); - for (hash, _) in obj_map.iter() { // sorted + for (hash, _) in obj_map.iter() { + // sorted let first_byte = hash.0[0]; - while first_byte > i { // `while` rather than `if` to fill the gap, e.g. 0, 1, 2, 2, 2, 6 + while first_byte > i { + // `while` rather than `if` to fill the gap, e.g. 0, 1, 2, 2, 2, 6 fan_out.write_u32::(cnt)?; i += 1; } @@ -114,7 +121,7 @@ pub fn build_index_v1(pack_file: &str, index_file: &str) -> Result<(), GitError> index_hash.update(pack.signature.0); // A copy of the pack checksum at the end of the corresponding pack-file. index_file.write_all(&pack.signature.0)?; - let index_hash:[u8; 20] = index_hash.finalize().into(); + let index_hash: [u8; 20] = index_hash.finalize().into(); // Index checksum of all of the above. index_file.write_all(&index_hash)?; diff --git a/libra/src/command/init.rs b/libra/src/command/init.rs index a1d9dcf6..6bc6da6b 100644 --- a/libra/src/command/init.rs +++ b/libra/src/command/init.rs @@ -347,7 +347,12 @@ mod tests { // Set up the test environment without a Libra repository test::setup_clean_testing_env(); let cur_dir = std::env::current_dir().unwrap(); - let args = InitArgs { bare: false, initial_branch: Some("main".to_string()), repo_directory: cur_dir.to_str().unwrap().to_string(),quiet:false }; + let args = InitArgs { + bare: false, + initial_branch: Some("main".to_string()), + repo_directory: cur_dir.to_str().unwrap().to_string(), + quiet: false, + }; // Run the init function init(args).await.unwrap(); diff --git a/libra/src/command/lfs.rs b/libra/src/command/lfs.rs index a47b2518..a5b1195e 100644 --- a/libra/src/command/lfs.rs +++ b/libra/src/command/lfs.rs @@ -1,28 +1,24 @@ +use crate::command::status; +use crate::internal::head::Head; +use crate::internal::protocol::lfs_client::LFSClient; +use crate::utils::path_ext::PathExt; +use crate::utils::{lfs, path, util}; +use ceres::lfs::lfs_structs::LockListQuery; use clap::Subcommand; +use mercury::internal::index::Index; +use reqwest::StatusCode; use std::fs::{File, OpenOptions}; use std::io; use std::io::{BufRead, BufReader, Read, Seek, SeekFrom, Write}; use std::path::Path; -use reqwest::StatusCode; -use ceres::lfs::lfs_structs::LockListQuery; -use mercury::internal::index::Index; -use crate::command::status; -use crate::internal::head::Head; -use crate::internal::protocol::lfs_client::LFSClient; -use crate::utils::{lfs, path, util}; -use crate::utils::path_ext::PathExt; /// [Docs](https://github.com/git-lfs/git-lfs/tree/main/docs/man) #[derive(Subcommand, Debug)] pub enum LfsCmds { /// View or add LFS paths to Libra Attributes (root) - Track { - pattern: Option>, - }, + Track { pattern: Option> }, /// Remove LFS paths from Libra Attributes - Untrack { - path: Vec, - }, + Untrack { path: Vec }, /// Lists currently locked files from the Libra LFS 
server. (Current Branch) Locks { #[clap(long, short)] @@ -43,7 +39,7 @@ pub enum LfsCmds { #[clap(long, short)] force: bool, #[clap(long, short)] - id: Option + id: Option, }, /// Show information about Git LFS files in the index and working tree (current branch) LsFiles { @@ -56,14 +52,15 @@ pub enum LfsCmds { /// Show only the lfs tracked file names. #[clap(long, short)] name_only: bool, - } + }, } pub async fn execute(cmd: LfsCmds) { // TODO: attributes file should be created in current dir, NOT root dir let attr_path = path::attributes().to_string_or_panic(); match cmd { - LfsCmds::Track { pattern } => { // TODO: deduplicate + LfsCmds::Track { pattern } => { + // TODO: deduplicate match pattern { Some(pattern) => { let pattern = convert_patterns_to_workdir(pattern); // @@ -80,7 +77,8 @@ pub async fn execute(cmd: LfsCmds) { } } } - LfsCmds::Untrack { path } => { // only remove totally same pattern with path ? + LfsCmds::Untrack { path } => { + // only remove totally same pattern with path ? let path = convert_patterns_to_workdir(path); // untrack_lfs_patterns(&attr_path, path).unwrap(); } @@ -98,7 +96,12 @@ pub async fn execute(cmd: LfsCmds) { if !locks.is_empty() { let max_path_len = locks.iter().map(|l| l.path.len()).max().unwrap(); for lock in locks { - println!("{: { // get id by path - let locks = LFSClient::get().await.get_locks(LockListQuery { - refspec: refspec.clone(), - path: path.clone(), - id: "".to_string(), - cursor: "".to_string(), - limit: "".to_string(), - }).await.locks; + let locks = LFSClient::get() + .await + .get_locks(LockListQuery { + refspec: refspec.clone(), + path: path.clone(), + id: "".to_string(), + cursor: "".to_string(), + limit: "".to_string(), + }) + .await + .locks; if locks.is_empty() { eprintln!("fatal: no lock found for path '{}'", path); return; } locks[0].id.clone() } - Some(id) => id + Some(id) => id, }; - let code = LFSClient::get().await.unlock(id.clone(), refspec.clone(), force).await; + let code = LFSClient::get() + .await + .unlock(id.clone(), refspec.clone(), force) + .await; if code.is_success() { println!("Unlocked {}", path); } else if code == StatusCode::FORBIDDEN { eprintln!("Forbidden: You must have push access to unlock"); } } - LfsCmds::LsFiles { long, size, name_only} => { + LfsCmds::LsFiles { + long, + size, + name_only, + } => { let idx_file = path::index(); let index = Index::load(&idx_file).unwrap(); let entries = index.tracked_entries(0); @@ -169,7 +186,11 @@ pub async fn execute(cmd: LfsCmds) { let is_pointer = lfs::parse_pointer_file(&path_abs).is_ok(); // An asterisk (*) after the OID indicates a full object, a minus (-) indicates an LFS pointer. 
// or not exists (-) - let _type = if is_pointer || !path_abs.exists() { "-" } else { "*" }; + let _type = if is_pointer || !path_abs.exists() { + "-" + } else { + "*" + }; let oid = if long { oid } else { oid[..10].to_owned() }; let tail = if size { let byte = util::auto_unit_bytes(lfs_size); @@ -201,9 +222,10 @@ pub(crate) async fn current_refspec() -> Option { /// temp fn convert_patterns_to_workdir(patterns: Vec) -> Vec { - patterns.into_iter().map(|p| { - util::to_workdir_path(&p).to_string_or_panic() - }).collect() + patterns + .into_iter() + .map(|p| util::to_workdir_path(&p).to_string_or_panic()) + .collect() } fn add_lfs_patterns(file_path: &str, patterns: Vec) -> io::Result<()> { @@ -231,7 +253,10 @@ fn add_lfs_patterns(file_path: &str, patterns: Vec) -> io::Result<()> { continue; } println!("Tracking \"{}\"", pattern); - let pattern = format!("{} filter=lfs diff=lfs merge=lfs -text\n", pattern.replace(" ", r"\ ")); + let pattern = format!( + "{} filter=lfs diff=lfs merge=lfs -text\n", + pattern.replace(" ", r"\ ") + ); file.write_all(pattern.as_bytes())?; } @@ -275,4 +300,4 @@ fn untrack_lfs_patterns(file_path: &str, patterns: Vec) -> io::Result<() } Ok(()) -} \ No newline at end of file +} diff --git a/libra/src/command/log.rs b/libra/src/command/log.rs index 77ac2b07..e3eaf0b1 100644 --- a/libra/src/command/log.rs +++ b/libra/src/command/log.rs @@ -11,10 +11,10 @@ use std::io::Write; #[cfg(unix)] use std::process::{Command, Stdio}; -use std::collections::VecDeque; -use std::str::FromStr; use mercury::hash::SHA1; use mercury::internal::object::commit::Commit; +use std::collections::VecDeque; +use std::str::FromStr; use common::utils::parse_commit_msg; #[derive(Parser, Debug)] @@ -55,7 +55,7 @@ pub async fn execute(args: LogArgs) { #[cfg(unix)] let mut process = Command::new("less") // create a pipe to less .arg("-R") // raw control characters - .arg("-F") + .arg("-F") .stdin(Stdio::piped()) .stdout(Stdio::inherit()) .spawn() @@ -87,11 +87,7 @@ pub async fn execute(args: LogArgs) { } output_number += 1; let mut message = { - let mut message = format!( - "{} {}", - "commit".yellow(), - &commit.id.to_string().yellow() - ); + let mut message = format!("{} {}", "commit".yellow(), &commit.id.to_string().yellow()); // TODO other branch's head should shown branch name if output_number == 1 { @@ -166,18 +162,15 @@ mod tests { // save_object(&commit_1); save_object(&commit_1, &commit_1.id).unwrap(); - let mut commit_2 = - Commit::from_tree_id(SHA1::new(&[2; 20]), vec![commit_1.id], "Commit_2"); + let mut commit_2 = Commit::from_tree_id(SHA1::new(&[2; 20]), vec![commit_1.id], "Commit_2"); commit_2.committer.timestamp = 2; save_object(&commit_2, &commit_2.id).unwrap(); - let mut commit_3 = - Commit::from_tree_id(SHA1::new(&[3; 20]), vec![commit_2.id], "Commit_3"); + let mut commit_3 = Commit::from_tree_id(SHA1::new(&[3; 20]), vec![commit_2.id], "Commit_3"); commit_3.committer.timestamp = 3; save_object(&commit_3, &commit_3.id).unwrap(); - let mut commit_4 = - Commit::from_tree_id(SHA1::new(&[4; 20]), vec![commit_2.id], "Commit_4"); + let mut commit_4 = Commit::from_tree_id(SHA1::new(&[4; 20]), vec![commit_2.id], "Commit_4"); commit_4.committer.timestamp = 4; save_object(&commit_4, &commit_4.id).unwrap(); @@ -197,8 +190,7 @@ mod tests { commit_6.committer.timestamp = 6; save_object(&commit_6, &commit_6.id).unwrap(); - let mut commit_7 = - Commit::from_tree_id(SHA1::new(&[7; 20]), vec![commit_5.id], "Commit_7"); + let mut commit_7 = Commit::from_tree_id(SHA1::new(&[7; 20]), 
vec![commit_5.id], "Commit_7"); commit_7.committer.timestamp = 7; save_object(&commit_7, &commit_7.id).unwrap(); diff --git a/libra/src/command/merge.rs b/libra/src/command/merge.rs index f8fbb8b9..ce5984bc 100644 --- a/libra/src/command/merge.rs +++ b/libra/src/command/merge.rs @@ -7,8 +7,7 @@ use crate::{ }; use super::{ - get_target_commit, - load_object, log, + get_target_commit, load_object, log, restore::{self, RestoreArgs}, }; diff --git a/libra/src/command/mod.rs b/libra/src/command/mod.rs index fbb32190..c2735a32 100644 --- a/libra/src/command/mod.rs +++ b/libra/src/command/mod.rs @@ -2,6 +2,7 @@ pub mod add; pub mod branch; pub mod clone; pub mod commit; +pub mod config; pub mod diff; pub mod fetch; pub mod index_pack; @@ -16,7 +17,6 @@ pub mod remove; pub mod restore; pub mod status; pub mod switch; -pub mod config; use crate::internal::branch::Branch; use crate::internal::head::Head; diff --git a/libra/src/command/pull.rs b/libra/src/command/pull.rs index 11b43200..3c97e1f5 100644 --- a/libra/src/command/pull.rs +++ b/libra/src/command/pull.rs @@ -17,7 +17,8 @@ pub async fn execute(args: PullArgs) { repository: args.repository, refspec: args.refspec, all: false, - }).await; + }) + .await; let head = Head::current().await; match head { diff --git a/libra/src/command/push.rs b/libra/src/command/push.rs index 09f09b74..35c2f2ca 100644 --- a/libra/src/command/push.rs +++ b/libra/src/command/push.rs @@ -1,30 +1,31 @@ -use std::collections::{HashSet, VecDeque}; -use std::io::Write; -use std::str::FromStr; +use crate::command::branch; +use crate::internal::branch::Branch; +use crate::internal::config::Config; +use crate::internal::head::Head; +use crate::internal::protocol::https_client::HttpsClient; +use crate::internal::protocol::lfs_client::LFSClient; +use crate::internal::protocol::ProtocolClient; +use crate::utils::object_ext::{BlobExt, CommitExt, TreeExt}; use bytes::BytesMut; +use ceres::protocol::smart::{add_pkt_line_string, read_pkt_line}; +use ceres::protocol::ServiceType::ReceivePack; use clap::Parser; use colored::Colorize; -use tokio::sync::mpsc; -use url::Url; -use ceres::protocol::ServiceType::ReceivePack; -use ceres::protocol::smart::{add_pkt_line_string, read_pkt_line}; use mercury::hash::SHA1; use mercury::internal::object::blob::Blob; use mercury::internal::object::commit::Commit; use mercury::internal::object::tree::{Tree, TreeItemMode}; use mercury::internal::pack::encode::PackEncoder; use mercury::internal::pack::entry::Entry; -use crate::command::branch; -use crate::internal::branch::Branch; -use crate::internal::config::Config; -use crate::internal::head::Head; -use crate::internal::protocol::https_client::HttpsClient; -use crate::internal::protocol::lfs_client::LFSClient; -use crate::internal::protocol::ProtocolClient; -use crate::utils::object_ext::{BlobExt, CommitExt, TreeExt}; +use std::collections::{HashSet, VecDeque}; +use std::io::Write; +use std::str::FromStr; +use tokio::sync::mpsc; +use url::Url; #[derive(Parser, Debug)] -pub struct PushArgs { // TODO --force +pub struct PushArgs { + // TODO --force /// repository, e.g. 
origin #[clap(requires("refspec"))] repository: Option, @@ -37,7 +38,8 @@ pub struct PushArgs { // TODO --force } pub async fn execute(args: PushArgs) { - if args.repository.is_some() ^ args.refspec.is_some() { // must provide both or none + if args.repository.is_some() ^ args.refspec.is_some() { + // must provide both or none eprintln!("fatal: both repository and refspec should be provided"); return; } @@ -67,9 +69,16 @@ pub async fn execute(args: PushArgs) { let repo_url = Config::get_remote_url(&repository).await; let branch = args.refspec.unwrap_or(branch); - let commit_hash = Branch::find_branch(&branch, None).await.unwrap().commit.to_string(); - - println!("pushing {}({}) to {}({})", branch, commit_hash, repository, repo_url); + let commit_hash = Branch::find_branch(&branch, None) + .await + .unwrap() + .commit + .to_string(); + + println!( + "pushing {}({}) to {}({})", + branch, commit_hash, repository, repo_url + ); let url = Url::parse(&repo_url).unwrap(); let client = HttpsClient::from_url(&url); @@ -87,27 +96,33 @@ pub async fn execute(args: PushArgs) { let tracked_ref = refs.iter().find(|r| r._ref == tracked_branch); // [0; 20] if new branch - let remote_hash = tracked_ref.map(|r| r._hash.clone()).unwrap_or(SHA1::default().to_string()); + let remote_hash = tracked_ref + .map(|r| r._hash.clone()) + .unwrap_or(SHA1::default().to_string()); if remote_hash == commit_hash { println!("Everything up-to-date"); return; } let mut data = BytesMut::new(); - add_pkt_line_string(&mut data, format!("{} {} {}\0report-status\n", - remote_hash, - commit_hash, - tracked_branch)); + add_pkt_line_string( + &mut data, + format!( + "{} {} {}\0report-status\n", + remote_hash, commit_hash, tracked_branch + ), + ); data.extend_from_slice(b"0000"); tracing::debug!("{:?}", data); // TODO 考虑remote有多个refs,可以少发一点commits let objs = incremental_objs( SHA1::from_str(&commit_hash).unwrap(), - SHA1::from_str(&remote_hash).unwrap() + SHA1::from_str(&remote_hash).unwrap(), ); - { // upload lfs files + { + // upload lfs files let client = LFSClient::from_url(&url); let res = client.push_objects(&objs).await; if res.is_err() { @@ -119,7 +134,7 @@ pub async fn execute(args: PushArgs) { // let (tx, rx) = mpsc::channel::(); let (entry_tx, entry_rx) = mpsc::channel(1_000_000); let (stream_tx, mut stream_rx) = mpsc::channel(1_000_000); - + let encoder = PackEncoder::new(objs.len(), 0, stream_tx); // TODO: diff slow, so window_size = 0 encoder.encode_async(entry_rx).await.unwrap(); @@ -166,7 +181,8 @@ pub async fn execute(args: PushArgs) { /// collect all commits from `commit_id` to root commit fn collect_history_commits(commit_id: &SHA1) -> HashSet { - if commit_id == &SHA1::default() { // 0000...0000 means not exist + if commit_id == &SHA1::default() { + // 0000...0000 means not exist return HashSet::new(); } @@ -188,7 +204,8 @@ fn incremental_objs(local_ref: SHA1, remote_ref: SHA1) -> HashSet { tracing::debug!("local_ref: {}, remote_ref: {}", local_ref, remote_ref); // just fast-forward optimization - if remote_ref != SHA1::default() { // remote exists + if remote_ref != SHA1::default() { + // remote exists let mut commit = Commit::load(&local_ref); let mut commits = Vec::new(); let mut ok = true; @@ -197,14 +214,16 @@ fn incremental_objs(local_ref: SHA1, remote_ref: SHA1) -> HashSet { if commit.id == remote_ref { break; } - if commit.parent_commit_ids.len() != 1 { // merge commit or root commit + if commit.parent_commit_ids.len() != 1 { + // merge commit or root commit ok = false; break; } // update commit to it's 
only parent commit = Commit::load(&commit.parent_commit_ids[0]); } - if ok { // fast-forward + if ok { + // fast-forward let mut objs = HashSet::new(); commits.reverse(); // from old to new for i in 0..commits.len() - 1 { @@ -217,7 +236,6 @@ fn incremental_objs(local_ref: SHA1, remote_ref: SHA1) -> HashSet { } } - let mut objs = HashSet::new(); let mut visit = HashSet::new(); // avoid duplicate commit visit let exist_commits = collect_history_commits(&remote_ref); @@ -264,7 +282,8 @@ fn incremental_objs(local_ref: SHA1, remote_ref: SHA1) -> HashSet { /// calc objects that in `new_tree` but not in `old_tree` /// - if `old_tree` is None, return all objects in `new_tree` (include tree itself) -fn diff_tree_objs(old_tree: Option<&SHA1>, new_tree: &SHA1) -> HashSet { // TODO: skip objs that has been added in caller +fn diff_tree_objs(old_tree: Option<&SHA1>, new_tree: &SHA1) -> HashSet { + // TODO: skip objs that has been added in caller let mut objs = HashSet::new(); if let Some(old_tree) = old_tree { if old_tree == new_tree { @@ -278,11 +297,12 @@ fn diff_tree_objs(old_tree: Option<&SHA1>, new_tree: &SHA1) -> HashSet { let old_items = match old_tree { Some(tree) => { let tree = Tree::load(tree); - tree.tree_items.iter() + tree.tree_items + .iter() .map(|item| item.id) .collect::>() } - None => HashSet::new() + None => HashSet::new(), }; for item in new_tree.tree_items.iter() { @@ -291,8 +311,10 @@ fn diff_tree_objs(old_tree: Option<&SHA1>, new_tree: &SHA1) -> HashSet { TreeItemMode::Tree => { objs.extend(diff_tree_objs(None, &item.id)); //TODO optimize, find same name tree } - _ => { // TODO: submodule (TreeItemMode: Commit) - if item.mode == TreeItemMode::Commit { // (160000)| Gitlink (Submodule) + _ => { + // TODO: submodule (TreeItemMode: Commit) + if item.mode == TreeItemMode::Commit { + // (160000)| Gitlink (Submodule) eprintln!("{}", "Warning: Submodule is not supported yet".red()); } let blob = Blob::load(&item.id); @@ -306,7 +328,7 @@ fn diff_tree_objs(old_tree: Option<&SHA1>, new_tree: &SHA1) -> HashSet { } #[cfg(test)] -mod test{ +mod test { use super::*; #[test] fn test_parse_args_success() { @@ -347,5 +369,4 @@ mod test{ let args = PushArgs::try_parse_from(args); assert!(args.is_err()); } - -} \ No newline at end of file +} diff --git a/libra/src/command/remove.rs b/libra/src/command/remove.rs index a3a09df4..a5aa186a 100644 --- a/libra/src/command/remove.rs +++ b/libra/src/command/remove.rs @@ -6,9 +6,9 @@ use colored::Colorize; use mercury::errors::GitError; -use mercury::internal::index::Index; use crate::utils::path_ext::PathExt; use crate::utils::{path, util}; +use mercury::internal::index::Index; #[derive(Parser, Debug)] pub struct RemoveArgs { @@ -34,7 +34,10 @@ pub fn execute(args: RemoveArgs) -> Result<(), GitError> { } let dirs = get_dirs(&args.pathspec, &index); if !dirs.is_empty() && !args.recursive { - println!("fatal: not removing '{}' recursively without -r", dirs[0].bright_blue()); // Git print first + println!( + "fatal: not removing '{}' recursively without -r", + dirs[0].bright_blue() + ); // Git print first return Ok(()); } @@ -44,7 +47,8 @@ pub fn execute(args: RemoveArgs) -> Result<(), GitError> { if dirs.contains(path_str) { // dir let removed = index.remove_dir_files(&path_wd); - for file in removed.iter() { // to workdir + for file in removed.iter() { + // to workdir println!("rm '{}'", file.bright_green()); } if !args.cached { @@ -97,4 +101,4 @@ fn get_dirs(pathspec: &[String], index: &Index) -> Vec { } } dirs -} \ No newline at end of file +} diff --git 
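The fast-forward probe in incremental_objs above walks first parents from the local tip until it either reaches the remote tip (plain fast-forward: only the new commits' objects need encoding) or hits a merge/root commit and falls back to the full history diff. A minimal sketch of that walk, using a hypothetical in-memory parent map in place of Commit::load:

use std::collections::HashMap;

/// Sketch only: `parents` stands in for loading commit objects by hash.
fn is_fast_forward(local: &str, remote: &str, parents: &HashMap<&str, Vec<&str>>) -> bool {
    let mut cur = local;
    loop {
        if cur == remote {
            return true; // remote tip is an ancestor: plain fast-forward
        }
        match parents.get(cur) {
            Some(p) if p.len() == 1 => cur = p[0], // linear history: follow the only parent
            _ => return false, // merge commit (2+ parents) or root commit: give up
        }
    }
}

fn main() {
    // c1 <- c2 <- c3, hypothetical ids standing in for SHA1 hashes
    let parents = HashMap::from([("c3", vec!["c2"]), ("c2", vec!["c1"]), ("c1", vec![])]);
    assert!(is_fast_forward("c3", "c1", &parents));
    assert!(!is_fast_forward("c3", "f0f0", &parents));
}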
a/libra/src/command/restore.rs b/libra/src/command/restore.rs index 0074404a..1ce4e1ce 100644 --- a/libra/src/command/restore.rs +++ b/libra/src/command/restore.rs @@ -1,20 +1,20 @@ +use crate::command::calc_file_blob_hash; use crate::internal::branch::Branch; use crate::internal::head::Head; -use mercury::internal::index::{Index, IndexEntry}; +use crate::internal::protocol::lfs_client::LFSClient; use crate::utils::object_ext::{BlobExt, CommitExt, TreeExt}; use crate::utils::path_ext::PathExt; use crate::utils::{lfs, path, util}; use clap::Parser; -use std::collections::{HashMap, HashSet}; -use std::{fs, io}; -use std::path::PathBuf; -use crate::internal::protocol::lfs_client::LFSClient; use mercury::hash::SHA1; +use mercury::internal::index::{Index, IndexEntry}; use mercury::internal::object::blob::Blob; use mercury::internal::object::commit::Commit; use mercury::internal::object::tree::Tree; use mercury::internal::object::types::ObjectType; -use crate::command::calc_file_blob_hash; +use std::collections::{HashMap, HashSet}; +use std::path::PathBuf; +use std::{fs, io}; #[derive(Parser, Debug)] pub struct RestoreArgs { @@ -154,7 +154,11 @@ async fn restore_to_file(hash: &SHA1, path: &PathBuf) -> io::Result<()> { fs::copy(&lfs_obj_path, &path_abs)?; } else { // not exist, download from server - if let Err(e) = LFSClient::get().await.download_object(&oid, size, &path_abs, None).await { + if let Err(e) = LFSClient::get() + .await + .download_object(&oid, size, &path_abs, None) + .await + { eprintln!("fatal: {}", e); } } @@ -223,7 +227,9 @@ pub async fn restore_worktree(filter: &Vec, target_blobs: &[(PathBuf, S // file not exist, deleted or illegal if target_blobs.contains_key(path_wd) { // file in target_blobs (deleted), need to restore - restore_to_file(&target_blobs[path_wd], path_wd).await.unwrap(); + restore_to_file(&target_blobs[path_wd], path_wd) + .await + .unwrap(); } else { // not in target_commit and workdir (illegal path), user input unreachable!("It should be checked before"); @@ -236,7 +242,9 @@ pub async fn restore_worktree(filter: &Vec, target_blobs: &[(PathBuf, S // both in target & worktree: 1. modified 2. 
same if hash != target_blobs[path_wd] { // modified - restore_to_file(&target_blobs[path_wd], path_wd).await.unwrap(); + restore_to_file(&target_blobs[path_wd], path_wd) + .await + .unwrap(); } // else: same, keep } else { // not in target but in worktree: New file diff --git a/libra/src/command/status.rs b/libra/src/command/status.rs index 284e96e6..5ea91dd1 100644 --- a/libra/src/command/status.rs +++ b/libra/src/command/status.rs @@ -6,11 +6,11 @@ use colored::Colorize; use mercury::internal::object::commit::Commit; use mercury::internal::object::tree::Tree; -use crate::internal::head::Head; -use mercury::internal::index::Index; use crate::command::calc_file_blob_hash; +use crate::internal::head::Head; use crate::utils::object_ext::{CommitExt, TreeExt}; use crate::utils::{path, util}; +use mercury::internal::index::Index; /// path: to workdir #[derive(Debug, Default, Clone)] @@ -48,7 +48,10 @@ pub async fn execute() { // TODO .gitignore match Head::current().await { Head::Detached(commit) => { - println!("HEAD detached at {}", String::from_utf8_lossy(&commit.0[0..7])); + println!( + "HEAD detached at {}", + String::from_utf8_lossy(&commit.0[0..7]) + ); } Head::Branch(branch) => { println!("On branch {}", branch); @@ -123,7 +126,8 @@ pub async fn changes_to_be_committed() -> Changes { let head_commit = Head::current_commit().await; let tracked_files = index.tracked_files(); - if head_commit.is_none() { // no commit yet + if head_commit.is_none() { + // no commit yet changes.new = tracked_files; return changes; } @@ -146,7 +150,8 @@ pub async fn changes_to_be_committed() -> Changes { } let tree_files_set: HashSet = tree_files.into_iter().map(|(path, _)| path).collect(); // `new` means the files in index but not in the last commit - changes.new = tracked_files.into_iter() + changes.new = tracked_files + .into_iter() .filter(|path| !tree_files_set.contains(path)) .collect(); @@ -180,4 +185,4 @@ pub fn changes_to_be_staged() -> Changes { } } changes -} \ No newline at end of file +} diff --git a/libra/src/internal/config.rs b/libra/src/internal/config.rs index e4d9e428..0d473ffe 100644 --- a/libra/src/internal/config.rs +++ b/libra/src/internal/config.rs @@ -43,12 +43,10 @@ impl Config { let db = get_db_conn_instance().await; let mut config: ActiveModel = config::Entity::find() .filter(config::Column::Configuration.eq(configuration)) - .filter( - match name { - Some(str) => config::Column::Name.eq(str), - None => config::Column::Name.is_null() - } - ) + .filter(match name { + Some(str) => config::Column::Name.eq(str), + None => config::Column::Name.is_null(), + }) .filter(config::Column::Key.eq(key)) .one(db) .await @@ -63,12 +61,10 @@ impl Config { let db = get_db_conn_instance().await; config::Entity::find() .filter(config::Column::Configuration.eq(configuration)) - .filter( - match name { - Some(str) => config::Column::Name.eq(str), - None => config::Column::Name.is_null() - } - ) + .filter(match name { + Some(str) => config::Column::Name.eq(str), + None => config::Column::Name.is_null(), + }) .filter(config::Column::Key.eq(key)) .all(db) .await @@ -92,13 +88,11 @@ impl Config { /// - `Error` if `HEAD` is detached pub async fn get_current_remote() -> Result, ()> { match Head::current().await { - Head::Branch(name) => { - Ok(Config::get_remote(&name).await) - }, + Head::Branch(name) => Ok(Config::get_remote(&name).await), Head::Detached(_) => { eprintln!("fatal: HEAD is detached, cannot get remote"); Err(()) - }, + } } } @@ -135,20 +129,26 @@ impl Config { .await .unwrap() .iter() - 
.map(|m| + .map(|m| { ( match &m.name { Some(n) => m.configuration.to_owned() + "." + n + "." + &m.key, - None => m.configuration.to_owned() + "." + &m.key + None => m.configuration.to_owned() + "." + &m.key, }, - m.value.to_owned() + m.value.to_owned(), ) - ) + }) .collect() } /// Delete one or all configuration using given key and value pattern - pub async fn remove_config(configuration: &str, name: Option<&str>, key: &str, valuepattern: Option<&str>, delete_all: bool) { + pub async fn remove_config( + configuration: &str, + name: Option<&str>, + key: &str, + valuepattern: Option<&str>, + delete_all: bool, + ) { let db = get_db_conn_instance().await; let entries: Vec = Self::query(configuration, name, key).await; for e in entries { @@ -156,14 +156,11 @@ impl Config { Some(vp) => { if e.value.contains(vp) { e.delete(db).await - } - else { + } else { continue; } } - None => { - e.delete(db).await - } + None => e.delete(db).await, }; if !delete_all { break; diff --git a/libra/src/internal/model/config.rs b/libra/src/internal/model/config.rs index 0f93718e..f353b83c 100644 --- a/libra/src/internal/model/config.rs +++ b/libra/src/internal/model/config.rs @@ -7,7 +7,7 @@ pub struct Model { pub id: i64, // [configuration "name"]=>[remote "origin"] pub configuration: String, // configuration option - pub name: Option, // name of the configuration (optionally) + pub name: Option, // name of the configuration (optionally) pub key: String, pub value: String, } @@ -15,4 +15,4 @@ pub struct Model { #[derive(Copy, Clone, Debug, EnumIter, DeriveRelation)] pub enum Relation {} -impl ActiveModelBehavior for ActiveModel {} \ No newline at end of file +impl ActiveModelBehavior for ActiveModel {} diff --git a/libra/src/internal/model/mod.rs b/libra/src/internal/model/mod.rs index 0653776c..859d1a75 100644 --- a/libra/src/internal/model/mod.rs +++ b/libra/src/internal/model/mod.rs @@ -1,2 +1,2 @@ pub mod config; -pub mod reference; \ No newline at end of file +pub mod reference; diff --git a/libra/src/internal/protocol/https_client.rs b/libra/src/internal/protocol/https_client.rs index 2cf4ea99..cfb54160 100644 --- a/libra/src/internal/protocol/https_client.rs +++ b/libra/src/internal/protocol/https_client.rs @@ -1,4 +1,5 @@ use super::ProtocolClient; +use crate::command::ask_basic_auth; use bytes::Bytes; use ceres::protocol::smart::{add_pkt_line_string, read_pkt_line}; use ceres::protocol::ServiceType; @@ -13,7 +14,6 @@ use std::ops::Deref; use std::sync::Mutex; use tokio_util::bytes::BytesMut; use url::Url; -use crate::command::ask_basic_auth; /// A Git protocol client that communicates with a Git server over HTTPS. /// Only support `SmartProtocol` now, see [http-protocol](https://www.git-scm.com/docs/http-protocol) for protocol details. @@ -53,7 +53,7 @@ impl BasicAuth { /// send request with basic auth, retry 3 times pub async fn send(request_builder: impl Fn() -> Fut) -> Result where - Fut: std::future::Future, + Fut: std::future::Future, { const MAX_TRY: usize = 3; let mut res; @@ -64,7 +64,8 @@ impl BasicAuth { request = request.basic_auth(auth.username.clone(), Some(auth.password.clone())); } // if no auth exists, try without auth (e.g. 
clone public) res = request.send().await?; - if res.status() == StatusCode::FORBIDDEN { // 403: no access, no need to retry + if res.status() == StatusCode::FORBIDDEN { + // 403: no access, no need to retry eprintln!("Authentication failed, forbidden"); break; } else if res.status() != StatusCode::UNAUTHORIZED { @@ -109,7 +110,9 @@ impl HttpsClient { .url .join(&format!("info/refs?service={}", service)) .unwrap(); - let res = BasicAuth::send(|| async{self.client.get(url.clone())}).await.unwrap(); + let res = BasicAuth::send(|| async { self.client.get(url.clone()) }) + .await + .unwrap(); tracing::debug!("{:?}", res); if res.status() == 401 { @@ -209,12 +212,13 @@ impl HttpsClient { tracing::debug!("fetch_objects with body: {:?}", body); let res = BasicAuth::send(|| async { - self - .client + self.client .post(url.clone()) .header("Content-Type", "application/x-git-upload-pack-request") .body(body.clone()) - }).await.unwrap(); + }) + .await + .unwrap(); tracing::debug!("request: {:?}", res); if res.status() != 200 && res.status() != 304 { @@ -236,12 +240,12 @@ impl HttpsClient { data: T, ) -> Result { BasicAuth::send(|| async { - self - .client + self.client .post(self.url.join("git-receive-pack").unwrap()) .header(CONTENT_TYPE, "application/x-git-receive-pack-request") .body(data.clone()) - }).await + }) + .await } } /// for fetching diff --git a/libra/src/internal/protocol/lfs_client.rs b/libra/src/internal/protocol/lfs_client.rs index ed4207c5..80c67305 100644 --- a/libra/src/internal/protocol/lfs_client.rs +++ b/libra/src/internal/protocol/lfs_client.rs @@ -3,7 +3,12 @@ use crate::internal::config::Config; use crate::internal::protocol::https_client::BasicAuth; use crate::internal::protocol::ProtocolClient; use crate::utils::{lfs, util}; -use ceres::lfs::lfs_structs::{BatchRequest, ChunkRepresentation, FetchchunkResponse, LockList, LockListQuery, LockRequest, ObjectError, Ref, Representation, RequestVars, UnlockRequest, VerifiableLockList, VerifiableLockRequest}; +use anyhow::anyhow; +use ceres::lfs::lfs_structs::{ + BatchRequest, ChunkRepresentation, FetchchunkResponse, LockList, LockListQuery, LockRequest, + ObjectError, Ref, Representation, RequestVars, UnlockRequest, VerifiableLockList, + VerifiableLockRequest, +}; use futures_util::StreamExt; use mercury::internal::object::types::ObjectType; use mercury::internal::pack::entry::Entry; @@ -12,7 +17,6 @@ use ring::digest::{Context, SHA256}; use serde::{Deserialize, Serialize}; use std::collections::HashSet; use std::path::Path; -use anyhow::anyhow; use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt}; use tokio::sync::OnceCell; use url::Url; @@ -22,16 +26,16 @@ pub struct LFSClient { pub batch_url: Url, pub lfs_url: Url, pub client: Client, - pub bootstrap: Option<(String, u16)> // for p2p: (bootstrap_node, ztm_agent_port) + pub bootstrap: Option<(String, u16)>, // for p2p: (bootstrap_node, ztm_agent_port) } static LFS_CLIENT: OnceCell = OnceCell::const_new(); impl LFSClient { /// Get LFSClient instance /// - DO NOT use `async_static!`: No IDE Code Completion & lagging pub async fn get() -> &'static LFSClient { - LFS_CLIENT.get_or_init(|| async { - LFSClient::new().await - }).await + LFS_CLIENT + .get_or_init(|| async { LFSClient::new().await }) + .await } } @@ -92,7 +96,7 @@ impl LFSClient { /// push LFS objects to remote server pub async fn push_objects<'a, I>(&self, objs: I) -> Result<(), ()> where - I: IntoIterator + I: IntoIterator, { // filter pointer file within blobs let mut lfs_oids = Vec::new(); @@ -123,11 +127,16 @@ 
impl LFSClient { return Ok(()); } - { // verify locks - let (code, locks) = self.verify_locks(VerifiableLockRequest { - refs: Ref { name: command::lfs::current_refspec().await.unwrap() }, - ..Default::default() - }).await; + { + // verify locks + let (code, locks) = self + .verify_locks(VerifiableLockRequest { + refs: Ref { + name: command::lfs::current_refspec().await.unwrap(), + }, + ..Default::default() + }) + .await; if code == StatusCode::FORBIDDEN { eprintln!("fatal: Forbidden: You must have push access to verify locks"); @@ -142,20 +151,28 @@ impl LFSClient { // success tracing::debug!("LFS verify locks response:\n {:?}", locks); let oids: HashSet = lfs_oids.iter().map(|(oid, _)| oid.clone()).collect(); - let ours = locks.ours.iter().filter(|l| { - let oid = lfs::get_oid_by_path(&l.path); - oids.contains(&oid) - }).collect::>(); + let ours = locks + .ours + .iter() + .filter(|l| { + let oid = lfs::get_oid_by_path(&l.path); + oids.contains(&oid) + }) + .collect::>(); if !ours.is_empty() { println!("The following files are locked by you, consider unlocking them:"); for lock in ours { println!(" - {}", lock.path); } } - let theirs = locks.theirs.iter().filter(|l| { - let oid = lfs::get_oid_by_path(&l.path); - oids.contains(&oid) - }).collect::>(); + let theirs = locks + .theirs + .iter() + .filter(|l| { + let oid = lfs::get_oid_by_path(&l.path); + oids.contains(&oid) + }) + .collect::>(); if !theirs.is_empty() { eprintln!("Locking failed: The following files are locked by another user:"); for lock in theirs { @@ -178,10 +195,15 @@ impl LFSClient { .post(self.batch_url.clone()) .json(&batch_request) .headers(lfs::LFS_HEADERS.clone()) - }).await.unwrap(); + }) + .await + .unwrap(); let resp = response.json::().await.unwrap(); - tracing::debug!("LFS push response:\n {:#?}", serde_json::to_value(&resp).unwrap()); + tracing::debug!( + "LFS push response:\n {:#?}", + serde_json::to_value(&resp).unwrap() + ); // TODO: parallel upload for obj in resp.objects { @@ -210,14 +232,20 @@ impl LFSClient { .post(self.batch_url.clone()) .json(&batch_request) .headers(lfs::LFS_HEADERS.clone()) - }).await.unwrap(); + }) + .await + .unwrap(); let resp = response.json::().await.unwrap(); tracing::debug!( "LFS push response:\n {:#?}", serde_json::to_value(&resp).unwrap() ); - assert_eq!(resp.objects.len(), 1, "fatal: LFS push failed. No object found."); + assert_eq!( + resp.objects.len(), + 1, + "fatal: LFS push failed. No object found." + ); // self.upload_object(resp.objects).await?; let obj = resp.objects.into_iter().next().unwrap(); @@ -229,7 +257,10 @@ impl LFSClient { /// upload (PUT) one LFS file to remote server async fn upload_object(&self, object: Representation, file: &Path) -> Result<(), ()> { if let Some(err) = object.error { - eprintln!("fatal: LFS upload failed. Code: {}, Message: {}", err.code, err.message); + eprintln!( + "fatal: LFS upload failed. 
Code: {}, Message: {}", + err.code, err.message + ); return Err(()); } @@ -250,7 +281,8 @@ impl LFSClient { } let content = tokio::fs::File::open(file).await.unwrap(); - let progress_bar = util::default_progress_bar(content.metadata().await.unwrap().len()); + let progress_bar = + util::default_progress_bar(content.metadata().await.unwrap().len()); let stream = tokio_util::io::ReaderStream::new(content); let progress_stream = stream.map(move |chunk| { @@ -260,10 +292,16 @@ impl LFSClient { chunk }); request.body(reqwest::Body::wrap_stream(progress_stream)) - }).await.unwrap(); + }) + .await + .unwrap(); if !resp.status().is_success() { - eprintln!("fatal: LFS upload failed. Status: {}, Message: {}", resp.status(), resp.text().await.unwrap()); + eprintln!( + "fatal: LFS upload failed. Status: {}, Message: {}", + resp.status(), + resp.text().await.unwrap() + ); return Err(()); } println!("Uploaded."); @@ -295,9 +333,9 @@ impl LFSClient { path: impl AsRef, mut reporter: Option<( &mut (dyn FnMut(f64) -> anyhow::Result<()> + Send), // progress callback - f64 // step - )>) -> anyhow::Result<()> - { + f64, // step + )>, + ) -> anyhow::Result<()> { let batch_request = BatchRequest { operation: "download".to_string(), transfers: vec![lfs::LFS_TRANSFER_API.to_string()], @@ -314,10 +352,14 @@ impl LFSClient { .post(self.batch_url.clone()) .json(&batch_request) .headers(lfs::LFS_HEADERS.clone()) - }).await?; + }) + .await?; let text = response.text().await?; - tracing::debug!("LFS download response:\n {:#?}", serde_json::from_str::(&text)?); + tracing::debug!( + "LFS download response:\n {:#?}", + serde_json::from_str::(&text)? + ); let resp = serde_json::from_str::(&text)?; let obj = resp.objects.first().expect("No object"); // Only get first if obj.error.is_some() || obj.actions.is_none() { @@ -326,7 +368,10 @@ impl LFSClient { message: "Unknown error".to_string(), }; let err = obj.error.as_ref().unwrap_or(&unknown_err); - eprintln!("fatal: LFS download failed (BatchRequest). Code: {}, Message: {}", err.code, err.message); + eprintln!( + "fatal: LFS download failed (BatchRequest). Code: {}, Message: {}", + err.code, err.message + ); return Err(anyhow!("LFS download failed.")); } @@ -341,7 +386,7 @@ impl LFSClient { chunk_size = chunks.first().map(|c| c.size); tracing::info!("LFS Chunk API supported."); chunks.into_iter().map(|c| c.link).collect() - }, + } Err(_) => { chunk_size = Some(size as i64); vec![link.clone()] @@ -350,7 +395,7 @@ impl LFSClient { let mut checksum = Context::new(&SHA256); let mut got_parts = 0; - let mut file = if links.len() <= 1 || lfs::parse_pointer_file(&path).is_ok() { + let mut file = if links.len() <= 1 || lfs::parse_pointer_file(&path).is_ok() { // pointer file or Not Chunks, truncate tokio::fs::File::create(path).await? 
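The else branch just below resumes an interrupted chunked download, and its restart point is derived purely from what is already on disk: the number of complete chunks is the file length divided by the chunk size, the file is truncated to that boundary, and the checksum is replayed before seeking to the end. A self-contained sketch of that arithmetic (chunk and file sizes are illustrative):

fn resume_point(file_len: u64, chunk_size: u64) -> (u64, u64) {
    let got_parts = file_len / chunk_size; // whole chunks already on disk
    let file_offset = got_parts * chunk_size; // truncate here, then seek to the end
    (got_parts, file_offset)
}

fn main() {
    // e.g. 150 MiB on disk with 64 MiB chunks: 2 chunks done, resume at 128 MiB
    let (parts, offset) = resume_point(150 * 1024 * 1024, 64 * 1024 * 1024);
    assert_eq!((parts, offset), (2, 128 * 1024 * 1024));
}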
} else { @@ -360,7 +405,8 @@ impl LFSClient { .read(true) .create(true) .truncate(false) - .open(&path).await?; + .open(&path) + .await?; let file_len = file.metadata().await?.len(); if file_len > size { println!("Local file size is larger than remote, truncate to 0."); @@ -370,7 +416,11 @@ impl LFSClient { let chunk_size = chunk_size.unwrap() as u64; got_parts = file_len / chunk_size; let file_offset = got_parts * chunk_size; - println!("Resume download from offset: {}, part: {}", file_offset, got_parts + 1); + println!( + "Resume download from offset: {}, part: {}", + file_offset, + got_parts + 1 + ); file.set_len(file_offset).await?; // truncate Self::update_file_checksum(&mut file, &mut checksum).await; // resume checksum file.seek(tokio::io::SeekFrom::End(0)).await?; @@ -395,20 +445,27 @@ impl LFSClient { request = request.header(k, v); } request - }).await?; + }) + .await?; if !response.status().is_success() { - eprintln!("fatal: LFS download failed. Status: {}, Message: {}", response.status(), response.text().await?); + eprintln!( + "fatal: LFS download failed. Status: {}, Message: {}", + response.status(), + response.text().await? + ); return Err(anyhow!("LFS download failed.")); } let cur_chunk_size = if (got_parts as usize) < parts { chunk_size.unwrap() as u64 - } else { // last part + } else { + // last part size - (parts as u64 - 1) * chunk_size.unwrap() as u64 }; let pb = util::default_progress_bar(cur_chunk_size); let mut stream = response.bytes_stream(); - while let Some(chunk) = stream.next().await { // TODO: progress bar TODO: multi-thread or async + while let Some(chunk) = stream.next().await { + // TODO: progress bar TODO: multi-thread or async let chunk = chunk?; file.write_all(&chunk).await?; checksum.update(&chunk); @@ -421,7 +478,8 @@ impl LFSClient { last_progress = progress; report_fn(progress)?; } - } else { // mutually exclusive with reporter + } else { + // mutually exclusive with reporter pb.inc(chunk.len() as u64); } } @@ -447,13 +505,20 @@ impl LFSClient { let path = url.path().trim_end_matches('/'); url.set_path(&(path.to_owned() + "/chunks")); // reserve query params (for GitHub link) - let resp = BasicAuth::send(|| async { self.client.get(url.clone()) }).await.unwrap(); + let resp = BasicAuth::send(|| async { self.client.get(url.clone()) }) + .await + .unwrap(); let code = resp.status(); - if code == StatusCode::NOT_FOUND || code == StatusCode::FORBIDDEN { // GitHub maybe return 403 + if code == StatusCode::NOT_FOUND || code == StatusCode::FORBIDDEN { + // GitHub maybe return 403 tracing::info!("Remote LFS Server not support Chunks API, or forbidden."); return Err(()); } else if !code.is_success() { - tracing::debug!("fatal: LFS get chunk hrefs failed. Status: {}, Message: {}", code, resp.text().await.unwrap()); + tracing::debug!( + "fatal: LFS get chunk hrefs failed. 
Status: {}, Message: {}", + code, + resp.text().await.unwrap() + ); return Err(()); } let mut res = resp.json::().await.unwrap(); @@ -463,8 +528,8 @@ impl LFSClient { } } -#[cfg(feature="p2p")] -impl LFSClient{ +#[cfg(feature = "p2p")] +impl LFSClient { /// download (GET) one LFS file peer-to-peer #[allow(clippy::type_complexity)] pub async fn download_object_p2p( @@ -473,9 +538,9 @@ impl LFSClient{ path: impl AsRef, mut reporter: Option<( &mut (dyn FnMut(f64) -> anyhow::Result<()> + Send), // progress callback - f64 // step - )>) -> anyhow::Result<()> - { + f64, // step + )>, + ) -> anyhow::Result<()> { let (bootstrap_node, ztm_agent_port) = match &self.bootstrap { Some(value) => value, None => return Err(anyhow!("fatal: No bootstrap node set for P2P download.")), @@ -486,18 +551,22 @@ impl LFSClient{ let peer_ports = gemini::lfs::create_lfs_download_tunnel( bootstrap_node.clone(), *ztm_agent_port, - file_uri.to_owned() - ).await.unwrap(); + file_uri.to_owned(), + ) + .await + .unwrap(); if peer_ports.is_empty() { eprintln!("fatal: No peer online, download failed"); return Err(anyhow!("fatal: No peer online.")); } tracing::debug!("P2P download tunnel ports: {:?}", peer_ports); - let lfs_info = match gemini::lfs::get_lfs_chunks_info(bootstrap_node.clone(), hash.clone()).await { // auth? - Some(chunks) => chunks, - None => return Err(anyhow!("fatal: LFS Chunk API failed.")) - }; + let lfs_info = + match gemini::lfs::get_lfs_chunks_info(bootstrap_node.clone(), hash.clone()).await { + // auth? + Some(chunks) => chunks, + None => return Err(anyhow!("fatal: LFS Chunk API failed.")), + }; let mut chunks = lfs_info.chunks; if chunks.is_empty() { eprintln!("fatal: LFS Chunk API failed. No chunks found."); @@ -513,26 +582,41 @@ impl LFSClient{ tokio::fs::create_dir_all(parent).await?; } let mut file = tokio::fs::File::create(path).await?; - for (i, chunk) in chunks.iter().enumerate() { // TODO parallel download + for (i, chunk) in chunks.iter().enumerate() { + // TODO parallel download println!("- part: {}/{}", i + 1, chunks.len()); let mut retry = 0; - let data = loop { // retry + let data = loop { + // retry let mut downloaded = i * chunk_size; // TODO support resume let mut last_progress = downloaded as f64 / lfs_info.size as f64 * 100.0; let pb = util::default_progress_bar(chunk.size as u64); - let url = format!("http://localhost:{}/objects/{}/{}", peer_ports[(i + retry) % peer_ports.len()], hash, chunk.sub_oid); - let data = self.download_chunk(&url, &chunk.sub_oid, chunk.size as usize, chunk.offset as usize, |size| { - if let Some((ref mut report_fn, step)) = reporter { - downloaded += size; - let progress = (downloaded as f64 / lfs_info.size as f64) * 100.0; - if progress >= last_progress + step { - last_progress = progress; - report_fn(progress).unwrap(); - } - } else { - pb.inc(size as u64); - } - }).await; + let url = format!( + "http://localhost:{}/objects/{}/{}", + peer_ports[(i + retry) % peer_ports.len()], + hash, + chunk.sub_oid + ); + let data = self + .download_chunk( + &url, + &chunk.sub_oid, + chunk.size as usize, + chunk.offset as usize, + |size| { + if let Some((ref mut report_fn, step)) = reporter { + downloaded += size; + let progress = (downloaded as f64 / lfs_info.size as f64) * 100.0; + if progress >= last_progress + step { + last_progress = progress; + report_fn(progress).unwrap(); + } + } else { + pb.inc(size as u64); + } + }, + ) + .await; pb.finish_and_clear(); match data { Ok(data) => break data, @@ -563,14 +647,26 @@ impl LFSClient{ } } - async fn 
download_chunk(&self, url: &str, hash: &str, size: usize, offset: usize, mut callback: impl FnMut(usize)) -> anyhow::Result> { + async fn download_chunk( + &self, + url: &str, + hash: &str, + size: usize, + offset: usize, + mut callback: impl FnMut(usize), + ) -> anyhow::Result> { let response = BasicAuth::send(|| async { self.client .get(url) .query(&[("offset", offset), ("size", size)]) - }).await?; + }) + .await?; if !response.status().is_success() { - eprintln!("fatal: LFS download failed. Status: {}, Message: {}", response.status(), response.text().await?); + eprintln!( + "fatal: LFS download failed. Status: {}, Message: {}", + response.status(), + response.text().await? + ); return Err(anyhow!("LFS download failed.")); } let mut buffer = Vec::with_capacity(size); @@ -586,7 +682,10 @@ impl LFSClient{ } let checksum = hex::encode(checksum.finish().as_ref()); if checksum != hash { - eprintln!("fatal: chunk download failed. Chunk checksum mismatch: {} != {}", checksum, hash); + eprintln!( + "fatal: chunk download failed. Chunk checksum mismatch: {} != {}", + checksum, hash + ); return Err(anyhow!("Chunk checksum mismatch.")); } Ok(buffer) @@ -602,11 +701,17 @@ impl LFSClient { ("path", query.path), ("limit", query.limit), ("cursor", query.cursor), - ("refspec", query.refspec) + ("refspec", query.refspec), ]; - let response = BasicAuth::send(|| async { self.client.get(url.clone()).query(&query) }).await.unwrap(); + let response = BasicAuth::send(|| async { self.client.get(url.clone()).query(&query) }) + .await + .unwrap(); if !response.status().is_success() { - eprintln!("fatal: LFS get locks failed. Status: {}, Message: {}", response.status(), response.text().await.unwrap()); + eprintln!( + "fatal: LFS get locks failed. Status: {}, Message: {}", + response.status(), + response.text().await.unwrap() + ); return LockList { locks: Vec::new(), next_cursor: String::default(), @@ -623,12 +728,20 @@ impl LFSClient { let resp = BasicAuth::send(|| async { self.client.post(url.clone()).json(&LockRequest { path: path.clone(), - refs: Ref { name: refspec.clone() }, + refs: Ref { + name: refspec.clone(), + }, }) - }).await.unwrap(); + }) + .await + .unwrap(); let code = resp.status(); if !resp.status().is_success() && code != StatusCode::FORBIDDEN { - eprintln!("fatal: LFS lock failed. Status: {}, Message: {}", code, resp.text().await.unwrap()); + eprintln!( + "fatal: LFS lock failed. Status: {}, Message: {}", + code, + resp.text().await.unwrap() + ); } code } @@ -638,32 +751,50 @@ impl LFSClient { let resp = BasicAuth::send(|| async { self.client.post(url.clone()).json(&UnlockRequest { force: Some(force), - refs: Ref { name: refspec.clone() }, + refs: Ref { + name: refspec.clone(), + }, }) - }).await.unwrap(); + }) + .await + .unwrap(); let code = resp.status(); if !resp.status().is_success() && code != StatusCode::FORBIDDEN { - eprintln!("fatal: LFS unlock failed. Status: {}, Message: {}", code, resp.text().await.unwrap()); + eprintln!( + "fatal: LFS unlock failed. 
Status: {}, Message: {}", + code, + resp.text().await.unwrap() + ); } code } /// List Locks for Verification - pub async fn verify_locks(&self, query: VerifiableLockRequest) - -> (StatusCode, VerifiableLockList) - { + pub async fn verify_locks( + &self, + query: VerifiableLockRequest, + ) -> (StatusCode, VerifiableLockList) { let url = self.lfs_url.join("locks/verify").unwrap(); - let resp = BasicAuth::send(|| async {self.client.post(url.clone()).json(&query)}).await.unwrap(); + let resp = BasicAuth::send(|| async { self.client.post(url.clone()).json(&query) }) + .await + .unwrap(); let code = resp.status(); // By default, an LFS server that doesn't implement any locking endpoints should return 404. // This response will not halt any Git pushes. if !code.is_success() && code != StatusCode::NOT_FOUND && code != StatusCode::FORBIDDEN { - eprintln!("fatal: LFS verify locks failed. Status: {}, Message: {}", code, resp.text().await.unwrap()); - return (code, VerifiableLockList { - ours: Vec::new(), - theirs: Vec::new(), - next_cursor: String::default(), - }); + eprintln!( + "fatal: LFS verify locks failed. Status: {}, Message: {}", + code, + resp.text().await.unwrap() + ); + return ( + code, + VerifiableLockList { + ours: Vec::new(), + theirs: Vec::new(), + next_cursor: String::default(), + }, + ); } (code, resp.json::().await.unwrap()) } @@ -698,8 +829,11 @@ mod tests { }], hash_algo: lfs::LFS_HASH_ALGO.to_string(), }; - let lfs_client = LFSClient::from_url(&Url::parse("https://github.com/web3infra-foundation/mega.git").unwrap()); - let request = lfs_client.client + let lfs_client = LFSClient::from_url( + &Url::parse("https://github.com/web3infra-foundation/mega.git").unwrap(), + ); + let request = lfs_client + .client .post(lfs_client.batch_url.clone()) .json(&batch_request) .headers(lfs::LFS_HEADERS.clone()); @@ -725,7 +859,7 @@ mod tests { } #[tokio::test] - #[cfg(feature="p2p")] + #[cfg(feature = "p2p")] #[ignore] // need to start local mega server async fn test_download_chunk() { let client = LFSClient::from_url(&Url::parse("http://localhost:8000").unwrap()); diff --git a/libra/src/internal/protocol/mod.rs b/libra/src/internal/protocol/mod.rs index c6452936..0435c806 100644 --- a/libra/src/internal/protocol/mod.rs +++ b/libra/src/internal/protocol/mod.rs @@ -10,5 +10,4 @@ pub trait ProtocolClient { } #[cfg(test)] -mod test { -} +mod test {} diff --git a/libra/src/lib.rs b/libra/src/lib.rs index 65937a94..11e71592 100644 --- a/libra/src/lib.rs +++ b/libra/src/lib.rs @@ -1,9 +1,9 @@ use mercury::errors::GitError; +pub mod cli; mod command; pub mod internal; pub mod utils; -pub mod cli; /// Execute the Libra command in `sync` way. 
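The body of this synchronous wrapper sits outside the hunk; a plausible shape, assuming it simply blocks on exec_async via a Tokio runtime (a sketch under that assumption, not the actual implementation; exec_async and GitError come from this file):

pub fn exec(args: Vec<&str>) -> Result<(), GitError> {
    // Assumed: build a runtime and block the current thread on the async entry point.
    tokio::runtime::Builder::new_multi_thread()
        .enable_all()
        .build()
        .unwrap()
        .block_on(exec_async(args))
}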
/// ### Caution @@ -25,8 +25,8 @@ pub async fn exec_async(mut args: Vec<&str>) -> Result<(), GitError> { #[cfg(test)] mod tests { - use tempfile::TempDir; use super::*; + use tempfile::TempDir; #[test] fn test_libra_init() { @@ -37,9 +37,9 @@ mod tests { #[tokio::test] async fn test_lfs_client() { - use url::Url; use crate::internal::protocol::lfs_client::LFSClient; use crate::internal::protocol::ProtocolClient; + use url::Url; let client = LFSClient::from_url(&Url::parse("https://git.gitmono.org").unwrap()); println!("{:?}", client); @@ -47,14 +47,14 @@ mod tests { println!("progress: {:.2}%", progress); Ok(()) }; - client.download_object( - "a744b4beab939d899e22c8a070b7041a275582fb942483c9436d455173c7e23d", - 338607424, - "/home/bean/projects/tmp/Qwen2.5-0.5B-Instruct-Q2_K.gguf", - Some(( - &mut report_fn, - 0.1 - )) - ).await.expect("Failed to download object"); + client + .download_object( + "a744b4beab939d899e22c8a070b7041a275582fb942483c9436d455173c7e23d", + 338607424, + "/home/bean/projects/tmp/Qwen2.5-0.5B-Instruct-Q2_K.gguf", + Some((&mut report_fn, 0.1)), + ) + .await + .expect("Failed to download object"); } -} \ No newline at end of file +} diff --git a/libra/src/main.rs b/libra/src/main.rs index 7b000adb..046a7610 100644 --- a/libra/src/main.rs +++ b/libra/src/main.rs @@ -23,4 +23,4 @@ fn main() { } } } -} \ No newline at end of file +} diff --git a/libra/src/utils/client_storage.rs b/libra/src/utils/client_storage.rs index 0f86b0c8..763441ce 100644 --- a/libra/src/utils/client_storage.rs +++ b/libra/src/utils/client_storage.rs @@ -1,22 +1,22 @@ -use std::{fs, io}; use std::collections::HashSet; use std::io::prelude::*; use std::path::{Path, PathBuf}; use std::str::FromStr; use std::sync::{Arc, Mutex}; +use std::{fs, io}; use byteorder::{BigEndian, ReadBytesExt}; -use flate2::Compression; use flate2::read::ZlibDecoder; use flate2::write::ZlibEncoder; +use flate2::Compression; use lru_mem::LruCache; -use once_cell::sync::Lazy; -use mercury::internal::pack::cache_object::CacheObject; -use mercury::internal::pack::Pack; use mercury::errors::GitError; use mercury::hash::SHA1; use mercury::internal::object::types::ObjectType; +use mercury::internal::pack::cache_object::CacheObject; +use mercury::internal::pack::Pack; use mercury::utils::read_sha1; +use once_cell::sync::Lazy; use crate::command; static PACK_OBJ_CACHE: Lazy>> = Lazy::new(|| { @@ -89,7 +89,8 @@ impl ClientStorage { let paths = fs::read_dir(&self.base_path).unwrap(); for path in paths { let path = path.unwrap().path(); - if path.is_dir() && path.file_name().unwrap().len() == 2 { // not very elegant + if path.is_dir() && path.file_name().unwrap().len() == 2 { + // not very elegant let sub_paths = fs::read_dir(&path).unwrap(); for sub_path in sub_paths { let sub_path = sub_path.unwrap().path(); @@ -136,11 +137,12 @@ impl ClientStorage { } fn parse_header(data: &[u8]) -> (String, usize, usize) { - let end_of_header = data.iter() + let end_of_header = data + .iter() .position(|&b| b == b'\0') .expect("Invalid object: no header terminator"); - let header_str = std::str::from_utf8(&data[..end_of_header]) - .expect("Invalid UTF-8 in header"); + let header_str = + std::str::from_utf8(&data[..end_of_header]).expect("Invalid UTF-8 in header"); let mut parts = header_str.splitn(2, ' '); let obj_type = parts.next().expect("No object type in header").to_string(); @@ -175,7 +177,12 @@ impl ClientStorage { } /// Save content to `objects` - pub fn put(&self, obj_id: &SHA1, content: &[u8], obj_type: ObjectType) -> Result { + pub fn 
put( + &self, + obj_id: &SHA1, + content: &[u8], + obj_type: ObjectType, + ) -> Result { let path = self.get_obj_path(obj_id); let dir = path.parent().unwrap(); fs::create_dir_all(dir)?; @@ -224,7 +231,8 @@ impl ClientStorage { for pack in packs { let idx = pack.with_extension("idx"); if !idx.exists() { - command::index_pack::build_index_v1(pack.to_str().unwrap(), idx.to_str().unwrap()).unwrap(); + command::index_pack::build_index_v1(pack.to_str().unwrap(), idx.to_str().unwrap()) + .unwrap(); } idxs.push(idx); } @@ -333,7 +341,7 @@ impl ClientStorage { let base_obj = Self::read_pack_obj(pack_file, base_offset as u64)?; let base_obj = Arc::new(base_obj); Pack::rebuild_delta(obj, base_obj) // new obj - }, + } ObjectType::HashDelta => { let base_hash = obj.hash_delta().unwrap(); let idx_file = pack_file.with_extension("idx"); @@ -341,11 +349,16 @@ impl ClientStorage { let base_obj = Self::read_pack_obj(pack_file, base_offset)?; let base_obj = Arc::new(base_obj); Pack::rebuild_delta(obj, base_obj) // new obj - }, + } _ => obj, }; // write cache - if PACK_OBJ_CACHE.lock().unwrap().insert(cache_key, full_obj.clone()).is_err() { + if PACK_OBJ_CACHE + .lock() + .unwrap() + .insert(cache_key, full_obj.clone()) + .is_err() + { eprintln!("Warn: EntryTooLarge"); } Ok(full_obj) @@ -354,12 +367,12 @@ impl ClientStorage { #[cfg(test)] mod tests { - use std::{env, fs}; use std::path::PathBuf; + use std::{env, fs}; use mercury::internal::object::blob::Blob; - use mercury::internal::object::ObjectTrait; use mercury::internal::object::types::ObjectType; + use mercury::internal::object::ObjectTrait; use crate::utils::{test, util}; @@ -374,7 +387,9 @@ mod tests { source.push("tests/objects"); let client_storage = ClientStorage::init(source.clone()); - assert!(client_storage.put(&blob.id, &blob.data, blob.get_type()).is_ok()); + assert!(client_storage + .put(&blob.id, &blob.data, blob.get_type()) + .is_ok()); assert!(client_storage.exist(&blob.id)); let data = client_storage.get(&blob.id).unwrap(); @@ -390,7 +405,9 @@ mod tests { source.push("tests/objects"); let client_storage = ClientStorage::init(source.clone()); - assert!(client_storage.put(&blob.id, &blob.data, blob.get_type()).is_ok()); + assert!(client_storage + .put(&blob.id, &blob.data, blob.get_type()) + .is_ok()); let objs = client_storage.search("5dd01c177"); @@ -399,7 +416,9 @@ mod tests { #[test] fn test_list_objs() { - let source = PathBuf::from(test::TEST_DIR).join(util::ROOT_DIR).join("objects"); + let source = PathBuf::from(test::TEST_DIR) + .join(util::ROOT_DIR) + .join("objects"); if !source.exists() { return; } @@ -418,7 +437,9 @@ mod tests { source.push("tests/objects"); let client_storage = ClientStorage::init(source.clone()); - assert!(client_storage.put(&blob.id, &blob.data, blob.get_type()).is_ok()); + assert!(client_storage + .put(&blob.id, &blob.data, blob.get_type()) + .is_ok()); let obj_type = client_storage.get_object_type(&blob.id).unwrap(); assert_eq!(obj_type, ObjectType::Blob); diff --git a/libra/src/utils/mod.rs b/libra/src/utils/mod.rs index 21d01c6b..6d312e53 100644 --- a/libra/src/utils/mod.rs +++ b/libra/src/utils/mod.rs @@ -1,7 +1,7 @@ -pub(crate) mod util; -pub(crate) mod test; -pub(crate) mod path; +pub(crate) mod client_storage; +pub mod lfs; pub(crate) mod object_ext; +pub(crate) mod path; pub(crate) mod path_ext; -pub(crate) mod client_storage; -pub mod lfs; \ No newline at end of file +pub(crate) mod test; +pub(crate) mod util; diff --git a/libra/src/utils/object_ext.rs b/libra/src/utils/object_ext.rs index 
53e4576b..9836ffc9 100644 --- a/libra/src/utils/object_ext.rs +++ b/libra/src/utils/object_ext.rs @@ -1,12 +1,12 @@ -use std::fs; -use std::io::{BufReader, Read}; -use std::path::{Path, PathBuf}; use colored::Colorize; use mercury::hash::SHA1; use mercury::internal::object::blob::Blob; use mercury::internal::object::commit::Commit; -use mercury::internal::object::ObjectTrait; use mercury::internal::object::tree::{Tree, TreeItemMode}; +use mercury::internal::object::ObjectTrait; +use std::fs; +use std::io::{BufReader, Read}; +use std::path::{Path, PathBuf}; use crate::utils::{lfs, util}; @@ -37,8 +37,10 @@ impl TreeExt for Tree { fn get_plain_items(&self) -> Vec<(PathBuf, SHA1)> { let mut items = Vec::new(); for item in self.tree_items.iter() { - if item.mode != TreeItemMode::Tree { // Not Tree, maybe Blob, link, etc. - if item.mode == TreeItemMode::Commit { // submodule + if item.mode != TreeItemMode::Tree { + // Not Tree, maybe Blob, link, etc. + if item.mode == TreeItemMode::Commit { + // submodule eprintln!("{}", "Warning: Submodule is not supported yet".red()); } items.push((PathBuf::from(item.name.clone()), item.id)); @@ -102,4 +104,4 @@ impl BlobExt for Blob { } self.id } -} \ No newline at end of file +} diff --git a/libra/src/utils/path.rs b/libra/src/utils/path.rs index 812f39c9..ef84f142 100644 --- a/libra/src/utils/path.rs +++ b/libra/src/utils/path.rs @@ -1,5 +1,5 @@ -use std::path::PathBuf; use crate::utils::util; +use std::path::PathBuf; pub fn index() -> PathBuf { util::storage_path().join("index") @@ -15,4 +15,4 @@ pub fn database() -> PathBuf { pub fn attributes() -> PathBuf { util::working_dir().join(util::ATTRIBUTES) -} \ No newline at end of file +} diff --git a/libra/src/utils/path_ext.rs b/libra/src/utils/path_ext.rs index a987b271..47362141 100644 --- a/libra/src/utils/path_ext.rs +++ b/libra/src/utils/path_ext.rs @@ -1,5 +1,5 @@ -use std::path::{Path, PathBuf}; use crate::utils::util; +use std::path::{Path, PathBuf}; pub trait PathExt { fn to_workdir(&self) -> PathBuf; @@ -10,9 +10,9 @@ pub trait PathExt { #[allow(dead_code)] fn sub_of(&self, parent: &Path) -> bool; fn sub_of_paths<P, U>(&self, paths: U) -> bool - where - P: AsRef<Path>, - U: IntoIterator<Item = P>; + where + P: AsRef<Path>, + U: IntoIterator<Item = P>; } impl PathExt for PathBuf { @@ -42,10 +42,11 @@ impl PathExt for PathBuf { } fn sub_of_paths<P, U>(&self, paths: U) -> bool - where - P: AsRef<Path>, - U: IntoIterator<Item = P> - { // TODO: better to change all these interfaces to take workdir paths + where + P: AsRef<Path>, + U: IntoIterator<Item = P>, + { + // TODO: better to change all these interfaces to take workdir paths util::is_sub_of_paths(self, paths) } -} \ No newline at end of file +} diff --git a/libra/src/utils/test.rs b/libra/src/utils/test.rs index 7332cdeb..e8d0b2ae 100644 --- a/libra/src/utils/test.rs +++ b/libra/src/utils/test.rs @@ -8,8 +8,8 @@ use std::io::Write; use std::path::Path; use std::{env, fs, path::PathBuf}; -use crate::utils::util; use crate::command; +use crate::command; +use crate::utils::util; pub const TEST_DIR: &str = "libra_test_repo"; @@ -40,7 +40,6 @@ fn find_cargo_dir() -> PathBuf { } } - /// Sets up the environment for testing.
/// /// This function performs the following steps: @@ -81,7 +80,7 @@ pub fn setup_clean_testing_env() { let cur_path = util::cur_dir(); // Append the Libra root directory to the current directory - let root_path=cur_path.join(util::ROOT_DIR); + let root_path = cur_path.join(util::ROOT_DIR); // If the Libra root directory exists, remove it if root_path.exists() { @@ -106,7 +105,12 @@ pub fn setup_clean_testing_env() { /// switch to test dir and create a new .libra pub async fn setup_with_new_libra() { setup_clean_testing_env(); - let args = command::init::InitArgs { bare: false, initial_branch: None, repo_directory: util::cur_dir().to_str().unwrap().to_string(),quiet:false }; + let args = command::init::InitArgs { + bare: false, + initial_branch: None, + repo_directory: util::cur_dir().to_str().unwrap().to_string(), + quiet: false, + }; command::init::init(args).await.unwrap(); } diff --git a/libra/src/utils/util.rs b/libra/src/utils/util.rs index 91443181..9576f60d 100644 --- a/libra/src/utils/util.rs +++ b/libra/src/utils/util.rs @@ -1,11 +1,11 @@ +use indicatif::{ProgressBar, ProgressStyle}; +use mercury::hash::SHA1; +use mercury::internal::object::types::ObjectType; +use path_absolutize::*; use std::collections::HashSet; use std::io::Write; use std::path::{Path, PathBuf}; -use path_absolutize::*; use std::{env, fs, io}; -use indicatif::{ProgressBar, ProgressStyle}; -use mercury::hash::SHA1; -use mercury::internal::object::types::ObjectType; use crate::utils::client_storage::ClientStorage; use crate::utils::path; @@ -374,7 +374,10 @@ mod test { #[tokio::test] async fn test_to_workdir_path() { test::setup_with_new_libra().await; - assert_eq!(to_workdir_path("./src/abc/../main.rs"), PathBuf::from("src/main.rs")); + assert_eq!( + to_workdir_path("./src/abc/../main.rs"), + PathBuf::from("src/main.rs") + ); assert_eq!(to_workdir_path("."), PathBuf::from(".")); assert_eq!(to_workdir_path("./"), PathBuf::from(".")); assert_eq!(to_workdir_path(""), PathBuf::from(".")); diff --git a/mega/src/build.rs b/mega/src/build.rs index 27ec449e..577494be 100644 --- a/mega/src/build.rs +++ b/mega/src/build.rs @@ -1,7 +1,7 @@ //! Using shadow_rs to build-time information stored in Mega. -//! -//! -//! +//! +//! +//! fn main() -> shadow_rs::SdResult<()> { #[cfg(target_os = "linux")] @@ -9,4 +9,4 @@ fn main() -> shadow_rs::SdResult<()> { #[cfg(target_os = "macos")] println!("cargo:rustc-link-arg=-Wl,-rpath,@executable_path"); shadow_rs::new() -} \ No newline at end of file +} diff --git a/mega/src/cli.rs b/mega/src/cli.rs index e8f42588..4e0482d5 100644 --- a/mega/src/cli.rs +++ b/mega/src/cli.rs @@ -1,8 +1,8 @@ //! Cli module is responsible for parsing command line arguments and executing the appropriate. +use clap::{Arg, ArgMatches, Command}; use std::env; use std::path::PathBuf; -use clap::{Arg, ArgMatches, Command}; use tracing_subscriber::fmt::writer::MakeWriterExt; use common::{ @@ -22,8 +22,11 @@ use crate::commands::{builtin, builtin_exec}; /// If there is an error during the parsing, it will return an error. 
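To see what the no_binary_name(true) branch of parse below buys us, here is a minimal, self-contained clap setup; the command name and arguments are hypothetical, not Mega's real CLI definition:

use clap::{Arg, Command};

fn demo_cli() -> Command {
    Command::new("demo")
        .arg(Arg::new("config").short('c').value_name("FILE"))
        .subcommand(Command::new("service"))
}

fn main() {
    // With no_binary_name(true), argv is parsed without a leading program name,
    // matching the Some(args) branch of parse().
    let matches = demo_cli()
        .no_binary_name(true)
        .try_get_matches_from(["-c", "config.toml", "service"])
        .unwrap_or_else(|e| e.exit());
    assert_eq!(
        matches.get_one::<String>("config").map(String::as_str),
        Some("config.toml")
    );
}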
pub fn parse(args: Option>) -> MegaResult { let matches = match args { - Some(args) => cli().no_binary_name(true).try_get_matches_from(args).unwrap_or_else(|e| e.exit()), - None => cli().try_get_matches().unwrap_or_else(|e| e.exit()) + Some(args) => cli() + .no_binary_name(true) + .try_get_matches_from(args) + .unwrap_or_else(|e| e.exit()), + None => cli().try_get_matches().unwrap_or_else(|e| e.exit()), }; // Get the current directory @@ -45,7 +48,8 @@ pub fn parse(args: Option>) -> MegaResult { ctrlc::set_handler(move || { tracing::info!("Received Ctrl-C signal, exiting..."); std::process::exit(0); - }).unwrap(); + }) + .unwrap(); let (cmd, subcommand_args) = match matches.subcommand() { Some((cmd, args)) => (cmd, args), diff --git a/mega/src/commands/service/https.rs b/mega/src/commands/service/https.rs index f6709c68..13203baa 100644 --- a/mega/src/commands/service/https.rs +++ b/mega/src/commands/service/https.rs @@ -4,7 +4,6 @@ use common::{config::Config, errors::MegaResult}; use gateway::https_server::{self, HttpsOptions}; use jupiter::context::Context; - pub fn cli() -> Command { HttpsOptions::augment_args_for_update(Command::new("https").about("Start Mega HTTPS server")) } diff --git a/mega/src/lib.rs b/mega/src/lib.rs index d07186e5..663d2188 100644 --- a/mega/src/lib.rs +++ b/mega/src/lib.rs @@ -1,14 +1,15 @@ pub mod cli; mod commands; -#[cfg(test)] -mod tests { - use super::*; +// This test will stuck the whole test process, because it will never exit +// #[cfg(test)] +// mod tests { +// use super::*; - #[test] - fn test_cli() { - let config_path = "config.toml"; - let args = vec!["-c", config_path, "service", "multi", "http"]; - cli::parse(Some(args)).expect("Failed to start http service"); - } -} \ No newline at end of file +// #[test] +// fn test_cli() { +// let config_path = "config.toml"; +// let args = vec!["-c", config_path, "service", "multi", "http"]; +// cli::parse(Some(args)).expect("Failed to start http service"); +// } +// } diff --git a/mega/tests/common/mod.rs b/mega/tests/common/mod.rs index 11bce282..0d1cf7d7 100644 --- a/mega/tests/common/mod.rs +++ b/mega/tests/common/mod.rs @@ -1 +1 @@ -// use `common/mod.rs` rather than `common.rs`, to declare it's not a test file \ No newline at end of file +// use `common/mod.rs` rather than `common.rs`, to declare it's not a test file diff --git a/mega/tests/lfs_test.rs b/mega/tests/lfs_test.rs index e7da558f..b2ae29bb 100644 --- a/mega/tests/lfs_test.rs +++ b/mega/tests/lfs_test.rs @@ -1,16 +1,16 @@ mod common; -/// integration tests for the mega module -use std::process::{Child, Command}; -use std::{env, fs, io, thread}; +use lazy_static::lazy_static; +use rand::Rng; +use serial_test::serial; use std::io::Write; use std::net::TcpStream; use std::path::{Path, PathBuf}; +/// integration tests for the mega module +use std::process::{Child, Command}; use std::time::Duration; -use rand::Rng; -use serial_test::serial; +use std::{env, fs, io, thread}; use tempfile::TempDir; -use lazy_static::lazy_static; const PORT: u16 = 8000; // mega server port const LARGE_FILE_SIZE_MB: usize = 60; @@ -58,7 +58,8 @@ fn run_cmd(program: &str, args: &[&str], stdin: Option<&str>, envs: Option, envs: Option bool { - TcpStream::connect_timeout(&format!("127.0.0.1:{}", port).parse().unwrap(), Duration::from_millis(1000)) - .is_ok() + TcpStream::connect_timeout( + &format!("127.0.0.1:{}", port).parse().unwrap(), + Duration::from_millis(1000), + ) + .is_ok() } /// Run mega server in a new process @@ -180,9 +184,11 @@ fn libra_lfs_push(url: &str) 
-> io::Result<()> { // try lock API run_libra_cmd(&["lfs", "lock", "large_file.bin"]); // push to mega server - run_libra_cmd_with_stdin(&["push", "mega", "master"], - Some("mega\nmega"), // basic auth, can be overridden by env var - Some(vec![("LIBRA_NO_HIDE_PASSWORD", "1")])); + run_libra_cmd_with_stdin( + &["push", "mega", "master"], + Some("mega\nmega"), // basic auth, can be overridden by env var + Some(vec![("LIBRA_NO_HIDE_PASSWORD", "1")]), + ); Ok(()) } @@ -193,11 +199,19 @@ fn git_lfs_clone(url: &str) -> io::Result<()> { env::set_current_dir(temp_dir.path())?; // git clone url // `--config`: temporarily set lfs.url - run_git_cmd(&["clone", url, "--config", &("lfs.url=".to_owned() + &LFS_URL)]); + run_git_cmd(&[ + "clone", + url, + "--config", + &("lfs.url=".to_owned() + &LFS_URL), + ]); let file = Path::new("lfs/large_file.bin"); assert!(file.exists(), "Failed to clone large file"); - assert_eq!(file.metadata()?.len(), LARGE_FILE_SIZE_MB as u64 * 1024 * 1024); + assert_eq!( + file.metadata()?.len(), + LARGE_FILE_SIZE_MB as u64 * 1024 * 1024 + ); Ok(()) } @@ -210,7 +224,10 @@ fn libra_lfs_clone(url: &str) -> io::Result<()> { let file = Path::new("lfs-libra/large_file.bin"); assert!(file.exists(), "Failed to clone large file"); - assert_eq!(file.metadata()?.len(), LARGE_FILE_SIZE_MB as u64 * 1024 * 1024); + assert_eq!( + file.metadata()?.len(), + LARGE_FILE_SIZE_MB as u64 * 1024 * 1024 + ); Ok(()) } @@ -221,7 +238,7 @@ fn lfs_split_with_git() { let mega_dir = TempDir::new().unwrap(); env::set_var("MEGA_authentication__enable_http_auth", "false"); // no need for git - // start mega server in the background (new process) + // start mega server in the background (new process) let mut mega = run_mega_server(mega_dir.path()); let url = &format!("http://localhost:{}/third-part/lfs.git", PORT); @@ -256,4 +273,4 @@ fn lfs_split_with_libra() { mega.kill().expect("Failed to kill mega server"); let _ = mega.wait(); thread::sleep(Duration::from_secs(1)); // wait for the server to stop, to avoid affecting other tests -} \ No newline at end of file +} diff --git a/mercury/delta/src/decode/mod.rs b/mercury/delta/src/decode/mod.rs index 4aa76d4b..4f555cd8 100644 --- a/mercury/delta/src/decode/mod.rs +++ b/mercury/delta/src/decode/mod.rs @@ -1,19 +1,23 @@ -use std::io::{Read, ErrorKind}; -use crate::{utils, errors::GitDeltaError}; +use crate::{errors::GitDeltaError, utils}; +use std::io::{ErrorKind, Read}; const COPY_INSTRUCTION_FLAG: u8 = 1 << 7; const COPY_OFFSET_BYTES: u8 = 4; const COPY_SIZE_BYTES: u8 = 3; const COPY_ZERO_SIZE: usize = 0x10000; -pub fn delta_decode(mut stream : &mut impl Read,base_info: &[u8]) -> Result<Vec<u8>, GitDeltaError>{ +pub fn delta_decode( + mut stream: &mut impl Read, + base_info: &[u8], +) -> Result<Vec<u8>, GitDeltaError> { // Read the base object size & result size let base_size = utils::read_size_encoding(&mut stream).unwrap(); - if base_info.len() != base_size{ - return Err(GitDeltaError::DeltaDecoderError("base object len is not equal".to_owned())); + if base_info.len() != base_size { + return Err(GitDeltaError::DeltaDecoderError( + "base object len is not equal".to_owned(), + )); } - let result_size = utils::read_size_encoding(&mut stream).unwrap(); let mut buffer = Vec::with_capacity(result_size); loop { @@ -24,7 +28,10 @@ pub fn delta_decode(mut stream : &mut impl Read,base_info: &[u8]) -> Result<Vec<u8>, GitDeltaError> { Err(err) => { panic!( "{}", - GitDeltaError::DeltaDecoderError(format!("Wrong instruction in delta :{}", err)) + GitDeltaError::DeltaDecoderError(format!( + "Wrong instruction in delta :{}", + err + )) ); }
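For reference, the copy instruction this decoder handles packs its operands behind the bitmask constants declared above: the low four bits say which offset bytes follow, the next three which size bytes follow, and an encoded size of zero means COPY_ZERO_SIZE (0x10000). A small sketch of that operand decoding; next_byte is a stand-in for the real stream reads:

const COPY_OFFSET_BYTES: u8 = 4;
const COPY_SIZE_BYTES: u8 = 3;
const COPY_ZERO_SIZE: usize = 0x10000;

fn parse_copy_operands(instruction: u8, mut next_byte: impl FnMut() -> u8) -> (usize, usize) {
    let mut offset = 0usize;
    for i in 0..COPY_OFFSET_BYTES {
        if instruction & (1 << i) != 0 {
            offset |= (next_byte() as usize) << (8 * i as usize);
        }
    }
    let mut size = 0usize;
    for i in 0..COPY_SIZE_BYTES {
        if instruction & (1 << (COPY_OFFSET_BYTES + i)) != 0 {
            size |= (next_byte() as usize) << (8 * i as usize);
        }
    }
    if size == 0 {
        size = COPY_ZERO_SIZE; // zero is reserved to mean 0x10000
    }
    (offset, size)
}

fn main() {
    // Bit 7 = copy flag, bit 0 = one offset byte follows, bit 4 = one size byte follows.
    let mut bytes = [0x10u8, 0x20u8].into_iter();
    let (offset, size) = parse_copy_operands(0b1001_0001, || bytes.next().unwrap());
    assert_eq!((offset, size), (0x10, 0x20));
}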
}; @@ -57,13 +64,13 @@ pub fn delta_decode(mut stream : &mut impl Read,base_info: &[u8]) -> Result buffer.extend_from_slice(data), - Err(e) =>return Err(e), + Err(e) => return Err(e), } } } diff --git a/mercury/delta/src/errors.rs b/mercury/delta/src/errors.rs index 0e7e8e27..a4003dcd 100644 --- a/mercury/delta/src/errors.rs +++ b/mercury/delta/src/errors.rs @@ -1,10 +1,10 @@ use thiserror::Error; #[derive(Error, Debug)] -pub enum GitDeltaError{ +pub enum GitDeltaError { #[error("The `{0}` is not a valid git object type.")] DeltaEncoderError(String), #[error("The `{0}` is not a valid git object type.")] DeltaDecoderError(String), -} \ No newline at end of file +} diff --git a/mercury/delta/src/lib.rs b/mercury/delta/src/lib.rs index c1bbdf84..29ca1e60 100644 --- a/mercury/delta/src/lib.rs +++ b/mercury/delta/src/lib.rs @@ -5,21 +5,15 @@ mod encode; mod errors; mod utils; - - pub use decode::delta_decode as decode; -pub fn encode_rate(old_data: & [u8], new_data: & [u8]) -> f64{ +pub fn encode_rate(old_data: &[u8], new_data: &[u8]) -> f64 { let differ = DeltaDiff::new(old_data, new_data); differ.get_ssam_rate() } -pub fn encode(old_data: & [u8], new_data: & [u8]) -> Vec { +pub fn encode(old_data: &[u8], new_data: &[u8]) -> Vec { let differ = DeltaDiff::new(old_data, new_data); differ.encode() } - - #[cfg(test)] -mod tests { - -} +mod tests {} diff --git a/mercury/delta/src/utils.rs b/mercury/delta/src/utils.rs index 4351a712..40ffd01b 100644 --- a/mercury/delta/src/utils.rs +++ b/mercury/delta/src/utils.rs @@ -1,7 +1,5 @@ - use std::io::Read; - const VAR_INT_ENCODING_BITS: u8 = 7; const VAR_INT_CONTINUE_FLAG: u8 = 1 << VAR_INT_ENCODING_BITS; @@ -78,5 +76,3 @@ pub fn read_var_int_byte(stream: &mut R) -> std::io::Result<(u8, bool)> Ok((value, more_bytes)) } - - diff --git a/mercury/src/errors.rs b/mercury/src/errors.rs index 98ecb4cd..068699c0 100644 --- a/mercury/src/errors.rs +++ b/mercury/src/errors.rs @@ -1,4 +1,3 @@ - use std::string::FromUtf8Error; use thiserror::Error; diff --git a/mercury/src/hash.rs b/mercury/src/hash.rs index 5cccb4d1..e691290d 100644 --- a/mercury/src/hash.rs +++ b/mercury/src/hash.rs @@ -133,7 +133,7 @@ impl SHA1 { self.0.to_vec() } - /// [`core::fmt::Display`] is somewhat expensive, + /// [`core::fmt::Display`] is somewhat expensive, /// use this hack to get a string more efficiently pub fn _to_string(&self) -> String { hex::encode(self.0) diff --git a/mercury/src/internal/index.rs b/mercury/src/internal/index.rs index 44e3cb97..04178b51 100644 --- a/mercury/src/internal/index.rs +++ b/mercury/src/internal/index.rs @@ -10,10 +10,10 @@ use std::os::unix::fs::MetadataExt; use std::path::{Path, PathBuf}; use std::time::{SystemTime, UNIX_EPOCH}; -use crate::utils; use crate::errors::GitError; use crate::hash::SHA1; use crate::internal::pack::wrapper::Wrapper; +use crate::utils; #[derive(PartialEq, Eq, Debug, Clone)] pub struct Time { @@ -83,7 +83,7 @@ impl TryInto for &Flags { flags |= 0x4000; // 15 } flags |= (self.stage as u16) << 12; // 13-14 - if self.name_length > 0xFFF { + if self.name_length > 0xFFF { return Err("Name length is too long"); } flags |= self.name_length; // 0-11 @@ -177,8 +177,14 @@ impl IndexEntry { pub fn new_from_blob(name: String, hash: SHA1, size: u32) -> Self { IndexEntry { - ctime: Time { seconds: 0, nanos: 0 }, - mtime: Time { seconds: 0, nanos: 0 }, + ctime: Time { + seconds: 0, + nanos: 0, + }, + mtime: Time { + seconds: 0, + nanos: 0, + }, dev: 0, ino: 0, mode: 0o100644, @@ -255,7 +261,9 @@ impl Index { file.read_exact(&mut name)?; // 
The exact encoding is undefined, but the '.' and '/' characters are encoded in 7-bit ASCII entry.name = String::from_utf8(name)?; // TODO check the encoding - index.entries.insert((entry.name.clone(), entry.flags.stage), entry); + index + .entries + .insert((entry.name.clone(), entry.flags.stage), entry); // 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes // while keeping the name NUL-terminated. // so at least 1 byte nul @@ -348,7 +356,8 @@ impl Index { } pub fn add(&mut self, entry: IndexEntry) { - self.entries.insert((entry.name.clone(), entry.flags.stage), entry); + self.entries + .insert((entry.name.clone(), entry.flags.stage), entry); } pub fn remove(&mut self, name: &str, stage: u8) -> Option<IndexEntry> { @@ -382,9 +391,11 @@ impl Index { let path_abs = workdir.join(file); let meta = path_abs.symlink_metadata().unwrap(); // TODO more fields - let same = entry.ctime == Time::from_system_time(meta.created().unwrap_or(SystemTime::now())) - && entry.mtime == Time::from_system_time(meta.modified().unwrap_or(SystemTime::now())) - && entry.size == meta.len() as u32; + let same = entry.ctime + == Time::from_system_time(meta.created().unwrap_or(SystemTime::now())) + && entry.mtime + == Time::from_system_time(meta.modified().unwrap_or(SystemTime::now())) + && entry.size == meta.len() as u32; !same } else { @@ -404,7 +415,10 @@ impl Index { /// Get all tracked files(stage = 0) pub fn tracked_files(&self) -> Vec<PathBuf> { - self.tracked_entries(0).iter().map(|entry| PathBuf::from(&entry.name)).collect() + self.tracked_entries(0) + .iter() + .map(|entry| PathBuf::from(&entry.name)) + .collect() } /// Judge if the file(s) of `dir` is in the index diff --git a/mercury/src/internal/mod.rs b/mercury/src/internal/mod.rs index b8a4489e..5c42e308 100644 --- a/mercury/src/internal/mod.rs +++ b/mercury/src/internal/mod.rs @@ -1,5 +1,5 @@ +pub mod index; pub mod model; pub mod object; pub mod pack; pub mod zlib; -pub mod index; diff --git a/mercury/src/internal/model/commit.rs b/mercury/src/internal/model/commit.rs index cffbf419..7d131127 100644 --- a/mercury/src/internal/model/commit.rs +++ b/mercury/src/internal/model/commit.rs @@ -5,7 +5,10 @@ use common::utils::generate_id; use crate::{ hash::SHA1, - internal::{object::{commit::Commit, signature::Signature, ObjectTrait}, pack::entry::Entry}, + internal::{ + object::{commit::Commit, signature::Signature, ObjectTrait}, + pack::entry::Entry, + }, }; impl From for Commit { diff --git a/mercury/src/internal/model/tree.rs b/mercury/src/internal/model/tree.rs index 3713b024..4b071f36 100644 --- a/mercury/src/internal/model/tree.rs +++ b/mercury/src/internal/model/tree.rs @@ -19,7 +19,6 @@ impl From<Tree> for mega_tree::Model { } } - impl From<Tree> for git_tree::Model { fn from(value: Tree) -> Self { git_tree::Model { @@ -40,7 +39,6 @@ impl From<mega_tree::Model> for Tree { } } - impl From<git_tree::Model> for Tree { fn from(value: git_tree::Model) -> Self { Tree::from_bytes(&value.sub_trees, SHA1::from_str(&value.tree_id).unwrap()).unwrap() diff --git a/mercury/src/internal/object/commit.rs b/mercury/src/internal/object/commit.rs index b299c6a8..d23fb726 100644 --- a/mercury/src/internal/object/commit.rs +++ b/mercury/src/internal/object/commit.rs @@ -14,14 +14,14 @@ use std::fmt::Display; use std::str::FromStr; -use bstr::ByteSlice; -use serde::Deserialize; -use serde::Serialize; use crate::errors::GitError; use crate::hash::SHA1; use crate::internal::object::signature::Signature; use crate::internal::object::ObjectTrait; use crate::internal::object::ObjectType; +use bstr::ByteSlice; +use
serde::Deserialize; +use serde::Serialize; /// The `Commit` struct is used to represent a commit object. /// diff --git a/mercury/src/internal/object/signature.rs b/mercury/src/internal/object/signature.rs index c7ba8651..de3950cc 100644 --- a/mercury/src/internal/object/signature.rs +++ b/mercury/src/internal/object/signature.rs @@ -178,33 +178,33 @@ impl Signature { Ok(sign) } -/// Represents a signature with author, email, timestamp, and timezone information. -pub fn new(sign_type: SignatureType, author: String, email: String) -> Signature { - // Get the current local time (with timezone) - let local_time = chrono::Local::now(); - - // Get the offset from UTC in minutes (local time - UTC time) - let offset = local_time.offset().fix().local_minus_utc(); - - // Calculate the hours part of the offset (divide by 3600 to convert from seconds to hours) - let hours = offset / 60 / 60; - - // Calculate the minutes part of the offset (remaining minutes after dividing by 60) - let minutes = offset / 60 % 60; - - // Format the offset as a string (e.g., "+0800", "-0300", etc.) - let offset_str = format!("{:+03}{:02}", hours, minutes); - - // Return the Signature struct with the provided information - Signature { - signature_type: sign_type, // The type of signature (e.g., commit, merge) - name: author, // The author's name - email, // The author's email - timestamp: chrono::Utc::now().timestamp() as usize, // The timestamp of the signature (seconds since Unix epoch) - timezone: offset_str, // The timezone offset (e.g., "+0800") + /// Creates a signature from the author name and email, stamped with the current time and the local timezone offset. + pub fn new(sign_type: SignatureType, author: String, email: String) -> Signature { + // Get the current local time (with timezone) + let local_time = chrono::Local::now(); + + // Get the offset from UTC in seconds (local time - UTC time) + let offset = local_time.offset().fix().local_minus_utc(); + + // Calculate the hours part of the offset (divide by 3600 to convert from seconds to hours) + let hours = offset / 60 / 60; + + // Calculate the minutes part of the offset (remaining minutes after dividing by 60) + let minutes = offset / 60 % 60; + + // Format the offset as a string (e.g., "+0800", "-0300", etc.)
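// Worked example of the arithmetic above (a sketch, not part of the patch):
// UTC+8 is 28_800 s, so hours = 28_800 / 60 / 60 = 8 and
// minutes = 28_800 / 60 % 60 = 0, and format!("{:+03}{:02}", 8, 0) yields
// "+0800", the value test_signature_with_time asserts further down. For
// western half-hour zones the remainder is negative (-12_600 s gives
// minutes = -30), so a hardened variant would format minutes.abs().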
+ let offset_str = format!("{:+03}{:02}", hours, minutes); + + // Return the Signature struct with the provided information + Signature { + signature_type: sign_type, // The type of signature (e.g., commit, merge) + name: author, // The author's name + email, // The author's email + timestamp: chrono::Utc::now().timestamp() as usize, // The timestamp of the signature (seconds since Unix epoch) + timezone: offset_str, // The timezone offset (e.g., "+0800") + } } } -} #[cfg(test)] mod tests { @@ -289,13 +289,17 @@ mod tests { } #[test] - fn test_signature_with_time(){ - let sign = Signature::new(SignatureType::Author, "MEGA".to_owned(), "admin@mega.com".to_owned()); + fn test_signature_with_time() { + let sign = Signature::new( + SignatureType::Author, + "MEGA".to_owned(), + "admin@mega.com".to_owned(), + ); assert_eq!(sign.signature_type, SignatureType::Author); assert_eq!(sign.name, "MEGA"); assert_eq!(sign.email, "admin@mega.com"); assert_eq!(sign.timezone, "+0800"); - + let naive_datetime = DateTime::from_timestamp(sign.timestamp as i64, 0).unwrap(); println!("Formatted DateTime: {}", naive_datetime.naive_local()); } diff --git a/mercury/src/internal/object/utils.rs b/mercury/src/internal/object/utils.rs index 738b296d..cf92c62a 100644 --- a/mercury/src/internal/object/utils.rs +++ b/mercury/src/internal/object/utils.rs @@ -1,4 +1,3 @@ - use std::io::{self, Read, Write}; use flate2::{write::ZlibEncoder, Compression}; diff --git a/mercury/src/internal/pack/cache.rs b/mercury/src/internal/pack/cache.rs index b3ddf142..7298b16a 100644 --- a/mercury/src/internal/pack/cache.rs +++ b/mercury/src/internal/pack/cache.rs @@ -10,9 +10,11 @@ use dashmap::{DashMap, DashSet}; use lru_mem::LruCache; use threadpool::ThreadPool; -use crate::time_it; use crate::hash::SHA1; -use crate::internal::pack::cache_object::{ArcWrapper, CacheObject, MemSizeRecorder, FileLoadStore}; +use crate::internal::pack::cache_object::{ + ArcWrapper, CacheObject, FileLoadStore, MemSizeRecorder, +}; +use crate::time_it; pub trait _Cache { fn new(mem_size: Option, tmp_path: PathBuf, thread_num: usize) -> Self @@ -88,7 +90,7 @@ impl Caches { /// generate the temp file path, hex string of the hash fn generate_temp_path(&self, tmp_path: &Path, hash: SHA1) -> PathBuf { // This is enough for the original path, 2 chars directory, 40 chars hash, and extra slashes - let mut path = PathBuf::with_capacity(self.tmp_path.capacity() + SHA1::SIZE * 2 + 5); + let mut path = PathBuf::with_capacity(self.tmp_path.capacity() + SHA1::SIZE * 2 + 5); path.push(tmp_path); let hash_str = hash._to_string(); path.push(&hash_str[..2]); // use first 2 chars as the directory @@ -114,7 +116,7 @@ impl Caches { /// memory used by the index (exclude lru_cache which is contained in CacheObject::get_mem_size()) pub fn memory_used_index(&self) -> usize { self.map_offset.capacity() * (std::mem::size_of::() + std::mem::size_of::()) - + self.hash_set.capacity() * (std::mem::size_of::()) + + self.hash_set.capacity() * (std::mem::size_of::()) } /// remove the tmp dir @@ -208,8 +210,7 @@ impl _Cache for Caches { self.hash_set.len() } fn memory_used(&self) -> usize { - self.lru_cache.lock().unwrap().current_size() - + self.memory_used_index() + self.lru_cache.lock().unwrap().current_size() + self.memory_used_index() } fn clear(&self) { time_it!("Caches clear", { @@ -233,7 +234,10 @@ mod test { use std::env; use super::*; - use crate::{hash::SHA1, internal::{object::types::ObjectType, pack::cache_object::CacheObjectInfo}}; + use crate::{ + hash::SHA1, + 
internal::{object::types::ObjectType, pack::cache_object::CacheObjectInfo}, + }; #[test] fn test_cache_single_thread() { diff --git a/mercury/src/internal/pack/cache_object.rs b/mercury/src/internal/pack/cache_object.rs index 58ab5d1c..07b16752 100644 --- a/mercury/src/internal/pack/cache_object.rs +++ b/mercury/src/internal/pack/cache_object.rs @@ -103,9 +103,9 @@ impl HeapSize for CacheObject { /// If a [`CacheObject`] is [`ObjectType::HashDelta`] or [`ObjectType::OffsetDelta`], /// it will expand to another [`CacheObject`] of other types. To prevent potential OOM, /// we record the size of the expanded object as well as that of the object itself. - /// - /// Base objects, *i.e.*, [`ObjectType::Blob`], [`ObjectType::Tree`], [`ObjectType::Commit`], - /// and [`ObjectType::Tag`], will not be expanded, so the heap-size of the object is the same + /// + /// Base objects, *i.e.*, [`ObjectType::Blob`], [`ObjectType::Tree`], [`ObjectType::Commit`], + /// and [`ObjectType::Tag`], will not be expanded, so the heap-size of the object is the same /// as the size of the data. /// /// See [Comment in PR #755](https://github.com/web3infra-foundation/mega/pull/755#issuecomment-2543100481) for more details. @@ -189,7 +189,7 @@ impl CacheObject { } /// Get the [`SHA1`] hash of the object. - /// + /// /// If the object is a delta object, return [`None`]. pub fn base_object_hash(&self) -> Option { match &self.info { @@ -199,7 +199,7 @@ impl CacheObject { } /// Get the offset delta of the object. - /// + /// /// If the object is not an offset delta, return [`None`]. pub fn offset_delta(&self) -> Option { match &self.info { @@ -209,7 +209,7 @@ impl CacheObject { } /// Get the hash delta of the object. - /// + /// /// If the object is not a hash delta, return [`None`]. pub fn hash_delta(&self) -> Option { match &self.info { @@ -372,7 +372,7 @@ mod test { #[test] fn test_cache_object_with_lru() { let mut cache = LruCache::new(2048); - + let hash_a = SHA1::default(); let hash_b = SHA1::new(b"b"); // whatever different hash let a = CacheObject { diff --git a/mercury/src/internal/pack/entry.rs b/mercury/src/internal/pack/entry.rs index 19fb5b4f..2dd4afb9 100644 --- a/mercury/src/internal/pack/entry.rs +++ b/mercury/src/internal/pack/entry.rs @@ -21,7 +21,8 @@ pub struct Entry { } impl PartialEq for Entry { - fn eq(&self, other: &Self) -> bool { // hash is enough to compare, right? + fn eq(&self, other: &Self) -> bool { + // hash is enough to compare, right? 
self.obj_type == other.obj_type && self.hash == other.hash } } @@ -39,15 +40,9 @@ impl Entry { ObjectType::Commit => { GitObject::Commit(Commit::from_bytes(&self.data, self.hash).unwrap()) } - ObjectType::Tree => { - GitObject::Tree(Tree::from_bytes(&self.data, self.hash).unwrap()) - } - ObjectType::Blob => { - GitObject::Blob(Blob::from_bytes(&self.data, self.hash).unwrap()) - } - ObjectType::Tag => { - GitObject::Tag(Tag::from_bytes(&self.data, self.hash).unwrap()) - } + ObjectType::Tree => GitObject::Tree(Tree::from_bytes(&self.data, self.hash).unwrap()), + ObjectType::Blob => GitObject::Blob(Blob::from_bytes(&self.data, self.hash).unwrap()), + ObjectType::Tag => GitObject::Tag(Tag::from_bytes(&self.data, self.hash).unwrap()), _ => unreachable!("can not parse delta!"), } } diff --git a/mercury/src/internal/pack/utils.rs b/mercury/src/internal/pack/utils.rs index 520cd6b7..2cede0a5 100644 --- a/mercury/src/internal/pack/utils.rs +++ b/mercury/src/internal/pack/utils.rs @@ -1,7 +1,7 @@ +use sha1::{Digest, Sha1}; use std::fs; use std::io::{self, Read}; use std::path::Path; -use sha1::{Digest, Sha1}; use crate::hash::SHA1; use crate::internal::object::types::ObjectType; @@ -72,7 +72,10 @@ pub fn read_byte_and_check_continuation<R: Read>(stream: &mut R) -> io::Result<(u8, bool)> { /// # Returns /// Returns an `io::Result` containing a tuple of the type and the computed size. /// -pub fn read_type_and_varint_size<R: Read>(stream: &mut R, offset: &mut usize) -> io::Result<(u8, usize)> { +pub fn read_type_and_varint_size<R: Read>( + stream: &mut R, + offset: &mut usize, +) -> io::Result<(u8, usize)> { let (first_byte, continuation) = read_byte_and_check_continuation(stream)?; // Increment the offset by one byte @@ -131,7 +134,10 @@ pub fn read_varint_le<R: Read>(reader: &mut R) -> io::Result<(u64, usize)> { let byte = buf[0]; if shift > 63 { // VarInt too long for u64 - return Err(io::Error::new(io::ErrorKind::InvalidData, "VarInt too long")); + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "VarInt too long", + )); } // Take the lower 7 bits of the byte @@ -191,7 +197,6 @@ pub fn read_bytes<R: Read, const N: usize>(stream: &mut R) -> io::Result<[u8; N]> { Ok(bytes) } - /// Reads a partial integer from a stream. (little-endian order) /// /// # Arguments @@ -231,19 +236,19 @@ pub fn read_partial_int<R: Read>( } /// Reads the base size and result size of a delta object from the given stream. -/// +/// /// **Note**: The stream MUST be positioned at the start of the delta object. -/// +/// /// The base size and result size are encoded as variable-length integers in little-endian order. -/// +/// /// The base size is the size of the base object, and the result size is the size of the result object. -/// +/// /// # Parameters /// * `stream`: The stream from which the sizes are read. -/// +/// /// # Returns /// Returns a tuple containing the base size and result size. -/// +/// pub fn read_delta_object_size<R: Read>(stream: &mut R) -> io::Result<(usize, usize)> { let base_size = read_varint_le(stream)?.0 as usize; let result_size = read_varint_le(stream)?.0 as usize; @@ -297,24 +302,22 @@ pub fn count_dir_files(path: &Path) -> io::Result<usize> { /// Count the time taken to execute a block of code. #[macro_export] macro_rules!
time_it { - ($msg:expr, $block:block) => { - { - let start = std::time::Instant::now(); - let result = $block; - let elapsed = start.elapsed(); - // println!("{}: {:?}", $msg, elapsed); - tracing::info!("{}: {:?}", $msg, elapsed); - result - } - }; + ($msg:expr, $block:block) => {{ + let start = std::time::Instant::now(); + let result = $block; + let elapsed = start.elapsed(); + // println!("{}: {:?}", $msg, elapsed); + tracing::info!("{}: {:?}", $msg, elapsed); + result + }}; } #[cfg(test)] mod tests { + use crate::internal::object::types::ObjectType; use std::io; use std::io::Cursor; use std::io::Read; - use crate::internal::object::types::ObjectType; use crate::internal::pack::utils::*; @@ -422,8 +425,8 @@ mod tests { assert_eq!(offset, 2); // Offset is 2 assert_eq!(type_bits, 5); // Expected type is 5 - // Expected size 000000110101 - // 110101 = 1 * 2^5 + 1 * 2^4 + 0 * 2^3 + 1 * 2^2 + 0 * 2^1 + 1 * 2^0= 53 + // Expected size 000000110101 + // 110101 = 1 * 2^5 + 1 * 2^4 + 0 * 2^3 + 1 * 2^2 + 0 * 2^1 + 1 * 2^0= 53 assert_eq!(size, 53); } @@ -438,7 +441,7 @@ mod tests { assert_eq!(offset, 1); // Offset is 1 assert_eq!(type_bits, 1); // Expected type is 1 - // Expected size is 15 + // Expected size is 15 assert_eq!(size, 15); } @@ -492,7 +495,9 @@ mod tests { #[test] fn test_read_varint_le_too_long() { - let data = vec![0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01]; + let data = vec![ + 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01, + ]; let mut cursor = Cursor::new(data); let result = read_varint_le(&mut cursor); @@ -500,8 +505,8 @@ mod tests { } #[test] - fn test_read_offset_encoding(){ - let data:Vec = vec![0b_1101_0101,0b_0000_0101]; + fn test_read_offset_encoding() { + let data: Vec = vec![0b_1101_0101, 0b_0000_0101]; let mut cursor = Cursor::new(data); let result = read_offset_encoding(&mut cursor); assert!(result.is_ok()); diff --git a/mercury/src/internal/pack/waitlist.rs b/mercury/src/internal/pack/waitlist.rs index 0041b9fe..13107b79 100644 --- a/mercury/src/internal/pack/waitlist.rs +++ b/mercury/src/internal/pack/waitlist.rs @@ -1,11 +1,12 @@ -use dashmap::DashMap; use crate::hash::SHA1; use crate::internal::pack::cache_object::CacheObject; +use dashmap::DashMap; /// Waitlist for Delta objects while the Base object is not ready. /// Easier and faster than Channels. #[derive(Default, Debug)] -pub struct Waitlist { //TODO Memory Control! +pub struct Waitlist { + //TODO Memory Control! 
pub map_offset: DashMap<usize, Vec<CacheObject>>, pub map_ref: DashMap<SHA1, Vec<CacheObject>>, } @@ -35,4 +36,4 @@ impl Waitlist { } res } -} \ No newline at end of file +} diff --git a/mercury/src/internal/pack/wrapper.rs b/mercury/src/internal/pack/wrapper.rs index e7d7cce2..057d77a0 100644 --- a/mercury/src/internal/pack/wrapper.rs +++ b/mercury/src/internal/pack/wrapper.rs @@ -1,6 +1,6 @@ -use std::io::{self, Read, BufRead}; +use std::io::{self, BufRead, Read}; -use sha1::{Sha1, Digest}; +use sha1::{Digest, Sha1}; use crate::hash::SHA1; @@ -91,9 +91,9 @@ where #[cfg(test)] mod tests { - use std::io::{self, Read, Cursor, BufReader}; - - use sha1::{Sha1, Digest}; + use std::io::{self, BufReader, Cursor, Read}; + + use sha1::{Digest, Sha1}; use crate::internal::pack::wrapper::Wrapper; @@ -129,4 +129,4 @@ mod tests { assert_eq!(hash_result.0, expected_hash); Ok(()) } -} \ No newline at end of file +} diff --git a/mercury/src/internal/zlib/stream/inflate.rs b/mercury/src/internal/zlib/stream/inflate.rs index c375ba4b..c5577b76 100644 --- a/mercury/src/internal/zlib/stream/inflate.rs +++ b/mercury/src/internal/zlib/stream/inflate.rs @@ -62,7 +62,6 @@ where } } - /// Read bytes from `rd` and decompress them using `state` into a pre-allocated fitting buffer `dst`, returning the amount of bytes written. fn read(rd: &mut impl BufRead, state: &mut Decompress, mut dst: &mut [u8]) -> io::Result<usize> { let mut total_written = 0; diff --git a/mercury/src/lib.rs b/mercury/src/lib.rs index 989f7cb0..a22d25d9 100644 --- a/mercury/src/lib.rs +++ b/mercury/src/lib.rs @@ -1,8 +1,8 @@ //! Mercury is a library for encoding and decoding Git Pack format files or streams. -pub mod internal; -pub mod hash; pub mod errors; +pub mod hash; +pub mod internal; pub mod utils; #[cfg(test)] diff --git a/mercury/src/utils.rs b/mercury/src/utils.rs index 5a18386d..17ae9e0c 100644 --- a/mercury/src/utils.rs +++ b/mercury/src/utils.rs @@ -11,4 +11,4 @@ pub fn read_bytes(file: &mut impl Read, len: usize) -> io::Result<Vec<u8>> { pub fn read_sha1(file: &mut impl Read) -> io::Result<SHA1> { SHA1::from_stream(file) -} \ No newline at end of file +} diff --git a/mono/src/api/lfs/mod.rs b/mono/src/api/lfs/mod.rs index d91746fb..638a53e3 100644 --- a/mono/src/api/lfs/mod.rs +++ b/mono/src/api/lfs/mod.rs @@ -1 +1 @@ -pub mod lfs_router; \ No newline at end of file +pub mod lfs_router; diff --git a/mono/src/api/mr/mod.rs b/mono/src/api/mr/mod.rs index 959416c2..e0820e1f 100644 --- a/mono/src/api/mr/mod.rs +++ b/mono/src/api/mr/mod.rs @@ -90,4 +90,4 @@ pub struct FilesChangedItem { pub struct FilesChangedList { pub files: Vec<FilesChangedItem>, pub content: String, -} \ No newline at end of file +} diff --git a/mono/src/api/oauth/model.rs b/mono/src/api/oauth/model.rs index 503dccdf..1c58993c 100644 --- a/mono/src/api/oauth/model.rs +++ b/mono/src/api/oauth/model.rs @@ -40,7 +40,6 @@ impl From for user::Model { } } - #[derive(Serialize, Deserialize, Clone, Debug)] pub struct LoginUser { pub user_id: i64, diff --git a/mono/src/api/user/mod.rs b/mono/src/api/user/mod.rs index 0fba9e86..16aa6218 100644 --- a/mono/src/api/user/mod.rs +++ b/mono/src/api/user/mod.rs @@ -1,2 +1,2 @@ +pub mod model; pub mod user_router; -pub mod model; \ No newline at end of file diff --git a/mono/src/api/user/model.rs b/mono/src/api/user/model.rs index 15d3f7ad..9c2c338f 100644 --- a/mono/src/api/user/model.rs +++ b/mono/src/api/user/model.rs @@ -38,7 +38,7 @@ pub struct ListToken { impl From<access_token::Model> for ListToken { fn from(value: access_token::Model) -> Self { - let mut mask_token = value.token; + let mut mask_token = value.token;
mask_token.replace_range(7..32, "-******-"); Self { id: value.id, diff --git a/mono/src/build.rs b/mono/src/build.rs index 27ec449e..577494be 100644 --- a/mono/src/build.rs +++ b/mono/src/build.rs @@ -1,7 +1,7 @@ //! Using shadow_rs to build-time information stored in Mega. -//! -//! -//! +//! +//! +//! fn main() -> shadow_rs::SdResult<()> { #[cfg(target_os = "linux")] @@ -9,4 +9,4 @@ fn main() -> shadow_rs::SdResult<()> { #[cfg(target_os = "macos")] println!("cargo:rustc-link-arg=-Wl,-rpath,@executable_path"); shadow_rs::new() -} \ No newline at end of file +} diff --git a/mono/src/cli.rs b/mono/src/cli.rs index a94622d2..2d1abb79 100644 --- a/mono/src/cli.rs +++ b/mono/src/cli.rs @@ -1,8 +1,8 @@ //! Cli module is responsible for parsing command line arguments and executing the appropriate. +use clap::{Arg, ArgMatches, Command}; use std::env; use std::path::PathBuf; -use clap::{Arg, ArgMatches, Command}; use tracing_subscriber::fmt::writer::MakeWriterExt; use common::{ @@ -22,8 +22,11 @@ use crate::commands::{builtin, builtin_exec}; /// If there is an error during the parsing, it will return an error. pub fn parse(args: Option>) -> MegaResult { let matches = match args { - Some(args) => cli().no_binary_name(true).try_get_matches_from(args).unwrap_or_else(|e| e.exit()), - None => cli().try_get_matches().unwrap_or_else(|e| e.exit()) + Some(args) => cli() + .no_binary_name(true) + .try_get_matches_from(args) + .unwrap_or_else(|e| e.exit()), + None => cli().try_get_matches().unwrap_or_else(|e| e.exit()), }; // Get the current directory @@ -45,7 +48,8 @@ pub fn parse(args: Option>) -> MegaResult { ctrlc::set_handler(move || { tracing::info!("Received Ctrl-C signal, exiting..."); std::process::exit(0); - }).unwrap(); + }) + .unwrap(); let (cmd, subcommand_args) = match matches.subcommand() { Some((cmd, args)) => (cmd, args), diff --git a/mono/src/commands/mod.rs b/mono/src/commands/mod.rs index e6a347d5..4ec6df05 100644 --- a/mono/src/commands/mod.rs +++ b/mono/src/commands/mod.rs @@ -4,7 +4,6 @@ use clap::{ArgMatches, Command}; use common::{config::Config, errors::MegaResult}; - pub fn builtin() -> Vec { vec![service::cli()] } diff --git a/mono/src/commands/service/http.rs b/mono/src/commands/service/http.rs index 094e54df..c1bd7d03 100644 --- a/mono/src/commands/service/http.rs +++ b/mono/src/commands/service/http.rs @@ -1,10 +1,8 @@ use clap::{ArgMatches, Args, Command, FromArgMatches}; +use crate::server::https_server::{self, HttpOptions}; use common::{config::Config, errors::MegaResult}; use jupiter::context::Context; -use crate::server::https_server::{self, HttpOptions}; - - pub fn cli() -> Command { HttpOptions::augment_args_for_update(Command::new("http").about("Start Mega HTTP server")) diff --git a/mono/src/commands/service/ssh.rs b/mono/src/commands/service/ssh.rs index 7048a936..1b2e85b6 100644 --- a/mono/src/commands/service/ssh.rs +++ b/mono/src/commands/service/ssh.rs @@ -1,10 +1,9 @@ use clap::{ArgMatches, Args, Command, FromArgMatches}; +use crate::server::ssh_server::{start_server, SshOptions}; use common::config::Config; use common::errors::MegaResult; use jupiter::context::Context; -use crate::server::ssh_server::{start_server, SshOptions}; - pub fn cli() -> Command { SshOptions::augment_args_for_update(Command::new("ssh").about("Start Git SSH server")) diff --git a/mono/src/git_protocol/http.rs b/mono/src/git_protocol/http.rs index 3045f9e1..fafdd675 100644 --- a/mono/src/git_protocol/http.rs +++ b/mono/src/git_protocol/http.rs @@ -195,7 +195,7 @@ pub async fn 
git_receive_pack( let mut report_status = Bytes::new(); let mut chunk_buffer = BytesMut::new(); // Used to cache the data of chunks before the PACK subsequence is found. - // Process the data stream to handle the Git receive-pack protocol. + // Process the data stream to handle the Git receive-pack protocol. while let Some(chunk) = data_stream.next().await { let chunk = chunk.unwrap(); // Process the data up to the "PACK" subsequence. diff --git a/mono/src/git_protocol/mod.rs b/mono/src/git_protocol/mod.rs index 0550318f..39fc2c9b 100644 --- a/mono/src/git_protocol/mod.rs +++ b/mono/src/git_protocol/mod.rs @@ -1,2 +1,2 @@ +pub mod http; pub mod ssh; -pub mod http; \ No newline at end of file diff --git a/mono/src/lib.rs b/mono/src/lib.rs index b5ee9056..10db49a8 100644 --- a/mono/src/lib.rs +++ b/mono/src/lib.rs @@ -4,14 +4,15 @@ mod commands; pub mod git_protocol; pub mod server; -#[cfg(test)] -mod tests { - use super::*; +// This test will block the whole test process, because it will never exit +// #[cfg(test)] +// mod tests { +// use super::*; - #[test] - fn test_cli() { - let config_path = "config.toml"; - let args = vec!["-c", config_path, "service", "multi", "http"]; - cli::parse(Some(args)).expect("Failed to start http service"); - } -} +// #[test] +// fn test_cli() { +// let config_path = "config.toml"; +// let args = vec!["-c", config_path, "service", "multi", "http"]; +// cli::parse(Some(args)).expect("Failed to start http service"); +// } +// } diff --git a/mono/src/server/mod.rs b/mono/src/server/mod.rs index d45f9d0c..cb358d2b 100644 --- a/mono/src/server/mod.rs +++ b/mono/src/server/mod.rs @@ -1,2 +1,2 @@ pub mod https_server; -pub mod ssh_server; \ No newline at end of file +pub mod ssh_server; diff --git a/mono/src/server/ssh_server.rs b/mono/src/server/ssh_server.rs index c83c5370..ab65881f 100644 --- a/mono/src/server/ssh_server.rs +++ b/mono/src/server/ssh_server.rs @@ -83,9 +83,11 @@ pub async fn load_key() -> PrivateKey { .as_object() .unwrap() .clone(); - write_secret("ssh_server_key", Some(secret)).await.unwrap_or_else(|e| { - panic!("Failed to write ssh_server_key: {:?}", e); - }); + write_secret("ssh_server_key", Some(secret)) + .await + .unwrap_or_else(|e| { + panic!("Failed to write ssh_server_key: {:?}", e); + }); keys } } diff --git a/orion/src/api.rs b/orion/src/api.rs index c95f45ef..095ad184 100644 --- a/orion/src/api.rs +++ b/orion/src/api.rs @@ -1,22 +1,22 @@ -use axum::{Json, Router}; -use axum::response::{IntoResponse, Sse}; +use crate::buck_controller; +use crate::model::builds; +use crate::server::AppState; +use axum::extract::{Path, State}; use axum::response::sse::{Event, KeepAlive}; +use axum::response::{IntoResponse, Sse}; use axum::routing::{get, post}; -use futures_util::stream::{self, Stream}; -use serde::{Deserialize, Serialize}; -use std::{time::Duration, convert::Infallible}; -use axum::extract::{Path, State}; +use axum::{Json, Router}; use dashmap::DashSet; +use futures_util::stream::{self, Stream}; use futures_util::StreamExt; use once_cell::sync::Lazy; +use sea_orm::sqlx::types::chrono; use sea_orm::ActiveModelTrait; use sea_orm::ActiveValue::Set; -use sea_orm::sqlx::types::chrono; +use serde::{Deserialize, Serialize}; +use std::{convert::Infallible, time::Duration}; use tokio::io::AsyncReadExt; use uuid::Uuid; -use crate::buck_controller; -use crate::model::builds; -use crate::server::AppState; pub fn routers() -> Router { Router::new() @@ -46,7 +46,10 @@ static BUILDING: Lazy<DashSet<String>> = Lazy::new(DashSet::new); // TODO avoid multi-task in one
repo? // #[debug_handler] // better error msg // `Json` must be last arg, because it consumes the request body -async fn buck_build(State(state): State, Json(req): Json) -> impl IntoResponse { +async fn buck_build( + State(state): State, + Json(req): Json, +) -> impl IntoResponse { let id = Uuid::now_v7(); let id_c = id; BUILDING.insert(id.to_string()); @@ -58,11 +61,19 @@ async fn buck_build(State(state): State, Json(req): Json req.repo.clone(), req.target.clone(), req.args.unwrap_or_default(), - output_path.clone() - ).await { + output_path.clone(), + ) + .await + { Ok(status) => { - let message = format!("Build {}", - if status.success() {"success"} else {"failed"}); + let message = format!( + "Build {}", + if status.success() { + "success" + } else { + "failed" + } + ); tracing::info!("{}; Exit code: {:?}", message, status.code()); BuildResult { success: status.success(), @@ -108,9 +119,7 @@ async fn buck_build(State(state): State, Json(req): Json // notify webhook if let Some(webhook) = req.webhook { let client = reqwest::Client::new(); - let resp = client.post(webhook.clone()) - .json(&build_resp) - .send().await; + let resp = client.post(webhook.clone()).json(&build_resp).send().await; match resp { Ok(resp) => { if resp.status().is_success() { @@ -135,10 +144,13 @@ async fn buck_build(State(state): State, Json(req): Json } /// SSE -async fn build_output(State(state): State, Path(id): Path) - -> Sse>> // impl IntoResponse +async fn build_output( + State(state): State, + Path(id): Path, +) -> Sse>> // impl IntoResponse { - if !BUILDING.contains(&id) { // build end, no file, in database + if !BUILDING.contains(&id) { + // build end, no file, in database let build_id: Uuid = id.parse().expect("Invalid build id"); let output = builds::Model::get_by_build_id(build_id, state.conn).await; let output = match output { @@ -157,7 +169,9 @@ async fn build_output(State(state): State, Path(id): Path) if !std::path::Path::new(&path).exists() { // 2 return types must same, which is hard without `.boxed()` // `Sse, ..., ...>>` != Sse> != Sse> - return Sse::new(stream::once(async { Ok(Event::default().data("Build task not found")) }).boxed()); + return Sse::new( + stream::once(async { Ok(Event::default().data("Build task not found")) }).boxed(), + ); } let file = tokio::fs::File::open(&path).await.unwrap(); // read-only mode @@ -194,4 +208,4 @@ async fn build_output(State(state): State, Path(id): Path) }); Sse::new(stream.boxed()).keep_alive(KeepAlive::new()) // empty comment to keep alive -} \ No newline at end of file +} diff --git a/orion/src/buck_controller.rs b/orion/src/buck_controller.rs index 5d999368..5bab9a6f 100644 --- a/orion/src/buck_controller.rs +++ b/orion/src/buck_controller.rs @@ -1,11 +1,16 @@ +use crate::util; use std::io; use std::process::ExitStatus; use tokio::process::Command; -use crate::util; const PROJECT_ROOT: &str = "/home/bean/projects/buck2"; -pub async fn build(repo: String, target: String, args: Vec, log_path: String) -> io::Result { +pub async fn build( + repo: String, + target: String, + args: Vec, + log_path: String, +) -> io::Result { util::ensure_parent_dirs(&log_path)?; let output_file = std::fs::File::create(log_path)?; @@ -25,4 +30,4 @@ pub async fn build(repo: String, target: String, args: Vec, log_path: St let status = child.wait().await?; Ok(status) -} \ No newline at end of file +} diff --git a/orion/src/main.rs b/orion/src/main.rs index 2616f3e9..5af31768 100644 --- a/orion/src/main.rs +++ b/orion/src/main.rs @@ -1,8 +1,8 @@ -mod server; -mod 
buck_controller; mod api; -mod util; +mod buck_controller; mod model; +mod server; +mod util; #[tokio::main] async fn main() { diff --git a/orion/src/model/builds.rs b/orion/src/model/builds.rs index a358f788..6f0e283e 100644 --- a/orion/src/model/builds.rs +++ b/orion/src/model/builds.rs @@ -26,4 +26,4 @@ impl Model { .await .expect("Failed to get by `build_id`") } -} \ No newline at end of file +} diff --git a/orion/src/model/mod.rs b/orion/src/model/mod.rs index 82ef4540..875fab08 100644 --- a/orion/src/model/mod.rs +++ b/orion/src/model/mod.rs @@ -1 +1 @@ -pub mod builds; \ No newline at end of file +pub mod builds; diff --git a/orion/src/server.rs b/orion/src/server.rs index 56a03c91..240437de 100644 --- a/orion/src/server.rs +++ b/orion/src/server.rs @@ -1,12 +1,12 @@ -use axum::Router; -use axum::routing::get; -use sea_orm::{ConnectionTrait, Database, DatabaseConnection, DbErr, Schema, TransactionTrait}; use crate::api; use crate::model::builds; +use axum::routing::get; +use axum::Router; +use sea_orm::{ConnectionTrait, Database, DatabaseConnection, DbErr, Schema, TransactionTrait}; #[derive(Clone)] pub struct AppState { - pub(crate) conn: DatabaseConnection + pub(crate) conn: DatabaseConnection, } pub async fn start_server(port: u16) { @@ -24,7 +24,9 @@ pub async fn start_server(port: u16) { tracing::info!("Listening on port {}", port); - let addr = tokio::net::TcpListener::bind(&format!("0.0.0.0:{}", port)).await.unwrap(); + let addr = tokio::net::TcpListener::bind(&format!("0.0.0.0:{}", port)) + .await + .unwrap(); axum::serve(addr, app.into_make_service()).await.unwrap(); } @@ -40,4 +42,4 @@ async fn setup_tables(conn: &DatabaseConnection) -> Result<(), DbErr> { trans.execute(statement).await?; trans.commit().await -} \ No newline at end of file +} diff --git a/orion/src/util.rs b/orion/src/util.rs index 4eb13729..a5317279 100644 --- a/orion/src/util.rs +++ b/orion/src/util.rs @@ -1,5 +1,5 @@ -use std::{fs, io}; use std::path::Path; +use std::{fs, io}; #[allow(dead_code)] /// Ensure the file exists, create it(with all parent dirs) if not. 
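The function under that last doc comment is elided by the hunk context; a plausible sketch of the behavior it describes, assuming only std (the name and details are illustrative, not the crate's exact code):

use std::{fs, io, path::Path};

fn ensure_file_exists(path: impl AsRef<Path>) -> io::Result<()> {
    let path = path.as_ref();
    if let Some(parent) = path.parent() {
        fs::create_dir_all(parent)?; // no-op when the directories already exist
    }
    if !path.exists() {
        fs::File::create(path)?; // create an empty file; existing content is untouched
    }
    Ok(())
}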
@@ -25,4 +25,4 @@ pub fn ensure_parent_dirs(path: impl AsRef) -> io::Result<()> { pub fn ensure_file_content(path: impl AsRef, content: &str) -> io::Result<()> { ensure_parent_dirs(path.as_ref())?; fs::write(path, content) -} \ No newline at end of file +} diff --git a/saturn/src/context.rs b/saturn/src/context.rs index d5e0e4bb..d7143c29 100644 --- a/saturn/src/context.rs +++ b/saturn/src/context.rs @@ -43,9 +43,7 @@ pub enum Error { } impl CedarContext { - pub fn new( - entities: EntityStore, - ) -> Result { + pub fn new(entities: EntityStore) -> Result { let schema_content = include_str!("../mega.cedarschema"); let policy_content = include_str!("../mega_policies.cedar"); let (schema, _) = Schema::from_cedarschema_str(schema_content).unwrap(); diff --git a/saturn/src/entitystore.rs b/saturn/src/entitystore.rs index 30153367..09f55857 100644 --- a/saturn/src/entitystore.rs +++ b/saturn/src/entitystore.rs @@ -2,7 +2,7 @@ use std::collections::HashMap; use cedar_policy::{Entities, Schema}; use serde::{Deserialize, Serialize}; -use serde_json::{ json, to_string_pretty}; +use serde_json::{json, to_string_pretty}; use crate::{ objects::{Issue, MergeRequest, Repo, User, UserGroup}, diff --git a/saturn/src/lib.rs b/saturn/src/lib.rs index c371106e..95fd4e4d 100644 --- a/saturn/src/lib.rs +++ b/saturn/src/lib.rs @@ -5,7 +5,6 @@ pub mod entitystore; mod objects; pub mod util; - pub enum ActionEnum { // ** Anyone // ViewRepo, @@ -168,14 +167,14 @@ mod test { ) .is_err_and(|e| matches!(e, Error::AuthDenied(_)))); - assert!(app_context - .is_authorized( - &maintainer, - r#"Action::"approveMergeRequest""#.parse::().unwrap(), - &resource, - Context::empty() - ) - .is_ok()); + assert!(app_context + .is_authorized( + &maintainer, + r#"Action::"approveMergeRequest""#.parse::().unwrap(), + &resource, + Context::empty() + ) + .is_ok()); } #[test] diff --git a/taurus/src/cache.rs b/taurus/src/cache.rs index 730a3cba..c5d9aaf8 100644 --- a/taurus/src/cache.rs +++ b/taurus/src/cache.rs @@ -1,8 +1,18 @@ -use std::{mem::swap, sync::{atomic::{AtomicBool, AtomicI64}, Arc, Mutex, OnceLock}, time::Duration}; +use std::{ + mem::swap, + sync::{ + atomic::{AtomicBool, AtomicI64}, + Arc, Mutex, OnceLock, + }, + time::Duration, +}; use chrono::Utc; -use crate::{event::Message, queue::{get_mq, MessageQueue}}; +use crate::{ + event::Message, + queue::{get_mq, MessageQueue}, +}; const FLUSH_INTERVAL: u64 = 10; @@ -34,7 +44,7 @@ impl MessageCache { inner: Arc::new(Mutex::new(Vec::new())), bound_mq: get_mq(), last_flush: Arc::new(AtomicI64::new(now.timestamp_millis())), - stop: Arc::new(AtomicBool::new(false)) + stop: Arc::new(AtomicBool::new(false)), } } @@ -43,7 +53,7 @@ impl MessageCache { tokio::spawn(async move { loop { if stop.load(std::sync::atomic::Ordering::Acquire) { - return + return; } tokio::time::sleep(Duration::from_secs(FLUSH_INTERVAL)).await; @@ -56,7 +66,7 @@ impl MessageCache { let mut res = Vec::new(); let inner = self.inner.clone(); - let mut locked = inner.lock().unwrap(); + let mut locked = inner.lock().unwrap(); if !locked.is_empty() { swap(locked.as_mut(), &mut res); } @@ -68,7 +78,7 @@ impl MessageCache { let inner = self.inner.clone(); let should_flush: bool; { - let mut locked = inner.lock().unwrap(); + let mut locked = inner.lock().unwrap(); let l = locked.len(); should_flush = l >= 1; locked.push(msg); @@ -89,10 +99,13 @@ pub async fn instant_flush() { let st = mc.bound_mq.context.services.mq_storage.clone(); let data = mc .get_cache() - .into_iter().map(Into::::into) + .into_iter() + 
.map(Into::::into) .collect::>(); st.save_messages(data).await; - let now = Utc::now(); - mc.last_flush.to_owned().store(now.timestamp_millis(), std::sync::atomic::Ordering::Relaxed); + let now = Utc::now(); + mc.last_flush + .to_owned() + .store(now.timestamp_millis(), std::sync::atomic::Ordering::Relaxed); } diff --git a/taurus/src/event/api_request.rs b/taurus/src/event/api_request.rs index b1d99bc5..404ef9f6 100644 --- a/taurus/src/event/api_request.rs +++ b/taurus/src/event/api_request.rs @@ -1,6 +1,6 @@ +use async_trait::async_trait; use common::config::Config; use serde::{Deserialize, Serialize}; -use async_trait::async_trait; use crate::{event::EventBase, event::EventType, queue::get_mq}; @@ -73,7 +73,6 @@ impl TryFrom for ApiRequestEvent { let res: ApiRequestEvent = serde_json::from_value(value)?; Ok(res) } - } #[cfg(test)] @@ -82,12 +81,14 @@ mod tests { use common::config::Config; use serde_json::Value; - const SER: &str = - r#"{"api":"Blob","config":{"base_dir":"","database":{"db_path":"/tmp/.mega/mega.db","db_type":"sqlite","db_url":"postgres://mega:mega@localhost:5432/mega","max_connection":32,"min_connection":16,"sqlx_logging":false},"lfs":{"enable_split":true,"split_size":1073741824},"log":{"level":"info","log_path":"/tmp/.mega/logs","print_std":true},"monorepo":{"import_dir":"/third-part"},"oauth":{"github_client_id":"","github_client_secret":""},"pack":{"channel_message_size":1000000,"clean_cache_after_decode":true,"pack_decode_cache_path":"/tmp/.mega/cache","pack_decode_mem_size":4,pack_decode_disk_size:"20%"},"ssh":{"ssh_key_path":"/tmp/.mega/ssh"},"storage":{"big_obj_threshold":1024,"lfs_obj_local_path":"/tmp/.mega/lfs","obs_access_key":"","obs_endpoint":"https://obs.cn-east-3.myhuaweicloud.com","obs_region":"cn-east-3","obs_secret_key":"","raw_obj_local_path":"/tmp/.mega/objects","raw_obj_storage_type":"LOCAL"},"ztm":{"agent":"127.0.0.1:7777","ca":"127.0.0.1:9999","hub":"127.0.0.1:8888"}}}"#; + const SER: &str = r#"{"api":"Blob","config":{"base_dir":"","database":{"db_path":"/tmp/.mega/mega.db","db_type":"sqlite","db_url":"postgres://mega:mega@localhost:5432/mega","max_connection":32,"min_connection":16,"sqlx_logging":false},"lfs":{"enable_split":true,"split_size":1073741824},"log":{"level":"info","log_path":"/tmp/.mega/logs","print_std":true},"monorepo":{"import_dir":"/third-part"},"oauth":{"github_client_id":"","github_client_secret":""},"pack":{"channel_message_size":1000000,"clean_cache_after_decode":true,"pack_decode_cache_path":"/tmp/.mega/cache","pack_decode_mem_size":4,pack_decode_disk_size:"20%"},"ssh":{"ssh_key_path":"/tmp/.mega/ssh"},"storage":{"big_obj_threshold":1024,"lfs_obj_local_path":"/tmp/.mega/lfs","obs_access_key":"","obs_endpoint":"https://obs.cn-east-3.myhuaweicloud.com","obs_region":"cn-east-3","obs_secret_key":"","raw_obj_local_path":"/tmp/.mega/objects","raw_obj_storage_type":"LOCAL"},"ztm":{"agent":"127.0.0.1:7777","ca":"127.0.0.1:9999","hub":"127.0.0.1:8888"}}}"#; #[test] fn test_conversion() { - let evt = ApiRequestEvent {api: ApiType::Blob, config: Config::default()}; + let evt = ApiRequestEvent { + api: ApiType::Blob, + config: Config::default(), + }; // Convert into value let serialized: Value = Value::from(evt); diff --git a/taurus/src/event/github_webhook.rs b/taurus/src/event/github_webhook.rs index 84bbb9fc..43ffbdc3 100644 --- a/taurus/src/event/github_webhook.rs +++ b/taurus/src/event/github_webhook.rs @@ -1,8 +1,8 @@ +use crate::event::{EventBase, EventType}; +use crate::queue::get_mq; use async_trait::async_trait; use 
serde::{Deserialize, Serialize}; use serde_json::Value; -use crate::event::{EventBase, EventType}; -use crate::queue::get_mq; #[derive(Debug, Clone, Serialize, Deserialize)] pub struct GithubWebhookEvent { @@ -65,4 +65,4 @@ impl TryFrom for GithubWebhookEvent { let res: GithubWebhookEvent = serde_json::from_value(value)?; Ok(res) } -} \ No newline at end of file +} diff --git a/taurus/src/event/mod.rs b/taurus/src/event/mod.rs index 0cd47353..9d9a0e72 100644 --- a/taurus/src/event/mod.rs +++ b/taurus/src/event/mod.rs @@ -4,10 +4,10 @@ use api_request::ApiRequestEvent; use async_trait::async_trait; use chrono::{DateTime, Utc}; +use github_webhook::GithubWebhookEvent; use serde::{Deserialize, Serialize}; use serde_json::Value; use thiserror::Error; -use github_webhook::GithubWebhookEvent; pub mod api_request; pub mod github_webhook; @@ -56,7 +56,6 @@ impl EventType { // so you have to manually add a process logic for your event here. EventType::ApiRequest(evt) => evt.process().await, // EventType::SomeOtherEvent(xxx) => xxx.process().await, - EventType::GithubWebhook(evt) => evt.process().await, // This won't happen unless failed to load events from database. @@ -69,11 +68,7 @@ impl EventType { impl Display for Message { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "ID: {}, Created at: {}", - self.id, self.create_time - ) + write!(f, "ID: {}, Created at: {}", self.id, self.create_time) } } @@ -116,11 +111,15 @@ impl From for Message { } else { EventType::ErrorEvent } - }, + } - _ => EventType::ErrorEvent + _ => EventType::ErrorEvent, }; - Self { id, create_time, evt } + Self { + id, + create_time, + evt, + } } } diff --git a/taurus/src/init.rs b/taurus/src/init.rs index 78764cdf..16a1320d 100644 --- a/taurus/src/init.rs +++ b/taurus/src/init.rs @@ -1,6 +1,6 @@ +use crate::queue::{MessageQueue, MQ}; use common::config::Config; use jupiter::context::Context; -use crate::queue::{MessageQueue, MQ}; pub async fn init_mq(config: &Config) { let ctx = Context::new(config.clone()).await; diff --git a/taurus/src/lib.rs b/taurus/src/lib.rs index 55f636af..73168c20 100644 --- a/taurus/src/lib.rs +++ b/taurus/src/lib.rs @@ -1,4 +1,4 @@ -pub mod init; +pub mod cache; pub mod event; +pub mod init; pub mod queue; -pub mod cache; diff --git a/taurus/src/queue.rs b/taurus/src/queue.rs index 9df483c7..7c14f806 100644 --- a/taurus/src/queue.rs +++ b/taurus/src/queue.rs @@ -3,12 +3,12 @@ use std::sync::atomic::AtomicI64; use std::sync::{Arc, OnceLock}; use chrono::Utc; -use crossbeam_channel::{unbounded, Sender}; use crossbeam_channel::Receiver; +use crossbeam_channel::{unbounded, Sender}; use jupiter::context::Context; use crate::cache::get_mcache; -use crate::event::{Message, EventType}; +use crate::event::{EventType, Message}; // Lazy initialized static MessageQueue instance. pub(crate) static MQ: OnceLock = OnceLock::new(); @@ -24,13 +24,16 @@ pub struct MessageQueue { pub(crate) context: Context, } -unsafe impl Send for MessageQueue{} -unsafe impl Sync for MessageQueue{} +unsafe impl Send for MessageQueue {} +unsafe impl Sync for MessageQueue {} impl Debug for MessageQueue { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { // Just ignore context field. 
- f.debug_struct("MessageQueue").field("sender", &self.sender).field("receiver", &self.receiver).finish() + f.debug_struct("MessageQueue") + .field("sender", &self.sender) + .field("receiver", &self.receiver) + .finish() } } @@ -62,7 +65,7 @@ impl MessageQueue { tokio::spawn(async move { msg.evt.process().await; }); - }, + } Err(e) => { // Should not error here. panic!("Event Loop Panic: {e}"); @@ -74,9 +77,11 @@ impl MessageQueue { pub(crate) fn send(&self, evt: EventType) { let _ = self.sender.send(Message { - id: self.cur_id.fetch_add(1, std::sync::atomic::Ordering::Relaxed), + id: self + .cur_id + .fetch_add(1, std::sync::atomic::Ordering::Relaxed), create_time: Utc::now(), - evt + evt, }); } } diff --git a/vault/src/nostr.rs b/vault/src/nostr.rs index 17741a52..cbfa8df9 100644 --- a/vault/src/nostr.rs +++ b/vault/src/nostr.rs @@ -1,4 +1,4 @@ -use secp256k1::{PublicKey, rand, Secp256k1, SecretKey}; +use secp256k1::{rand, PublicKey, Secp256k1, SecretKey}; pub fn generate_nostr_id() -> (String, (SecretKey, PublicKey)) { let secp = Secp256k1::new(); @@ -11,8 +11,8 @@ pub fn generate_nostr_id() -> (String, (SecretKey, PublicKey)) { #[cfg(test)] mod tests { - use secp256k1::Message; use super::*; + use secp256k1::Message; #[test] fn test_generate_nostr_id() { @@ -31,4 +31,4 @@ mod tests { let sig = secp.sign_ecdsa(&message, &secret_key); assert_eq!(secp.verify_ecdsa(&message, &sig, &public_key), Ok(())); } -} \ No newline at end of file +} diff --git a/vault/src/pki.rs b/vault/src/pki.rs index e859ae63..808fb35a 100644 --- a/vault/src/pki.rs +++ b/vault/src/pki.rs @@ -8,7 +8,7 @@ use rusty_vault::core::Core; use serde_json::{json, Value}; use tokio::sync::OnceCell; -use super::vault::{CoreInfo, CORE, read_api, write_api}; +use super::vault::{read_api, write_api, CoreInfo, CORE}; const ROLE: &str = "test"; @@ -16,19 +16,24 @@ const ROLE: &str = "test"; static _CA: OnceCell = OnceCell::const_new(); // Automatically initialize CA when you first use it pub async fn ca() -> &'static CoreInfo { - _CA.get_or_init(|| async { - init_ca().await - }).await + _CA.get_or_init(|| async { init_ca().await }).await } #[allow(clippy::await_holding_lock)] async fn init_ca() -> CoreInfo { let c = CORE.clone(); // init CA if not let token = &c.token; - if read_api(&c.core.read().unwrap(), token, "pki/ca/pem").await.is_err() { // err = not found + if read_api(&c.core.read().unwrap(), token, "pki/ca/pem") + .await + .is_err() + { + // err = not found config_ca(c.core.clone(), token).await; generate_root(c.core.clone(), token, false).await; - config_role(c.core.clone(), token, json!({ // TODO You may want to customize this + config_role( + c.core.clone(), + token, + json!({ // TODO You may want to customize this "ttl": "60d", "max_ttl": "365d", "key_type": "rsa", @@ -38,7 +43,9 @@ async fn init_ca() -> CoreInfo { "locality": "Beijing", "organization": "OpenAtom-Mega", "no_store": false, - })).await; + }), + ) + .await; } c } @@ -62,14 +69,21 @@ async fn config_ca(core: Arc>, token: &str) { /// - `data`: see [RoleEntry](rusty_vault::modules::pki::path_roles) #[allow(clippy::await_holding_lock)] pub async fn config_role(core: Arc>, token: &str, data: Value) { - let role_data = data.as_object() + let role_data = data + .as_object() .expect("`data` must be a JSON object") .clone(); // config role let result = async_std::task::block_on(async { let core = core.read().unwrap(); - write_api(&core, token, &format!("pki/roles/{}", ROLE), Some(role_data)).await + write_api( + &core, + token, + &format!("pki/roles/{}", ROLE), 
+ Some(role_data), + ) + .await }); assert!(result.is_ok()); } @@ -84,22 +98,27 @@ async fn generate_root(core: Arc>, token: &str, exported: bool) { let key_bits = 4096; let common_name = "mega-ca"; let req_data = json!({ - "common_name": common_name, - "ttl": "365d", - "country": "cn", - "key_type": key_type, - "key_bits": key_bits, - }) - .as_object() - .unwrap() - .clone(); + "common_name": common_name, + "ttl": "365d", + "country": "cn", + "key_type": key_type, + "key_bits": key_bits, + }) + .as_object() + .unwrap() + .clone(); let resp = write_api( &core, token, - format!("pki/root/generate/{}", if exported { "exported" } else { "internal" }).as_str(), + format!( + "pki/root/generate/{}", + if exported { "exported" } else { "internal" } + ) + .as_str(), Some(req_data), - ).await; + ) + .await; assert!(resp.is_ok()); } @@ -109,7 +128,8 @@ async fn generate_root(core: Arc>, token: &str, exported: bool) { #[allow(clippy::await_holding_lock)] pub async fn issue_cert(data: Value) -> (String, String) { // let dns_sans = ["test.com", "a.test.com", "b.test.com"]; - let issue_data = data.as_object() + let issue_data = data + .as_object() .expect("`data` must be a JSON object") .clone(); @@ -117,7 +137,13 @@ pub async fn issue_cert(data: Value) -> (String, String) { let resp = async_std::task::block_on(async { let core = ca().await.core.read().unwrap(); let token = &ca().await.token; - write_api(&core, token, &format!("pki/issue/{}", ROLE), Some(issue_data)).await + write_api( + &core, + token, + &format!("pki/issue/{}", ROLE), + Some(issue_data), + ) + .await }); assert!(resp.is_ok()); let resp_body = resp.unwrap(); @@ -135,7 +161,10 @@ pub async fn verify_cert(cert_pem: &[u8]) -> bool { let cert = X509::from_pem(cert_pem).unwrap(); // verify time - let now = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs() as i64; + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs() as i64; let now = Asn1Time::from_unix(now).unwrap(); let not_before = cert.not_before(); let not_after = cert.not_after(); @@ -157,7 +186,10 @@ pub async fn verify_cert(cert_pem: &[u8]) -> bool { pub async fn get_root_cert() -> String { let resp_ca_pem = async_std::task::block_on(async { let core = ca().await.core.read().unwrap(); - read_api(&core, &ca().await.token, "pki/ca/pem").await.unwrap().unwrap() + read_api(&core, &ca().await.token, "pki/ca/pem") + .await + .unwrap() + .unwrap() }); let ca_data = resp_ca_pem.data.unwrap(); @@ -167,16 +199,17 @@ pub async fn get_root_cert() -> String { #[cfg(test)] mod tests { + use super::*; use std::fs; use std::io::Write; - use super::*; #[tokio::test] async fn test_pki_issue() { let (cert_pem, private_key) = issue_cert(json!({ "ttl": "10d", "common_name": "oqpXWgEhXa1WDqMWBnpUW4jvrxGqJKVuJATy4MSPdKNS", //nostr id - })).await; + })) + .await; println!("cert_pem: {}", cert_pem); println!("private_key: {}", private_key); @@ -191,6 +224,7 @@ mod tests { #[allow(clippy::await_holding_lock)] #[cfg(test)] mod tests_raw { + use std::io::Write; use std::{ collections::HashMap, default::Default, @@ -198,16 +232,25 @@ mod tests_raw { sync::{Arc, RwLock}, time::{SystemTime, UNIX_EPOCH}, }; - use std::io::Write; use go_defer::defer; use openssl::{asn1::Asn1Time, ec::EcKey, nid::Nid, pkey::PKey, rsa::Rsa, x509::X509}; - use rusty_vault::{core::{Core, SealConfig}, logical::{Operation, Request}, storage, storage::barrier_aes_gcm}; use rusty_vault::errors::RvError; use rusty_vault::logical::Response; + use rusty_vault::{ + core::{Core, SealConfig}, + 
logical::{Operation, Request}, + storage, + storage::barrier_aes_gcm, + }; use serde_json::{json, Map, Value}; - async fn test_read_api(core: &Core, token: &str, path: &str, is_ok: bool) -> Result, RvError> { + async fn test_read_api( + core: &Core, + token: &str, + path: &str, + is_ok: bool, + ) -> Result, RvError> { let mut req = Request::new(path); req.operation = Operation::Read; req.client_token = token.to_string(); @@ -241,9 +284,9 @@ mod tests_raw { let mount_data = json!({ "type": "pki", }) - .as_object() - .unwrap() - .clone(); + .as_object() + .unwrap() + .clone(); let resp = test_write_api(&core, token, "sys/mounts/pki/", true, Some(mount_data)).await; assert!(resp.is_ok()); @@ -263,12 +306,16 @@ mod tests_raw { "organization": "OpenAtom", "no_store": false, }) - .as_object() - .unwrap() - .clone(); + .as_object() + .unwrap() + .clone(); // config role - assert!(test_write_api(&core, token, "pki/roles/test", true, Some(role_data)).await.is_ok()); + assert!( + test_write_api(&core, token, "pki/roles/test", true, Some(role_data)) + .await + .is_ok() + ); let resp = test_read_api(&core, token, "pki/roles/test", true).await; assert!(resp.as_ref().unwrap().is_some()); let resp = resp.unwrap(); @@ -289,7 +336,12 @@ mod tests_raw { assert!(!role_data["no_store"].as_bool().unwrap()); } - async fn test_pki_generate_root(core: Arc>, token: &str, exported: bool, is_ok: bool) { + async fn test_pki_generate_root( + core: Arc>, + token: &str, + exported: bool, + is_ok: bool, + ) { let core = core.read().unwrap(); let key_type = "rsa"; @@ -302,17 +354,22 @@ mod tests_raw { "key_type": key_type, "key_bits": key_bits, }) - .as_object() - .unwrap() - .clone(); + .as_object() + .unwrap() + .clone(); // println!("generate root req_data: {:?}, is_ok: {}", req_data, is_ok); let resp = test_write_api( &core, token, - format!("pki/root/generate/{}", if exported { "exported" } else { "internal" }).as_str(), + format!( + "pki/root/generate/{}", + if exported { "exported" } else { "internal" } + ) + .as_str(), is_ok, Some(req_data), - ).await; + ) + .await; if !is_ok { return; } @@ -325,7 +382,13 @@ mod tests_raw { let resp_ca_pem = test_read_api(&core, token, "pki/ca/pem", true).await; let resp_ca_pem_cert_data = resp_ca_pem.unwrap().unwrap().data.unwrap(); - let ca_cert = X509::from_pem(resp_ca_pem_cert_data["certificate"].as_str().unwrap().as_bytes()).unwrap(); + let ca_cert = X509::from_pem( + resp_ca_pem_cert_data["certificate"] + .as_str() + .unwrap() + .as_bytes(), + ) + .unwrap(); let subject = ca_cert.subject_name(); let cn = subject.entries_by_nid(Nid::COMMONNAME).next().unwrap(); assert_eq!(cn.data().as_slice(), common_name.as_bytes()); @@ -368,9 +431,9 @@ mod tests_raw { "common_name": "test.com", "alt_names": "a.test.com,b.test.com", }) - .as_object() - .unwrap() - .clone(); + .as_object() + .unwrap() + .clone(); // issue cert let resp = test_write_api(&core, token, "pki/issue/test", true, Some(issue_data)).await; @@ -383,7 +446,8 @@ mod tests_raw { println!("issue cert result: {:?}", cert_data["certificate"]); let mut file = fs::File::create("/tmp/cert.crt").unwrap(); - file.write_all(cert_data["certificate"].as_str().unwrap().as_ref()).unwrap(); + file.write_all(cert_data["certificate"].as_str().unwrap().as_ref()) + .unwrap(); let cert = X509::from_pem(cert_data["certificate"].as_str().unwrap().as_bytes()).unwrap(); let alt_names = cert.subject_alt_names(); @@ -394,20 +458,31 @@ mod tests_raw { assert!(dns_sans.contains(&alt_name.dnsname().unwrap())); } 
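The validity-window check in verify_cert further up reduces to two ASN.1 time comparisons, not_before <= now <= not_after. A minimal sketch, assuming the openssl crate used throughout this module:

use openssl::{asn1::Asn1Time, error::ErrorStack, x509::X509};
use std::cmp::Ordering;
use std::time::{SystemTime, UNIX_EPOCH};

fn cert_is_current(cert: &X509) -> Result<bool, ErrorStack> {
    let now = SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap()
        .as_secs() as i64;
    let now = Asn1Time::from_unix(now)?;
    // started: not_before is not after now; unexpired: not_after is not before now
    let started = cert.not_before().compare(&now)? != Ordering::Greater;
    let unexpired = cert.not_after().compare(&now)? != Ordering::Less;
    Ok(started && unexpired)
}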
assert_eq!(cert_data["private_key_type"].as_str().unwrap(), "rsa"); - let priv_key = PKey::private_key_from_pem(cert_data["private_key"].as_str().unwrap().as_bytes()).unwrap(); + let priv_key = + PKey::private_key_from_pem(cert_data["private_key"].as_str().unwrap().as_bytes()) + .unwrap(); assert_eq!(priv_key.bits(), 4096); assert!(priv_key.public_eq(&cert.public_key().unwrap())); let serial_number = cert.serial_number().to_bn().unwrap(); let serial_number_hex = serial_number.to_hex_str().unwrap(); assert_eq!( - cert_data["serial_number"].as_str().unwrap().replace(':', "").to_lowercase().as_str(), + cert_data["serial_number"] + .as_str() + .unwrap() + .replace(':', "") + .to_lowercase() + .as_str(), serial_number_hex.to_lowercase().as_str() ); - let expiration_time = Asn1Time::from_unix(cert_data["expiration"].as_i64().unwrap()).unwrap(); + let expiration_time = + Asn1Time::from_unix(cert_data["expiration"].as_i64().unwrap()).unwrap(); let ttl_compare = cert.not_after().compare(&expiration_time); assert!(ttl_compare.is_ok()); assert_eq!(ttl_compare.unwrap(), std::cmp::Ordering::Equal); - let now_timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(); + let now_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap() + .as_secs(); let expiration_ttl = cert_data["expiration"].as_u64().unwrap(); let ttl = expiration_ttl - now_timestamp; let expect_ttl = 10 * 24 * 60 * 60; @@ -417,17 +492,32 @@ mod tests_raw { let authority_key_id = cert.authority_key_id(); assert!(authority_key_id.is_some()); - println!("authority_key_id: {}", hex::encode(authority_key_id.unwrap().as_slice())); + println!( + "authority_key_id: {}", + hex::encode(authority_key_id.unwrap().as_slice()) + ); let resp_ca_pem = test_read_api(&core, token, "pki/ca/pem", true).await; let resp_ca_pem_cert_data = resp_ca_pem.unwrap().unwrap().data.unwrap(); - let ca_cert = X509::from_pem(resp_ca_pem_cert_data["certificate"].as_str().unwrap().as_bytes()).unwrap(); + let ca_cert = X509::from_pem( + resp_ca_pem_cert_data["certificate"] + .as_str() + .unwrap() + .as_bytes(), + ) + .unwrap(); let subject = ca_cert.subject_name(); let cn = subject.entries_by_nid(Nid::COMMONNAME).next().unwrap(); assert_eq!(cn.data().as_slice(), "test-ca".as_bytes()); - println!("ca subject_key_id: {}", hex::encode(ca_cert.subject_key_id().unwrap().as_slice())); - assert_eq!(ca_cert.subject_key_id().unwrap().as_slice(), authority_key_id.unwrap().as_slice()); + println!( + "ca subject_key_id: {}", + hex::encode(ca_cert.subject_key_id().unwrap().as_slice()) + ); + assert_eq!( + ca_cert.subject_key_id().unwrap().as_slice(), + authority_key_id.unwrap().as_slice() + ); } #[tokio::test] @@ -442,18 +532,28 @@ mod tests_raw { println!("root_token: {:?}", root_token); let mut conf: HashMap = HashMap::new(); - conf.insert("path".to_string(), Value::String(dir.to_string_lossy().into_owned())); + conf.insert( + "path".to_string(), + Value::String(dir.to_string_lossy().into_owned()), + ); let backend = storage::new_backend("file", &conf).unwrap(); let barrier = barrier_aes_gcm::AESGCMBarrier::new(Arc::clone(&backend)); - let c = Arc::new(RwLock::new(Core { physical: backend, barrier: Arc::new(barrier), ..Default::default() })); + let c = Arc::new(RwLock::new(Core { + physical: backend, + barrier: Arc::new(barrier), + ..Default::default() + })); { let mut core = c.write().unwrap(); assert!(core.config(Arc::clone(&c), None).is_ok()); - let seal_config = SealConfig { secret_shares: 10, secret_threshold: 5 }; + let seal_config = SealConfig 
+                secret_shares: 10,
+                secret_threshold: 5,
+            };
             let result = core.init(&seal_config);
             assert!(result.is_ok());
diff --git a/vault/src/vault.rs b/vault/src/vault.rs
index db84ea7c..1dc2315f 100644
--- a/vault/src/vault.rs
+++ b/vault/src/vault.rs
@@ -1,7 +1,3 @@
-use std::collections::HashMap;
-use std::fs;
-use std::path::PathBuf;
-use std::sync::{Arc, RwLock};
 use lazy_static::lazy_static;
 use rusty_vault::core::{Core, SealConfig};
 use rusty_vault::errors::RvError;
@@ -10,6 +6,10 @@ use rusty_vault::storage;
 use rusty_vault::storage::barrier_aes_gcm;
 use serde::{Deserialize, Serialize};
 use serde_json::{Map, Value};
+use std::collections::HashMap;
+use std::fs;
+use std::path::PathBuf;
+use std::sync::{Arc, RwLock};

 #[derive(Serialize, Deserialize, Debug)]
 struct CoreKey {
@@ -41,19 +41,29 @@ fn init() -> CoreInfo {
     }

     let mut conf: HashMap<String, Value> = HashMap::new();
-    conf.insert("path".to_string(), Value::String(dir.to_string_lossy().into_owned()));
+    conf.insert(
+        "path".to_string(),
+        Value::String(dir.to_string_lossy().into_owned()),
+    );

     let backend = storage::new_backend("file", &conf).unwrap(); // file or database
     let barrier = barrier_aes_gcm::AESGCMBarrier::new(Arc::clone(&backend));
-    let c = Arc::new(RwLock::new(Core { physical: backend, barrier: Arc::new(barrier), ..Default::default() }));
+    let c = Arc::new(RwLock::new(Core {
+        physical: backend,
+        barrier: Arc::new(barrier),
+        ..Default::default()
+    }));

     let root_token;
     {
         let mut core = c.write().unwrap();
         assert!(core.config(Arc::clone(&c), None).is_ok());

-        let seal_config = SealConfig { secret_shares: 10, secret_threshold: 5 };
+        let seal_config = SealConfig {
+            secret_shares: 10,
+            secret_threshold: 5,
+        };

         let mut unsealed = false;
         if !inited {
@@ -94,7 +104,10 @@ fn init() -> CoreInfo {
         println!("root_token: {:?}", root_token);
     }

-    CoreInfo { core: c, token: root_token }
+    CoreInfo {
+        core: c,
+        token: root_token,
+    }
 }

 pub async fn read_api(core: &Core, token: &str, path: &str) -> Result<Option<Response>, RvError> {
@@ -121,21 +134,33 @@ pub async fn write_api(
 }

 /// Write a secret to the vault (k-v)
-pub async fn write_secret(name: &str, data: Option<Map<String, Value>>) -> Result<Option<Response>, RvError> {
+pub async fn write_secret(
+    name: &str,
+    data: Option<Map<String, Value>>,
+) -> Result<Option<Response>, RvError> {
     // async_std: stop spread of `!Send` (RwLockReadGuard cross .await), for `tokio::spawn`
-    async_std::task::block_on(write_api(&CORE.core.read().unwrap(), &CORE.token, &format!("secret/{}", name), data))
+    async_std::task::block_on(write_api(
+        &CORE.core.read().unwrap(),
+        &CORE.token,
+        &format!("secret/{}", name),
+        data,
+    ))
 }

 /// Read a secret from the vault (k-v)
 pub async fn read_secret(name: &str) -> Result<Option<Response>, RvError> {
     // async_std: stop spread of `!Send` (RwLockReadGuard cross .await), for `tokio::spawn`
-    async_std::task::block_on(read_api(&CORE.core.read().unwrap(), &CORE.token, &format!("secret/{}", name)))
+    async_std::task::block_on(read_api(
+        &CORE.core.read().unwrap(),
+        &CORE.token,
+        &format!("secret/{}", name),
+    ))
 }

 #[cfg(test)]
 mod tests {
-    use serde_json::json;
     use super::*;
+    use serde_json::json;

     #[allow(clippy::await_holding_lock)]
     #[tokio::test]
@@ -148,13 +173,19 @@ mod tests {
             .as_object()
             .unwrap()
             .clone();
-        write_secret("keyInfo", Some(kv_data.clone())).await.unwrap();
+        write_secret("keyInfo", Some(kv_data.clone()))
+            .await
+            .unwrap();
         let secret = read_secret("keyInfo").await.unwrap().unwrap().data;
         assert_eq!(secret, Some(kv_data));
         println!("secret: {:?}", secret.unwrap());

         assert!(read_secret("foo").await.unwrap().is_none());

-        assert!(read_api(&CORE.core.read().unwrap(), &CORE.token, "secret1/foo").await.is_err());
+        assert!(
+            read_api(&CORE.core.read().unwrap(), &CORE.token, "secret1/foo")
+                .await
+                .is_err()
+        );
     }
-}
\ No newline at end of file
+}
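
A minimal usage sketch of the k-v wrappers reformatted above, mirroring the test in vault/src/vault.rs (not part of the patch itself). It assumes only what the diff shows: the `write_secret`/`read_secret` signatures and the `data` field on the returned response; the `"db"` path, the payload keys, and the test name are hypothetical. Because the wrappers `block_on` the vault call internally rather than holding the core's `RwLockReadGuard` across an `.await`, the futures they produce stay `Send` and can be driven from `tokio::spawn`:

// Hypothetical test living in the same `mod tests` as above (`use super::*;`).
use serde_json::json;

#[tokio::test]
async fn kv_roundtrip_from_spawn() {
    // Build a JSON object payload, exactly as the existing test does.
    let kv = json!({ "user": "admin", "password": "s3cret" })
        .as_object()
        .unwrap()
        .clone();

    // tokio::spawn requires a `Send` future; that holds here because the
    // wrappers never keep the RwLockReadGuard alive across an .await point.
    tokio::spawn(async move {
        write_secret("db", Some(kv.clone())).await.unwrap();
        let resp = read_secret("db").await.unwrap().unwrap();
        assert_eq!(resp.data, Some(kv));
    })
    .await
    .unwrap();
}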