From 20f2e44190f3f9dfd8ebdf8a7598f7b1838857e7 Mon Sep 17 00:00:00 2001
From: Tiago Castro
Date: Wed, 27 Nov 2024 23:52:14 +0000
Subject: [PATCH] refactor(fmt): use fmt style from rust stable

Drop the custom .rustfmt.toml, which pinned max_width to 80 and relied on
nightly-only rustfmt options (wrap_comments, comment_width,
struct_lit_single_line, imports_layout, imports_granularity,
spaces_around_ranges), and reformat the tree with the default rustfmt
configuration, so that `cargo fmt` runs on stable Rust.

Signed-off-by: Tiago Castro
---
 .rustfmt.toml | 19 -
 io-engine-bench/build.rs | 3 +-
 io-engine-bench/src/nexus.rs | 26 +-
 io-engine-tests/src/bdev.rs | 17 +-
 io-engine-tests/src/bdev_io.rs | 12 +-
 io-engine-tests/src/compose/mod.rs | 17 +-
 io-engine-tests/src/compose/rpc/v0.rs | 17 +-
 io-engine-tests/src/compose/rpc/v1.rs | 46 +-
 io-engine-tests/src/error_bdev.rs | 26 +-
 io-engine-tests/src/file_io.rs | 14 +-
 io-engine-tests/src/fio.rs | 19 +-
 io-engine-tests/src/lib.rs | 35 +-
 io-engine-tests/src/nexus.rs | 125 +----
 io-engine-tests/src/nvme.rs | 12 +-
 io-engine-tests/src/nvmf.rs | 20 +-
 io-engine-tests/src/pool.rs | 27 +-
 io-engine-tests/src/replica.rs | 32 +-
 io-engine-tests/src/snapshot.rs | 8 +-
 io-engine-tests/src/test.rs | 21 +-
 io-engine-tests/src/test_task.rs | 5 +-
 io-engine/examples/lvs-eval/display.rs | 29 +-
 io-engine/examples/lvs-eval/main.rs | 19 +-
 io-engine/src/bdev/aio.rs | 46 +-
 io-engine/src/bdev/dev.rs | 20 +-
 io-engine/src/bdev/device.rs | 155 ++----
 io-engine/src/bdev/ftl.rs | 69 +--
 io-engine/src/bdev/loopback.rs | 17 +-
 io-engine/src/bdev/lvs.rs | 81 ++-
 io-engine/src/bdev/malloc.rs | 59 +--
 io-engine/src/bdev/mod.rs | 7 +-
 io-engine/src/bdev/nexus/mod.rs | 69 +--
 io-engine/src/bdev/nexus/nexus_bdev.rs | 206 ++------
 .../src/bdev/nexus/nexus_bdev_children.rs | 196 ++-----
 io-engine/src/bdev/nexus/nexus_bdev_error.rs | 158 ++----
 .../src/bdev/nexus/nexus_bdev_rebuild.rs | 74 +--
 .../src/bdev/nexus/nexus_bdev_snapshot.rs | 18 +-
 io-engine/src/bdev/nexus/nexus_channel.rs | 23 +-
 io-engine/src/bdev/nexus/nexus_child.rs | 220 +++-----
 io-engine/src/bdev/nexus/nexus_io.rs | 136 ++---
 io-engine/src/bdev/nexus/nexus_io_log.rs | 34 +-
 .../src/bdev/nexus/nexus_io_subsystem.rs | 54 +-
 io-engine/src/bdev/nexus/nexus_iter.rs | 25 +-
 io-engine/src/bdev/nexus/nexus_module.rs | 9 +-
 io-engine/src/bdev/nexus/nexus_nbd.rs | 60 +--
 io-engine/src/bdev/nexus/nexus_persistence.rs | 29 +-
 io-engine/src/bdev/nexus/nexus_share.rs | 43 +-
 io-engine/src/bdev/null_bdev.rs | 46 +-
 io-engine/src/bdev/null_ng.rs | 22 +-
 io-engine/src/bdev/nvme.rs | 20 +-
 io-engine/src/bdev/nvmf.rs | 63 +--
 io-engine/src/bdev/nvmx/channel.rs | 51 +-
 io-engine/src/bdev/nvmx/controller.rs | 156 ++----
 io-engine/src/bdev/nvmx/controller_inner.rs | 62 +--
 io-engine/src/bdev/nvmx/controller_state.rs | 11 +-
 io-engine/src/bdev/nvmx/device.rs | 95 ++--
 io-engine/src/bdev/nvmx/handle.rs | 285 +++-------
 io-engine/src/bdev/nvmx/mod.rs | 30 +-
 io-engine/src/bdev/nvmx/namespace.rs | 32 +-
 io-engine/src/bdev/nvmx/poll_group.rs | 12 +-
 io-engine/src/bdev/nvmx/qpair.rs | 48 +-
 io-engine/src/bdev/nvmx/snapshot.rs | 4 +-
 io-engine/src/bdev/nvmx/uri.rs | 96 ++--
 io-engine/src/bdev/nvmx/utils.rs | 3 +-
 io-engine/src/bdev/nx.rs | 42 +-
 io-engine/src/bdev/uring.rs | 33 +-
 io-engine/src/bdev/util/uri.rs | 9 +-
 io-engine/src/bdev_api.rs | 16 +-
 io-engine/src/bin/casperf.rs | 40 +-
 io-engine/src/bin/initiator.rs | 118 +++--
 io-engine/src/bin/io-engine-client/context.rs | 35 +-
 io-engine/src/bin/io-engine-client/main.rs | 7 +-
 .../src/bin/io-engine-client/v0/bdev_cli.rs | 26 +-
 .../bin/io-engine-client/v0/controller_cli.rs | 16 +-
 .../src/bin/io-engine-client/v0/device_cli.rs | 36 +-
 .../bin/io-engine-client/v0/jsonrpc_cli.rs | 13 +-
 io-engine/src/bin/io-engine-client/v0/mod.rs | 17 +-
 .../io-engine-client/v0/nexus_child_cli.rs | 12 +-
 .../src/bin/io-engine-client/v0/nexus_cli.rs | 330 +++++------
 .../src/bin/io-engine-client/v0/perf_cli.rs | 8 +-
 .../src/bin/io-engine-client/v0/pool_cli.rs | 15 +-
 .../bin/io-engine-client/v0/rebuild_cli.rs | 11 +-
 .../bin/io-engine-client/v0/replica_cli.rs | 182 +++----
 .../bin/io-engine-client/v0/snapshot_cli.rs | 10 +-
 .../src/bin/io-engine-client/v1/bdev_cli.rs | 33 +-
 .../bin/io-engine-client/v1/controller_cli.rs | 13 +-
 .../src/bin/io-engine-client/v1/device_cli.rs | 36 +-
 .../bin/io-engine-client/v1/jsonrpc_cli.rs | 13 +-
 io-engine/src/bin/io-engine-client/v1/mod.rs | 18 +-
 .../io-engine-client/v1/nexus_child_cli.rs | 12 +-
 .../src/bin/io-engine-client/v1/nexus_cli.rs | 338 ++++++------
 .../src/bin/io-engine-client/v1/perf_cli.rs | 8 +-
 .../src/bin/io-engine-client/v1/pool_cli.rs | 17 +-
 .../bin/io-engine-client/v1/rebuild_cli.rs | 15 +-
 .../bin/io-engine-client/v1/replica_cli.rs | 155 +++---
 .../bin/io-engine-client/v1/snapshot_cli.rs | 83 ++-
 .../v1/snapshot_rebuild_cli.rs | 58 +--
 .../src/bin/io-engine-client/v1/stats_cli.rs | 21 +-
 .../src/bin/io-engine-client/v1/test_cli.rs | 66 +--
 io-engine/src/bin/io-engine.rs | 58 +--
 io-engine/src/bin/jsonrpc.rs | 11 +-
 io-engine/src/bin/nvmet.rs | 20 +-
 io-engine/src/bin/spdk.rs | 11 +-
 io-engine/src/core/bdev.rs | 74 +-
 io-engine/src/core/block_device.rs | 63 +--
 io-engine/src/core/device_events.rs | 28 +-
 io-engine/src/core/device_monitor.rs | 10 +-
 io-engine/src/core/env.rs | 195 ++-----
 .../core/fault_injection/bdev_io_injection.rs | 26 +-
 .../src/core/fault_injection/fault_method.rs | 22 +-
 .../src/core/fault_injection/inject_io_ctx.rs | 8 +-
 .../src/core/fault_injection/injection.rs | 80 +--
 .../src/core/fault_injection/injection_api.rs | 22 +-
 .../core/fault_injection/injection_state.rs | 4 +-
 io-engine/src/core/fault_injection/mod.rs | 12 +-
 io-engine/src/core/handle.rs | 103 +---
 io-engine/src/core/io_device.rs | 24 +-
 io-engine/src/core/io_driver.rs | 12 +-
 io-engine/src/core/lock.rs | 13 +-
 io-engine/src/core/mempool.rs | 16 +-
 io-engine/src/core/mod.rs | 251 ++-------
 io-engine/src/core/nic.rs | 11 +-
 io-engine/src/core/partition.rs | 9 +-
 io-engine/src/core/reactor.rs | 34 +-
 io-engine/src/core/runtime.rs | 8 +-
 io-engine/src/core/segment_map.rs | 2 +-
 io-engine/src/core/share.rs | 8 +-
 io-engine/src/core/snapshot.rs | 11 +-
 io-engine/src/core/wiper.rs | 90 +---
 io-engine/src/delay.rs | 10 +-
 io-engine/src/eventing/clone_events.rs | 20 +-
 io-engine/src/eventing/host_events.rs | 23 +-
 io-engine/src/eventing/io_engine_events.rs | 28 +-
 io-engine/src/eventing/nexus_child_events.rs | 20 +-
 io-engine/src/eventing/nexus_events.rs | 67 +--
 io-engine/src/eventing/pool_events.rs | 12 +-
 io-engine/src/eventing/replica_events.rs | 34 +-
 io-engine/src/eventing/snapshot_events.rs | 28 +-
 io-engine/src/grpc/controller_grpc.rs | 7 +-
 io-engine/src/grpc/mod.rs | 90 +---
 io-engine/src/grpc/server.rs | 74 +--
 io-engine/src/grpc/v0/bdev_grpc.rs | 38 +-
 io-engine/src/grpc/v0/json_grpc.rs | 31 +-
 io-engine/src/grpc/v0/mayastor_grpc.rs | 489 +++++-------------
 io-engine/src/grpc/v0/nexus_grpc.rs | 43 +-
 io-engine/src/grpc/v1/bdev.rs | 62 +--
 io-engine/src/grpc/v1/host.rs | 48 +-
 io-engine/src/grpc/v1/json.rs | 25 +-
 io-engine/src/grpc/v1/lvm/mod.rs | 27 +-
 io-engine/src/grpc/v1/nexus.rs | 188 ++-----
 io-engine/src/grpc/v1/pool.rs | 182 ++-----
 io-engine/src/grpc/v1/replica.rs | 159 ++----
 io-engine/src/grpc/v1/snapshot.rs | 122 ++---
 io-engine/src/grpc/v1/snapshot_rebuild.rs | 91 +---
 io-engine/src/grpc/v1/stats.rs | 60 +--
 io-engine/src/grpc/v1/test.rs | 129 ++---
 io-engine/src/host/blk_device.rs | 39 +-
 io-engine/src/jsonrpc.rs | 38 +-
 io-engine/src/logger.rs | 32 +-
 io-engine/src/lvm/cli.rs | 64 +--
 io-engine/src/lvm/error.rs | 100 +---
 io-engine/src/lvm/lv_replica.rs | 124 ++---
 io-engine/src/lvm/mod.rs | 113 ++--
 io-engine/src/lvm/property.rs | 21 +-
 io-engine/src/lvm/vg_pool.rs | 69 +--
 io-engine/src/lvs/lvol_snapshot.rs | 253 +++------
 io-engine/src/lvs/lvs_error.rs | 161 ++----
 io-engine/src/lvs/lvs_iter.rs | 6 +-
 io-engine/src/lvs/lvs_lvol.rs | 245 +++------
 io-engine/src/lvs/lvs_store.rs | 214 ++------
 io-engine/src/lvs/mod.rs | 117 ++---
 io-engine/src/persistent_store.rs | 31 +-
 io-engine/src/pool_backend.rs | 75 +--
 io-engine/src/rebuild/bdev_rebuild.rs | 33 +-
 io-engine/src/rebuild/mod.rs | 9 +-
 io-engine/src/rebuild/nexus_rebuild.rs | 73 +--
 io-engine/src/rebuild/rebuild_descriptor.rs | 59 +--
 io-engine/src/rebuild/rebuild_error.rs | 19 +-
 io-engine/src/rebuild/rebuild_instances.rs | 16 +-
 io-engine/src/rebuild/rebuild_job.rs | 33 +-
 io-engine/src/rebuild/rebuild_job_backend.rs | 51 +-
 io-engine/src/rebuild/rebuild_state.rs | 4 +-
 io-engine/src/rebuild/rebuild_task.rs | 24 +-
 io-engine/src/rebuild/rebuilders.rs | 21 +-
 io-engine/src/rebuild/snapshot_rebuild.rs | 10 +-
 io-engine/src/replica_backend.rs | 75 +--
 io-engine/src/sleep.rs | 4 +-
 io-engine/src/store/etcd.rs | 32 +-
 io-engine/src/store/store_defs.rs | 50 +-
 io-engine/src/subsys/config/mod.rs | 26 +-
 io-engine/src/subsys/config/opts.rs | 64 +--
 io-engine/src/subsys/config/pool.rs | 14 +-
 io-engine/src/subsys/mod.rs | 21 +-
 io-engine/src/subsys/nvmf/admin_cmd.rs | 56 +-
 io-engine/src/subsys/nvmf/mod.rs | 18 +-
 io-engine/src/subsys/nvmf/poll_groups.rs | 6 +-
 io-engine/src/subsys/nvmf/subsystem.rs | 301 ++++------
 io-engine/src/subsys/nvmf/target.rs | 139 ++---
 io-engine/src/subsys/nvmf/transport.rs | 30 +-
 io-engine/src/subsys/registration/mod.rs | 8 +-
 .../subsys/registration/registration_grpc.rs | 16 +-
 io-engine/tests/block_device_nvmf.rs | 164 ++----
 io-engine/tests/child_size.rs | 18 +-
 io-engine/tests/core.rs | 28 +-
 io-engine/tests/ftl_mount_fs.rs | 9 +-
 io-engine/tests/lock.rs | 3 +-
 io-engine/tests/lock_lba_range.rs | 14 +-
 io-engine/tests/lvs_grow.rs | 57 +-
 io-engine/tests/lvs_import.rs | 7 +-
 io-engine/tests/lvs_limits.rs | 25 +-
 io-engine/tests/lvs_pool.rs | 50 +-
 io-engine/tests/malloc_bdev.rs | 2 +-
 io-engine/tests/memory_pool.rs | 10 +-
 io-engine/tests/mount_fs.rs | 2 +-
 io-engine/tests/nexus_add_remove.rs | 4 +-
 io-engine/tests/nexus_child_location.rs | 3 +-
 io-engine/tests/nexus_child_online.rs | 15 +-
 io-engine/tests/nexus_child_retire.rs | 43 +-
 io-engine/tests/nexus_children_add_remove.rs | 47 +-
 io-engine/tests/nexus_crd.rs | 28 +-
 io-engine/tests/nexus_create_destroy.rs | 7 +-
 io-engine/tests/nexus_fault_injection.rs | 85 +--
 io-engine/tests/nexus_fio.rs | 14 +-
 io-engine/tests/nexus_io.rs | 95 +---
 io-engine/tests/nexus_rebuild.rs | 42 +-
 io-engine/tests/nexus_rebuild_parallel.rs | 26 +-
 io-engine/tests/nexus_rebuild_partial.rs | 28 +-
 io-engine/tests/nexus_rebuild_partial_loop.rs | 29 +-
 io-engine/tests/nexus_rebuild_source.rs | 16 +-
 io-engine/tests/nexus_rebuild_verify.rs | 12 +-
 io-engine/tests/nexus_replica_resize.rs | 16 +-
 io-engine/tests/nexus_restart.rs | 9 +-
 io-engine/tests/nexus_share.rs | 9 +-
 io-engine/tests/nexus_thin.rs | 4 +-
 io-engine/tests/nexus_thin_no_space.rs | 41 +-
 io-engine/tests/nexus_thin_rebuild.rs | 14 +-
 io-engine/tests/nexus_with_local.rs | 16 +-
 io-engine/tests/nvme_device_timeout.rs | 26 +-
 io-engine/tests/nvmf.rs | 51 +-
 io-engine/tests/nvmf_connect.rs | 4 +-
 io-engine/tests/persistence.rs | 43 +-
 io-engine/tests/poller.rs | 11 +-
 io-engine/tests/reactor.rs | 8 +-
 io-engine/tests/reactor_block_on.rs | 8 +-
 io-engine/tests/replica_crd.rs | 6 +-
 io-engine/tests/replica_snapshot.rs | 11 +-
 io-engine/tests/replica_thin.rs | 4 +-
 io-engine/tests/replica_thin_no_space.rs | 18 +-
 io-engine/tests/replica_timeout.rs | 10 +-
 io-engine/tests/replica_uri.rs | 18 +-
 io-engine/tests/reset.rs | 3 +-
 io-engine/tests/resource_stats.rs | 60 +--
 io-engine/tests/snapshot_lvol.rs | 188 +++---
 io-engine/tests/snapshot_nexus.rs | 71 +--
 io-engine/tests/snapshot_rebuild.rs | 39 +-
 io-engine/tests/wipe.rs | 27 +-
 jsonrpc/src/error.rs | 15 +-
 jsonrpc/src/lib.rs | 12 +-
 jsonrpc/src/test.rs | 11 +-
 libnvme-rs/src/error.rs | 4 +-
 libnvme-rs/src/nvme_tree.rs | 4 +-
 libnvme-rs/src/nvme_uri.rs | 132 ++---
 scripts/rust-style.sh | 2 +-
 spdk-rs | 2 +-
 utils/dependencies | 2 +-
 264 files changed, 3847 insertions(+), 9447 deletions(-)
 delete mode 100644 .rustfmt.toml

diff --git a/.rustfmt.toml b/.rustfmt.toml
deleted file mode 100644
index f1d06f558..000000000
--- a/.rustfmt.toml
+++ /dev/null
@@ -1,19 +0,0 @@
-# changed from 100 to 80
-max_width = 80
-# default is false
-wrap_comments = true
-comment_width = 80
-# was true
-struct_lit_single_line = false
-#changed from Mixed
-imports_layout = "HorizontalVertical"
-# changed from Preserve (merge_imports = false)
-imports_granularity="Crate"
-#default false
-spaces_around_ranges = true
-# was 2015
-edition = "2018"
-
-ignore = [
-    "utils/dependencies/apis/io-engine"
-]
diff --git a/io-engine-bench/build.rs b/io-engine-bench/build.rs
index e1cc949fb..021c4a028 100644
--- a/io-engine-bench/build.rs
+++ b/io-engine-bench/build.rs
@@ -1,7 +1,6 @@
 fn main() {
     let profile = std::env::var("PROFILE").unwrap();
-    let spdk_rpath =
-        format!("{}/target/{}", std::env::var("SRCDIR").unwrap(), profile);
+    let spdk_rpath = format!("{}/target/{}", std::env::var("SRCDIR").unwrap(), profile);
     println!("cargo:rustc-link-search=native={spdk_rpath}");
     println!("cargo:rustc-link-arg=-Wl,-rpath={spdk_rpath}");
 }
diff --git a/io-engine-bench/src/nexus.rs b/io-engine-bench/src/nexus.rs
index a97aeef8a..5230ecea6 100644
--- a/io-engine-bench/src/nexus.rs
+++ b/io-engine-bench/src/nexus.rs
@@ -1,8 +1,6 @@
 use criterion::{criterion_group, criterion_main, Criterion};
 use io_engine::{
-    bdev::nexus::nexus_create,
-    constants::NVME_NQN_PREFIX,
-    core::MayastorCliArgs,
+    bdev::nexus::nexus_create, constants::NVME_NQN_PREFIX, core::MayastorCliArgs,
     grpc::v1::nexus::nexus_destroy,
 };
 use std::sync::Arc;
@@ -15,10 +13,7 @@ use common::compose::{
         mayastor::{BdevShareRequest, BdevUri, CreateNexusRequest, Null},
         GrpcConnect,
     },
-    Binary,
-    Builder,
-    ComposeTest,
-    MayastorTest,
+    Binary, Builder, ComposeTest, MayastorTest,
 };
 
 /// Infer the build type from the `OUT_DIR` and `SRCDIR`.
@@ -60,8 +55,7 @@ fn new_environment<'a>() -> Arc<MayastorTest<'a>> {
 /// Get remote nvmf targets to use as nexus children.
async fn get_children(compose: Arc) -> &'static Vec { - static STATIC_TARGETS: tokio::sync::OnceCell> = - tokio::sync::OnceCell::const_new(); + static STATIC_TARGETS: tokio::sync::OnceCell> = tokio::sync::OnceCell::const_new(); STATIC_TARGETS .get_or_init(|| async move { @@ -171,9 +165,7 @@ impl Drop for GrpcNexus { let nexus_hdl = &mut hdls.last_mut().unwrap(); nexus_hdl .mayastor - .destroy_nexus(mayastor::DestroyNexusRequest { - uuid, - }) + .destroy_nexus(mayastor::DestroyNexusRequest { uuid }) .await .unwrap(); }); @@ -183,10 +175,7 @@ impl Drop for GrpcNexus { } } /// Create a new nexus via grpc and return it as droppable to be destroyed. -async fn nexus_create_grpc( - compose: &Arc, - nr_children: usize, -) -> GrpcNexus { +async fn nexus_create_grpc(compose: &Arc, nr_children: usize) -> GrpcNexus { let children = get_children(compose.clone()) .await .iter() @@ -219,9 +208,8 @@ fn criterion_benchmark(c: &mut Criterion) { group // Benchmark nexus create in-binary .bench_function("direct", |b| { - b.to_async(&runtime).iter_with_large_drop(|| { - nexus_create_direct(&ms_environment, &compose, 3) - }) + b.to_async(&runtime) + .iter_with_large_drop(|| nexus_create_direct(&ms_environment, &compose, 3)) }) // Benchmark nexus create via gRPC .bench_function("grpc", |b| { diff --git a/io-engine-tests/src/bdev.rs b/io-engine-tests/src/bdev.rs index 68560f444..40b960674 100644 --- a/io-engine-tests/src/bdev.rs +++ b/io-engine-tests/src/bdev.rs @@ -1,14 +1,10 @@ use super::compose::rpc::v1::{ bdev::{Bdev, CreateBdevRequest, ListBdevOptions}, - SharedRpcHandle, - Status, + SharedRpcHandle, Status, }; /// Creates a bdev. -pub async fn create_bdev( - rpc: SharedRpcHandle, - uri: &str, -) -> Result { +pub async fn create_bdev(rpc: SharedRpcHandle, uri: &str) -> Result { rpc.lock() .await .bdev @@ -24,18 +20,13 @@ pub async fn list_bdevs(rpc: SharedRpcHandle) -> Result, Status> { rpc.lock() .await .bdev - .list(ListBdevOptions { - name: None, - }) + .list(ListBdevOptions { name: None }) .await .map(|r| r.into_inner().bdevs) } /// Finds a bdev by its name. 
-pub async fn find_bdev_by_name( - rpc: SharedRpcHandle, - name: &str, -) -> Option { +pub async fn find_bdev_by_name(rpc: SharedRpcHandle, name: &str) -> Option { match list_bdevs(rpc).await { Err(_) => None, Ok(nn) => nn.into_iter().find(|p| p.name == name), diff --git a/io-engine-tests/src/bdev_io.rs b/io-engine-tests/src/bdev_io.rs index cbef056d0..c34864c43 100644 --- a/io-engine-tests/src/bdev_io.rs +++ b/io-engine-tests/src/bdev_io.rs @@ -54,22 +54,14 @@ pub async fn read_some( Ok(()) } -pub async fn write_zeroes_some( - nexus_name: &str, - offset: u64, - len: u64, -) -> Result<(), CoreError> { +pub async fn write_zeroes_some(nexus_name: &str, offset: u64, len: u64) -> Result<(), CoreError> { let h = UntypedBdevHandle::open(nexus_name, true, false)?; h.write_zeroes_at(offset, len).await?; Ok(()) } -pub async fn read_some_safe( - nexus_name: &str, - offset: u64, - fill: u8, -) -> Result { +pub async fn read_some_safe(nexus_name: &str, offset: u64, fill: u8) -> Result { let h = UntypedBdevHandle::open(nexus_name, true, false)?; let buflen = u64::from(h.get_bdev().block_len() * 2); diff --git a/io-engine-tests/src/compose/mod.rs b/io-engine-tests/src/compose/mod.rs index 40b00066d..560717d6c 100644 --- a/io-engine-tests/src/compose/mod.rs +++ b/io-engine-tests/src/compose/mod.rs @@ -7,16 +7,8 @@ use tokio::sync::oneshot::channel; use crate::mayastor_test_init_ex; use io_engine::{ core::{ - device_monitor_loop, - mayastor_env_stop, - runtime, - MayastorCliArgs, - MayastorEnvironment, - ProtectedSubsystems, - Reactor, - Reactors, - ResourceLockManager, - ResourceLockManagerConfig, + device_monitor_loop, mayastor_env_stop, runtime, MayastorCliArgs, MayastorEnvironment, + ProtectedSubsystems, Reactor, Reactors, ResourceLockManager, ResourceLockManagerConfig, GLOBAL_RC, }, grpc, @@ -81,10 +73,7 @@ impl<'a> MayastorTest<'a> { Self::new_ex(args, None) } - pub fn new_ex( - args: MayastorCliArgs, - log_level: Option<&str>, - ) -> MayastorTest<'static> { + pub fn new_ex(args: MayastorCliArgs, log_level: Option<&str>) -> MayastorTest<'static> { let (tx, rx) = bounded(1); mayastor_test_init_ex(args.log_format.unwrap_or_default(), log_level); let thdl = std::thread::Builder::new() diff --git a/io-engine-tests/src/compose/rpc/v0.rs b/io-engine-tests/src/compose/rpc/v0.rs index 805ceb6fb..fac22798c 100644 --- a/io-engine-tests/src/compose/rpc/v0.rs +++ b/io-engine-tests/src/compose/rpc/v0.rs @@ -1,9 +1,7 @@ use composer::ComposeTest; use mayastor::{ - bdev_rpc_client::BdevRpcClient, - json_rpc_client::JsonRpcClient, - mayastor_client::MayastorClient, + bdev_rpc_client::BdevRpcClient, json_rpc_client::JsonRpcClient, mayastor_client::MayastorClient, }; use std::{ @@ -28,15 +26,10 @@ pub struct RpcHandle { impl RpcHandle { /// connect to the containers and construct a handle - pub(super) async fn connect( - name: String, - endpoint: SocketAddr, - ) -> Result { + pub(super) async fn connect(name: String, endpoint: SocketAddr) -> Result { let mut attempts = 40; loop { - if TcpStream::connect_timeout(&endpoint, Duration::from_millis(100)) - .is_ok() - { + if TcpStream::connect_timeout(&endpoint, Duration::from_millis(100)).is_ok() { break; } else { thread::sleep(Duration::from_millis(101)); @@ -74,9 +67,7 @@ pub struct GrpcConnect<'a> { impl<'a> GrpcConnect<'a> { /// create new gRPC connect object pub fn new(comp: &'a ComposeTest) -> Self { - Self { - ct: comp, - } + Self { ct: comp } } /// return grpc handles to the containers diff --git a/io-engine-tests/src/compose/rpc/v1.rs 
b/io-engine-tests/src/compose/rpc/v1.rs index feb9fcd13..dcb3b21ce 100644 --- a/io-engine-tests/src/compose/rpc/v1.rs +++ b/io-engine-tests/src/compose/rpc/v1.rs @@ -57,15 +57,10 @@ pub struct RpcHandle { impl RpcHandle { /// connect to the containers and construct a handle - pub(super) async fn connect( - name: String, - endpoint: SocketAddr, - ) -> Result { + pub(super) async fn connect(name: String, endpoint: SocketAddr) -> Result { let mut attempts = 40; loop { - if TcpStream::connect_timeout(&endpoint, Duration::from_millis(100)) - .is_ok() - { + if TcpStream::connect_timeout(&endpoint, Duration::from_millis(100)).is_ok() { break; } else { thread::sleep(Duration::from_millis(101)); @@ -88,29 +83,25 @@ impl RpcHandle { .await .unwrap(); - let replica = - replica::ReplicaRpcClient::connect(format!("http://{endpoint}")) - .await - .unwrap(); + let replica = replica::ReplicaRpcClient::connect(format!("http://{endpoint}")) + .await + .unwrap(); let host = host::HostRpcClient::connect(format!("http://{endpoint}")) .await .unwrap(); - let nexus = - nexus::NexusRpcClient::connect(format!("http://{endpoint}")) - .await - .unwrap(); + let nexus = nexus::NexusRpcClient::connect(format!("http://{endpoint}")) + .await + .unwrap(); - let snapshot = - snapshot::SnapshotRpcClient::connect(format!("http://{endpoint}")) - .await - .unwrap(); + let snapshot = snapshot::SnapshotRpcClient::connect(format!("http://{endpoint}")) + .await + .unwrap(); - let stats = - stats::StatsRpcClient::connect(format!("http://{endpoint}")) - .await - .unwrap(); + let stats = stats::StatsRpcClient::connect(format!("http://{endpoint}")) + .await + .unwrap(); let test = test::TestRpcClient::connect(format!("http://{endpoint}")) .await @@ -139,9 +130,7 @@ pub struct GrpcConnect<'a> { impl<'a> GrpcConnect<'a> { /// create new gRPC connect object pub fn new(comp: &'a ComposeTest) -> Self { - Self { - ct: comp, - } + Self { ct: comp } } /// return grpc handles to the containers @@ -176,10 +165,7 @@ impl<'a> GrpcConnect<'a> { } } - pub async fn grpc_handle_shared( - &self, - name: &str, - ) -> Result { + pub async fn grpc_handle_shared(&self, name: &str) -> Result { self.grpc_handle(name).await.map(|rpc| { let name = rpc.name.clone(); let endpoint = rpc.endpoint; diff --git a/io-engine-tests/src/error_bdev.rs b/io-engine-tests/src/error_bdev.rs index bd857b290..cc0a605d3 100644 --- a/io-engine-tests/src/error_bdev.rs +++ b/io-engine-tests/src/error_bdev.rs @@ -1,8 +1,5 @@ use spdk_rs::libspdk::{ - create_aio_bdev, - vbdev_error_create, - vbdev_error_inject_error, - vbdev_error_inject_opts, + create_aio_bdev, vbdev_error_create, vbdev_error_inject_error, vbdev_error_inject_opts, }; pub use spdk_rs::libspdk::{SPDK_BDEV_IO_TYPE_READ, SPDK_BDEV_IO_TYPE_WRITE}; @@ -16,32 +13,23 @@ pub fn create_error_bdev(error_device: &str, backing_device: &str) { unsafe { // this allows us to create a bdev without its name being a uri - retval = create_aio_bdev( - cname.as_ptr(), - filename.as_ptr(), - 512, - false, - false, - ) + retval = create_aio_bdev(cname.as_ptr(), filename.as_ptr(), 512, false, false) }; assert_eq!(retval, 0); - let err_bdev_name_str = std::ffi::CString::new(error_device.to_string()) - .expect("Failed to create name string"); + let err_bdev_name_str = + std::ffi::CString::new(error_device.to_string()).expect("Failed to create name string"); unsafe { // create the error bdev around it - retval = vbdev_error_create( - err_bdev_name_str.as_ptr(), - std::ptr::null_mut(), - ); + retval = 
vbdev_error_create(err_bdev_name_str.as_ptr(), std::ptr::null_mut()); } assert_eq!(retval, 0); } pub fn inject_error(error_device: &str, op: u32, mode: u32, count: u32) { let retval: i32; - let err_bdev_name_str = std::ffi::CString::new(error_device) - .expect("Failed to create name string"); + let err_bdev_name_str = + std::ffi::CString::new(error_device).expect("Failed to create name string"); let raw = err_bdev_name_str.into_raw(); let opts = vbdev_error_inject_opts { diff --git a/io-engine-tests/src/file_io.rs b/io-engine-tests/src/file_io.rs index 871064064..7293eba8b 100644 --- a/io-engine-tests/src/file_io.rs +++ b/io-engine-tests/src/file_io.rs @@ -113,17 +113,17 @@ pub async fn test_write_to_file( f.seek(SeekFrom::Start(offset.bytes())).await?; // Write. - for _i in 0 .. count { + for _i in 0..count { f.write_all(&src_buf).await?; } // Validate. f.seek(SeekFrom::Start(offset.bytes())).await?; let mut pos: u64 = offset.bytes(); - for _i in 0 .. count { + for _i in 0..count { f.read_exact(&mut dst_buf).await?; - for k in 0 .. src_buf.len() { + for k in 0..src_buf.len() { if src_buf[k] != dst_buf[k] { return Err(std::io::Error::new( std::io::ErrorKind::Other, @@ -143,9 +143,7 @@ pub async fn test_write_to_file( Ok(()) } -pub async fn compute_file_checksum( - path: impl AsRef, -) -> std::io::Result { +pub async fn compute_file_checksum(path: impl AsRef) -> std::io::Result { let mut f = OpenOptions::new() .write(false) .read(true) @@ -162,7 +160,7 @@ pub async fn compute_file_checksum( if n == 0 { break; } - hasher.consume(&buf[.. n]); + hasher.consume(&buf[..n]); } Ok(hex::encode(hasher.compute().0)) @@ -213,7 +211,7 @@ pub async fn compare_files( break; } - for i in 0 .. na { + for i in 0..na { if buf_a[i] != buf_b[i] { return Err(Error::new( ErrorKind::Other, diff --git a/io-engine-tests/src/fio.rs b/io-engine-tests/src/fio.rs index a7c5288ec..805c35774 100644 --- a/io-engine-tests/src/fio.rs +++ b/io-engine-tests/src/fio.rs @@ -225,10 +225,7 @@ impl FioBuilder { .expect("FIO builder is expected to succeed") } - pub fn with_jobs( - &mut self, - jobs: impl Iterator, - ) -> &mut Self { + pub fn with_jobs(&mut self, jobs: impl Iterator) -> &mut Self { jobs.for_each(|j| { self.with_job(j); }); @@ -254,8 +251,7 @@ impl Fio { self.fio_binary = "$FIO".to_string(); } - let cmd = - format!("sudo LD_PRELOAD=$FIO_SPDK {fio}", fio = self.fio_binary); + let cmd = format!("sudo LD_PRELOAD=$FIO_SPDK {fio}", fio = self.fio_binary); let args = self .jobs @@ -271,12 +267,8 @@ impl Fio { } let start_time = Instant::now(); - let (exit, stdout, stderr) = run_script::run( - &self.script, - &Vec::new(), - &run_script::ScriptOptions::new(), - ) - .unwrap(); + let (exit, stdout, stderr) = + run_script::run(&self.script, &Vec::new(), &run_script::ScriptOptions::new()).unwrap(); self.total_time = start_time.elapsed(); self.push_err(&stderr); @@ -325,8 +317,7 @@ impl Fio { .ok_or_else(|| "'jobs' item in output is not an array".to_string())? .iter() .for_each(|j| { - let name = - j.get("jobname").unwrap().as_str().unwrap().to_string(); + let name = j.get("jobname").unwrap().as_str().unwrap().to_string(); let err = j.get("error").unwrap().as_i64().unwrap() as i32; if let Some(j) = self.find_job_mut(&name) { diff --git a/io-engine-tests/src/lib.rs b/io-engine-tests/src/lib.rs index 09ccfa5b0..ba87ed84c 100644 --- a/io-engine-tests/src/lib.rs +++ b/io-engine-tests/src/lib.rs @@ -84,7 +84,7 @@ macro_rules! reactor_poll { } }; ($n:expr) => { - for _ in 0 .. 
$n { + for _ in 0..$n { io_engine::core::Reactors::current().poll_once(); } io_engine::core::Reactors::current(); @@ -346,10 +346,10 @@ pub fn fio_run_verify(device: &str) -> Result { --runtime=5 --bs=4k --verify=crc32 --group_reporting=1 --output-format=terse \ --verify_fatal=1 --verify_async=2 --filename=$1 ", - &vec![device.into()], - &run_script::ScriptOptions::new(), + &vec![device.into()], + &run_script::ScriptOptions::new(), ) - .unwrap(); + .unwrap(); if exit == 0 { Ok(stdout) } else { @@ -379,11 +379,7 @@ pub fn dd_urandom_blkdev_test(device: &str) -> i32 { &run_script::ScriptOptions::new(), ) .unwrap(); - tracing::debug!( - "dd_urandom_blkdev:\nstdout: {}\nstderr: {}", - stdout, - stderr - ); + tracing::debug!("dd_urandom_blkdev:\nstdout: {}\nstderr: {}", stdout, stderr); exit } pub fn dd_urandom_blkdev(device: &str) -> i32 { @@ -406,16 +402,12 @@ pub fn dd_urandom_file_size(device: &str, size: u64) -> String { &vec![device.into(), size.to_string()], &run_script::ScriptOptions::new(), ) - .unwrap(); + .unwrap(); assert_eq!(exit, 0); stdout } -pub fn compare_nexus_device( - nexus_device: &str, - device: &str, - expected_pass: bool, -) -> String { +pub fn compare_nexus_device(nexus_device: &str, device: &str, expected_pass: bool) -> String { let (exit, stdout, _stderr) = run_script::run( r#" cmp -n `blockdev --getsize64 $1` $1 $2 0 5M @@ -433,12 +425,7 @@ pub fn compare_nexus_device( stdout } -pub fn compare_devices( - first_device: &str, - second_device: &str, - size: u64, - expected_pass: bool, -) { +pub fn compare_devices(first_device: &str, second_device: &str, size: u64, expected_pass: bool) { let (exit, stdout, stderr) = run_script::run( r#" cmp -b $1 $2 -n $3 @@ -480,11 +467,7 @@ pub fn get_device_size(nexus_device: &str) -> u64 { } /// Waits for the rebuild to reach `state`, up to `timeout` -pub async fn wait_for_rebuild( - dst_uri: String, - state: RebuildState, - timeout: Duration, -) { +pub async fn wait_for_rebuild(dst_uri: String, state: RebuildState, timeout: Duration) { let (s, r) = unbounded::<()>(); let job = match NexusRebuildJob::lookup(&dst_uri) { Ok(job) => job, diff --git a/io-engine-tests/src/nexus.rs b/io-engine-tests/src/nexus.rs index db67b6196..0ad01eef1 100644 --- a/io-engine-tests/src/nexus.rs +++ b/io-engine-tests/src/nexus.rs @@ -1,38 +1,19 @@ use super::{ compose::rpc::v1::{ nexus::{ - AddChildNexusRequest, - Child, - ChildAction, - ChildOperationRequest, - ChildState, - ChildStateReason, - CreateNexusRequest, - DestroyNexusRequest, - ListNexusOptions, - Nexus, - NexusNvmePreemption, - NvmeReservation, - PublishNexusRequest, - RebuildHistoryRecord, - RebuildHistoryRequest, - RemoveChildNexusRequest, - ResizeNexusRequest, + AddChildNexusRequest, Child, ChildAction, ChildOperationRequest, ChildState, + ChildStateReason, CreateNexusRequest, DestroyNexusRequest, ListNexusOptions, Nexus, + NexusNvmePreemption, NvmeReservation, PublishNexusRequest, RebuildHistoryRecord, + RebuildHistoryRequest, RemoveChildNexusRequest, ResizeNexusRequest, ShutdownNexusRequest, }, snapshot::SnapshotInfo, - SharedRpcHandle, - Status, + SharedRpcHandle, Status, }, file_io::DataSize, fio::Fio, generate_uuid, - nvmf::{ - test_fio_to_nvmf, - test_fio_to_nvmf_aio, - test_write_to_nvmf, - NvmfLocation, - }, + nvmf::{test_fio_to_nvmf, test_fio_to_nvmf_aio, test_write_to_nvmf, NvmfLocation}, replica::ReplicaBuilder, }; use io_engine::{ @@ -44,9 +25,7 @@ use std::time::{Duration, Instant}; use tonic::Code; use io_engine_api::v1::snapshot::{ - ListSnapshotsRequest, - 
NexusCreateSnapshotReplicaDescriptor, - NexusCreateSnapshotReplicaStatus, + ListSnapshotsRequest, NexusCreateSnapshotReplicaDescriptor, NexusCreateSnapshotReplicaStatus, NexusCreateSnapshotRequest, }; @@ -222,9 +201,7 @@ impl NexusBuilder { .lock() .await .nexus - .shutdown_nexus(ShutdownNexusRequest { - uuid: self.uuid(), - }) + .shutdown_nexus(ShutdownNexusRequest { uuid: self.uuid() }) .await .map(|_| ()) } @@ -234,9 +211,7 @@ impl NexusBuilder { .lock() .await .nexus - .destroy_nexus(DestroyNexusRequest { - uuid: self.uuid(), - }) + .destroy_nexus(DestroyNexusRequest { uuid: self.uuid() }) .await .map(|_| ()) } @@ -269,11 +244,7 @@ impl NexusBuilder { .map(|r| r.into_inner().nexus.unwrap()) } - pub async fn add_child( - &self, - bdev: &str, - norebuild: bool, - ) -> Result { + pub async fn add_child(&self, bdev: &str, norebuild: bool) -> Result { self.rpc() .lock() .await @@ -287,11 +258,7 @@ impl NexusBuilder { .map(|r| r.into_inner().nexus.unwrap()) } - pub async fn add_replica( - &self, - r: &ReplicaBuilder, - norebuild: bool, - ) -> Result { + pub async fn add_replica(&self, r: &ReplicaBuilder, norebuild: bool) -> Result { self.add_child(&self.replica_uri(r), norebuild).await } @@ -344,10 +311,7 @@ impl NexusBuilder { .map(|r| r.into_inner().nexus.unwrap()) } - pub async fn remove_child_replica( - &self, - r: &ReplicaBuilder, - ) -> Result { + pub async fn remove_child_replica(&self, r: &ReplicaBuilder) -> Result { self.remove_child_bdev(&self.replica_uri(r)).await } @@ -365,10 +329,7 @@ impl NexusBuilder { .map(|r| r.into_inner().nexus.unwrap()) } - pub async fn online_child_replica( - &self, - r: &ReplicaBuilder, - ) -> Result { + pub async fn online_child_replica(&self, r: &ReplicaBuilder) -> Result { self.online_child_bdev(&self.replica_uri(r)).await } @@ -382,10 +343,7 @@ impl NexusBuilder { .await } - pub async fn offline_child_bdev( - &self, - bdev: &str, - ) -> Result { + pub async fn offline_child_bdev(&self, bdev: &str) -> Result { self.rpc() .lock() .await @@ -399,10 +357,7 @@ impl NexusBuilder { .map(|r| r.into_inner().nexus.unwrap()) } - pub async fn offline_child_replica( - &self, - r: &ReplicaBuilder, - ) -> Result { + pub async fn offline_child_replica(&self, r: &ReplicaBuilder) -> Result { self.offline_child_bdev(&self.replica_uri(r)).await } @@ -412,13 +367,8 @@ impl NexusBuilder { d: Duration, ) -> Result<(), Status> { self.offline_child_replica(r).await?; - self.wait_replica_state( - r, - ChildState::Degraded, - Some(ChildStateReason::ByClient), - d, - ) - .await + self.wait_replica_state(r, ChildState::Degraded, Some(ChildStateReason::ByClient), d) + .await } pub async fn add_injection_at_replica( @@ -441,16 +391,12 @@ impl NexusBuilder { Ok(inj_uri) } - pub async fn get_rebuild_history( - &self, - ) -> Result, Status> { + pub async fn get_rebuild_history(&self) -> Result, Status> { self.rpc() .lock() .await .nexus - .get_rebuild_history(RebuildHistoryRequest { - uuid: self.uuid(), - }) + .get_rebuild_history(RebuildHistoryRequest { uuid: self.uuid() }) .await .map(|r| r.into_inner().records) } @@ -461,15 +407,10 @@ impl NexusBuilder { .await? 
.into_iter() .find(|p| p.uuid == uuid) - .ok_or_else(|| { - Status::new(Code::NotFound, format!("Nexus '{uuid}' not found")) - }) + .ok_or_else(|| Status::new(Code::NotFound, format!("Nexus '{uuid}' not found"))) } - pub async fn get_nexus_replica_child( - &self, - r: &ReplicaBuilder, - ) -> Result { + pub async fn get_nexus_replica_child(&self, r: &ReplicaBuilder) -> Result { let child_uri = self.replica_uri(r); let n = find_nexus_by_uuid(self.rpc(), &self.uuid()).await?; n.children @@ -486,10 +427,7 @@ impl NexusBuilder { }) } - pub async fn wait_children_online( - &self, - timeout: Duration, - ) -> Result<(), Status> { + pub async fn wait_children_online(&self, timeout: Duration) -> Result<(), Status> { let start = Instant::now(); loop { @@ -564,17 +502,12 @@ pub async fn find_nexus(rpc: SharedRpcHandle, uuid: &str) -> Option { } /// TODO -pub async fn find_nexus_by_uuid( - rpc: SharedRpcHandle, - uuid: &str, -) -> Result { +pub async fn find_nexus_by_uuid(rpc: SharedRpcHandle, uuid: &str) -> Result { list_nexuses(rpc) .await? .into_iter() .find(|n| n.uuid == uuid) - .ok_or_else(|| { - Status::new(Code::NotFound, format!("Nexus '{uuid}' not found")) - }) + .ok_or_else(|| Status::new(Code::NotFound, format!("Nexus '{uuid}' not found"))) } /// TODO @@ -588,18 +521,12 @@ pub async fn test_write_to_nexus( } /// TODO -pub async fn test_fio_to_nexus( - nex: &NexusBuilder, - fio: Fio, -) -> std::io::Result<()> { +pub async fn test_fio_to_nexus(nex: &NexusBuilder, fio: Fio) -> std::io::Result<()> { test_fio_to_nvmf(&nex.nvmf_location(), fio).await } /// TODO -pub async fn test_fio_to_nexus_aio( - nex: &NexusBuilder, - fio: Fio, -) -> std::io::Result<()> { +pub async fn test_fio_to_nexus_aio(nex: &NexusBuilder, fio: Fio) -> std::io::Result<()> { test_fio_to_nvmf_aio(&nex.nvmf_location(), fio).await } diff --git a/io-engine-tests/src/nvme.rs b/io-engine-tests/src/nvme.rs index 6faf5b4b0..046310764 100644 --- a/io-engine-tests/src/nvme.rs +++ b/io-engine-tests/src/nvme.rs @@ -99,9 +99,7 @@ pub fn nvme_connect( .unwrap(); if !status.success() { - let msg = format!( - "failed to connect to {target_addr}, nqn '{nqn}': {status}" - ); + let msg = format!("failed to connect to {target_addr}, nqn '{nqn}': {status}"); if must_succeed { panic!("{}", msg); } else { @@ -146,18 +144,14 @@ pub fn list_mayastor_nvme_devices() -> Vec { .collect() } -pub fn find_mayastor_nvme_device( - serial: &str, -) -> Option { +pub fn find_mayastor_nvme_device(serial: &str) -> Option { list_mayastor_nvme_devices() .into_iter() .find(|d| d.serial == serial) } /// Returns /dev/ file path for the given NVMe serial. -pub fn find_mayastor_nvme_device_path( - serial: &str, -) -> std::io::Result { +pub fn find_mayastor_nvme_device_path(serial: &str) -> std::io::Result { list_mayastor_nvme_devices() .into_iter() .find(|d| d.serial == serial) diff --git a/io-engine-tests/src/nvmf.rs b/io-engine-tests/src/nvmf.rs index 4c9375ae4..ad5549844 100644 --- a/io-engine-tests/src/nvmf.rs +++ b/io-engine-tests/src/nvmf.rs @@ -15,11 +15,7 @@ pub struct NvmfLocation { } impl NvmfLocation { - pub fn from_nexus_info( - addr: &str, - nexus_name: &str, - nexus_uuid: &str, - ) -> Self { + pub fn from_nexus_info(addr: &str, nexus_name: &str, nexus_uuid: &str) -> Self { Self { addr: addr.parse().unwrap(), nqn: make_nexus_nqn(nexus_name), @@ -57,9 +53,7 @@ pub async fn test_write_to_nvmf( } /// Checks that all given NVMF devices contain identical copies of data. 
-pub async fn test_devices_identical( - devices: &[NvmfLocation], -) -> std::io::Result<()> { +pub async fn test_devices_identical(devices: &[NvmfLocation]) -> std::io::Result<()> { assert!(devices.len() > 1); let (_cg0, path0) = devices[0].open()?; @@ -73,10 +67,7 @@ pub async fn test_devices_identical( } /// TODO -pub async fn test_fio_to_nvmf( - nvmf: &NvmfLocation, - mut fio: Fio, -) -> std::io::Result<()> { +pub async fn test_fio_to_nvmf(nvmf: &NvmfLocation, mut fio: Fio) -> std::io::Result<()> { let tgt = format!("'{}'", nvmf.as_args().join(" ")); fio.jobs.iter_mut().for_each(|j| { @@ -89,10 +80,7 @@ pub async fn test_fio_to_nvmf( } /// TODO -pub async fn test_fio_to_nvmf_aio( - nvmf: &NvmfLocation, - mut fio: Fio, -) -> std::io::Result<()> { +pub async fn test_fio_to_nvmf_aio(nvmf: &NvmfLocation, mut fio: Fio) -> std::io::Result<()> { let _cg = NmveConnectGuard::connect_addr(&nvmf.addr, &nvmf.nqn); let path = find_mayastor_nvme_device_path(&nvmf.serial)?; let path_str = path.to_str().unwrap(); diff --git a/io-engine-tests/src/pool.rs b/io-engine-tests/src/pool.rs index 486cbee21..f833f118f 100644 --- a/io-engine-tests/src/pool.rs +++ b/io-engine-tests/src/pool.rs @@ -3,8 +3,7 @@ use super::{ compose::rpc::v1::{ pool::{CreatePoolRequest, GrowPoolRequest, ListPoolOptions}, replica::{ListReplicaOptions, Replica}, - SharedRpcHandle, - Status, + SharedRpcHandle, Status, }, generate_uuid, }; @@ -135,9 +134,7 @@ impl PoolBuilderOpts { size_mb: u64, blk_size: u64, ) -> &mut Self { - let bdev = format!( - "malloc:///{bdev_name}?size_mb={size_mb}&blk_size={blk_size}" - ); + let bdev = format!("malloc:///{bdev_name}?size_mb={size_mb}&blk_size={blk_size}"); self.with_bdev(&bdev) } @@ -180,12 +177,7 @@ impl PoolBuilderRpc { self } - pub fn with_malloc_blk_size( - mut self, - bdev_name: &str, - size_mb: u64, - blk_size: u64, - ) -> Self { + pub fn with_malloc_blk_size(mut self, bdev_name: &str, size_mb: u64, blk_size: u64) -> Self { self.builder .with_malloc_blk_size(bdev_name, size_mb, blk_size); self @@ -230,9 +222,7 @@ impl PoolBuilderRpc { .await? 
.into_iter() .find(|p| p.uuid == uuid) - .ok_or_else(|| { - Status::new(Code::NotFound, format!("Pool '{uuid}' not found")) - }) + .ok_or_else(|| Status::new(Code::NotFound, format!("Pool '{uuid}' not found"))) } pub async fn get_replicas(&self) -> Result, Status> { @@ -256,9 +246,7 @@ impl PoolBuilderRpc { impl PoolBuilderLocal { pub async fn malloc(name: &str, size_mb: u64) -> Result { let lvs = PoolBuilderLocal::default() - .with_builder(|b| { - b.with_name(name).with_new_uuid().with_malloc(name, size_mb) - }) + .with_builder(|b| b.with_name(name).with_new_uuid().with_malloc(name, size_mb)) .create() .await?; Ok(PoolLocal { @@ -296,9 +284,8 @@ impl PoolBuilderLocal { pub async fn get_pool(&self) -> Result { let uuid = self.uuid(); - lvs::Lvs::lookup_by_uuid(&uuid).ok_or_else(|| { - Status::new(Code::NotFound, format!("Pool '{uuid}' not found")) - }) + lvs::Lvs::lookup_by_uuid(&uuid) + .ok_or_else(|| Status::new(Code::NotFound, format!("Pool '{uuid}' not found"))) } } diff --git a/io-engine-tests/src/replica.rs b/io-engine-tests/src/replica.rs index f0774d487..7c3223d49 100644 --- a/io-engine-tests/src/replica.rs +++ b/io-engine-tests/src/replica.rs @@ -6,13 +6,8 @@ use super::{ }; use io_engine::{constants::NVME_NQN_PREFIX, subsys::make_subsystem_serial}; use io_engine_api::v1::replica::{ - destroy_replica_request, - CreateReplicaRequest, - DestroyReplicaRequest, - ListReplicaOptions, - Replica, - ResizeReplicaRequest, - ShareReplicaRequest, + destroy_replica_request, CreateReplicaRequest, DestroyReplicaRequest, ListReplicaOptions, + Replica, ResizeReplicaRequest, ShareReplicaRequest, }; use tonic::{Code, Status}; @@ -212,18 +207,11 @@ impl ReplicaBuilder { .await? .into_iter() .find(|p| p.uuid == uuid) - .ok_or_else(|| { - Status::new( - Code::NotFound, - format!("Replica '{uuid}' not found"), - ) - }) + .ok_or_else(|| Status::new(Code::NotFound, format!("Replica '{uuid}' not found"))) } } -pub async fn list_replicas( - rpc: SharedRpcHandle, -) -> Result, Status> { +pub async fn list_replicas(rpc: SharedRpcHandle) -> Result, Status> { rpc.lock() .await .replica @@ -239,10 +227,7 @@ pub async fn list_replicas( .map(|r| r.into_inner().replicas) } -pub async fn find_replica_by_uuid( - rpc: SharedRpcHandle, - uuid: &str, -) -> Result { +pub async fn find_replica_by_uuid(rpc: SharedRpcHandle, uuid: &str) -> Result { rpc.lock() .await .replica @@ -258,14 +243,11 @@ pub async fn find_replica_by_uuid( .map(|r| r.into_inner().replicas)? .into_iter() .find(|n| n.uuid == uuid) - .ok_or_else(|| { - Status::new(Code::NotFound, format!("Replica '{uuid}' not found")) - }) + .ok_or_else(|| Status::new(Code::NotFound, format!("Replica '{uuid}' not found"))) } /// Reads all given replicas and checks if all them contain the same data. 
pub async fn validate_replicas(replicas: &[ReplicaBuilder]) { - let ls: Vec = - replicas.iter().map(|r| r.nvmf_location()).collect(); + let ls: Vec = replicas.iter().map(|r| r.nvmf_location()).collect(); test_devices_identical(&ls).await.unwrap(); } diff --git a/io-engine-tests/src/snapshot.rs b/io-engine-tests/src/snapshot.rs index 66d92d534..47ae5dba6 100644 --- a/io-engine-tests/src/snapshot.rs +++ b/io-engine-tests/src/snapshot.rs @@ -99,9 +99,7 @@ impl ReplicaSnapshotBuilder { .collect::>()) } } -pub async fn list_snapshot( - rpc: SharedRpcHandle, -) -> Result, Status> { +pub async fn list_snapshot(rpc: SharedRpcHandle) -> Result, Status> { rpc.lock() .await .snapshot @@ -184,9 +182,7 @@ impl SnapshotCloneBuilder { .collect::>()) } } -pub async fn list_snapshot_clone( - rpc: SharedRpcHandle, -) -> Result, Status> { +pub async fn list_snapshot_clone(rpc: SharedRpcHandle) -> Result, Status> { rpc.lock() .await .snapshot diff --git a/io-engine-tests/src/test.rs b/io-engine-tests/src/test.rs index 9dfd24f35..49b496190 100644 --- a/io-engine-tests/src/test.rs +++ b/io-engine-tests/src/test.rs @@ -1,18 +1,12 @@ use super::compose::rpc::v1::{ test::{ - AddFaultInjectionRequest, - FaultInjection, - ListFaultInjectionsRequest, + AddFaultInjectionRequest, FaultInjection, ListFaultInjectionsRequest, RemoveFaultInjectionRequest, }, - SharedRpcHandle, - Status, + SharedRpcHandle, Status, }; -pub async fn add_fault_injection( - rpc: SharedRpcHandle, - inj_uri: &str, -) -> Result<(), Status> { +pub async fn add_fault_injection(rpc: SharedRpcHandle, inj_uri: &str) -> Result<(), Status> { rpc.lock() .await .test @@ -23,10 +17,7 @@ pub async fn add_fault_injection( .map(|r| r.into_inner()) } -pub async fn remove_fault_injection( - rpc: SharedRpcHandle, - inj_uri: &str, -) -> Result<(), Status> { +pub async fn remove_fault_injection(rpc: SharedRpcHandle, inj_uri: &str) -> Result<(), Status> { rpc.lock() .await .test @@ -37,9 +28,7 @@ pub async fn remove_fault_injection( .map(|r| r.into_inner()) } -pub async fn list_fault_injections( - rpc: SharedRpcHandle, -) -> Result, Status> { +pub async fn list_fault_injections(rpc: SharedRpcHandle) -> Result, Status> { rpc.lock() .await .test diff --git a/io-engine-tests/src/test_task.rs b/io-engine-tests/src/test_task.rs index b5d9b1469..4af03f5f2 100644 --- a/io-engine-tests/src/test_task.rs +++ b/io-engine-tests/src/test_task.rs @@ -34,10 +34,7 @@ where sc }); - let task = TestTask { - sx, - f: Box::new(f), - }; + let task = TestTask { sx, f: Box::new(f) }; sc.send(task).unwrap(); rx.await.unwrap(); diff --git a/io-engine/examples/lvs-eval/display.rs b/io-engine/examples/lvs-eval/display.rs index 338a9eda6..4a91e44f6 100644 --- a/io-engine/examples/lvs-eval/display.rs +++ b/io-engine/examples/lvs-eval/display.rs @@ -4,16 +4,9 @@ use io_engine::{ }; use prettytable::{row, Table}; use spdk_rs::libspdk::{ - spdk_bit_array, - spdk_bit_array_capacity, - spdk_bit_array_get, - spdk_bit_pool, - spdk_bit_pool_capacity, - spdk_bit_pool_is_allocated, - spdk_blob_calc_used_clusters, - spdk_blob_is_thin_provisioned, - spdk_blob_mut_data, - spdk_blob_store, + spdk_bit_array, spdk_bit_array_capacity, spdk_bit_array_get, spdk_bit_pool, + spdk_bit_pool_capacity, spdk_bit_pool_is_allocated, spdk_blob_calc_used_clusters, + spdk_blob_is_thin_provisioned, spdk_blob_mut_data, spdk_blob_store, }; /// TODO @@ -142,11 +135,7 @@ pub fn print_replica(lvol: &Lvol) { } /// TODO -pub fn print_blob_data( - name: &str, - bs: &spdk_blob_store, - blob: &spdk_blob_mut_data, -) { +pub fn 
print_blob_data(name: &str, bs: &spdk_blob_store, blob: &spdk_blob_mut_data) { println!("{name}:"); // Clusters. @@ -155,7 +144,7 @@ pub fn print_blob_data( blob.num_clusters, blob.num_allocated_clusters ); print!(" "); - for i in 0 .. blob.num_allocated_clusters as isize { + for i in 0..blob.num_allocated_clusters as isize { let lba = unsafe { *blob.clusters.offset(i) }; let num = lba_to_cluster(bs, lba); print!("0x{num:x} "); @@ -168,7 +157,7 @@ pub fn print_blob_data( blob.num_clusters, blob.num_allocated_clusters ); print!(" "); - for i in 0 .. blob.num_allocated_clusters as isize { + for i in 0..blob.num_allocated_clusters as isize { let c = unsafe { *blob.clusters.offset(i) }; print!("0x{c:x} "); } @@ -180,7 +169,7 @@ pub fn print_blob_data( blob.num_extent_pages, blob.extent_pages_array_size ); print!(" "); - for i in 0 .. blob.extent_pages_array_size as isize { + for i in 0..blob.extent_pages_array_size as isize { let c = unsafe { *blob.extent_pages.offset(i) }; print!("0x{c:x} "); } @@ -199,7 +188,7 @@ fn print_used_array_bits(ba: *const spdk_bit_array, cnt: Option) { let cnt = cnt.unwrap_or_else(|| unsafe { spdk_bit_array_capacity(ba) }); let mut total = 0; - for i in 0 .. cnt { + for i in 0..cnt { let v = unsafe { spdk_bit_array_get(ba, i) }; if v { print!("0x{i:x} "); @@ -216,7 +205,7 @@ fn print_used_pool_bits(bp: *const spdk_bit_pool, cnt: Option) { let cnt = cnt.unwrap_or_else(|| unsafe { spdk_bit_pool_capacity(bp) }); let mut total = 0; - for i in 0 .. cnt { + for i in 0..cnt { let v = unsafe { spdk_bit_pool_is_allocated(bp, i) }; if v { print!("0x{i:x} "); diff --git a/io-engine/examples/lvs-eval/main.rs b/io-engine/examples/lvs-eval/main.rs index e4d88013d..d0e2dd514 100644 --- a/io-engine/examples/lvs-eval/main.rs +++ b/io-engine/examples/lvs-eval/main.rs @@ -82,7 +82,7 @@ async fn main() { // Create replicas. println!("Creating {n} replicas ...", n = args.replicas); - for idx in 0 .. 
args.replicas { + for idx in 0..args.replicas { if args.fillers { match create_filler_replica(&lvs, idx, 1).await { Ok(lvol) => fillers.push(lvol), @@ -142,12 +142,7 @@ async fn create_lvs(args: &CliArgs) -> Lvs { } /// TODO -async fn create_replica( - lvs: &Lvs, - serial: u32, - n: u64, - thin: bool, -) -> Result { +async fn create_replica(lvs: &Lvs, serial: u32, n: u64, thin: bool) -> Result { let name = format!("replica_{serial}"); let uuid = format!("45c23e54-dc86-45f6-b55b-e44d05f1{serial:04}"); @@ -155,11 +150,7 @@ async fn create_replica( } /// TODO -async fn create_filler_replica( - lvs: &Lvs, - serial: u32, - n: u64, -) -> Result { +async fn create_filler_replica(lvs: &Lvs, serial: u32, n: u64) -> Result { let name = format!("filler_{serial}"); let uuid = format!("56723e54-dc86-45f6-b55b-e44d05f1{serial:04}"); @@ -176,9 +167,7 @@ async fn create_lvol( ) -> Result { let et = unsafe { G_USE_EXTENT_TABLE }; - println!( - "Creating lvol '{name}': size = {n} cluster(s), thin: {thin}, et: {et}", - ); + println!("Creating lvol '{name}': size = {n} cluster(s), thin: {thin}, et: {et}",); let opts = ReplicaArgs { name: name.to_owned(), diff --git a/io-engine/src/bdev/aio.rs b/io-engine/src/bdev/aio.rs index c9b7b752d..cc4e7076d 100644 --- a/io-engine/src/bdev/aio.rs +++ b/io-engine/src/bdev/aio.rs @@ -55,17 +55,14 @@ impl TryFrom<&Url> for Aio { .ok() .map_or(false, |meta| meta.file_type().is_block_device()); - let mut parameters: HashMap = - url.query_pairs().into_owned().collect(); + let mut parameters: HashMap = url.query_pairs().into_owned().collect(); let blk_size: u32 = match parameters.remove("blk_size") { - Some(value) => { - value.parse().context(bdev_api::IntParamParseFailed { - uri: url.to_string(), - parameter: String::from("blk_size"), - value: value.clone(), - })? 
- } + Some(value) => value.parse().context(bdev_api::IntParamParseFailed { + uri: url.to_string(), + parameter: String::from("blk_size"), + value: value.clone(), + })?, None => { if path_is_blockdev { 0 @@ -74,11 +71,10 @@ impl TryFrom<&Url> for Aio { } } }; - let uuid = uri::uuid(parameters.remove("uuid")).context( - bdev_api::UuidParamParseFailed { + let uuid = + uri::uuid(parameters.remove("uuid")).context(bdev_api::UuidParamParseFailed { uri: url.to_string(), - }, - )?; + })?; let rescan = parameters.remove("rescan").is_some(); @@ -120,15 +116,8 @@ impl CreateDestroy for Aio { let cname = CString::new(self.get_name()).unwrap(); - let errno = unsafe { - create_aio_bdev( - cname.as_ptr(), - cname.as_ptr(), - self.blk_size, - false, - false, - ) - }; + let errno = + unsafe { create_aio_bdev(cname.as_ptr(), cname.as_ptr(), self.blk_size, false, false) }; if errno != 0 { let err = BdevError::CreateBdevFailed { @@ -190,19 +179,14 @@ impl CreateDestroy for Aio { } impl Aio { - fn try_rescan( - &self, - bdev: UntypedBdev, - ) -> Result::Error> { + fn try_rescan(&self, bdev: UntypedBdev) -> Result::Error> { let before = bdev.num_blocks(); debug!("{self:?}: rescanning existing AIO bdev ({before} blocks) ..."); let cname = self.name.clone().into_cstring(); - let errno = unsafe { - bdev_aio_rescan(cname.as_ptr() as *mut std::os::raw::c_char) - }; + let errno = unsafe { bdev_aio_rescan(cname.as_ptr() as *mut std::os::raw::c_char) }; if errno != 0 { let err = BdevError::ResizeBdevFailed { @@ -217,9 +201,7 @@ impl Aio { let after = bdev.num_blocks(); - debug!( - "{self:?}: rescanning existing AIO bdev okay: {before} -> {after} blocks" - ); + debug!("{self:?}: rescanning existing AIO bdev okay: {before} -> {after} blocks"); Ok(self.name.clone()) } diff --git a/io-engine/src/bdev/dev.rs b/io-engine/src/bdev/dev.rs index 97674a7ba..eb1b1439b 100644 --- a/io-engine/src/bdev/dev.rs +++ b/io-engine/src/bdev/dev.rs @@ -38,33 +38,19 @@ pub(crate) mod uri { use crate::{ bdev::{ - aio, - ftl, - loopback, - lvs, - malloc, - null_bdev, - nvme, - nvmx, - nx, - uring, - BdevCreateDestroy, + aio, ftl, loopback, lvs, malloc, null_bdev, nvme, nvmx, nx, uring, BdevCreateDestroy, }, bdev_api::{self, BdevError}, }; - pub fn parse( - uri: &str, - ) -> Result>, BdevError> { + pub fn parse(uri: &str) -> Result>, BdevError> { let url = url::Url::parse(uri).context(bdev_api::UriParseFailed { uri: uri.to_string(), })?; match url.scheme() { "aio" => Ok(Box::new(aio::Aio::try_from(&url)?)), - "bdev" | "loopback" => { - Ok(Box::new(loopback::Loopback::try_from(&url)?)) - } + "bdev" | "loopback" => Ok(Box::new(loopback::Loopback::try_from(&url)?)), "ftl" => Ok(Box::new(ftl::Ftl::try_from(&url)?)), "malloc" => Ok(Box::new(malloc::Malloc::try_from(&url)?)), "null" => Ok(Box::new(null_bdev::Null::try_from(&url)?)), diff --git a/io-engine/src/bdev/device.rs b/io-engine/src/bdev/device.rs index 7c8f41573..a1dbeeabc 100644 --- a/io-engine/src/bdev/device.rs +++ b/io-engine/src/bdev/device.rs @@ -14,58 +14,25 @@ use once_cell::sync::{Lazy, OnceCell}; use spdk_rs::{ libspdk::{ - spdk_bdev_comparev_blocks, - spdk_bdev_flush, - spdk_bdev_free_io, - spdk_bdev_io, - spdk_bdev_readv_blocks_with_flags, - spdk_bdev_reset, - spdk_bdev_unmap_blocks, - spdk_bdev_write_zeroes_blocks, - spdk_bdev_writev_blocks, - SPDK_NVME_IO_FLAGS_UNWRITTEN_READ_FAIL, - SPDK_NVME_IO_FLAG_CURRENT_UNWRITTEN_READ_FAIL, + spdk_bdev_comparev_blocks, spdk_bdev_flush, spdk_bdev_free_io, spdk_bdev_io, + spdk_bdev_readv_blocks_with_flags, spdk_bdev_reset, 
spdk_bdev_unmap_blocks, + spdk_bdev_write_zeroes_blocks, spdk_bdev_writev_blocks, + SPDK_NVME_IO_FLAGS_UNWRITTEN_READ_FAIL, SPDK_NVME_IO_FLAG_CURRENT_UNWRITTEN_READ_FAIL, }, - nvme_admin_opc, - AsIoVecPtr, - BdevOps, - DmaBuf, - DmaError, - IoType, - IoVec, + nvme_admin_opc, AsIoVecPtr, BdevOps, DmaBuf, DmaError, IoType, IoVec, }; use crate::core::{ - mempool::MemoryPool, - Bdev, - BdevHandle, - BlockDevice, - BlockDeviceDescriptor, - BlockDeviceHandle, - BlockDeviceIoStats, - CoreError, - DeviceEventDispatcher, - DeviceEventSink, - DeviceEventType, - DeviceIoController, - IoCompletionCallback, - IoCompletionCallbackArg, - IoCompletionStatus, - NvmeStatus, - ReadOptions, - SnapshotParams, - ToErrno, - UntypedBdev, - UntypedBdevHandle, + mempool::MemoryPool, Bdev, BdevHandle, BlockDevice, BlockDeviceDescriptor, BlockDeviceHandle, + BlockDeviceIoStats, CoreError, DeviceEventDispatcher, DeviceEventSink, DeviceEventType, + DeviceIoController, IoCompletionCallback, IoCompletionCallbackArg, IoCompletionStatus, + NvmeStatus, ReadOptions, SnapshotParams, ToErrno, UntypedBdev, UntypedBdevHandle, UntypedDescriptorGuard, }; #[cfg(feature = "fault-injection")] use crate::core::fault_injection::{ - inject_completion_error, - inject_submission_error, - FaultDomain, - InjectIoCtx, + inject_completion_error, inject_submission_error, FaultDomain, InjectIoCtx, }; use crate::replica_backend::ReplicaFactory; @@ -150,10 +117,7 @@ impl BlockDevice for SpdkBlockDevice { self.0.stats_async().await } /// open the device returning descriptor to the device - fn open( - &self, - read_write: bool, - ) -> Result, CoreError> { + fn open(&self, read_write: bool) -> Result, CoreError> { let descr = self.0.open(read_write)?; Ok(Box::new(SpdkBlockDeviceDescriptor::from(descr))) } @@ -163,10 +127,7 @@ impl BlockDevice for SpdkBlockDevice { None } /// add a callback to be called when a particular event is received - fn add_event_listener( - &self, - listener: DeviceEventSink, - ) -> Result<(), CoreError> { + fn add_event_listener(&self, listener: DeviceEventSink) -> Result<(), CoreError> { let mut map = BDEV_EVENT_DISPATCHER.lock().expect("lock poisoned"); let disp = map.entry(self.device_name()).or_default(); disp.add_listener(listener); @@ -194,9 +155,7 @@ impl BlockDeviceDescriptor for SpdkBlockDeviceDescriptor { self.0.bdev().name().to_string() } - fn into_handle( - self: Box, - ) -> Result, CoreError> { + fn into_handle(self: Box) -> Result, CoreError> { let handle = SpdkBlockDeviceHandle::try_from(self.0)?; Ok(Box::new(handle)) } @@ -206,9 +165,7 @@ impl BlockDeviceDescriptor for SpdkBlockDeviceDescriptor { Ok(Box::new(handle)) } - async fn get_io_handle_nonblock( - &self, - ) -> Result, CoreError> { + async fn get_io_handle_nonblock(&self) -> Result, CoreError> { let handle = SpdkBlockDeviceHandle::try_from(self.0.clone())?; Ok(Box::new(handle)) } @@ -228,9 +185,7 @@ struct SpdkBlockDeviceHandle { impl TryFrom> for SpdkBlockDeviceHandle { type Error = CoreError; - fn try_from( - desc: Arc, - ) -> Result { + fn try_from(desc: Arc) -> Result { let handle = BdevHandle::try_from(desc)?; Ok(SpdkBlockDeviceHandle::from(handle)) } @@ -255,19 +210,11 @@ impl BlockDeviceHandle for SpdkBlockDeviceHandle { DmaBuf::new(size, self.device.alignment()) } - async fn read_at( - &self, - offset: u64, - buffer: &mut DmaBuf, - ) -> Result { + async fn read_at(&self, offset: u64, buffer: &mut DmaBuf) -> Result { self.handle.read_at(offset, buffer).await } - async fn write_at( - &self, - offset: u64, - buffer: &DmaBuf, - ) -> Result { + 
async fn write_at(&self, offset: u64, buffer: &DmaBuf) -> Result { self.handle.write_at(offset, buffer).await } @@ -452,14 +399,8 @@ impl BlockDeviceHandle for SpdkBlockDeviceHandle { )?; let (desc, chan) = self.handle.io_tuple(); - let rc = unsafe { - spdk_bdev_reset( - desc, - chan, - Some(bdev_io_completion), - ctx as *mut c_void, - ) - }; + let rc = + unsafe { spdk_bdev_reset(desc, chan, Some(bdev_io_completion), ctx as *mut c_void) }; if rc < 0 { Err(CoreError::ResetDispatch { @@ -585,10 +526,7 @@ impl BlockDeviceHandle for SpdkBlockDeviceHandle { } /// NVMe commands are not applicable for non-NVMe devices. - async fn create_snapshot( - &self, - snapshot: SnapshotParams, - ) -> Result { + async fn create_snapshot(&self, snapshot: SnapshotParams) -> Result { let bdev = self.handle.get_bdev(); let Some(mut replica) = ReplicaFactory::bdev_as_replica(bdev) else { @@ -597,12 +535,13 @@ impl BlockDeviceHandle for SpdkBlockDeviceHandle { }); }; - replica.create_snapshot(snapshot).await.map_err(|e| { - CoreError::SnapshotCreate { + replica + .create_snapshot(snapshot) + .await + .map_err(|e| CoreError::SnapshotCreate { reason: e.to_string(), source: e.to_errno(), - } - })?; + })?; Ok(0) } @@ -659,12 +598,7 @@ struct IoCtx { /// TODO #[inline] -pub fn io_type_to_err( - op: IoType, - source: Errno, - offset: u64, - len: u64, -) -> CoreError { +pub fn io_type_to_err(op: IoType, source: Errno, offset: u64, len: u64) -> CoreError { match op { IoType::Read => CoreError::ReadDispatch { source, @@ -686,14 +620,10 @@ pub fn io_type_to_err( offset, len, }, - IoType::Reset => CoreError::ResetDispatch { - source, - }, + IoType::Reset => CoreError::ResetDispatch { source }, _ => { warn!("Unsupported I/O operation: {:?}", op); - CoreError::NotSupported { - source, - } + CoreError::NotSupported { source } } } } @@ -702,9 +632,8 @@ pub fn io_type_to_err( /// This must be called before the first I/O operations take place. pub fn bdev_io_ctx_pool_init(size: u64) { BDEV_IOCTX_POOL.get_or_init(|| { - MemoryPool::::create("bdev_io_ctx", size).expect( - "Failed to create memory pool [bdev_io_ctx] for bdev I/O contexts", - ) + MemoryPool::::create("bdev_io_ctx", size) + .expect("Failed to create memory pool [bdev_io_ctx] for bdev I/O contexts") }); } @@ -716,9 +645,8 @@ fn alloc_bdev_io_ctx( num_blocks: u64, ) -> Result<*mut IoCtx, CoreError> { let pool = BDEV_IOCTX_POOL.get().unwrap(); - pool.get(ctx).ok_or_else(|| { - io_type_to_err(op, Errno::ENOMEM, offset_blocks, num_blocks) - }) + pool.get(ctx) + .ok_or_else(|| io_type_to_err(op, Errno::ENOMEM, offset_blocks, num_blocks)) } /// Release the memory used by the bdev I/O context back to the pool. @@ -727,11 +655,7 @@ fn free_bdev_io_ctx(ctx: *mut IoCtx) { pool.put(ctx); } -extern "C" fn bdev_io_completion( - child_bio: *mut spdk_bdev_io, - success: bool, - ctx: *mut c_void, -) { +extern "C" fn bdev_io_completion(child_bio: *mut spdk_bdev_io, success: bool, ctx: *mut c_void) { let bio = unsafe { &mut *(ctx as *mut IoCtx) }; // Get extended NVMe error status from original bio in case of error. @@ -764,10 +688,7 @@ fn dispatch_bdev_event(event: DeviceEventType, name: &str) { } /// Called by spdk when there is an asynchronous bdev event i.e. removal. -pub fn bdev_event_callback( - event: spdk_rs::BdevEvent, - bdev: spdk_rs::Bdev, -) { +pub fn bdev_event_callback(event: spdk_rs::BdevEvent, bdev: spdk_rs::Bdev) { let dev = Bdev::::new(bdev); // Translate SPDK events into common device events. 
@@ -801,12 +722,8 @@ impl From for u32 { fn from(opts: ReadOptions) -> Self { match opts { ReadOptions::None => 0, - ReadOptions::UnwrittenFail => { - SPDK_NVME_IO_FLAGS_UNWRITTEN_READ_FAIL - } - ReadOptions::CurrentUnwrittenFail => { - SPDK_NVME_IO_FLAG_CURRENT_UNWRITTEN_READ_FAIL - } + ReadOptions::UnwrittenFail => SPDK_NVME_IO_FLAGS_UNWRITTEN_READ_FAIL, + ReadOptions::CurrentUnwrittenFail => SPDK_NVME_IO_FLAG_CURRENT_UNWRITTEN_READ_FAIL, } } } diff --git a/io-engine/src/bdev/ftl.rs b/io-engine/src/bdev/ftl.rs index 3d457b10c..f33d2fd59 100644 --- a/io-engine/src/bdev/ftl.rs +++ b/io-engine/src/bdev/ftl.rs @@ -58,12 +58,8 @@ use url::Url; use spdk_rs::{ ffihelper::errno_result_from_i32, libspdk::{ - bdev_ftl_create_bdev, - bdev_ftl_delete_bdev, - ftl_bdev_info, - spdk_ftl_conf, - spdk_ftl_get_default_conf, - spdk_ftl_mode, + bdev_ftl_create_bdev, bdev_ftl_delete_bdev, ftl_bdev_info, spdk_ftl_conf, + spdk_ftl_get_default_conf, spdk_ftl_mode, }, UntypedBdev, }; @@ -111,46 +107,36 @@ impl TryFrom<&Url> for Ftl { }); } - let mut parameters: HashMap = - uri.query_pairs().into_owned().collect(); + let mut parameters: HashMap = uri.query_pairs().into_owned().collect(); - let uuid = uri::uuid(parameters.remove("uuid")).context( - bdev_api::UuidParamParseFailed { + let uuid = + uri::uuid(parameters.remove("uuid")).context(bdev_api::UuidParamParseFailed { uri: uri.to_string(), - }, - )?; - - let encoded_bbdev_uri = - parameters.remove("bbdev").context(bdev_api::InvalidUri { - uri: uri.to_string(), - message: String::from("No bbdev parameter found"), })?; + let encoded_bbdev_uri = parameters.remove("bbdev").context(bdev_api::InvalidUri { + uri: uri.to_string(), + message: String::from("No bbdev parameter found"), + })?; + let bbdev_uri = percent_decode_str(&encoded_bbdev_uri) .decode_utf8() .map_err(|e| BdevError::InvalidUri { uri: uri.to_string(), - message: format!( - "Could not percent decode bbdev_uri sub-uri - {}", - e - ), + message: format!("Could not percent decode bbdev_uri sub-uri - {}", e), })? .to_string(); - let encoded_cbdev_uri = - parameters.remove("cbdev").context(bdev_api::InvalidUri { - uri: uri.to_string(), - message: String::from("No cbdev parameter found"), - })?; + let encoded_cbdev_uri = parameters.remove("cbdev").context(bdev_api::InvalidUri { + uri: uri.to_string(), + message: String::from("No cbdev parameter found"), + })?; let cbdev_uri = percent_decode_str(&encoded_cbdev_uri) .decode_utf8() .map_err(|e| BdevError::InvalidUri { uri: uri.to_string(), - message: format!( - "Could not percent decode cbdev_uri sub-uri - {}", - e - ), + message: format!("Could not percent decode cbdev_uri sub-uri - {}", e), })? 
.to_string(); @@ -159,7 +145,7 @@ impl TryFrom<&Url> for Ftl { reject_unknown_parameters(uri, parameters)?; Ok(Self { - name: uri.path()[1 ..].into(), + name: uri.path()[1..].into(), alias: uri.to_string(), uuid, bbdev_uri, @@ -180,9 +166,7 @@ pub extern "C" fn ftl_bdev_init_fn_cb( errno: i32, ) { info!("{:?}: ftl_bdev_init_fn_cb", errno); - let sender = unsafe { - Box::from_raw(sender_ptr as *mut oneshot::Sender>) - }; + let sender = unsafe { Box::from_raw(sender_ptr as *mut oneshot::Sender>) }; sender .send(errno_result_from_i32((), errno)) .expect("done callback receiver side disappeared"); @@ -233,13 +217,8 @@ impl CreateDestroy for Ftl { ftl_conf.verbose_mode = true; ftl_conf.mode = spdk_ftl_mode::SPDK_FTL_MODE_CREATE as u32; - let errno = unsafe { - bdev_ftl_create_bdev( - &ftl_conf, - Some(ftl_bdev_init_fn_cb), - cb_arg(s), - ) - }; + let errno = + unsafe { bdev_ftl_create_bdev(&ftl_conf, Some(ftl_bdev_init_fn_cb), cb_arg(s)) }; if errno != 0 { let err = BdevError::CreateBdevFailed { @@ -288,9 +267,7 @@ impl CreateDestroy for Ftl { debug!("{:?}: deleting", self); let Some(mut bdev) = UntypedBdev::lookup_by_name(&self.name) else { - return Err(BdevError::BdevNotFound { - name: self.name, - }); + return Err(BdevError::BdevNotFound { name: self.name }); }; bdev.remove_alias(&self.alias); @@ -309,9 +286,7 @@ impl CreateDestroy for Ftl { .context(bdev_api::BdevCommandCanceled { name: self.name.clone(), })? - .context(bdev_api::DestroyBdevFailed { - name: self.name, - })?; + .context(bdev_api::DestroyBdevFailed { name: self.name })?; let mut result = bdev_destroy(&self.bbdev_uri).await; diff --git a/io-engine/src/bdev/loopback.rs b/io-engine/src/bdev/loopback.rs index 9fbd93a13..9b637c09a 100644 --- a/io-engine/src/bdev/loopback.rs +++ b/io-engine/src/bdev/loopback.rs @@ -10,11 +10,8 @@ use url::Url; use crate::{ bdev::{ - dev::reject_unknown_parameters, - device::dispatch_loopback_removed, - util::uri, - CreateDestroy, - GetName, + dev::reject_unknown_parameters, device::dispatch_loopback_removed, util::uri, + CreateDestroy, GetName, }, bdev_api::{self, BdevError}, core::UntypedBdev, @@ -45,14 +42,12 @@ impl TryFrom<&Url> for Loopback { }); } - let mut parameters: HashMap = - url.query_pairs().into_owned().collect(); + let mut parameters: HashMap = url.query_pairs().into_owned().collect(); - let uuid = uri::uuid(parameters.remove("uuid")).context( - bdev_api::UuidParamParseFailed { + let uuid = + uri::uuid(parameters.remove("uuid")).context(bdev_api::UuidParamParseFailed { uri: url.to_string(), - }, - )?; + })?; reject_unknown_parameters(url, parameters)?; diff --git a/io-engine/src/bdev/lvs.rs b/io-engine/src/bdev/lvs.rs index 3784ec13f..0d7272eb4 100644 --- a/io-engine/src/bdev/lvs.rs +++ b/io-engine/src/bdev/lvs.rs @@ -75,8 +75,7 @@ impl TryFrom<&Url> for Lvol { }); } - let mut parameters: HashMap = - uri.query_pairs().into_owned().collect(); + let mut parameters: HashMap = uri.query_pairs().into_owned().collect(); let size = parameters .remove("size") @@ -85,11 +84,9 @@ impl TryFrom<&Url> for Lvol { message: "'size' is not specified".to_string(), }) .and_then(|size| { - byte_unit::Byte::parse_str(&size, true).map_err(|error| { - BdevError::InvalidUri { - uri: uri.to_string(), - message: format!("'size' is invalid: {error}"), - } + byte_unit::Byte::parse_str(&size, true).map_err(|error| BdevError::InvalidUri { + uri: uri.to_string(), + message: format!("'size' is invalid: {error}"), }) })? 
.as_u64(); @@ -108,7 +105,7 @@ impl TryFrom<&Url> for Lvol { reject_unknown_parameters(uri, parameters)?; Ok(Self { - name: uri.path()[1 ..].into(), + name: uri.path()[1..].into(), size, lvs, }) @@ -119,11 +116,7 @@ impl TryFrom for Lvs { type Error = BdevError; fn try_from(uri: String) -> Result { - let uri = - Url::parse(&uri).map_err(|source| BdevError::UriParseFailed { - uri, - source, - })?; + let uri = Url::parse(&uri).map_err(|source| BdevError::UriParseFailed { uri, source })?; let segments = uri::segments(&uri); if segments.is_empty() { return Err(BdevError::InvalidUri { @@ -132,8 +125,7 @@ impl TryFrom for Lvs { }); } - let mut parameters: HashMap = - uri.query_pairs().into_owned().collect(); + let mut parameters: HashMap = uri.query_pairs().into_owned().collect(); let disk = parameters.remove("disk").ok_or(BdevError::InvalidUri { uri: uri.to_string(), @@ -149,7 +141,7 @@ impl TryFrom for Lvs { .map(LvsMode::from)?; Ok(Lvs { - name: uri.path()[1 ..].into(), + name: uri.path()[1..].into(), disk, mode, }) @@ -219,22 +211,18 @@ impl Lvs { backend: PoolBackend::Lvs, }; match &self.mode { - LvsMode::Create => { - match crate::lvs::Lvs::import_from_args(args.clone()).await { - Err(crate::lvs::LvsError::Import { - .. - }) => crate::lvs::Lvs::create_or_import(args).await, - _ => { - return Err(BdevError::BdevExists { - name: self.name.to_owned(), - }) - } + LvsMode::Create => match crate::lvs::Lvs::import_from_args(args.clone()).await { + Err(crate::lvs::LvsError::Import { .. }) => { + crate::lvs::Lvs::create_or_import(args).await } - } + _ => { + return Err(BdevError::BdevExists { + name: self.name.to_owned(), + }) + } + }, LvsMode::Import => crate::lvs::Lvs::import_from_args(args).await, - LvsMode::CreateOrImport => { - crate::lvs::Lvs::create_or_import(args).await - } + LvsMode::CreateOrImport => crate::lvs::Lvs::create_or_import(args).await, LvsMode::Purge => { Self::wipe_super(args.clone()).await?; crate::lvs::Lvs::create_or_import(args).await @@ -248,22 +236,16 @@ impl Lvs { async fn wipe_super(args: PoolArgs) -> Result<(), BdevError> { let disk = - crate::lvs::Lvs::parse_disk(args.disks.clone()).map_err(|_| { - BdevError::InvalidUri { - uri: String::new(), - message: String::new(), - } + crate::lvs::Lvs::parse_disk(args.disks.clone()).map_err(|_| BdevError::InvalidUri { + uri: String::new(), + message: String::new(), })?; let parsed = super::uri::parse(&disk)?; let bdev_str = parsed.create().await?; { - let bdev = - crate::core::Bdev::get_by_name(&bdev_str).map_err(|_| { - BdevError::BdevNotFound { - name: bdev_str, - } - })?; + let bdev = crate::core::Bdev::get_by_name(&bdev_str) + .map_err(|_| BdevError::BdevNotFound { name: bdev_str })?; let hdl = crate::core::Bdev::open(&bdev, true) .and_then(|desc| desc.into_handle()) @@ -271,11 +253,9 @@ impl Lvs { name: bdev.name().into(), })?; - let mut wiper = crate::core::wiper::Wiper::new( - hdl, - crate::core::wiper::WipeMethod::WriteZeroes, - ) - .map_err(|_| BdevError::WipeFailed {})?; + let mut wiper = + crate::core::wiper::Wiper::new(hdl, crate::core::wiper::WipeMethod::WriteZeroes) + .map_err(|_| BdevError::WipeFailed {})?; wiper .wipe(0, 8 * 1024 * 1024) .await @@ -316,11 +296,12 @@ impl Lvs { let Some(lvol) = lvols.into_iter().find(|l| l.name() == name) else { return Ok(()); }; - lvol.destroy().await.map(|_| ()).map_err(|error| { - BdevError::DestroyBdevFailedStr { + lvol.destroy() + .await + .map(|_| ()) + .map_err(|error| BdevError::DestroyBdevFailedStr { error: error.to_string(), name: self.name.to_owned(), - } - }) + }) } } 
diff --git a/io-engine/src/bdev/malloc.rs b/io-engine/src/bdev/malloc.rs index 5ab90754e..f6e8fcfb5 100644 --- a/io-engine/src/bdev/malloc.rs +++ b/io-engine/src/bdev/malloc.rs @@ -17,11 +17,7 @@ use url::Url; use spdk_rs::{ libspdk::{ - create_malloc_disk, - delete_malloc_disk, - malloc_bdev_opts, - resize_malloc_disk, - spdk_bdev, + create_malloc_disk, delete_malloc_disk, malloc_bdev_opts, resize_malloc_disk, spdk_bdev, SPDK_DIF_DISABLE, }, UntypedBdev, @@ -68,8 +64,7 @@ impl TryFrom<&Url> for Malloc { }); } - let mut parameters: HashMap = - uri.query_pairs().into_owned().collect(); + let mut parameters: HashMap = uri.query_pairs().into_owned().collect(); let blk_size: u32 = if let Some(value) = parameters.remove("blk_size") { value.parse().context(bdev_api::IntParamParseFailed { @@ -91,22 +86,20 @@ impl TryFrom<&Url> for Malloc { 0 }; - let num_blocks: u32 = - if let Some(value) = parameters.remove("num_blocks") { - value.parse().context(bdev_api::IntParamParseFailed { - uri: uri.to_string(), - parameter: String::from("num_blocks"), - value: value.clone(), - })? - } else { - 0 - }; + let num_blocks: u32 = if let Some(value) = parameters.remove("num_blocks") { + value.parse().context(bdev_api::IntParamParseFailed { + uri: uri.to_string(), + parameter: String::from("num_blocks"), + value: value.clone(), + })? + } else { + 0 + }; - let uuid = uri::uuid(parameters.remove("uuid")).context( - bdev_api::UuidParamParseFailed { + let uuid = + uri::uuid(parameters.remove("uuid")).context(bdev_api::UuidParamParseFailed { uri: uri.to_string(), - }, - )?; + })?; let resizing = parameters.remove("resize").is_some(); @@ -123,21 +116,19 @@ impl TryFrom<&Url> for Malloc { if size != 0 && num_blocks != 0 { return Err(BdevError::InvalidUri { uri: uri.to_string(), - message: "'num_blocks' and 'size_mb' are mutually exclusive" - .to_string(), + message: "'num_blocks' and 'size_mb' are mutually exclusive".to_string(), }); } if size == 0 && num_blocks == 0 { return Err(BdevError::InvalidUri { uri: uri.to_string(), - message: "either 'num_blocks' or 'size_mb' must be specified" - .to_string(), + message: "either 'num_blocks' or 'size_mb' must be specified".to_string(), }); } Ok(Self { - name: uri.path()[1 ..].into(), + name: uri.path()[1..].into(), alias: uri.to_string(), num_blocks: if num_blocks != 0 { num_blocks @@ -245,13 +236,9 @@ impl CreateDestroy for Malloc { .context(bdev_api::BdevCommandCanceled { name: self.name.clone(), })? 
- .context(bdev_api::DestroyBdevFailed { - name: self.name, - }) + .context(bdev_api::DestroyBdevFailed { name: self.name }) } else { - Err(BdevError::BdevNotFound { - name: self.name, - }) + Err(BdevError::BdevNotFound { name: self.name }) } } } @@ -263,12 +250,8 @@ impl Malloc { let cname = self.name.clone().into_cstring(); let new_sz_mb = self.num_blocks * self.blk_size as u64 / (1024 * 1024); - let errno = unsafe { - resize_malloc_disk( - cname.as_ptr() as *mut std::os::raw::c_char, - new_sz_mb, - ) - }; + let errno = + unsafe { resize_malloc_disk(cname.as_ptr() as *mut std::os::raw::c_char, new_sz_mb) }; if errno != 0 { let err = BdevError::ResizeBdevFailed { diff --git a/io-engine/src/bdev/mod.rs b/io-engine/src/bdev/mod.rs index 38066b4d3..fa4960cd0 100644 --- a/io-engine/src/bdev/mod.rs +++ b/io-engine/src/bdev/mod.rs @@ -3,12 +3,7 @@ use async_trait::async_trait; pub use dev::{device_create, device_destroy, device_lookup, device_open}; pub use device::{bdev_event_callback, bdev_io_ctx_pool_init, SpdkBlockDevice}; pub use nexus::{Nexus, NexusInfo, NexusState}; -pub use nvmx::{ - nvme_io_ctx_pool_init, - NvmeController, - NvmeControllerState, - NVME_CONTROLLERS, -}; +pub use nvmx::{nvme_io_ctx_pool_init, NvmeController, NvmeControllerState, NVME_CONTROLLERS}; mod aio; pub(crate) mod dev; diff --git a/io-engine/src/bdev/nexus/mod.rs b/io-engine/src/bdev/nexus/mod.rs index 4f8624cd0..03b8a0439 100644 --- a/io-engine/src/bdev/nexus/mod.rs +++ b/io-engine/src/bdev/nexus/mod.rs @@ -28,42 +28,22 @@ use crate::{ }; pub(crate) use nexus_bdev::NEXUS_PRODUCT_ID; pub use nexus_bdev::{ - nexus_create, - nexus_create_v2, - Nexus, - NexusNvmeParams, - NexusNvmePreemption, - NexusOperation, - NexusState, - NexusStatus, - NexusTarget, - NvmeAnaState, - NvmeReservation, + nexus_create, nexus_create_v2, Nexus, NexusNvmeParams, NexusNvmePreemption, NexusOperation, + NexusState, NexusStatus, NexusTarget, NvmeAnaState, NvmeReservation, }; pub(crate) use nexus_bdev_error::nexus_err; pub use nexus_bdev_error::Error; pub(crate) use nexus_channel::{DrEvent, IoMode, NexusChannel}; pub use nexus_child::{ - ChildError, - ChildState, - ChildStateClient, - ChildSyncState, - FaultReason, - NexusChild, + ChildError, ChildState, ChildStateClient, ChildSyncState, FaultReason, NexusChild, }; use nexus_io::{NexusBio, NioCtx}; use nexus_io_log::{IOLog, IOLogChannel}; use nexus_io_subsystem::NexusIoSubsystem; pub use nexus_io_subsystem::NexusPauseState; pub use nexus_iter::{ - nexus_iter, - nexus_iter_mut, - nexus_lookup, - nexus_lookup_mut, - nexus_lookup_name_uuid, - nexus_lookup_nqn, - nexus_lookup_nqn_mut, - nexus_lookup_uuid_mut, + nexus_iter, nexus_iter_mut, nexus_lookup, nexus_lookup_mut, nexus_lookup_name_uuid, + nexus_lookup_nqn, nexus_lookup_nqn_mut, nexus_lookup_uuid_mut, }; pub(crate) use nexus_module::{NexusModule, NEXUS_MODULE_NAME}; pub(crate) use nexus_nbd::{NbdDisk, NbdError}; @@ -72,9 +52,7 @@ pub use nexus_persistence::{ChildInfo, NexusInfo}; pub(crate) use nexus_share::NexusPtpl; pub use nexus_bdev_snapshot::{ - NexusReplicaSnapshotDescriptor, - NexusReplicaSnapshotStatus, - NexusSnapshotStatus, + NexusReplicaSnapshotDescriptor, NexusReplicaSnapshotStatus, NexusSnapshotStatus, }; /// TODO @@ -126,21 +104,20 @@ pub fn register_module(register_json: bool) { let mut bdev = Pin::new(&mut bdev); match proto.as_str() { "nvmf" => { - let share = NvmfShareProps::new().with_range(Some((args.cntlid_min, args.cntlid_max))).with_ana(true); - bdev.as_mut().share_nvmf(Some(share)) + let share = 
NvmfShareProps::new() + .with_range(Some((args.cntlid_min, args.cntlid_max))) + .with_ana(true); + bdev.as_mut() + .share_nvmf(Some(share)) .await - .map_err(|e| { - JsonRpcError { - code: Code::InternalError, - message: e.to_string(), - } + .map_err(|e| JsonRpcError { + code: Code::InternalError, + message: e.to_string(), }) - .map(|share| { - NexusShareReply { - uri: bdev.share_uri().unwrap_or(share), - } - }) - }, + .map(|share| NexusShareReply { + uri: bdev.share_uri().unwrap_or(share), + }) + } _ => unreachable!(), } } else { @@ -172,8 +149,7 @@ pub async fn shutdown_nexuses() { // internally, so it may become invalid if another Bdev is destroyed // in parallel. // Clippy's complains about that, so it is disabled for this function. - let nexuses: Vec< as Iterator>::Item> = - nexus_iter_mut().collect(); + let nexuses: Vec< as Iterator>::Item> = nexus_iter_mut().collect(); for mut nexus in nexuses.into_iter() { // Destroy nexus and persist its state in the ETCd. @@ -187,12 +163,7 @@ pub async fn shutdown_nexuses() { error = error.verbose(), "Failed to destroy nexus" ); - EventWithMeta::event( - &(*nexus), - EventAction::Shutdown, - error.meta(), - ) - .generate(); + EventWithMeta::event(&(*nexus), EventAction::Shutdown, error.meta()).generate(); } } } diff --git a/io-engine/src/bdev/nexus/nexus_bdev.rs b/io-engine/src/bdev/nexus/nexus_bdev.rs index a28c9d28d..2c9b3577f 100644 --- a/io-engine/src/bdev/nexus/nexus_bdev.rs +++ b/io-engine/src/bdev/nexus/nexus_bdev.rs @@ -24,44 +24,25 @@ use snafu::ResultExt; use uuid::Uuid; use super::{ - nexus_err, - nexus_lookup_name_uuid, - DrEvent, - Error, - NbdDisk, - NexusBio, - NexusChannel, - NexusChild, - NexusModule, - PersistOp, + nexus_err, nexus_lookup_name_uuid, DrEvent, Error, NbdDisk, NexusBio, NexusChannel, NexusChild, + NexusModule, PersistOp, }; use crate::{ bdev::{ device_destroy, nexus::{ - nexus_io_subsystem::NexusPauseState, - nexus_persistence::PersistentNexusInfo, - NexusIoSubsystem, - ENABLE_NEXUS_RESET, + nexus_io_subsystem::NexusPauseState, nexus_persistence::PersistentNexusInfo, + NexusIoSubsystem, ENABLE_NEXUS_RESET, }, PtplFileOps, }, core::{ - partition, - Bdev, - DeviceEventSink, - IoType, - Protocol, - Reactor, - Reactors, - Share, - VerboseError, + partition, Bdev, DeviceEventSink, IoType, Protocol, Reactor, Reactors, Share, VerboseError, }, eventing::{ nexus_events::{state_change_event_meta, subsystem_pause_event_meta}, - Event, - EventWithMeta, + Event, EventWithMeta, }, rebuild::HistoryRecord, subsys::NvmfSubsystem, @@ -70,14 +51,8 @@ use crate::{ use crate::core::{BdevStater, BdevStats, CoreError, IoCompletionStatus}; use events_api::event::EventAction; use spdk_rs::{ - libspdk::spdk_bdev_notify_blockcnt_change, - BdevIo, - BdevOps, - ChannelTraverseStatus, - IoChannel, - IoDevice, - IoDeviceChannelTraverse, - JsonWriteContext, + libspdk::spdk_bdev_notify_blockcnt_change, BdevIo, BdevOps, ChannelTraverseStatus, IoChannel, + IoDevice, IoDeviceChannelTraverse, JsonWriteContext, }; pub static NVME_MIN_CNTLID: u16 = 1; @@ -124,9 +99,7 @@ impl NvmeAnaState { 3 => Ok(NvmeAnaState::InaccessibleState), 4 => Ok(NvmeAnaState::PersistentLossState), 15 => Ok(NvmeAnaState::ChangeState), - _ => Err(Error::InvalidNvmeAnaState { - ana_value: value, - }), + _ => Err(Error::InvalidNvmeAnaState { ana_value: value }), } } } @@ -154,11 +127,7 @@ impl TryFrom for NvmeReservation { 4 => Self::ExclusiveAccessRegsOnly, 5 => Self::WriteExclusiveAllRegs, 6 => Self::ExclusiveAccessAllRegs, - reservation => { - return 
Err(Error::InvalidReservation { - reservation, - }) - } + reservation => return Err(Error::InvalidReservation { reservation }), }) } } @@ -218,10 +187,7 @@ impl NexusNvmeParams { self.resv_key = resv_key; } /// Set the preemption key. - pub fn set_preempt_key( - &mut self, - preempt_key: Option, - ) { + pub fn set_preempt_key(&mut self, preempt_key: Option) { self.preempt_key = preempt_key; } /// Set the reservation type. @@ -397,9 +363,7 @@ impl<'n> Nexus<'n> { nvme_params, has_io_device: false, initiators: parking_lot::Mutex::new(HashSet::new()), - nexus_info: futures::lock::Mutex::new(PersistentNexusInfo::new( - nexus_info_key, - )), + nexus_info: futures::lock::Mutex::new(PersistentNexusInfo::new(nexus_info_key)), io_subsystem: None, nexus_uuid: Default::default(), event_sink: None, @@ -527,8 +491,7 @@ impl<'n> Nexus<'n> { return; } - Reactors::master() - .send_future(Nexus::reset_all_children(self.name.clone())); + Reactors::master().send_future(Nexus::reset_all_children(self.name.clone())); } /// Sets the state of the Nexus. @@ -591,10 +554,7 @@ impl<'n> Nexus<'n> { } /// TODO - pub(super) unsafe fn child_add_unsafe( - self: Pin<&mut Self>, - child: NexusChild<'n>, - ) { + pub(super) unsafe fn child_add_unsafe(self: Pin<&mut Self>, child: NexusChild<'n>) { self.unpin_mut().children.push(child) } @@ -610,10 +570,7 @@ impl<'n> Nexus<'n> { /// TODO #[allow(dead_code)] - pub(super) unsafe fn child_at_mut( - self: Pin<&mut Self>, - idx: usize, - ) -> &mut NexusChild<'n> { + pub(super) unsafe fn child_at_mut(self: Pin<&mut Self>, idx: usize) -> &mut NexusChild<'n> { self.unpin_mut() .children .get_mut(idx) @@ -638,18 +595,13 @@ impl<'n> Nexus<'n> { } /// Check whether nexus can perform target operation. - pub(crate) fn check_nexus_operation( - &self, - _op: NexusOperation, - ) -> Result<(), Error> { + pub(crate) fn check_nexus_operation(&self, _op: NexusOperation) -> Result<(), Error> { match *self.state.lock() { // When nexus under shutdown or is shutdown, no further nexus // operations allowed. - NexusState::ShuttingDown | NexusState::Shutdown => { - Err(Error::OperationNotAllowed { - reason: "Nexus is shutdown".to_string(), - }) - } + NexusState::ShuttingDown | NexusState::Shutdown => Err(Error::OperationNotAllowed { + reason: "Nexus is shutdown".to_string(), + }), _ if self.io_subsystem_state() == Some(NexusPauseState::Frozen) => { Err(Error::OperationNotAllowed { reason: "Nexus io subsystem is frozen".to_string(), @@ -691,10 +643,7 @@ impl<'n> Nexus<'n> { } /// Configure nexus's block device to match parameters of the child devices. 
- async fn setup_nexus_bdev( - mut self: Pin<&mut Self>, - resizing: bool, - ) -> Result<(), Error> { + async fn setup_nexus_bdev(mut self: Pin<&mut Self>, resizing: bool) -> Result<(), Error> { let name = self.name.clone(); if self.children().is_empty() { @@ -716,10 +665,7 @@ impl<'n> Nexus<'n> { Err(_) => { return Err(Error::NexusIncomplete { name, - reason: format!( - "No block device available for child {}", - child.uri(), - ), + reason: format!("No block device available for child {}", child.uri(),), }) } }; @@ -788,14 +734,9 @@ impl<'n> Nexus<'n> { if !resizing { self.as_mut().set_num_blocks(end_blk - start_blk); } else { - let rc = spdk_bdev_notify_blockcnt_change( - nbdev, - end_blk - start_blk, - ); + let rc = spdk_bdev_notify_blockcnt_change(nbdev, end_blk - start_blk); if rc != 0 { - error!( - "{self:?}: failed to notify block cnt change on nexus" - ); + error!("{self:?}: failed to notify block cnt change on nexus"); return Err(Error::NexusResize { source: Errno::from_raw(rc), name, @@ -821,9 +762,7 @@ impl<'n> Nexus<'n> { /// Opens the Nexus instance for IO. /// Once this function is called, the device is visible and can /// be used for IO. - async fn register_instance( - bdev: &mut spdk_rs::Bdev>, - ) -> Result<(), Error> { + async fn register_instance(bdev: &mut spdk_rs::Bdev>) -> Result<(), Error> { let mut nex = bdev.data_mut(); assert_eq!(*nex.state.lock(), NexusState::Init); @@ -888,10 +827,7 @@ impl<'n> Nexus<'n> { /// # Arguments /// * `sigterm`: Indicates whether this is as a result of process /// termination. - pub async fn destroy_ext( - mut self: Pin<&mut Self>, - sigterm: bool, - ) -> Result<(), Error> { + pub async fn destroy_ext(mut self: Pin<&mut Self>, sigterm: bool) -> Result<(), Error> { info!("{:?}: destroying nexus...", self); self.as_mut().unshare_nexus().await?; @@ -929,13 +865,8 @@ impl<'n> Nexus<'n> { Ok(()) } Err(err) => { - error!( - "Nexus '{name}': failed to destroy: {e}", - e = err.verbose() - ); - Err(Error::NexusDestroy { - name, - }) + error!("Nexus '{name}': failed to destroy: {e}", e = err.verbose()); + Err(Error::NexusDestroy { name }) } } } @@ -943,10 +874,7 @@ impl<'n> Nexus<'n> { /// Resize the nexus as part of volume resize workflow. The underlying /// replicas are already resized before nexus resize is called. - pub async fn resize( - mut self: Pin<&mut Self>, - resize_to: u64, - ) -> Result<(), Error> { + pub async fn resize(mut self: Pin<&mut Self>, resize_to: u64) -> Result<(), Error> { // XXX: This check is likely relevant for resize as well to // avoid unforeseen complications. self.check_nexus_operation(NexusOperation::NexusResize)?; @@ -1047,8 +975,7 @@ impl<'n> Nexus<'n> { // In case of active shutdown operation, bail out. NexusState::ShuttingDown => { return Err(Error::OperationNotAllowed { - reason: "Shutdown operation is already in progress" - .to_string(), + reason: "Shutdown operation is already in progress".to_string(), }); } // Save current state and mark nexus as being under shutdown. 
@@ -1106,10 +1033,7 @@ impl<'n> Nexus<'n> { EventWithMeta::event( self.deref(), EventAction::StateChange, - state_change_event_meta( - NexusState::ShuttingDown, - NexusState::Shutdown, - ), + state_change_event_meta(NexusState::ShuttingDown, NexusState::Shutdown), ) .generate(); @@ -1180,10 +1104,7 @@ impl<'n> Nexus<'n> { } /// set ANA state of the NVMe subsystem - pub async fn set_ana_state( - &self, - ana_state: NvmeAnaState, - ) -> Result<(), Error> { + pub async fn set_ana_state(&self, ana_state: NvmeAnaState) -> Result<(), Error> { if let Some(Protocol::Nvmf) = self.shared() { if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { subsystem.pause().await?; @@ -1266,9 +1187,7 @@ impl<'n> Nexus<'n> { /// Returns a pinned mutable reference of the same lifetime as the Nexus /// itself. #[inline(always)] - pub(super) unsafe fn pinned_mut( - self: Pin<&mut Self>, - ) -> Pin<&'n mut Nexus<'n>> { + pub(super) unsafe fn pinned_mut(self: Pin<&mut Self>) -> Pin<&'n mut Nexus<'n>> { Pin::new_unchecked(self.unpin_mut()) } @@ -1282,9 +1201,7 @@ impl<'n> Nexus<'n> { /// Returns a mutable reference to Nexus's Bdev. #[inline(always)] - pub(super) unsafe fn bdev_mut( - self: Pin<&mut Self>, - ) -> &mut Bdev> { + pub(super) unsafe fn bdev_mut(self: Pin<&mut Self>) -> &mut Bdev> { self.get_unchecked_mut().bdev.as_mut().unwrap() } @@ -1300,20 +1217,12 @@ impl<'n> Nexus<'n> { /// No checks are performed (e.g. bdev module name check), as it is assumed /// that the provided bdev is a nexus bdev. #[inline(always)] - pub(crate) unsafe fn unsafe_from_untyped_bdev( - bdev: spdk_rs::UntypedBdev, - ) -> &'n Nexus<'n> { - spdk_rs::Bdev::>::unsafe_from_inner_ptr( - bdev.unsafe_inner_ptr() as *mut _, - ) - .data() + pub(crate) unsafe fn unsafe_from_untyped_bdev(bdev: spdk_rs::UntypedBdev) -> &'n Nexus<'n> { + spdk_rs::Bdev::>::unsafe_from_inner_ptr(bdev.unsafe_inner_ptr() as *mut _).data() } /// Sets the required alignment of the Nexus. - pub(crate) unsafe fn set_required_alignment( - self: Pin<&mut Self>, - new_val: u8, - ) { + pub(crate) unsafe fn set_required_alignment(self: Pin<&mut Self>, new_val: u8) { (*self.bdev_mut().unsafe_inner_mut_ptr()).required_alignment = new_val; } @@ -1379,8 +1288,7 @@ impl<'n> BdevOps for Nexus<'n> { return; } - let open_children = - self.children.iter().filter(|c| c.is_opened()).count(); + let open_children = self.children.iter().filter(|c| c.is_opened()).count(); // TODO: This doesn't seem possible to happen at this stage, but seems // we should still try to handle this in separate future since // we're handling it here anyway as a block_on is not safe to @@ -1395,14 +1303,10 @@ impl<'n> BdevOps for Nexus<'n> { // TODO: double-check interaction with rebuild job logic // TODO: cancel rebuild jobs? - let n = - self_ref.children.iter().filter(|c| c.is_opened()).count(); + let n = self_ref.children.iter().filter(|c| c.is_opened()).count(); if n > 0 { - warn!( - "{:?}: {} open children remain(s), closing...", - self_ref, n - ); + warn!("{:?}: {} open children remain(s), closing...", self_ref, n); for child in self_ref.children.iter() { if child.is_opened() { @@ -1428,11 +1332,7 @@ impl<'n> BdevOps for Nexus<'n> { /// Main entry point to submit IO to the underlying children this uses /// callbacks rather than futures and closures for performance reasons. /// This function is not called when the IO is re-submitted (see below). 
- fn submit_request( - &self, - chan: IoChannel>, - bio: BdevIo>, - ) { + fn submit_request(&self, chan: IoChannel>, bio: BdevIo>) { let io = NexusBio::new(chan, bio); io.submit_request(); } @@ -1442,10 +1342,7 @@ impl<'n> BdevOps for Nexus<'n> { // we always assume the device supports read/write commands // allow NVMe Admin as it is needed for local replicas IoType::Read | IoType::Write | IoType::NvmeAdmin => true, - IoType::Flush - | IoType::Reset - | IoType::Unmap - | IoType::WriteZeros => { + IoType::Flush | IoType::Reset | IoType::Unmap | IoType::WriteZeros => { let supported = self.io_is_supported(io_type); if !supported { if io_type == IoType::Flush { @@ -1558,10 +1455,8 @@ pub async fn nexus_create_v2( match uuid::Uuid::parse_str(name) { Ok(name_uuid) => { let bdev_uuid = name_uuid.to_string(); - let nexus_uuid = uuid::Uuid::parse_str(uuid).map_err(|_| { - Error::InvalidUuid { - uuid: uuid.to_string(), - } + let nexus_uuid = uuid::Uuid::parse_str(uuid).map_err(|_| Error::InvalidUuid { + uuid: uuid.to_string(), })?; nexus_create_internal( name, @@ -1613,9 +1508,7 @@ async fn nexus_create_internal( name: name.to_owned(), }); } - if nexus.name != name - || (nexus_uuid.is_some() && Some(nexus.nexus_uuid) != nexus_uuid) - { + if nexus.name != name || (nexus_uuid.is_some() && Some(nexus.nexus_uuid) != nexus_uuid) { return Err(Error::UuidExists { uuid: nexus.nexus_uuid.to_string(), nexus: name.to_string(), @@ -1661,11 +1554,7 @@ async fn nexus_create_internal( } match Nexus::register_instance(&mut nexus_bdev).await { - Err(Error::NexusIncomplete { - name, - reason, - .. - }) => { + Err(Error::NexusIncomplete { name, reason, .. }) => { // We still have code that waits for children to come online, // although this currently only works for config files. 
// We need to explicitly clean up child devices @@ -1694,10 +1583,7 @@ async fn nexus_create_internal( } } - Err(Error::NexusCreate { - name, - reason, - }) + Err(Error::NexusCreate { name, reason }) } Err(error) => { diff --git a/io-engine/src/bdev/nexus/nexus_bdev_children.rs b/io-engine/src/bdev/nexus/nexus_bdev_children.rs index 96fc67ca6..02ed4eefe 100644 --- a/io-engine/src/bdev/nexus/nexus_bdev_children.rs +++ b/io-engine/src/bdev/nexus/nexus_bdev_children.rs @@ -29,33 +29,16 @@ use futures::channel::oneshot; use snafu::ResultExt; use super::{ - nexus_err, - nexus_lookup, - nexus_lookup_mut, - ChildState, - ChildSyncState, - Error, - FaultReason, - IOLogChannel, - IoMode, - Nexus, - NexusChild, - NexusOperation, - NexusPauseState, - NexusState, - NexusStatus, - PersistOp, + nexus_err, nexus_lookup, nexus_lookup_mut, ChildState, ChildSyncState, Error, FaultReason, + IOLogChannel, IoMode, Nexus, NexusChild, NexusOperation, NexusPauseState, NexusState, + NexusStatus, PersistOp, }; use crate::{ bdev::{dev::device_name, device_create, device_destroy, device_lookup}, bdev_api::BdevError, core::{ - device_cmd_queue, - DeviceCommand, - DeviceEventListener, - DeviceEventType, - Reactors, + device_cmd_queue, DeviceCommand, DeviceEventListener, DeviceEventType, Reactors, VerboseError, }, eventing::{EventMetaGen, EventWithMeta}, @@ -64,19 +47,12 @@ use crate::{ use events_api::event::EventAction; -use spdk_rs::{ - ffihelper::cb_arg, - ChannelTraverseStatus, - IoDeviceChannelTraverse, -}; +use spdk_rs::{ffihelper::cb_arg, ChannelTraverseStatus, IoDeviceChannelTraverse}; impl<'n> Nexus<'n> { /// Create and register a single child to nexus, only allowed during the /// nexus init phase - pub async fn new_child( - mut self: Pin<&mut Self>, - uri: &str, - ) -> Result<(), BdevError> { + pub async fn new_child(mut self: Pin<&mut Self>, uri: &str) -> Result<(), BdevError> { assert_eq!(*self.state.lock(), NexusState::Init); info!("{:?}: adding child: '{}'...", self, uri); @@ -84,11 +60,7 @@ impl<'n> Nexus<'n> { let nexus_name = self.nexus_name().to_owned(); let device_name = device_create(uri).await?; - let c = NexusChild::new( - uri.to_string(), - nexus_name, - device_lookup(&device_name), - ); + let c = NexusChild::new(uri.to_string(), nexus_name, device_lookup(&device_name)); info!("{:?}: added to nexus", c); @@ -121,16 +93,9 @@ impl<'n> Nexus<'n> { match self.start_rebuild(uri).await { Err(e) => { // todo: CAS-253 retry starting the rebuild again when ready - error!( - "Child added but rebuild failed to start: {}", - e.verbose() - ); + error!("Child added but rebuild failed to start: {}", e.verbose()); match self.child(uri) { - Ok(child) => { - child - .close_faulted(FaultReason::RebuildFailed) - .await - } + Ok(child) => child.close_faulted(FaultReason::RebuildFailed).await, Err(e) => error!( "Failed to find newly added child {}, error: {}", uri, @@ -151,16 +116,12 @@ impl<'n> Nexus<'n> { /// The child may require a rebuild first, so the nexus will /// transition to degraded mode when the addition has been successful. 
- async fn add_child_only( - mut self: Pin<&mut Self>, - uri: &str, - ) -> Result { + async fn add_child_only(mut self: Pin<&mut Self>, uri: &str) -> Result { self.check_nexus_operation(NexusOperation::ReplicaAdd)?; - let name = - device_create(uri).await.context(nexus_err::CreateChild { - name: self.name.clone(), - })?; + let name = device_create(uri).await.context(nexus_err::CreateChild { + name: self.name.clone(), + })?; assert!(self.num_blocks() > 0); assert!(self.block_len() > 0); @@ -235,10 +196,7 @@ impl<'n> Nexus<'n> { } match self - .persist(PersistOp::AddChild { - child_uri, - healthy, - }) + .persist(PersistOp::AddChild { child_uri, healthy }) .await { Ok(_) => Ok(self.status()), @@ -284,10 +242,7 @@ impl<'n> Nexus<'n> { /// Destroy child with given uri. /// If the child does not exist the method returns success. - pub async fn remove_child( - mut self: Pin<&mut Self>, - uri: &str, - ) -> Result<(), Error> { + pub async fn remove_child(mut self: Pin<&mut Self>, uri: &str) -> Result<(), Error> { info!("{:?}: remove child request: '{}'", self, uri); self.check_nexus_operation(NexusOperation::ReplicaRemove)?; @@ -302,9 +257,7 @@ impl<'n> Nexus<'n> { debug!("{self:?}: remove child '{uri}': pausing..."); let paused = self.pause_rebuild_jobs(uri).await; if let Err(e) = self.as_mut().pause().await { - error!( - "{self:?}: remove child '{uri}': failed to pause subsystem: {e}" - ); + error!("{self:?}: remove child '{uri}': failed to pause subsystem: {e}"); paused.resume().await; return Ok(()); } @@ -402,10 +355,7 @@ impl<'n> Nexus<'n> { } /// Checks that the given child can be removed or offlined. - fn check_child_remove_operation( - &self, - child_uri: &str, - ) -> Result<(), Error> { + fn check_child_remove_operation(&self, child_uri: &str) -> Result<(), Error> { let _ = self.child(child_uri)?; if self.child_count() == 1 { @@ -505,9 +455,7 @@ impl<'n> Nexus<'n> { /// Tries to open all the child devices. /// Opens children, determines and validates block size and block count /// of underlying devices. - pub(crate) async fn try_open_children( - mut self: Pin<&mut Self>, - ) -> Result<(), Error> { + pub(crate) async fn try_open_children(mut self: Pin<&mut Self>) -> Result<(), Error> { info!("{:?}: opening nexus children...", self); let name = self.name.clone(); @@ -555,9 +503,7 @@ impl<'n> Nexus<'n> { // if any one fails, close all children. let mut write_ex_err: Result<(), Error> = Ok(()); for child in self.children_iter() { - if let Err(error) = - child.reservation_acquire(&self.nvme_params).await - { + if let Err(error) = child.reservation_acquire(&self.nvme_params).await { write_ex_err = Err(Error::ChildWriteExclusiveResvFailed { source: error, child: child.uri().to_owned(), @@ -624,19 +570,13 @@ impl<'n> Nexus<'n> { } /// Looks up a child by device name. - pub fn lookup_child_by_device( - &self, - device_name: &str, - ) -> Option<&NexusChild<'n>> { + pub fn lookup_child_by_device(&self, device_name: &str) -> Option<&NexusChild<'n>> { self.children_iter() .find(|c| c.match_device_name(device_name)) } /// Looks up a child by its UUID. - pub fn child_by_uuid( - &self, - device_uuid: &str, - ) -> Result<&NexusChild<'n>, Error> { + pub fn child_by_uuid(&self, device_uuid: &str) -> Result<&NexusChild<'n>, Error> { let dev = self.children_iter().find(|c| match c.get_uuid() { Some(u) => u.eq(device_uuid), None => false, @@ -649,16 +589,12 @@ impl<'n> Nexus<'n> { /// Looks up a child by device name. /// Returns an error if child is not found. 
- pub fn child_by_device( - &self, - device_name: &str, - ) -> Result<&NexusChild<'n>, Error> { - self.lookup_child_by_device(device_name).ok_or_else(|| { - Error::ChildNotFound { + pub fn child_by_device(&self, device_name: &str) -> Result<&NexusChild<'n>, Error> { + self.lookup_child_by_device(device_name) + .ok_or_else(|| Error::ChildNotFound { child: device_name.to_owned(), name: self.name.clone(), - } - }) + }) } /// Looks up a child by device name and returns a mutable reference. @@ -680,12 +616,11 @@ impl<'n> Nexus<'n> { device_name: &str, ) -> Result<&mut NexusChild<'n>, Error> { let nexus_name = self.name.clone(); - self.lookup_child_by_device_mut(device_name).ok_or_else(|| { - Error::ChildNotFound { + self.lookup_child_by_device_mut(device_name) + .ok_or_else(|| Error::ChildNotFound { child: device_name.to_owned(), name: nexus_name, - } - }) + }) } /// Looks up a child by its URI. @@ -704,19 +639,13 @@ impl<'n> Nexus<'n> { } /// Looks up a child by its URI and returns a mutable reference. - pub fn lookup_child_mut( - self: Pin<&mut Self>, - child_uri: &str, - ) -> Option<&mut NexusChild<'n>> { + pub fn lookup_child_mut(self: Pin<&mut Self>, child_uri: &str) -> Option<&mut NexusChild<'n>> { unsafe { self.children_iter_mut().find(|c| c.uri() == child_uri) } } /// Looks up a child by its URI and returns a mutable reference. /// Returns an error if child is not found. - pub fn child_mut( - self: Pin<&mut Self>, - child_uri: &str, - ) -> Result<&mut NexusChild<'n>, Error> { + pub fn child_mut(self: Pin<&mut Self>, child_uri: &str) -> Result<&mut NexusChild<'n>, Error> { let nexus_name = self.name.clone(); self.lookup_child_mut(child_uri) .ok_or_else(|| Error::ChildNotFound { @@ -733,24 +662,18 @@ impl<'n> Nexus<'n> { child_uri: &str, ) -> Result<&'static mut NexusChild<'static>, Error> { self.child_mut(child_uri).map(|c| { - std::mem::transmute::< - &mut NexusChild<'n>, - &'static mut NexusChild<'static>, - >(c) + std::mem::transmute::<&mut NexusChild<'n>, &'static mut NexusChild<'static>>(c) }) } /// Looks up a child by its URI and returns child device name. - pub fn get_child_device_name( - &self, - child_uri: &str, - ) -> Result { - self.child(child_uri)?.get_device_name().ok_or_else(|| { - Error::ChildDeviceNotOpen { + pub fn get_child_device_name(&self, child_uri: &str) -> Result { + self.child(child_uri)? + .get_device_name() + .ok_or_else(|| Error::ChildDeviceNotOpen { child: child_uri.to_owned(), name: self.name.clone(), - } - }) + }) } /// Returns the list of URIs of all children. 
@@ -769,8 +692,7 @@ impl<'n> Nexus<'n> { impl DeviceEventListener for Nexus<'_> { fn handle_device_event(&self, evt: DeviceEventType, dev_name: &str) { match evt { - DeviceEventType::DeviceRemoved - | DeviceEventType::LoopbackRemoved => { + DeviceEventType::DeviceRemoved | DeviceEventType::LoopbackRemoved => { Reactors::master().send_future(Nexus::child_remove_routine( self.name.clone(), dev_name.to_owned(), @@ -782,11 +704,7 @@ impl DeviceEventListener for Nexus<'_> { retiring child '{}'", self, dev_name ); - self.retire_child_device( - dev_name, - FaultReason::AdminCommandFailed, - false, - ); + self.retire_child_device(dev_name, FaultReason::AdminCommandFailed, false); } DeviceEventType::AdminQNoticeCtrlFailed => { Reactors::master().send_future(Nexus::disconnect_failed_child( @@ -904,17 +822,11 @@ impl<'n> Nexus<'n> { if let Some(mut nexus) = nexus_lookup_mut(&nexus_name) { match nexus.as_mut().lookup_child_by_device_mut(&child_device) { Some(child) => { - info!( - nexus_name, - child_device, "Unplugging nexus child device", - ); + info!(nexus_name, child_device, "Unplugging nexus child device",); child.unplug().await; } None => { - warn!( - nexus_name, - child_device, "Nexus child device not found", - ); + warn!(nexus_name, child_device, "Nexus child device not found",); } } } else { @@ -948,11 +860,7 @@ impl<'n> Nexus<'n> { } /// Retires a child device for the given nexus. - async fn child_retire_routine( - nexus_name: String, - dev: String, - retry: bool, - ) { + async fn child_retire_routine(nexus_name: String, dev: String, retry: bool) { let Some(mut nex) = nexus_lookup_mut(&nexus_name) else { warn!( "Nexus '{nexus_name}': retiring device '{dev}': \ @@ -972,9 +880,8 @@ impl<'n> Nexus<'n> { assert!(Reactors::is_master()); - Reactors::current().send_future(Nexus::child_retire_routine( - nexus_name, dev, retry, - )); + Reactors::current() + .send_future(Nexus::child_retire_routine(nexus_name, dev, retry)); } else { warn!( "{nex:?}: retire failed (double pause): {err}", @@ -990,10 +897,7 @@ impl<'n> Nexus<'n> { } /// Retires a child with the given device. - async fn do_child_retire( - mut self: Pin<&mut Self>, - dev: String, - ) -> Result<(), Error> { + async fn do_child_retire(mut self: Pin<&mut Self>, dev: String) -> Result<(), Error> { warn!("{self:?}: retiring child device '{dev}'..."); // Update persistent store. To prevent data inconsistency across @@ -1072,13 +976,11 @@ impl<'n> Nexus<'n> { // Determine the amount of healthy replicas in the persistent // state and check against the last healthy // replica remaining. 
- let num_healthy = nexus_info.children.iter().fold(0, |n, c| { - if c.healthy { - n + 1 - } else { - n - } - }); + let num_healthy = + nexus_info + .children + .iter() + .fold(0, |n, c| if c.healthy { n + 1 } else { n }); match num_healthy { 0 => { diff --git a/io-engine/src/bdev/nexus/nexus_bdev_error.rs b/io-engine/src/bdev/nexus/nexus_bdev_error.rs index ca25065c8..bdc73d2c4 100644 --- a/io-engine/src/bdev/nexus/nexus_bdev_error.rs +++ b/io-engine/src/bdev/nexus/nexus_bdev_error.rs @@ -23,11 +23,7 @@ pub enum Error { NexusInitialising { name: String }, #[snafu(display("Invalid nexus uuid \"{}\"", uuid))] InvalidUuid { uuid: String }, - #[snafu(display( - "Nexus uuid \"{}\" already exists for nexus \"{}\"", - uuid, - nexus - ))] + #[snafu(display("Nexus uuid \"{}\" already exists for nexus \"{}\"", uuid, nexus))] UuidExists { uuid: String, nexus: String }, #[snafu(display("Nexus with name \"{}\" already exists", name))] NameExists { name: String }, @@ -37,10 +33,7 @@ pub enum Error { CreateCryptoBdev { source: Errno, name: String }, #[snafu(display("Failed to destroy crypto bdev for nexus {}", name))] DestroyCryptoBdev { source: Errno, name: String }, - #[snafu(display( - "The nexus {} has been already shared with a different protocol", - name - ))] + #[snafu(display("The nexus {} has been already shared with a different protocol", name))] AlreadyShared { name: String }, #[snafu(display("The nexus {} has not been shared", name))] NotShared { name: String }, @@ -52,11 +45,7 @@ pub enum Error { ShareNvmfNexus { source: CoreError, name: String }, #[snafu(display("Failed to unshare nexus {}", name))] UnshareNexus { source: CoreError, name: String }, - #[snafu(display( - "Failed to register IO device nexus {}: {}", - name, - source - ))] + #[snafu(display("Failed to register IO device nexus {}: {}", name, source))] RegisterNexus { source: Errno, name: String }, #[snafu(display("Failed to create child of nexus {}: {}", name, source))] CreateChild { source: BdevError, name: String }, @@ -122,17 +111,9 @@ pub enum Error { child: String, name: String, }, - #[snafu(display( - "Cannot delete the last child {} of nexus {}", - child, - name - ))] + #[snafu(display("Cannot delete the last child {} of nexus {}", child, name))] RemoveLastChild { child: String, name: String }, - #[snafu(display( - "Cannot remove or offline the last child {} of nexus {}", - child, - name - ))] + #[snafu(display("Cannot remove or offline the last child {} of nexus {}", child, name))] RemoveLastHealthyChild { child: String, name: String }, #[snafu(display("Child {} of nexus {} not found", child, name))] ChildNotFound { child: String, name: String }, @@ -144,33 +125,17 @@ pub enum Error { PauseChild { child: String, name: String }, #[snafu(display("Suitable rebuild source for nexus {} not found", name))] NoRebuildSource { name: String }, - #[snafu(display( - "Failed to create rebuild job for child {} of nexus {}", - child, - name, - ))] + #[snafu(display("Failed to create rebuild job for child {} of nexus {}", child, name,))] CreateRebuild { source: RebuildError, child: String, name: String, }, - #[snafu(display( - "Rebuild job not found for child {} of nexus {}", - child, - name, - ))] + #[snafu(display("Rebuild job not found for child {} of nexus {}", child, name,))] RebuildJobNotFound { child: String, name: String }, - #[snafu(display( - "Rebuild job already exists for child {} of nexus {}", - child, - name, - ))] + #[snafu(display("Rebuild job already exists for child {} of nexus {}", child, name,))] 
RebuildJobAlreadyExists { child: String, name: String }, - #[snafu(display( - "Failed to execute rebuild operation on job {} of nexus {}", - job, - name, - ))] + #[snafu(display("Failed to execute rebuild operation on job {} of nexus {}", job, name,))] RebuildOperation { job: String, name: String, @@ -188,12 +153,7 @@ pub enum Error { NexusDestroy { name: String }, #[snafu(display("Failed to resize nexus {}", name))] NexusResize { source: Errno, name: String }, - #[snafu(display( - "Child {} of nexus {} is not degraded but {}", - child, - name, - state - ))] + #[snafu(display("Child {} of nexus {} is not degraded but {}", child, name, state))] ChildNotDegraded { child: String, name: String, @@ -201,11 +161,7 @@ pub enum Error { }, #[snafu(display("Failed to get BdevHandle for snapshot operation"))] FailedGetHandle, - #[snafu(display( - "Failed to create snapshot on nexus {}: {}", - name, - reason - ))] + #[snafu(display("Failed to create snapshot on nexus {}: {}", name, reason))] FailedCreateSnapshot { name: String, reason: String }, #[snafu(display("NVMf subsystem error: {}", e))] SubsysNvmf { e: String }, @@ -235,75 +191,29 @@ impl From for Error { impl From for tonic::Status { fn from(e: Error) -> Self { match e { - Error::InvalidUuid { - .. - } => Status::invalid_argument(e.to_string()), - Error::InvalidKey { - .. - } => Status::invalid_argument(e.to_string()), - Error::InvalidShareProtocol { - .. - } => Status::invalid_argument(e.to_string()), - Error::InvalidReservation { - .. - } => Status::invalid_argument(e.to_string()), - Error::AlreadyShared { - .. - } => Status::invalid_argument(e.to_string()), - Error::NotShared { - .. - } => Status::invalid_argument(e.to_string()), - Error::NotSharedNvmf { - .. - } => Status::invalid_argument(e.to_string()), - Error::CreateChild { - .. - } => Status::invalid_argument(e.to_string()), - Error::MixedBlockSizes { - .. - } => Status::invalid_argument(e.to_string()), - Error::ChildGeometry { - .. - } => Status::invalid_argument(e.to_string()), - Error::ChildTooSmall { - .. - } => Status::invalid_argument(e.to_string()), - Error::OpenChild { - .. - } => Status::invalid_argument(e.to_string()), - Error::OperationNotAllowed { - .. - } => Status::failed_precondition(e.to_string()), - Error::RemoveLastChild { - .. - } => Status::failed_precondition(e.to_string()), - Error::RemoveLastHealthyChild { - .. - } => Status::failed_precondition(e.to_string()), - Error::ChildNotFound { - .. - } => Status::not_found(e.to_string()), - Error::RebuildJobNotFound { - .. - } => Status::not_found(e.to_string()), - Error::NexusIncomplete { - .. - } => Status::failed_precondition(e.verbose()), - Error::NexusResize { - .. - } => Status::failed_precondition(e.to_string()), - Error::NexusNotFound { - .. - } => Status::not_found(e.to_string()), - Error::ChildAlreadyExists { - .. - } => Status::already_exists(e.to_string()), - Error::NameExists { - .. - } => Status::already_exists(e.to_string()), - Error::InvalidArguments { - .. - } => Status::invalid_argument(e.to_string()), + Error::InvalidUuid { .. } => Status::invalid_argument(e.to_string()), + Error::InvalidKey { .. } => Status::invalid_argument(e.to_string()), + Error::InvalidShareProtocol { .. } => Status::invalid_argument(e.to_string()), + Error::InvalidReservation { .. } => Status::invalid_argument(e.to_string()), + Error::AlreadyShared { .. } => Status::invalid_argument(e.to_string()), + Error::NotShared { .. } => Status::invalid_argument(e.to_string()), + Error::NotSharedNvmf { .. 
} => Status::invalid_argument(e.to_string()), + Error::CreateChild { .. } => Status::invalid_argument(e.to_string()), + Error::MixedBlockSizes { .. } => Status::invalid_argument(e.to_string()), + Error::ChildGeometry { .. } => Status::invalid_argument(e.to_string()), + Error::ChildTooSmall { .. } => Status::invalid_argument(e.to_string()), + Error::OpenChild { .. } => Status::invalid_argument(e.to_string()), + Error::OperationNotAllowed { .. } => Status::failed_precondition(e.to_string()), + Error::RemoveLastChild { .. } => Status::failed_precondition(e.to_string()), + Error::RemoveLastHealthyChild { .. } => Status::failed_precondition(e.to_string()), + Error::ChildNotFound { .. } => Status::not_found(e.to_string()), + Error::RebuildJobNotFound { .. } => Status::not_found(e.to_string()), + Error::NexusIncomplete { .. } => Status::failed_precondition(e.verbose()), + Error::NexusResize { .. } => Status::failed_precondition(e.to_string()), + Error::NexusNotFound { .. } => Status::not_found(e.to_string()), + Error::ChildAlreadyExists { .. } => Status::already_exists(e.to_string()), + Error::NameExists { .. } => Status::already_exists(e.to_string()), + Error::InvalidArguments { .. } => Status::invalid_argument(e.to_string()), e => Status::new(Code::Internal, e.verbose()), } } diff --git a/io-engine/src/bdev/nexus/nexus_bdev_rebuild.rs b/io-engine/src/bdev/nexus/nexus_bdev_rebuild.rs index f90f8b18f..9dfc463d6 100644 --- a/io-engine/src/bdev/nexus/nexus_bdev_rebuild.rs +++ b/io-engine/src/bdev/nexus/nexus_bdev_rebuild.rs @@ -3,28 +3,16 @@ use snafu::ResultExt; use std::{marker::PhantomData, sync::Arc}; use super::{ - nexus_err, - nexus_lookup_mut, - nexus_persistence::PersistOp, - ChildSyncState, - DrEvent, - Error, - FaultReason, - Nexus, + nexus_err, nexus_lookup_mut, nexus_persistence::PersistOp, ChildSyncState, DrEvent, Error, + FaultReason, Nexus, }; use crate::{ core::{Reactors, VerboseError}, eventing::{EventMetaGen, EventWithMeta}, rebuild::{ - HistoryRecord, - NexusRebuildJob, - NexusRebuildJobStarter, - RebuildError, - RebuildJobOptions, - RebuildState, - RebuildStats, - RebuildVerifyMode, + HistoryRecord, NexusRebuildJob, NexusRebuildJobStarter, RebuildError, RebuildJobOptions, + RebuildState, RebuildStats, RebuildVerifyMode, }, }; use events_api::event::EventAction; @@ -79,18 +67,13 @@ impl<'a> RebuildPauseGuard<'a> { impl<'n> Nexus<'n> { /// Starts a rebuild job and returns a receiver channel /// which can be used to await the rebuild completion - pub async fn start_rebuild( - &self, - child_uri: &str, - ) -> Result, Error> { + pub async fn start_rebuild(&self, child_uri: &str) -> Result, Error> { let name = self.name.clone(); info!("{self:?}: start rebuild request for {child_uri}"); // Find a healthy child to rebuild from. let Some(src_child_uri) = self.find_src_replica(child_uri) else { - return Err(Error::NoRebuildSource { - name: name.clone(), - }); + return Err(Error::NoRebuildSource { name: name.clone() }); }; let dst_child_uri = match self.lookup_child(child_uri) { @@ -303,10 +286,7 @@ impl<'n> Nexus<'n> { } /// Return the stats of a rebuild job for the given destination. - pub(crate) async fn rebuild_stats( - &self, - dst_uri: &str, - ) -> Result { + pub(crate) async fn rebuild_stats(&self, dst_uri: &str) -> Result { let rj = self.rebuild_job(dst_uri)?; Ok(rj.stats().await) } @@ -317,25 +297,17 @@ impl<'n> Nexus<'n> { } /// Return a mutex guard of the replica rebuild history. 
- pub fn rebuild_history_guard( - &self, - ) -> parking_lot::MutexGuard> { + pub fn rebuild_history_guard(&self) -> parking_lot::MutexGuard> { self.rebuild_history.lock() } /// Returns the rebuild progress of a rebuild job for the given destination. - pub(crate) async fn rebuild_progress( - &self, - dst_uri: &str, - ) -> Result { + pub(crate) async fn rebuild_progress(&self, dst_uri: &str) -> Result { self.rebuild_stats(dst_uri).await.map(|s| s.progress as u32) } /// Pauses rebuild jobs, returning rebuild pause guard. - pub(super) async fn pause_rebuild_jobs<'a>( - &self, - src_uri: &str, - ) -> RebuildPauseGuard<'a> { + pub(super) async fn pause_rebuild_jobs<'a>(&self, src_uri: &str) -> RebuildPauseGuard<'a> { let cancelled = self.cancel_rebuild_jobs(src_uri).await; RebuildPauseGuard::new(self.nexus_name().to_owned(), cancelled) @@ -395,11 +367,9 @@ impl<'n> Nexus<'n> { &self, dst_child_uri: &str, ) -> Result, Error> { - NexusRebuildJob::lookup(dst_child_uri).map_err(|_| { - Error::RebuildJobNotFound { - child: dst_child_uri.to_owned(), - name: self.name.to_owned(), - } + NexusRebuildJob::lookup(dst_child_uri).map_err(|_| Error::RebuildJobNotFound { + child: dst_child_uri.to_owned(), + name: self.name.to_owned(), }) } @@ -410,11 +380,9 @@ impl<'n> Nexus<'n> { dst_child_uri: &str, ) -> Result, Error> { let name = self.name.clone(); - NexusRebuildJob::lookup(dst_child_uri).map_err(|_| { - Error::RebuildJobNotFound { - child: dst_child_uri.to_owned(), - name, - } + NexusRebuildJob::lookup(dst_child_uri).map_err(|_| Error::RebuildJobNotFound { + child: dst_child_uri.to_owned(), + name, }) } @@ -481,10 +449,7 @@ impl<'n> Nexus<'n> { // rebuild has failed so we need to set the child as faulted // allowing the control plane to replace it with another - if let Some(RebuildError::ReadIoFailed { - .. - }) = job.error() - { + if let Some(RebuildError::ReadIoFailed { .. }) = job.error() { // todo: retry rebuild using another child as source? 
} @@ -496,10 +461,7 @@ impl<'n> Nexus<'n> { c.close_faulted(FaultReason::RebuildFailed).await; } _ => { - error!( - "{c:?}: rebuild job failed with state {s:?}", - s = job_state - ); + error!("{c:?}: rebuild job failed with state {s:?}", s = job_state); self.event(EventAction::RebuildEnd, job.meta()).generate(); c.close_faulted(FaultReason::RebuildFailed).await; } diff --git a/io-engine/src/bdev/nexus/nexus_bdev_snapshot.rs b/io-engine/src/bdev/nexus/nexus_bdev_snapshot.rs index 8a0834616..930b108eb 100644 --- a/io-engine/src/bdev/nexus/nexus_bdev_snapshot.rs +++ b/io-engine/src/bdev/nexus/nexus_bdev_snapshot.rs @@ -6,13 +6,7 @@ use futures::future::join_all; use super::{Error, Nexus, NexusOperation, NexusState}; use crate::{ bdev::nexus::{nexus_lookup, NexusChild}, - core::{ - snapshot::ISnapshotDescriptor, - CoreError, - Reactor, - SnapshotParams, - ToErrno, - }, + core::{snapshot::ISnapshotDescriptor, CoreError, Reactor, SnapshotParams, ToErrno}, }; use chrono::{DateTime, Utc}; use std::pin::Pin; @@ -95,10 +89,7 @@ impl ReplicaSnapshotExecutor { if seen_replicas.contains(&r.replica_uuid) { return Err(Error::FailedCreateSnapshot { name: nexus.bdev_name(), - reason: format!( - "Duplicated replica {}", - &r.replica_uuid, - ), + reason: format!("Duplicated replica {}", &r.replica_uuid,), }); } seen_replicas.insert(r.replica_uuid.to_string()); @@ -122,10 +113,7 @@ impl ReplicaSnapshotExecutor { if !replica.is_healthy() { return Err(Error::FailedCreateSnapshot { name: nexus.bdev_name(), - reason: format!( - "Replica {} is not healthy", - &r.replica_uuid, - ), + reason: format!("Replica {} is not healthy", &r.replica_uuid,), }); } diff --git a/io-engine/src/bdev/nexus/nexus_channel.rs b/io-engine/src/bdev/nexus/nexus_channel.rs index 424e109b4..149b97598 100644 --- a/io-engine/src/bdev/nexus/nexus_channel.rs +++ b/io-engine/src/bdev/nexus/nexus_channel.rs @@ -88,11 +88,9 @@ impl<'n> NexusChannel<'n> { pub(crate) fn new(nexus: Pin<&mut Nexus<'n>>) -> Self { debug!("{nexus:?}: new channel on core {c}", c = Cores::current()); - let b_init_thrd_hdls = - super::ENABLE_IO_ALL_THRD_NX_CHAN.load(Ordering::SeqCst); + let b_init_thrd_hdls = super::ENABLE_IO_ALL_THRD_NX_CHAN.load(Ordering::SeqCst); - let is_io_chan = - Thread::current().unwrap() != Thread::primary() || b_init_thrd_hdls; + let is_io_chan = Thread::current().unwrap() != Thread::primary() || b_init_thrd_hdls; if !is_io_chan { // If we are here, this means the nexus channel being created is not @@ -374,9 +372,9 @@ impl<'n> NexusChannel<'n> { child_device: &str, reason: FaultReason, ) -> Option { - let io_log = - self.nexus_mut() - .retire_child_device(child_device, reason, true)?; + let io_log = self + .nexus_mut() + .retire_child_device(child_device, reason, true)?; self.reconnect_io_logs(); Some(io_log) } @@ -450,20 +448,13 @@ impl<'n> NexusChannel<'n> { ) }); - fn dbg_devs( - prefix: &str, - name: &str, - devs: &[Box], - ) { + fn dbg_devs(prefix: &str, name: &str, devs: &[Box]) { if devs.is_empty() { debug!("{prefix}: no {name}"); } else { debug!("{prefix}: {n} {name}:", n = devs.len()); devs.iter().for_each(|dev| { - debug!( - "{prefix}: {d}", - d = dev.get_device().device_name() - ); + debug!("{prefix}: {d}", d = dev.get_device().device_name()); }); } } diff --git a/io-engine/src/bdev/nexus/nexus_child.rs b/io-engine/src/bdev/nexus/nexus_child.rs index 247b7e6a9..b00055c3c 100644 --- a/io-engine/src/bdev/nexus/nexus_child.rs +++ b/io-engine/src/bdev/nexus/nexus_child.rs @@ -19,11 +19,7 @@ use crate::{ bdev::{device_create, 
device_destroy, device_lookup}, bdev_api::BdevError, core::{ - BlockDevice, - BlockDeviceDescriptor, - BlockDeviceHandle, - CoreError, - DeviceEventSink, + BlockDevice, BlockDeviceDescriptor, BlockDeviceHandle, CoreError, DeviceEventSink, VerboseError, }, eventing::replica_events::state_change_event_meta, @@ -32,11 +28,7 @@ use crate::{ }; use crate::{ - bdev::nexus::{ - nexus_bdev::NexusNvmePreemption, - NexusNvmeParams, - NvmeReservation, - }, + bdev::nexus::{nexus_bdev::NexusNvmePreemption, NexusNvmeParams, NvmeReservation}, core::MayastorEnvironment, eventing::EventWithMeta, }; @@ -45,13 +37,10 @@ use events_api::event::EventAction; use spdk_rs::{ libspdk::{ - spdk_nvme_registered_ctrlr_extended_data, - spdk_nvme_reservation_status_extended_data, + spdk_nvme_registered_ctrlr_extended_data, spdk_nvme_reservation_status_extended_data, }, - nvme_reservation_acquire_action, - nvme_reservation_register_action, - nvme_reservation_register_cptpl, - DmaError, + nvme_reservation_acquire_action, nvme_reservation_register_action, + nvme_reservation_register_cptpl, DmaError, }; #[derive(Debug, Snafu)] @@ -63,11 +52,7 @@ pub enum ChildError { ChildFaulted {}, #[snafu(display("Child is being destroyed"))] ChildBeingDestroyed {}, - #[snafu(display( - "Child is smaller than parent {} vs {}", - child_size, - parent_size - ))] + #[snafu(display("Child is smaller than parent {} vs {}", child_size, parent_size))] ChildTooSmall { child_size: u64, parent_size: u64 }, #[snafu(display("Open child"))] OpenChild { source: CoreError }, @@ -89,10 +74,7 @@ pub enum ChildError { ResvAcquire { source: CoreError }, #[snafu(display("Failed to release reservation for child: {}", source))] ResvRelease { source: CoreError }, - #[snafu(display( - "Failed to get reservation report for child: {}", - source - ))] + #[snafu(display("Failed to get reservation report for child: {}", source))] ResvReport { source: CoreError }, #[snafu(display("Invalid reservation type for child: {}", resv_type))] ResvType { resv_type: u8 }, @@ -417,9 +399,7 @@ impl<'c> NexusChild<'c> { let desc = dev.open(true).map_err(|source| { self.set_faulted_state(FaultReason::CantOpen); - ChildError::OpenChild { - source, - } + ChildError::OpenChild { source } })?; self.device_descriptor = Some(desc); @@ -509,15 +489,13 @@ impl<'c> NexusChild<'c> { /// being rebuilt). #[inline] pub fn is_opened_unsync(&self) -> bool { - self.state() == ChildState::Open - && self.sync_state() == ChildSyncState::OutOfSync + self.state() == ChildState::Open && self.sync_state() == ChildSyncState::OutOfSync } /// Determines if the child is opened and fully synced. #[inline] pub fn is_healthy(&self) -> bool { - self.state() == ChildState::Open - && self.sync_state() == ChildSyncState::Synced + self.state() == ChildState::Open && self.sync_state() == ChildSyncState::Synced } /// Determines if the child is being rebuilt. 
@@ -559,17 +537,10 @@ impl<'c> NexusChild<'c> { .unwrap_or(nvme_reservation_acquire_action::ACQUIRE); let preempt_key = preempt_key.unwrap_or_default(); if let Err(e) = hdl - .nvme_resv_acquire( - current_key, - preempt_key, - acquire_action, - resv_type as u8, - ) + .nvme_resv_acquire(current_key, preempt_key, acquire_action, resv_type as u8) .await { - return Err(ChildError::ResvAcquire { - source: e, - }); + return Err(ChildError::ResvAcquire { source: e }); } info!( @@ -603,9 +574,7 @@ impl<'c> NexusChild<'c> { ) -> Result, ChildError> { let mut buffer = hdl.dma_malloc(4096).context(HandleDmaMalloc {})?; if let Err(e) = hdl.nvme_resv_report(1, &mut buffer).await { - return Err(ChildError::ResvReport { - source: e, - }); + return Err(ChildError::ResvReport { source: e }); } trace!("{:?}: received reservation report", self); @@ -613,9 +582,8 @@ impl<'c> NexusChild<'c> { let (stext, sl) = buffer.as_slice().split_at(std::mem::size_of::< spdk_nvme_reservation_status_extended_data, >()); - let (pre, resv_status_ext, post) = unsafe { - stext.align_to::<spdk_nvme_reservation_status_extended_data>() - }; + let (pre, resv_status_ext, post) = + unsafe { stext.align_to::<spdk_nvme_reservation_status_extended_data>() }; assert!(pre.is_empty()); assert!(post.is_empty()); @@ -624,14 +592,11 @@ impl<'c> NexusChild<'c> { info!( "reservation status: rtype {}, regctl {}, ptpls {}", - resv_status_ext[0].data.rtype, - regctl, - resv_status_ext[0].data.ptpls, + resv_status_ext[0].data.rtype, regctl, resv_status_ext[0].data.ptpls, ); - let (pre, reg_ctrlr_ext, _post) = unsafe { - sl.align_to::<spdk_nvme_registered_ctrlr_extended_data>() - }; + let (pre, reg_ctrlr_ext, _post) = + unsafe { sl.align_to::<spdk_nvme_registered_ctrlr_extended_data>() }; if !pre.is_empty() { return Ok(None); @@ -659,11 +624,7 @@ impl<'c> NexusChild<'c> { rkey, ); if c.rcsts.status() == 1 { - return Ok(Some(( - resv_status_ext[0].data.rtype, - rkey, - c.hostid, - ))); + return Ok(Some((resv_status_ext[0].data.rtype, rkey, c.hostid))); } } Ok(None) @@ -671,29 +632,21 @@ impl<'c> NexusChild<'c> { /// Check if we're the reservation holder. /// # Warning: Ignores bdevs without NVMe reservation support. - async fn resv_check_holder( - &self, - args: &NexusNvmeParams, - ) -> Result<(), ChildError> { + async fn resv_check_holder(&self, args: &NexusNvmeParams) -> Result<(), ChildError> { let hdl = self.get_io_handle_nonblock().await.context(HandleOpen {})?; let mut buffer = hdl.dma_malloc(4096).context(HandleDmaMalloc {})?; match hdl.nvme_resv_report(1, &mut buffer).await { - Err(CoreError::NotSupported { - .. - }) => return Ok(()), - Err(error) => Err(ChildError::ResvReport { - source: error, - }), + Err(CoreError::NotSupported { ..
}) => return Ok(()), + Err(error) => Err(ChildError::ResvReport { source: error }), Ok(_) => Ok(()), }?; let (stext, sl) = buffer.as_slice().split_at(std::mem::size_of::< spdk_nvme_reservation_status_extended_data, >()); - let (pre, resv_status_ext, post) = unsafe { - stext.align_to::<spdk_nvme_reservation_status_extended_data>() - }; + let (pre, resv_status_ext, post) = + unsafe { stext.align_to::<spdk_nvme_reservation_status_extended_data>() }; assert!(pre.is_empty()); assert!(post.is_empty()); @@ -702,26 +655,22 @@ impl<'c> NexusChild<'c> { info!( "{:?}: reservation status: rtype {}, regctl {}, ptpls {}", - self, - resv_status_ext[0].data.rtype, - regctl, - resv_status_ext[0].data.ptpls, + self, resv_status_ext[0].data.rtype, regctl, resv_status_ext[0].data.ptpls, ); let shared = |resv_type| { matches!( resv_type, - NvmeReservation::ExclusiveAccessAllRegs - | NvmeReservation::WriteExclusiveAllRegs + NvmeReservation::ExclusiveAccessAllRegs | NvmeReservation::WriteExclusiveAllRegs ) }; if args.resv_type as u8 != resv_status_ext[0].data.rtype { - let rtype = - NvmeReservation::try_from(resv_status_ext[0].data.rtype) - .map_err(|_| ChildError::ResvType { - resv_type: resv_status_ext[0].data.rtype, - })?; + let rtype = NvmeReservation::try_from(resv_status_ext[0].data.rtype).map_err(|_| { + ChildError::ResvType { + resv_type: resv_status_ext[0].data.rtype, + } + })?; // If we're shared, then we don't care which type it is since we're // registered... @@ -734,16 +683,14 @@ impl<'c> NexusChild<'c> { if matches!( args.resv_type, - NvmeReservation::ExclusiveAccessAllRegs - | NvmeReservation::WriteExclusiveAllRegs + NvmeReservation::ExclusiveAccessAllRegs | NvmeReservation::WriteExclusiveAllRegs ) { // if we're in "shared" mode, we don't need to know more return Ok(()); } - let (pre, reg_ctrlr_ext, _post) = unsafe { - sl.align_to::<spdk_nvme_registered_ctrlr_extended_data>() - }; + let (pre, reg_ctrlr_ext, _post) = + unsafe { sl.align_to::<spdk_nvme_registered_ctrlr_extended_data>() }; if !pre.is_empty() { // todo: why did the previous report return no holder in this // scenario? @@ -769,9 +716,7 @@ impl<'c> NexusChild<'c> { let my_hostid = match hdl.host_id().await { Ok(h) => h, Err(e) => { - return Err(ChildError::NvmeHostId { - source: e, - }); + return Err(ChildError::NvmeHostId { source: e }); } }; if owner.rkey != args.resv_key || owner.hostid != my_hostid { @@ -802,12 +747,8 @@ impl<'c> NexusChild<'c> { let resv_key = params.resv_key; if let Err(e) = self.resv_register(&*hdl, resv_key).await { return match e { - CoreError::NotSupported { - .. - } => Ok(()), - _ => Err(ChildError::ResvRegisterKey { - source: e, - }), + CoreError::NotSupported { .. } => Ok(()), + _ => Err(ChildError::ResvRegisterKey { source: e }), }; } @@ -862,12 +803,8 @@ impl<'c> NexusChild<'c> { // To be able to issue any other commands we must first register. if let Err(e) = self.resv_register(&*hdl, args.resv_key).await { return match e { - CoreError::NotSupported { - .. - } => Ok(()), - _ => Err(ChildError::ResvRegisterKey { - source: e, - }), + CoreError::NotSupported { ..
} => Ok(()), + _ => Err(ChildError::ResvRegisterKey { source: e }), }; } @@ -887,9 +824,7 @@ impl<'c> NexusChild<'c> { let my_hostid = match hdl.host_id().await { Ok(h) => h, Err(e) => { - return Err(ChildError::NvmeHostId { - source: e, - }); + return Err(ChildError::NvmeHostId { source: e }); } }; info!( @@ -897,21 +832,14 @@ impl<'c> NexusChild<'c> { self, my_hostid, hostid, pkey ); - let rtype = NvmeReservation::try_from(rtype).map_err(|_| { - ChildError::ResvType { - resv_type: rtype, - } - })?; - if rtype == args.resv_type - && hostid == my_hostid - && pkey == args.resv_key - { + let rtype = NvmeReservation::try_from(rtype) + .map_err(|_| ChildError::ResvType { resv_type: rtype })?; + if rtype == args.resv_type && hostid == my_hostid && pkey == args.resv_key { return Ok(()); } if !matches!( rtype, - NvmeReservation::WriteExclusiveAllRegs - | NvmeReservation::ExclusiveAccessAllRegs + NvmeReservation::WriteExclusiveAllRegs | NvmeReservation::ExclusiveAccessAllRegs ) { // This is the most straightforward case where we can simply preempt // the existing holder with our own key and type. @@ -931,9 +859,7 @@ impl<'c> NexusChild<'c> { // registration, so we need to start over. self.resv_register(&*hdl, args.resv_key) .await - .map_err(|e| ChildError::ResvRegisterKey { - source: e, - })?; + .map_err(|e| ChildError::ResvRegisterKey { source: e })?; self.resv_acquire(&*hdl, args.resv_key, None, args.resv_type) .await?; return Ok(()); @@ -951,9 +877,7 @@ impl<'c> NexusChild<'c> { // 8.19.7 self.resv_release(&*hdl, args.resv_key, rtype, 0) .await - .map_err(|e| ChildError::ResvRelease { - source: e, - })?; + .map_err(|e| ChildError::ResvRelease { source: e })?; // And now we can acquire the reservation with our own more // restricted reservation type. self.resv_acquire(&*hdl, args.resv_key, None, args.resv_type) @@ -992,10 +916,7 @@ impl<'c> NexusChild<'c> { /// Onlines a previously offlined child. /// The child is set out-of-sync so that it will be rebuilt. /// TODO: channels need to be updated when block devices are opened. - pub(crate) async fn online( - &mut self, - parent_size: u64, - ) -> Result { + pub(crate) async fn online(&mut self, parent_size: u64) -> Result { info!("{:?}: bringing child online", self); let state = self.state.load(); @@ -1023,10 +944,9 @@ impl<'c> NexusChild<'c> { // Re-create the block device as it will have been previously // destroyed. - let name = - device_create(&self.name).await.context(ChildBdevCreate { - child: self.name.clone(), - })?; + let name = device_create(&self.name).await.context(ChildBdevCreate { + child: self.name.clone(), + })?; self.device = device_lookup(&name); if self.device.is_none() { @@ -1055,10 +975,10 @@ impl<'c> NexusChild<'c> { pub(crate) async fn close(&self) -> Result<(), BdevError> { info!("{self:?}: closing child..."); - if self.destroy_state.compare_exchange( - ChildDestroyState::None, - ChildDestroyState::Destroying, - ) != Ok(ChildDestroyState::None) + if self + .destroy_state + .compare_exchange(ChildDestroyState::None, ChildDestroyState::Destroying) + != Ok(ChildDestroyState::None) { warn!("{self:?}: already being closed"); return Ok(()); @@ -1079,9 +999,7 @@ impl<'c> NexusChild<'c> { info!("{self:?}: destroying block device..."); match device_destroy(&self.name).await { Ok(_) => { - info!( - "{self:?}: block device destroyed, waiting for removal..." - ); + info!("{self:?}: block device destroyed, waiting for removal..."); // Only wait for block device removal if the child has been // initialised. 
@@ -1161,11 +1079,7 @@ impl<'c> NexusChild<'c> { } /// create a new nexus child - pub fn new( - name: String, - parent: String, - device: Option<Box<dyn BlockDevice>>, - ) -> Self { + pub fn new(name: String, parent: String, device: Option<Box<dyn BlockDevice>>) -> Self { // TODO: Remove check for persistent store if PersistentStore::enabled() && Self::uuid(&name).is_none() { panic!("Child name does not contain a UUID."); } @@ -1196,16 +1110,12 @@ impl<'c> NexusChild<'c> { } /// TODO - pub(super) fn remove_rebuild_job( - &self, - ) -> Option<std::sync::Arc<NexusRebuildJob>> { + pub(super) fn remove_rebuild_job(&self) -> Option<std::sync::Arc<NexusRebuildJob>> { NexusRebuildJob::remove(&self.name).ok() } /// Return the rebuild job which is rebuilding this child, if rebuilding. - pub(crate) fn rebuild_job( - &self, - ) -> Option<std::sync::Arc<NexusRebuildJob>> { + pub(crate) fn rebuild_job(&self) -> Option<std::sync::Arc<NexusRebuildJob>> { NexusRebuildJob::lookup(&self.name).ok() } @@ -1230,9 +1140,7 @@ impl<'c> NexusChild<'c> { } /// Get I/O handle for the block device associated with this Nexus child. - pub fn get_io_handle( - &self, - ) -> Result<Box<dyn BlockDeviceHandle>, CoreError> { + pub fn get_io_handle(&self) -> Result<Box<dyn BlockDeviceHandle>, CoreError> { if let Some(desc) = self.device_descriptor.as_ref() { desc.get_io_handle() } else { @@ -1243,9 +1151,7 @@ impl<'c> NexusChild<'c> { } } - pub async fn get_io_handle_nonblock( - &self, - ) -> Result<Box<dyn BlockDeviceHandle>, CoreError> { + pub async fn get_io_handle_nonblock(&self) -> Result<Box<dyn BlockDeviceHandle>, CoreError> { if let Some(desc) = self.device_descriptor.as_ref() { desc.get_io_handle_nonblock().await } else { @@ -1313,11 +1219,7 @@ impl<'c> NexusChild<'c> { if io_log.is_none() { if let Some(d) = &self.device { - *io_log = Some(IOLog::new( - &d.device_name(), - d.num_blocks(), - d.block_len(), - )); + *io_log = Some(IOLog::new(&d.device_name(), d.num_blocks(), d.block_len())); debug!("{self:?}: started new I/O log: {log:?}", log = *io_log); } diff --git a/io-engine/src/bdev/nexus/nexus_io.rs b/io-engine/src/bdev/nexus/nexus_io.rs index 62ced7db6..c68426b4b 100644 --- a/io-engine/src/bdev/nexus/nexus_io.rs +++ b/io-engine/src/bdev/nexus/nexus_io.rs @@ -9,13 +9,9 @@ use nix::errno::Errno; use spdk_rs::{ libspdk::{ - spdk_bdev_io, - spdk_bdev_io_complete_nvme_status, - spdk_io_channel, - SPDK_NVME_SC_ABORTED_SQ_DELETION, - SPDK_NVME_SC_CAPACITY_EXCEEDED, - SPDK_NVME_SC_INVALID_OPCODE, - SPDK_NVME_SC_RESERVATION_CONFLICT, + spdk_bdev_io, spdk_bdev_io_complete_nvme_status, spdk_io_channel, + SPDK_NVME_SC_ABORTED_SQ_DELETION, SPDK_NVME_SC_CAPACITY_EXCEEDED, + SPDK_NVME_SC_INVALID_OPCODE, SPDK_NVME_SC_RESERVATION_CONFLICT, }, BdevIo, }; @@ -23,18 +19,8 @@ use spdk_rs::{ use super::{FaultReason, IOLogChannel, Nexus, NexusChannel, NEXUS_PRODUCT_ID}; use crate::core::{ - BlockDevice, - BlockDeviceHandle, - CoreError, - Cores, - IoCompletionStatus, - IoStatus, - IoSubmissionFailure, - IoType, - LvolFailure, - Mthread, - NvmeStatus, - ReadOptions, + BlockDevice, BlockDeviceHandle, CoreError, Cores, IoCompletionStatus, IoStatus, + IoSubmissionFailure, IoType, LvolFailure, Mthread, NvmeStatus, ReadOptions, }; #[cfg(feature = "nexus-io-tracing")] @@ -187,11 +173,9 @@ impl<'n> NexusBio<'n> { if let Err(_e) = match self.io_type() { IoType::Read => self.readv(), // these IOs are submitted to all the underlying children - IoType::Write - | IoType::WriteZeros - | IoType::Reset - | IoType::Unmap - | IoType::Flush => self.submit_all(), + IoType::Write | IoType::WriteZeros | IoType::Reset | IoType::Unmap | IoType::Flush => { + self.submit_all() + } IoType::NvmeAdmin => { self.fail(); Err(CoreError::NotSupported { @@ -224,11 +208,7 @@ impl<'n> NexusBio<'n> { } /// Invoked when a nexus IO completes.
- fn child_completion( - device: &dyn BlockDevice, - status: IoCompletionStatus, - ctx: *mut c_void, - ) { + fn child_completion(device: &dyn BlockDevice, status: IoCompletionStatus, ctx: *mut c_void) { let mut nexus_io = NexusBio::from(ctx as *mut spdk_bdev_io); nexus_io.complete(device, status); } @@ -246,11 +226,7 @@ impl<'n> NexusBio<'n> { } /// Completion handler for the nexus when a child I/O completes. - fn complete( - &mut self, - child: &dyn BlockDevice, - status: IoCompletionStatus, - ) { + fn complete(&mut self, child: &dyn BlockDevice, status: IoCompletionStatus) { #[cfg(feature = "fault-injection")] let status = self.inject_completion_error(child, status); @@ -295,10 +271,9 @@ impl<'n> NexusBio<'n> { fn fail(&self) { match self.nexus().last_error { IoCompletionStatus::NvmeError(s) => self.fail_nvme_status(s), - IoCompletionStatus::LvolError(LvolFailure::NoSpace) => self - .fail_nvme_status(NvmeStatus::Generic( - SPDK_NVME_SC_CAPACITY_EXCEEDED, - )), + IoCompletionStatus::LvolError(LvolFailure::NoSpace) => { + self.fail_nvme_status(NvmeStatus::Generic(SPDK_NVME_SC_CAPACITY_EXCEEDED)) + } _ => self.0.fail(), } } @@ -361,10 +336,7 @@ impl<'n> NexusBio<'n> { /// submit a read operation to one of the children of this nexus #[inline] - fn submit_read( - &self, - hdl: &dyn BlockDeviceHandle, - ) -> Result<(), CoreError> { + fn submit_read(&self, hdl: &dyn BlockDeviceHandle) -> Result<(), CoreError> { #[cfg(feature = "fault-injection")] self.inject_submission_error(hdl)?; @@ -394,15 +366,11 @@ impl<'n> NexusBio<'n> { // TODO: ENOMEM and ENXIO should be handled differently and // device should not be retired in case of ENOMEM. let device = hdl.get_device().device_name(); - error!( - "{self:?}: read I/O to '{device}' submission failed: {r:?}" - ); + error!("{self:?}: read I/O to '{device}' submission failed: {r:?}"); self.fault_device( &device, - IoCompletionStatus::IoSubmissionError( - IoSubmissionFailure::Read, - ), + IoCompletionStatus::IoSubmissionError(IoSubmissionFailure::Read), ); r } else { @@ -410,9 +378,7 @@ impl<'n> NexusBio<'n> { r } } else { - error!( - "{self:?}: read I/O submission failed: no children available" - ); + error!("{self:?}: read I/O submission failed: no children available"); Err(CoreError::NoDevicesAvailable {}) } @@ -503,10 +469,7 @@ impl<'n> NexusBio<'n> { } #[inline] - fn submit_write( - &self, - hdl: &dyn BlockDeviceHandle, - ) -> Result<(), CoreError> { + fn submit_write(&self, hdl: &dyn BlockDeviceHandle) -> Result<(), CoreError> { trace_nexus_io!( "Submitting: {self:?} -> {name}", name = hdl.get_device().device_name() @@ -525,10 +488,7 @@ impl<'n> NexusBio<'n> { } #[inline] - fn submit_unmap( - &self, - hdl: &dyn BlockDeviceHandle, - ) -> Result<(), CoreError> { + fn submit_unmap(&self, hdl: &dyn BlockDeviceHandle) -> Result<(), CoreError> { trace_nexus_io!( "Submitting: {self:?} -> {name}", name = hdl.get_device().device_name() @@ -543,10 +503,7 @@ impl<'n> NexusBio<'n> { } #[inline] - fn submit_write_zeroes( - &self, - hdl: &dyn BlockDeviceHandle, - ) -> Result<(), CoreError> { + fn submit_write_zeroes(&self, hdl: &dyn BlockDeviceHandle) -> Result<(), CoreError> { trace_nexus_io!( "Submitting: {self:?} -> {name}", name = hdl.get_device().device_name() @@ -564,10 +521,7 @@ impl<'n> NexusBio<'n> { } #[inline] - fn submit_reset( - &self, - hdl: &dyn BlockDeviceHandle, - ) -> Result<(), CoreError> { + fn submit_reset(&self, hdl: &dyn BlockDeviceHandle) -> Result<(), CoreError> { trace_nexus_io!( "Submitting: {self:?} -> {name}", name = 
hdl.get_device().device_name() @@ -577,10 +531,7 @@ impl<'n> NexusBio<'n> { } #[inline] - fn submit_flush( - &self, - hdl: &dyn BlockDeviceHandle, - ) -> Result<(), CoreError> { + fn submit_flush(&self, hdl: &dyn BlockDeviceHandle) -> Result<(), CoreError> { trace_nexus_io!( "Submitting: {self:?} -> {name}", name = hdl.get_device().device_name() @@ -647,9 +598,7 @@ impl<'n> NexusBio<'n> { if let Some(log) = self.fault_device( &device, - IoCompletionStatus::IoSubmissionError( - IoSubmissionFailure::Write, - ), + IoCompletionStatus::IoSubmissionError(IoSubmissionFailure::Write), ) { self.log_io(&log); } @@ -666,9 +615,7 @@ impl<'n> NexusBio<'n> { self.ctx_mut().status = IoStatus::Success; } else { debug_assert_eq!(self.ctx().in_flight, 0); - error!( - "{self:?}: failing nexus I/O: all child I/O submissions failed" - ); + error!("{self:?}: failing nexus I/O: all child I/O submissions failed"); self.fail(); } @@ -702,9 +649,7 @@ impl<'n> NexusBio<'n> { io_status: IoCompletionStatus, ) -> Option { let reason = match io_status { - IoCompletionStatus::LvolError(LvolFailure::NoSpace) => { - FaultReason::NoSpace - } + IoCompletionStatus::LvolError(LvolFailure::NoSpace) => FaultReason::NoSpace, _ => FaultReason::IoError, }; @@ -712,11 +657,7 @@ impl<'n> NexusBio<'n> { } /// TODO - fn completion_error( - &mut self, - child: &dyn BlockDevice, - status: IoCompletionStatus, - ) { + fn completion_error(&mut self, child: &dyn BlockDevice, status: IoCompletionStatus) { // We have experienced a failure on one of the child devices. We need to // ensure we do not submit more IOs to this child. We do not // need to tell other cores about this because @@ -731,9 +672,7 @@ impl<'n> NexusBio<'n> { if matches!( status, - IoCompletionStatus::NvmeError(NvmeStatus::Generic( - SPDK_NVME_SC_INVALID_OPCODE - )) + IoCompletionStatus::NvmeError(NvmeStatus::Generic(SPDK_NVME_SC_INVALID_OPCODE)) ) { warn!( "{self:?}: invalid opcode error on '{dev}', skipping retire", @@ -746,9 +685,7 @@ impl<'n> NexusBio<'n> { // replica should not be retired. if matches!( status, - IoCompletionStatus::NvmeError(NvmeStatus::Generic( - SPDK_NVME_SC_RESERVATION_CONFLICT - )) + IoCompletionStatus::NvmeError(NvmeStatus::Generic(SPDK_NVME_SC_RESERVATION_CONFLICT)) ) { warn!( "{self:?}: reservation conflict on '{dev}', shutdown nexus", @@ -760,9 +697,7 @@ impl<'n> NexusBio<'n> { if matches!( status, - IoCompletionStatus::NvmeError(NvmeStatus::Generic( - SPDK_NVME_SC_ABORTED_SQ_DELETION - )) + IoCompletionStatus::NvmeError(NvmeStatus::Generic(SPDK_NVME_SC_ABORTED_SQ_DELETION)) ) { warn!( "{self:?}: aborted submission queue deleted on '{dev}'", @@ -784,14 +719,9 @@ impl<'n> NexusBio<'n> { /// Checks if an error is to be injected upon submission. 
#[cfg(feature = "fault-injection")] #[inline] - fn inject_submission_error( - &self, - hdl: &dyn BlockDeviceHandle, - ) -> Result<(), CoreError> { + fn inject_submission_error(&self, hdl: &dyn BlockDeviceHandle) -> Result<(), CoreError> { use crate::core::fault_injection::{ - inject_submission_error, - FaultDomain::NexusChild, - InjectIoCtx, + inject_submission_error, FaultDomain::NexusChild, InjectIoCtx, }; inject_submission_error(&InjectIoCtx::with_iovs( @@ -813,9 +743,7 @@ impl<'n> NexusBio<'n> { status: IoCompletionStatus, ) -> IoCompletionStatus { use crate::core::fault_injection::{ - inject_completion_error, - FaultDomain::NexusChild, - InjectIoCtx, + inject_completion_error, FaultDomain::NexusChild, InjectIoCtx, }; inject_completion_error( diff --git a/io-engine/src/bdev/nexus/nexus_io_log.rs b/io-engine/src/bdev/nexus/nexus_io_log.rs index 4ae6be655..a80ac7090 100644 --- a/io-engine/src/bdev/nexus/nexus_io_log.rs +++ b/io-engine/src/bdev/nexus/nexus_io_log.rs @@ -39,19 +39,10 @@ impl Debug for IOLogChannelInner { impl IOLogChannelInner { /// Creates new I/O log channel for the given channel. - fn new( - core: u32, - device_name: &str, - num_blocks: u64, - block_len: u64, - ) -> Self { + fn new(core: u32, device_name: &str, num_blocks: u64, block_len: u64) -> Self { Self { core, - segments: UnsafeCell::new(Some(SegmentMap::new( - num_blocks, - block_len, - SEGMENT_SIZE, - ))), + segments: UnsafeCell::new(Some(SegmentMap::new(num_blocks, block_len, SEGMENT_SIZE))), device_name: device_name.to_owned(), } } @@ -67,8 +58,7 @@ impl IOLogChannelInner { pub(crate) fn log_io(&self, io_type: IoType, lbn: u64, lbn_cnt: u64) { assert_eq!(self.core, Cores::current()); - if matches!(io_type, IoType::Write | IoType::WriteZeros | IoType::Unmap) - { + if matches!(io_type, IoType::Write | IoType::WriteZeros | IoType::Unmap) { unsafe { &mut *self.segments.get() } .as_mut() .expect("Accessing stopped I/O log channel") @@ -119,12 +109,7 @@ impl Debug for IOLogChannel { impl IOLogChannel { /// Creates new I/O log channel for the given channel. - fn new( - core: u32, - device_name: &str, - num_blocks: u64, - block_len: u64, - ) -> Self { + fn new(core: u32, device_name: &str, num_blocks: u64, block_len: u64) -> Self { Self(Rc::new(IOLogChannelInner::new( core, device_name, @@ -155,20 +140,13 @@ impl Debug for IOLog { impl IOLog { /// Creates a new I/O log instance for the given device. 
- pub(crate) fn new( - device_name: &str, - num_blocks: u64, - block_len: u64, - ) -> Self { + pub(crate) fn new(device_name: &str, num_blocks: u64, block_len: u64) -> Self { assert!(!device_name.is_empty() && num_blocks > 0 && block_len > 0); let mut channels = HashMap::new(); for i in Cores::list_cores() { - channels.insert( - i, - IOLogChannel::new(i, device_name, num_blocks, block_len), - ); + channels.insert(i, IOLogChannel::new(i, device_name, num_blocks, block_len)); } Self { diff --git a/io-engine/src/bdev/nexus/nexus_io_subsystem.rs b/io-engine/src/bdev/nexus/nexus_io_subsystem.rs index a02c2ee1a..d760c677a 100644 --- a/io-engine/src/bdev/nexus/nexus_io_subsystem.rs +++ b/io-engine/src/bdev/nexus/nexus_io_subsystem.rs @@ -102,10 +102,9 @@ impl<'n> NexusIoSubsystem<'n> { trace!("{:?}: pausing I/O...", self); loop { - let state = self.pause_state.compare_exchange( - NexusPauseState::Unpaused, - NexusPauseState::Pausing, - ); + let state = self + .pause_state + .compare_exchange(NexusPauseState::Unpaused, NexusPauseState::Pausing); match state { Ok(NexusPauseState::Unpaused) => { @@ -118,37 +117,20 @@ impl<'n> NexusIoSubsystem<'n> { ); if let Some(Protocol::Nvmf) = self.bdev.shared() { - if let Some(subsystem) = - NvmfSubsystem::nqn_lookup(&self.name) - { - trace!( - "{:?}: pausing subsystem '{}'...", - self, - subsystem.get_nqn() - ); + if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { + trace!("{:?}: pausing subsystem '{}'...", self, subsystem.get_nqn()); if let Err(e) = subsystem.pause().await { - panic!( - "Failed to pause subsystem '{}': {}", - subsystem.get_nqn(), - e - ); + panic!("Failed to pause subsystem '{}': {}", subsystem.get_nqn(), e); } - trace!( - "{:?}: subsystem '{}' paused", - self, - subsystem.get_nqn() - ); + trace!("{:?}: subsystem '{}' paused", self, subsystem.get_nqn()); } } // Mark subsystem as paused after it has been paused. self.pause_state - .compare_exchange( - NexusPauseState::Pausing, - NexusPauseState::Paused, - ) + .compare_exchange(NexusPauseState::Pausing, NexusPauseState::Paused) .expect("Failed to mark subsystem as Paused"); break; } @@ -164,8 +146,7 @@ impl<'n> NexusIoSubsystem<'n> { } // Wait till the subsystem has completed transition and retry // operation. - Err(NexusPauseState::Unpausing) - | Err(NexusPauseState::Pausing) => { + Err(NexusPauseState::Unpausing) | Err(NexusPauseState::Pausing) => { trace!( "{:?}: nexus is in intermediate state, \ deferring pause operation", @@ -253,9 +234,7 @@ impl<'n> NexusIoSubsystem<'n> { // In case the last pause discarded, resume the subsystem.
if v == 1 { if state == NexusPauseState::Frozen || freeze { - if let Some(subsystem) = - NvmfSubsystem::nqn_lookup(&self.name) - { + if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { trace!( "{:?}: subsystem '{}' not being resumed", self, @@ -264,11 +243,8 @@ impl<'n> NexusIoSubsystem<'n> { } self.pause_state.store(NexusPauseState::Frozen); } else { - if let Some(subsystem) = - NvmfSubsystem::nqn_lookup(&self.name) - { - self.pause_state - .store(NexusPauseState::Unpausing); + if let Some(subsystem) = NvmfSubsystem::nqn_lookup(&self.name) { + self.pause_state.store(NexusPauseState::Unpausing); trace!( "{:?}: resuming subsystem '{}'...", self, @@ -281,11 +257,7 @@ impl<'n> NexusIoSubsystem<'n> { e ); } - trace!( - "{:?}: subsystem '{}' resumed", - self, - subsystem.get_nqn() - ); + trace!("{:?}: subsystem '{}' resumed", self, subsystem.get_nqn()); } self.pause_state.store(NexusPauseState::Unpaused); } diff --git a/io-engine/src/bdev/nexus/nexus_iter.rs b/io-engine/src/bdev/nexus/nexus_iter.rs index e9d552d7c..8662c375a 100644 --- a/io-engine/src/bdev/nexus/nexus_iter.rs +++ b/io-engine/src/bdev/nexus/nexus_iter.rs @@ -14,16 +14,12 @@ pub fn nexus_iter_mut<'n>() -> NexusIterMut<'n> { } /// Looks up a Nexus by its name, and returns a reference to it. -pub fn nexus_lookup<'n>( - name: &str, -) -> Option<<NexusIter<'n> as Iterator>::Item> { +pub fn nexus_lookup<'n>(name: &str) -> Option<<NexusIter<'n> as Iterator>::Item> { NexusIter::new().find(|n| n.name == name) } /// Looks up a Nexus by its name, and returns a mutable reference to it. -pub fn nexus_lookup_mut<'n>( - name: &str, -) -> Option<<NexusIterMut<'n> as Iterator>::Item> { +pub fn nexus_lookup_mut<'n>(name: &str) -> Option<<NexusIterMut<'n> as Iterator>::Item> { NexusIterMut::new().find(|n| n.name == name) } @@ -32,15 +28,12 @@ pub fn nexus_lookup_name_uuid<'n>( name: &str, nexus_uuid: Option<uuid::Uuid>, ) -> Option<<NexusIter<'n> as Iterator>::Item> { - NexusIter::new().find(|n| { - n.name == name || (nexus_uuid.is_some() && Some(n.uuid()) == nexus_uuid) - }) + NexusIter::new() + .find(|n| n.name == name || (nexus_uuid.is_some() && Some(n.uuid()) == nexus_uuid)) } /// Looks up a Nexus by its uuid, and returns a mutable reference to it. -pub fn nexus_lookup_uuid_mut<'n>( - uuid: &str, -) -> Option<<NexusIterMut<'n> as Iterator>::Item> { +pub fn nexus_lookup_uuid_mut<'n>(uuid: &str) -> Option<<NexusIterMut<'n> as Iterator>::Item> { NexusIterMut::new().find(|n| n.uuid().to_string() == uuid) } @@ -51,17 +44,13 @@ fn try_nqn_to_nexus_name(nqn: &str) -> Option<String> { } /// Looks up a Nexus by its subsystem NQN, and returns a reference to it. -pub fn nexus_lookup_nqn<'n>( - nqn: &str, -) -> Option<<NexusIter<'n> as Iterator>::Item> { +pub fn nexus_lookup_nqn<'n>(nqn: &str) -> Option<<NexusIter<'n> as Iterator>::Item> { try_nqn_to_nexus_name(nqn).and_then(|n| nexus_lookup(&n)) } /// Looks up a Nexus by its subsystem NQN, and returns a mutable reference to /// it.
-pub fn nexus_lookup_nqn_mut<'n>( - nqn: &str, -) -> Option<<NexusIterMut<'n> as Iterator>::Item> { +pub fn nexus_lookup_nqn_mut<'n>(nqn: &str) -> Option<<NexusIterMut<'n> as Iterator>::Item> { try_nqn_to_nexus_name(nqn).and_then(|n| nexus_lookup_mut(&n)) } diff --git a/io-engine/src/bdev/nexus/nexus_module.rs b/io-engine/src/bdev/nexus/nexus_module.rs index ec1580430..cd142455a 100644 --- a/io-engine/src/bdev/nexus/nexus_module.rs +++ b/io-engine/src/bdev/nexus/nexus_module.rs @@ -3,13 +3,8 @@ use serde_json::json; use super::{nexus_iter, NioCtx}; use spdk_rs::{ - BdevModule, - BdevModuleBuild, - JsonWriteContext, - WithModuleConfigJson, - WithModuleFini, - WithModuleGetCtxSize, - WithModuleInit, + BdevModule, BdevModuleBuild, JsonWriteContext, WithModuleConfigJson, WithModuleFini, + WithModuleGetCtxSize, WithModuleInit, }; /// Name for Nexus Bdev module name. diff --git a/io-engine/src/bdev/nexus/nexus_nbd.rs b/io-engine/src/bdev/nexus/nexus_nbd.rs index 81dc2adf7..35774cb40 100644 --- a/io-engine/src/bdev/nexus/nexus_nbd.rs +++ b/io-engine/src/bdev/nexus/nexus_nbd.rs @@ -17,10 +17,7 @@ use std::{ use sysfs::parse_value; use spdk_rs::libspdk::{ - nbd_disk_find_by_nbd_path, - spdk_nbd_disk, - spdk_nbd_get_path, - spdk_nbd_start, + nbd_disk_find_by_nbd_path, spdk_nbd_disk, spdk_nbd_get_path, spdk_nbd_start, }; use crate::{ @@ -76,9 +73,8 @@ pub(crate) fn wait_until_ready(path: &str) { debug!("Timeout of NBD device {} was set to {}", tpath, timeout); let size: u64 = 0; let mut delay = 1; - for _i in 0i32 .. 10 { - if let Ok(f) = OpenOptions::new().read(true).open(Path::new(&tpath)) - { + for _i in 0i32..10 { + if let Ok(f) = OpenOptions::new().read(true).open(Path::new(&tpath)) { let res = unsafe { convert_ioctl_res!(libc::ioctl( f.as_raw_fd(), @@ -120,15 +116,11 @@ pub(crate) fn wait_until_ready(path: &str) { /// circumstances do not block. pub fn find_unused() -> Result<String, NbdError> { let nbd_max = - parse_value(Path::new("/sys/class/modules/nbd/parameters"), "nbds_max") - .unwrap_or(16); + parse_value(Path::new("/sys/class/modules/nbd/parameters"), "nbds_max").unwrap_or(16); - for i in 0 .. nbd_max { + for i in 0..nbd_max { let name = format!("nbd{i}"); - match parse_value::<u32>( - Path::new(&format!("/sys/class/block/{name}")), - "pid", - ) { + match parse_value::<u32>(Path::new(&format!("/sys/class/block/{name}")), "pid") { // if we find a pid file the device is in use Ok(_) => continue, Err(e) => match e.kind() { @@ -137,11 +129,8 @@ pub fn find_unused() -> Result<String, NbdError> { // The kernel needs time to construct the device // so we need to make sure we are not using it internally // already. - let nbd_device = - CString::new(format!("/dev/{name}")).unwrap(); - let ptr = unsafe { - nbd_disk_find_by_nbd_path(nbd_device.as_ptr()) - }; + let nbd_device = CString::new(format!("/dev/{name}")).unwrap(); + let ptr = unsafe { nbd_disk_find_by_nbd_path(nbd_device.as_ptr()) }; if ptr.is_null() { return Ok(nbd_device.into_string().unwrap()); @@ -157,15 +146,9 @@ pub fn find_unused() -> Result<String, NbdError> { } /// Callback for spdk_nbd_start(). -extern "C" fn start_cb( - sender_ptr: *mut c_void, - nbd_disk: *mut spdk_nbd_disk, - errno: i32, -) { +extern "C" fn start_cb(sender_ptr: *mut c_void, nbd_disk: *mut spdk_nbd_disk, errno: i32) { let sender = unsafe { - Box::from_raw( - sender_ptr as *mut oneshot::Sender<ErrnoResult<*mut spdk_nbd_disk>>, - ) + Box::from_raw(sender_ptr as *mut oneshot::Sender<ErrnoResult<*mut spdk_nbd_disk>>) }; sender .send(errno_result_from_i32(nbd_disk, errno)) @@ -173,14 +156,10 @@ extern "C" fn start_cb( } /// Start nbd disk using provided device name.
-pub async fn start( - bdev_name: &str, - device_path: &str, -) -> Result<*mut spdk_nbd_disk, NbdError> { +pub async fn start(bdev_name: &str, device_path: &str) -> Result<*mut spdk_nbd_disk, NbdError> { let c_bdev_name = CString::new(bdev_name).unwrap(); let c_device_path = CString::new(device_path).unwrap(); - let (sender, receiver) = - oneshot::channel::<ErrnoResult<*mut spdk_nbd_disk>>(); + let (sender, receiver) = oneshot::channel::<ErrnoResult<*mut spdk_nbd_disk>>(); unsafe { spdk_nbd_start( @@ -223,9 +202,7 @@ impl NbdDisk { wait_until_ready(&device_path); info!("Started nbd disk {} for {}", device_path, bdev_name); - Ok(Self { - nbd_ptr, - }) + Ok(Self { nbd_ptr }) } /// Stop and release nbd device. @@ -249,14 +226,9 @@ impl NbdDisk { // original size and a partition scan. // Set the size to 0 before disconnecting in hopes of stopping // that. - let f = - OpenOptions::new().read(true).open(Path::new(&nbd_name)); - convert_ioctl_res!(libc::ioctl( - f.unwrap().as_raw_fd(), - SET_SIZE as u64, - 0 - )) - .unwrap(); + let f = OpenOptions::new().read(true).open(Path::new(&nbd_name)); + convert_ioctl_res!(libc::ioctl(f.unwrap().as_raw_fd(), SET_SIZE as u64, 0)) + .unwrap(); nbd_disconnect(ptr as *mut _); }; debug!("NBD device disconnected successfully"); diff --git a/io-engine/src/bdev/nexus/nexus_persistence.rs b/io-engine/src/bdev/nexus/nexus_persistence.rs index 7fefa8918..68c03e559 100644 --- a/io-engine/src/bdev/nexus/nexus_persistence.rs +++ b/io-engine/src/bdev/nexus/nexus_persistence.rs @@ -98,23 +98,18 @@ impl<'n> Nexus<'n> { assert!(!nexus_info.clean_shutdown); self.children_iter().for_each(|c| { let child_info = ChildInfo { - uuid: NexusChild::uuid(c.uri()) - .expect("Failed to get child UUID."), + uuid: NexusChild::uuid(c.uri()).expect("Failed to get child UUID."), healthy: c.is_healthy(), }; nexus_info.children.push(child_info); }); } - PersistOp::AddChild { - child_uri, - healthy, - } => { + PersistOp::AddChild { child_uri, healthy } => { // Add the state of a new child. This should only be called // on adding a new child. Take into account that the same child // can be readded again. let child_info = ChildInfo { - uuid: NexusChild::uuid(child_uri) - .expect("Failed to get child UUID."), + uuid: NexusChild::uuid(child_uri).expect("Failed to get child UUID."), healthy: *healthy, }; @@ -129,20 +124,13 @@ impl<'n> Nexus<'n> { None => nexus_info.children.push(child_info), } } - PersistOp::RemoveChild { - child_uri, - } => { - let uuid = NexusChild::uuid(child_uri) - .expect("Failed to get child UUID."); + PersistOp::RemoveChild { child_uri } => { + let uuid = NexusChild::uuid(child_uri).expect("Failed to get child UUID."); nexus_info.children.retain(|child| child.uuid != uuid); } - PersistOp::Update { - child_uri, - healthy, - } => { - let uuid = NexusChild::uuid(child_uri) - .expect("Failed to get child UUID."); + PersistOp::Update { child_uri, healthy } => { + let uuid = NexusChild::uuid(child_uri).expect("Failed to get child UUID."); // Only update the state of the child that has changed. Do not // update the other children or "clean shutdown" information. // This should only be called on a child state change.
@@ -164,8 +152,7 @@ impl<'n> Nexus<'n> { return Ok(()); } - let uuid = NexusChild::uuid(child_uri) - .expect("Failed to get child UUID."); + let uuid = NexusChild::uuid(child_uri).expect("Failed to get child UUID."); nexus_info.children.iter_mut().for_each(|c| { if c.uuid == uuid { diff --git a/io-engine/src/bdev/nexus/nexus_share.rs b/io-engine/src/bdev/nexus/nexus_share.rs index 4697b5362..289bd1e9d 100644 --- a/io-engine/src/bdev/nexus/nexus_share.rs +++ b/io-engine/src/bdev/nexus/nexus_share.rs @@ -44,9 +44,7 @@ impl Share for Nexus<'_> { .pin_bdev_mut() .share_nvmf(props) .await - .context(nexus_err::ShareNvmfNexus { - name, - })?; + .context(nexus_err::ShareNvmfNexus { name })?; let uri = self.share_uri().unwrap(); info!("{:?}: shared NVMF target as '{}'", self, uri); @@ -67,11 +65,10 @@ impl Share for Nexus<'_> { props: P, ) -> Result<(), Self::Error> { let name = self.name.clone(); - self.pin_bdev_mut().update_properties(props).await.context( - nexus_err::UpdateShareProperties { - name, - }, - ) + self.pin_bdev_mut() + .update_properties(props) + .await + .context(nexus_err::UpdateShareProperties { name }) } /// TODO @@ -79,11 +76,11 @@ impl Share for Nexus<'_> { info!("{:?}: unsharing nexus bdev...", self); let name = self.name.clone(); - self.as_mut().pin_bdev_mut().unshare().await.context( - nexus_err::UnshareNexus { - name, - }, - )?; + self.as_mut() + .pin_bdev_mut() + .unshare() + .await + .context(nexus_err::UnshareNexus { name })?; info!("{:?}: unshared nexus bdev", self); @@ -150,9 +147,7 @@ impl<'n> Nexus<'n> { warn!("{} is already shared", self.name); self.as_mut() - .update_properties( - UpdateProps::new().with_allowed_hosts(allowed_hosts), - ) + .update_properties(UpdateProps::new().with_allowed_hosts(allowed_hosts)) .await?; return Ok(self.get_share_uri().unwrap()); @@ -168,11 +163,11 @@ impl<'n> Nexus<'n> { // right now Off is mapped to Nbd, will clean up the Nbd related // code once we refactor the rust tests that use nbd. Protocol::Off => { - let disk = NbdDisk::create(&self.name).await.context( - nexus_err::ShareNbdNexus { + let disk = NbdDisk::create(&self.name) + .await + .context(nexus_err::ShareNbdNexus { name: self.name.clone(), - }, - )?; + })?; let uri = disk.as_uri(); unsafe { self.as_mut().get_unchecked_mut().nexus_target = @@ -241,9 +236,7 @@ pub(crate) struct NexusPtpl { impl NexusPtpl { /// Get a `Self` with the given uuid. 
pub(crate) fn new(uuid: uuid::Uuid) -> Self { - Self { - uuid, - } + Self { uuid } } fn uuid(&self) -> &uuid::Uuid { &self.uuid @@ -251,9 +244,7 @@ impl NexusPtpl { } impl<'n> From<&Nexus<'n>> for NexusPtpl { fn from(n: &Nexus<'n>) -> Self { - NexusPtpl { - uuid: n.uuid(), - } + NexusPtpl { uuid: n.uuid() } } } impl PtplFileOps for NexusPtpl { diff --git a/io-engine/src/bdev/null_bdev.rs b/io-engine/src/bdev/null_bdev.rs index aba4db74d..71e29de7f 100644 --- a/io-engine/src/bdev/null_bdev.rs +++ b/io-engine/src/bdev/null_bdev.rs @@ -44,8 +44,7 @@ impl TryFrom<&Url> for Null { }); } - let mut parameters: HashMap<String, String> = - uri.query_pairs().into_owned().collect(); + let mut parameters: HashMap<String, String> = uri.query_pairs().into_owned().collect(); let blk_size: u32 = if let Some(value) = parameters.remove("blk_size") { value.parse().context(bdev_api::IntParamParseFailed { uri: uri.to_string(), @@ -60,9 +59,7 @@ impl TryFrom<&Url> for Null { if blk_size != 512 && blk_size != 4096 { return Err(BdevError::InvalidUri { uri: uri.to_string(), - message: - "invalid blk_size specified must be one of 512 or 4096" - .to_string(), + message: "invalid blk_size specified must be one of 512 or 4096".to_string(), }); } @@ -76,16 +73,15 @@ impl TryFrom<&Url> for Null { 0 }; - let num_blocks: u64 = - if let Some(value) = parameters.remove("num_blocks") { - value.parse().context(bdev_api::IntParamParseFailed { - uri: uri.to_string(), - parameter: String::from("num_blocks"), - value: value.clone(), - })? - } else { - 0 - }; + let num_blocks: u64 = if let Some(value) = parameters.remove("num_blocks") { + value.parse().context(bdev_api::IntParamParseFailed { + uri: uri.to_string(), + parameter: String::from("num_blocks"), + value: value.clone(), + })? + } else { + 0 + }; if size != 0 && num_blocks != 0 { return Err(BdevError::InvalidUri { @@ -95,16 +91,15 @@ impl TryFrom<&Url> for Null { }); } - let uuid = uri::uuid(parameters.remove("uuid")).context( - bdev_api::UuidParamParseFailed { + let uuid = + uri::uuid(parameters.remove("uuid")).context(bdev_api::UuidParamParseFailed { uri: uri.to_string(), - }, - )?; + })?; reject_unknown_parameters(uri, parameters)?; Ok(Self { - name: uri.path()[1 ..].into(), + name: uri.path()[1..].into(), alias: uri.to_string(), num_blocks: if num_blocks != 0 { num_blocks @@ -150,8 +145,7 @@ impl CreateDestroy for Null { }; let errno = unsafe { - let mut bdev: *mut spdk_rs::libspdk::spdk_bdev = - std::ptr::null_mut(); + let mut bdev: *mut spdk_rs::libspdk::spdk_bdev = std::ptr::null_mut(); spdk_rs::libspdk::bdev_null_create(&mut bdev, &opts) }; @@ -199,13 +193,9 @@ impl CreateDestroy for Null { .context(bdev_api::BdevCommandCanceled { name: self.name.clone(), })?
- .context(bdev_api::DestroyBdevFailed { - name: self.name, - }) + .context(bdev_api::DestroyBdevFailed { name: self.name }) } else { - Err(BdevError::BdevNotFound { - name: self.name, - }) + Err(BdevError::BdevNotFound { name: self.name }) } } } diff --git a/io-engine/src/bdev/null_ng.rs b/io-engine/src/bdev/null_ng.rs index d7c469ca0..242316223 100644 --- a/io-engine/src/bdev/null_ng.rs +++ b/io-engine/src/bdev/null_ng.rs @@ -2,16 +2,8 @@ use parking_lot::Mutex; use std::{cell::RefCell, marker::PhantomData, pin::Pin, time::Duration}; use spdk_rs::{ - BdevIo, - BdevModule, - BdevModuleBuild, - BdevOps, - IoChannel, - IoDevice, - IoType, - Poller, - PollerBuilder, - WithModuleInit, + BdevIo, BdevModule, BdevModuleBuild, BdevOps, IoChannel, IoDevice, IoType, Poller, + PollerBuilder, WithModuleInit, }; const NULL_MODULE_NAME: &str = "NullNg"; @@ -94,17 +86,11 @@ impl<'a> BdevOps for NullIoDevice<'a> { } /// TODO - fn submit_request( - &self, - io_chan: IoChannel, - bio: BdevIo, - ) { + fn submit_request(&self, io_chan: IoChannel, bio: BdevIo) { let chan_data = io_chan.channel_data(); match bio.io_type() { - IoType::Read | IoType::Write => { - chan_data.poller.data().iovs.lock().push(bio) - } + IoType::Read | IoType::Write => chan_data.poller.data().iovs.lock().push(bio), _ => bio.fail(), }; } diff --git a/io-engine/src/bdev/nvme.rs b/io-engine/src/bdev/nvme.rs index b6ec14b3e..3042a621b 100644 --- a/io-engine/src/bdev/nvme.rs +++ b/io-engine/src/bdev/nvme.rs @@ -43,7 +43,7 @@ impl TryFrom<&Url> for NVMe { } Ok(Self { - name: url.path()[1 ..].into(), + name: url.path()[1..].into(), url: url.clone(), }) } @@ -60,14 +60,8 @@ impl CreateDestroy for NVMe { type Error = BdevError; async fn create(&self) -> Result<String, Self::Error> { - extern "C" fn nvme_create_cb( - arg: *mut c_void, - _bdev_count: c_ulong, - errno: c_int, - ) { - let sender = unsafe { - Box::from_raw(arg as *mut oneshot::Sender<ErrnoResult<()>>) - }; + extern "C" fn nvme_create_cb(arg: *mut c_void, _bdev_count: c_ulong, errno: c_int) { + let sender = unsafe { Box::from_raw(arg as *mut oneshot::Sender<ErrnoResult<()>>) }; sender .send(errno_result_from_i32((), errno)) @@ -99,11 +93,9 @@ impl CreateDestroy for NVMe { ) }; - errno_result_from_i32((), errno).context( - bdev_api::CreateBdevInvalidParams { - name: self.name.clone(), - }, - )?; + errno_result_from_i32((), errno).context(bdev_api::CreateBdevInvalidParams { + name: self.name.clone(), + })?; receiver .await diff --git a/io-engine/src/bdev/nvmf.rs b/io-engine/src/bdev/nvmf.rs index 7da651c61..3112aaacd 100644 --- a/io-engine/src/bdev/nvmf.rs +++ b/io-engine/src/bdev/nvmf.rs @@ -14,12 +14,8 @@ use spdk_rs::{ bdevs::bdev_nvme_delete_async, ffihelper::copy_str_with_null, libspdk::{ - bdev_nvme_create, - spdk_nvme_transport_id, - SPDK_NVME_IO_FLAGS_PRCHK_GUARD, - SPDK_NVME_IO_FLAGS_PRCHK_REFTAG, - SPDK_NVME_TRANSPORT_TCP, - SPDK_NVMF_ADRFAM_IPV4, + bdev_nvme_create, spdk_nvme_transport_id, SPDK_NVME_IO_FLAGS_PRCHK_GUARD, + SPDK_NVME_IO_FLAGS_PRCHK_REFTAG, SPDK_NVME_TRANSPORT_TCP, SPDK_NVMF_ADRFAM_IPV4, }, }; @@ -77,46 +73,39 @@ impl TryFrom<&Url> for Nvmf { }); } - let mut parameters: HashMap<String, String> = - url.query_pairs().into_owned().collect(); + let mut parameters: HashMap<String, String> = url.query_pairs().into_owned().collect(); let mut prchk_flags: u32 = 0; if let Some(value) = parameters.remove("reftag") { - if uri::boolean(&value, true).context( - bdev_api::BoolParamParseFailed { - uri: url.to_string(), - parameter: String::from("reftag"), - value: value.to_string(), - }, - )?
{ + if uri::boolean(&value, true).context(bdev_api::BoolParamParseFailed { + uri: url.to_string(), + parameter: String::from("reftag"), + value: value.to_string(), + })? { prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; } } if let Some(value) = parameters.remove("guard") { - if uri::boolean(&value, true).context( - bdev_api::BoolParamParseFailed { - uri: url.to_string(), - parameter: String::from("guard"), - value: value.to_string(), - }, - )? { + if uri::boolean(&value, true).context(bdev_api::BoolParamParseFailed { + uri: url.to_string(), + parameter: String::from("guard"), + value: value.to_string(), + })? { prchk_flags |= SPDK_NVME_IO_FLAGS_PRCHK_GUARD; } } - let uuid = uri::uuid(parameters.remove("uuid")).context( - bdev_api::UuidParamParseFailed { + let uuid = + uri::uuid(parameters.remove("uuid")).context(bdev_api::UuidParamParseFailed { uri: url.to_string(), - }, - )?; + })?; reject_unknown_parameters(url, parameters)?; Ok(Nvmf { - name: url[url::Position::BeforeHost .. url::Position::AfterPath] - .into(), + name: url[url::Position::BeforeHost..url::Position::AfterPath].into(), alias: url.to_string(), host: host.to_string(), port: url.port().unwrap_or(DEFAULT_NVMF_PORT), @@ -147,14 +136,8 @@ impl CreateDestroy for Nvmf { }); } - extern "C" fn done_nvme_create_cb( - arg: *mut c_void, - bdev_count: c_ulong, - errno: c_int, - ) { - let sender = unsafe { - Box::from_raw(arg as *mut oneshot::Sender<ErrnoResult<usize>>) - }; + extern "C" fn done_nvme_create_cb(arg: *mut c_void, bdev_count: c_ulong, errno: c_int) { + let sender = unsafe { Box::from_raw(arg as *mut oneshot::Sender<ErrnoResult<usize>>) }; sender .send(errno_result_from_i32(bdev_count as usize, errno)) @@ -180,11 +163,9 @@ impl CreateDestroy for Nvmf { ) }; - errno_result_from_i32((), errno).context( - bdev_api::CreateBdevInvalidParams { - name: self.name.clone(), - }, - )?; + errno_result_from_i32((), errno).context(bdev_api::CreateBdevInvalidParams { + name: self.name.clone(), + })?; let bdev_count = receiver .await diff --git a/io-engine/src/bdev/nvmx/channel.rs b/io-engine/src/bdev/nvmx/channel.rs index b8f8da4bc..16cf653b3 100644 --- a/io-engine/src/bdev/nvmx/channel.rs +++ b/io-engine/src/bdev/nvmx/channel.rs @@ -4,16 +4,11 @@ use std::{mem::size_of, os::raw::c_void, ptr::NonNull, time::Duration}; use spdk_rs::{ libspdk::{ - nvme_qpair_abort_all_queued_reqs, - nvme_transport_qpair_abort_reqs, - spdk_io_channel, - spdk_nvme_poll_group_process_completions, - spdk_nvme_qpair, - spdk_nvme_qpair_set_abort_dnr, + nvme_qpair_abort_all_queued_reqs, nvme_transport_qpair_abort_reqs, spdk_io_channel, + spdk_nvme_poll_group_process_completions, spdk_nvme_qpair, spdk_nvme_qpair_set_abort_dnr, spdk_put_io_channel, }, - Poller, - PollerBuilder, + Poller, PollerBuilder, }; use crate::{ @@ -22,11 +17,7 @@ use crate::{ }; use super::{ - nvme_bdev_running_config, - NvmeControllerState, - PollGroup, - QPair, - SpdkNvmeController, + nvme_bdev_running_config, NvmeControllerState, PollGroup, QPair, SpdkNvmeController, NVME_CONTROLLERS, }; @@ -48,17 +39,13 @@ impl<'a> NvmeIoChannel<'a> { } #[inline] - pub fn inner_from_channel( - io_channel: *mut spdk_io_channel, - ) -> &'a mut NvmeIoChannelInner<'a> { + pub fn inner_from_channel(io_channel: *mut spdk_io_channel) -> &'a mut NvmeIoChannelInner<'a> { NvmeIoChannel::from_raw(Self::io_channel_ctx(io_channel)).inner_mut() } #[inline] fn io_channel_ctx(ch: *mut spdk_io_channel) -> *mut c_void { - unsafe { - (ch as *mut u8).add(size_of::<spdk_io_channel>()) as *mut c_void - } + unsafe { (ch as *mut u8).add(size_of::<spdk_io_channel>()) as *mut c_void } } } @@ -78,9
+65,7 @@ pub struct NvmeIoChannelInner<'a> { io_stats_controller: IoStatsController, pub device: Box, /// to prevent the controller from being destroyed before the channel - ctrl: Option< - std::sync::Arc>>, - >, + ctrl: Option>>>, num_pending_ios: u64, // Flag to indicate the shutdown state of the channel. @@ -179,11 +164,7 @@ impl NvmeIoChannelInner<'_> { } /// Reinitialize channel after reset unless the channel is shutdown. - pub fn reinitialize( - &mut self, - ctrlr_name: &str, - ctrlr_handle: SpdkNvmeController, - ) -> i32 { + pub fn reinitialize(&mut self, ctrlr_name: &str, ctrlr_handle: SpdkNvmeController) -> i32 { if self.is_shutdown { error!( "{} I/O channel is shutdown, channel reinitialization not possible", @@ -256,12 +237,7 @@ impl IoStatsController { #[inline] /// Account amount of blocks and I/O operations. - pub fn account_block_io( - &mut self, - op: IoType, - num_ops: u64, - num_blocks: u64, - ) { + pub fn account_block_io(&mut self, op: IoType, num_ops: u64, num_blocks: u64) { match op { IoType::Read => { self.io_stats.num_read_ops += num_ops; @@ -298,10 +274,7 @@ impl IoStatsController { pub struct NvmeControllerIoChannel(NonNull); -extern "C" fn disconnected_qpair_cb( - _qpair: *mut spdk_nvme_qpair, - ctx: *mut c_void, -) { +extern "C" fn disconnected_qpair_cb(_qpair: *mut spdk_nvme_qpair, ctx: *mut c_void) { let inner = NvmeIoChannel::from_raw(ctx).inner_mut(); if let Some(qpair) = inner.qpair() { @@ -474,9 +447,7 @@ impl NvmeControllerIoChannel { /// Wrapper around SPDK I/O channel. impl NvmeControllerIoChannel { - pub fn from_null_checked( - ch: *mut spdk_io_channel, - ) -> Option { + pub fn from_null_checked(ch: *mut spdk_io_channel) -> Option { if ch.is_null() { None } else { diff --git a/io-engine/src/bdev/nvmx/controller.rs b/io-engine/src/bdev/nvmx/controller.rs index 8c0ff4365..713c7cd2f 100644 --- a/io-engine/src/bdev/nvmx/controller.rs +++ b/io-engine/src/bdev/nvmx/controller.rs @@ -19,51 +19,29 @@ use once_cell::sync::OnceCell; use spdk_rs::{ cpu_cores::{Cores, RoundRobinCoreSelector}, libspdk::{ - spdk_nvme_async_event_completion, - spdk_nvme_cpl, - spdk_nvme_ctrlr, - spdk_nvme_ctrlr_fail, - spdk_nvme_ctrlr_get_ns, - spdk_nvme_ctrlr_is_active_ns, - spdk_nvme_ctrlr_register_aer_callback, - spdk_nvme_detach, + spdk_nvme_async_event_completion, spdk_nvme_cpl, spdk_nvme_ctrlr, spdk_nvme_ctrlr_fail, + spdk_nvme_ctrlr_get_ns, spdk_nvme_ctrlr_is_active_ns, + spdk_nvme_ctrlr_register_aer_callback, spdk_nvme_detach, }, - Poller, - PollerBuilder, + Poller, PollerBuilder, }; use crate::{ bdev::nvmx::{ channel::{NvmeControllerIoChannel, NvmeIoChannel, NvmeIoChannelInner}, controller_inner::{SpdkNvmeController, TimeoutConfig}, - controller_state::{ - ControllerFailureReason, - ControllerFlag, - ControllerStateMachine, - }, + controller_state::{ControllerFailureReason, ControllerFlag, ControllerStateMachine}, nvme_bdev_running_config, uri::NvmeControllerContext, - utils::{ - nvme_cpl_succeeded, - NvmeAerInfoNotice, - NvmeAerInfoNvmCommandSet, - NvmeAerType, - }, + utils::{nvme_cpl_succeeded, NvmeAerInfoNotice, NvmeAerInfoNvmCommandSet, NvmeAerType}, NvmeControllerState, NvmeControllerState::*, - NvmeNamespace, - NVME_CONTROLLERS, + NvmeNamespace, NVME_CONTROLLERS, }, bdev_api::BdevError, core::{ - BlockDeviceIoStats, - CoreError, - DeviceEventDispatcher, - DeviceEventSink, - DeviceEventType, - IoDevice, - OpCompletionCallback, - OpCompletionCallbackArg, + BlockDeviceIoStats, CoreError, DeviceEventDispatcher, DeviceEventSink, DeviceEventType, + IoDevice, 
OpCompletionCallback, OpCompletionCallbackArg, }, ffihelper::{cb_arg, done_cb}, sleep::mayastor_sleep, @@ -86,15 +64,10 @@ struct ShutdownCtx { } /// CPU core selector for adminq pollers. -static ADMINQ_CORE_SELECTOR: OnceCell> = - OnceCell::new(); +static ADMINQ_CORE_SELECTOR: OnceCell> = OnceCell::new(); impl NvmeControllerInner<'_> { - fn new( - ctrlr: SpdkNvmeController, - name: String, - cfg: NonNull<TimeoutConfig>, - ) -> Self { + fn new(ctrlr: SpdkNvmeController, name: String, cfg: NonNull<TimeoutConfig>) -> Self { let io_device = Arc::new(IoDevice::new::( NonNull::new(ctrlr.as_ptr().cast()).unwrap(), &name, @@ -179,10 +152,8 @@ impl NvmeController<'_> { state_machine: ControllerStateMachine::new(name), inner: None, event_dispatcher: DeviceEventDispatcher::new(), - timeout_config: NonNull::new(Box::into_raw(Box::new( - TimeoutConfig::new(name), - ))) - .expect("failed to box timeout context"), + timeout_config: NonNull::new(Box::into_raw(Box::new(TimeoutConfig::new(name)))) + .expect("failed to box timeout context"), }; debug!("{}: new NVMe controller created", l.name); @@ -248,11 +219,7 @@ impl NvmeController<'_> { let ctrlr = self.ctrlr_as_ptr(); unsafe { - spdk_nvme_ctrlr_register_aer_callback( - ctrlr, - Some(aer_cb), - ctrlr as *mut c_void, - ); + spdk_nvme_ctrlr_register_aer_callback(ctrlr, Some(aer_cb), ctrlr as *mut c_void); }; } @@ -285,8 +252,7 @@ impl NvmeController<'_> { // Fault the controller in case of inactive namespace. if !ns_active { - self - .state_machine + self.state_machine .transition(Faulted(ControllerFailureReason::NamespaceInit)) .expect("failed to fault controller in response to ns enumeration failure"); } @@ -353,9 +319,7 @@ impl NvmeController<'_> { name: self.name.clone(), cb, cb_arg, - spdk_handle: self - .controller() - .expect("controller may not be NULL"), + spdk_handle: self.controller().expect("controller may not be NULL"), io_device, shutdown_in_progress: false, }; @@ -372,20 +336,14 @@ impl NvmeController<'_> { Ok(()) } - fn _shutdown_channels( - channel: &mut NvmeIoChannelInner, - ctx: &mut ShutdownCtx, - ) -> i32 { + fn _shutdown_channels(channel: &mut NvmeIoChannelInner, ctx: &mut ShutdownCtx) -> i32 { debug!(?ctx.name, "shutting down I/O channel"); let rc = channel.shutdown(); if rc == 0 { debug!("{} I/O channel successfully shutdown", ctx.name); } else { - error!( - "{} failed to shutdown I/O channel, reset aborted", - ctx.name - ); + error!("{} failed to shutdown I/O channel, reset aborted", ctx.name); } rc } @@ -401,7 +359,10 @@ impl NvmeController<'_> { // In case I/O channels didn't shutdown successfully, mark // the controller as Faulted. if result != 0 { - error!("{} failed to shutdown I/O channels, rc = {}. Shutdown aborted.", ctx.name, result); + error!( + "{} failed to shutdown I/O channels, rc = {}. Shutdown aborted.", + ctx.name, result + ); controller .state_machine .transition(Faulted(ControllerFailureReason::Shutdown)) @@ -445,11 +406,7 @@ impl NvmeController<'_> { } /// Get I/O statistics for all I/O channels of the controller. - pub fn get_io_stats<F, T>( - &self, - cb: F, - cb_arg: T, - ) -> Result<(), CoreError> + pub fn get_io_stats<F, T>(&self, cb: F, cb_arg: T) -> Result<(), CoreError> where F: Fn(Result<BlockDeviceIoStats, CoreError>, T) + 'static, { @@ -571,10 +528,9 @@ impl NvmeController<'_> { // Transition controller into Faulted state, but only if the // controller is in Running state, as concurrent // shutdown might be in place.
- let _ = controller.state_machine.transition_checked( - Running, - Faulted(ControllerFailureReason::Reset), - ); + let _ = controller + .state_machine + .transition_checked(Running, Faulted(ControllerFailureReason::Reset)); } // Unlock the controller before calling the callback to avoid @@ -585,10 +541,7 @@ impl NvmeController<'_> { (reset_ctx.cb)(status == 0, reset_ctx.cb_arg); } - fn _reset_destroy_channels( - channel: &mut NvmeIoChannelInner, - ctx: &mut ResetCtx, - ) -> i32 { + fn _reset_destroy_channels(channel: &mut NvmeIoChannelInner, ctx: &mut ResetCtx) -> i32 { debug!(?channel, "resetting"); // Bail out preliminary if shutdown is active. @@ -682,10 +635,7 @@ impl NvmeController<'_> { ); } - fn _reset_create_channels( - channel: &mut NvmeIoChannelInner, - reset_ctx: &mut ResetCtx, - ) -> i32 { + fn _reset_create_channels(channel: &mut NvmeIoChannelInner, reset_ctx: &mut ResetCtx) -> i32 { // Make sure no concurrent shutdown takes place. if channel.is_shutdown() { return 0; } @@ -721,10 +671,7 @@ impl NvmeController<'_> { } /// Register listener to monitor device events related to this controller. - pub fn register_device_listener( - &self, - listener: DeviceEventSink, - ) -> Result<(), CoreError> { + pub fn register_device_listener(&self, listener: DeviceEventSink) -> Result<(), CoreError> { self.event_dispatcher.add_listener(listener); debug!("{} added event listener", self.name); Ok(()) } @@ -787,9 +734,8 @@ extern "C" fn aer_cb(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { event.raw = unsafe { (*cpl).cdw0 }; - let (event_type, event_info) = unsafe { - (event.bits.async_event_type(), event.bits.async_event_info()) - }; + let (event_type, event_info) = + unsafe { (event.bits.async_event_type(), event.bits.async_event_info()) }; info!( "Received AER event: event_type={:?}, event_info={:?}", @@ -828,8 +774,8 @@ extern "C" fn aer_cb(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { /// Poll to process qpair completions on admin queue /// Returns: 0 (SPDK_POLLER_IDLE) or 1 (SPDK_POLLER_BUSY) pub extern "C" fn nvme_poll_adminq(ctx: *mut c_void) -> i32 { - let mut context = NonNull::<TimeoutConfig>::new(ctx.cast()) - .expect("ctx pointer may never be null"); + let mut context = + NonNull::<TimeoutConfig>::new(ctx.cast()).expect("ctx pointer may never be null"); let context = unsafe { context.as_mut() }; // returns number of completions processed (maybe 0) or the negated error, @@ -857,9 +803,8 @@ pub extern "C" fn nvme_poll_adminq(ctx: *mut c_void) -> i32 { "notifying listeners of admin command completion failure" ); let controller = carc.lock(); - let num_listeners = controller.notify_listeners( - DeviceEventType::AdminCommandCompletionFailed, - ); + let num_listeners = + controller.notify_listeners(DeviceEventType::AdminCommandCompletionFailed); debug!( ?dev_name, ?num_listeners, @@ -883,11 +828,11 @@ pub extern "C" fn nvme_poll_adminq(ctx: *mut c_void) -> i32 { /// Destroy target controller and notify all listeners about device removal. pub(crate) async fn destroy_device(name: String) -> Result<(), BdevError> { - let carc = NVME_CONTROLLERS.lookup_by_name(&name).ok_or( - BdevError::BdevNotFound { + let carc = NVME_CONTROLLERS + .lookup_by_name(&name) + .ok_or(BdevError::BdevNotFound { name: String::from(&name), - }, - )?; + })?; // 1. Initiate controller shutdown, which shuts down all I/O resources // of the controller.
@@ -939,8 +884,7 @@ pub(crate) async fn destroy_device(name: String) -> Result<(), BdevError> { debug!(?name, "notifying listeners about device removal"); { let controller = carc.lock(); - let num_listeners = - controller.notify_listeners(DeviceEventType::DeviceRemoved); + let num_listeners = controller.notify_listeners(DeviceEventType::DeviceRemoved); debug!( ?name, ?num_listeners, @@ -969,10 +913,7 @@ pub(crate) async fn destroy_device(name: String) -> Result<(), BdevError> { Ok(()) } -pub(crate) fn connected_attached_cb( - ctx: &mut NvmeControllerContext, - ctrlr: SpdkNvmeController, -) { +pub(crate) fn connected_attached_cb(ctx: &mut NvmeControllerContext, ctrlr: SpdkNvmeController) { // we use the ctrlr address as the controller id in the global table let cid = ctrlr.as_ptr() as u64; @@ -1029,10 +970,7 @@ pub(crate) mod options { use spdk_rs::ffihelper::copy_str_with_null; use std::mem::{size_of, zeroed}; - use spdk_rs::libspdk::{ - spdk_nvme_ctrlr_get_default_ctrlr_opts, - spdk_nvme_ctrlr_opts, - }; + use spdk_rs::libspdk::{spdk_nvme_ctrlr_get_default_ctrlr_opts, spdk_nvme_ctrlr_opts}; /// structure that holds the default NVMe controller options. This is /// different from ['NvmeBdevOpts'] as it exposes more control over @@ -1080,10 +1018,7 @@ pub(crate) mod options { self.admin_timeout_ms = Some(timeout); self } - pub fn with_fabrics_connect_timeout_us>>( - mut self, - timeout: T, - ) -> Self { + pub fn with_fabrics_connect_timeout_us>>(mut self, timeout: T) -> Self { self.fabrics_connect_timeout_us = timeout.into(); self } @@ -1169,10 +1104,7 @@ pub(crate) mod options { pub(crate) mod transport { use std::{ffi::CStr, fmt::Debug}; - use spdk_rs::{ - ffihelper::copy_str_with_null, - libspdk::spdk_nvme_transport_id, - }; + use spdk_rs::{ffihelper::copy_str_with_null, libspdk::spdk_nvme_transport_id}; pub struct NvmeTransportId(spdk_nvme_transport_id); diff --git a/io-engine/src/bdev/nvmx/controller_inner.rs b/io-engine/src/bdev/nvmx/controller_inner.rs index 71b28bd50..0b7ca7f8a 100644 --- a/io-engine/src/bdev/nvmx/controller_inner.rs +++ b/io-engine/src/bdev/nvmx/controller_inner.rs @@ -9,26 +9,16 @@ use std::{ use crossbeam::atomic::AtomicCell; use spdk_rs::libspdk::{ - spdk_nvme_cmd_cb, - spdk_nvme_cpl, - spdk_nvme_ctrlr, - spdk_nvme_ctrlr_cmd_abort, - spdk_nvme_ctrlr_fail, - spdk_nvme_ctrlr_get_regs_csts, - spdk_nvme_ctrlr_process_admin_completions, - spdk_nvme_ctrlr_register_timeout_callback, - spdk_nvme_qpair, - SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT, - SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE, + spdk_nvme_cmd_cb, spdk_nvme_cpl, spdk_nvme_ctrlr, spdk_nvme_ctrlr_cmd_abort, + spdk_nvme_ctrlr_fail, spdk_nvme_ctrlr_get_regs_csts, spdk_nvme_ctrlr_process_admin_completions, + spdk_nvme_ctrlr_register_timeout_callback, spdk_nvme_qpair, + SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT, SPDK_BDEV_NVME_TIMEOUT_ACTION_NONE, SPDK_BDEV_NVME_TIMEOUT_ACTION_RESET, }; use crate::{ bdev::nvmx::{ - nvme_bdev_running_config, - utils::nvme_cpl_succeeded, - NvmeController, - NVME_CONTROLLERS, + nvme_bdev_running_config, utils::nvme_cpl_succeeded, NvmeController, NVME_CONTROLLERS, }, core::{CoreError, DeviceIoController, DeviceTimeoutAction}, }; @@ -43,9 +33,7 @@ impl TryFrom for DeviceTimeoutAction { SPDK_BDEV_NVME_TIMEOUT_ACTION_ABORT => DeviceTimeoutAction::Abort, 4 => DeviceTimeoutAction::HotRemove, _ => { - return Err(format!( - "Invalid timeout action in config: {action}" - )); + return Err(format!("Invalid timeout action in config: {action}")); } }; @@ -113,9 +101,7 @@ impl TimeoutConfig { } pub fn 
process_adminq(&self) -> i32 { - unsafe { - spdk_nvme_ctrlr_process_admin_completions(self.ctrlr.as_ptr()) - } + unsafe { spdk_nvme_ctrlr_process_admin_completions(self.ctrlr.as_ptr()) } } /// Check if the SPDK's nvme controller is failed. @@ -151,8 +137,7 @@ impl TimeoutConfig { // Setup the reset cooldown interval in case of the last // failed reset attempt. if timeout_ctx.reset_attempts == 0 { - timeout_ctx.next_reset_time = - Instant::now() + RESET_COOLDOWN_INTERVAL; + timeout_ctx.next_reset_time = Instant::now() + RESET_COOLDOWN_INTERVAL; info!( "{} reset cool down interval activated ({} secs)", timeout_ctx.name, @@ -225,10 +210,7 @@ impl TimeoutConfig { self as *mut TimeoutConfig as *mut c_void, false, ) { - error!( - "{}: failed to initiate controller reset: {}", - self.name, e - ); + error!("{}: failed to initiate controller reset: {}", self.name, e); } else { info!( "{} controller reset initiated ({} reset attempts left)", @@ -309,9 +291,7 @@ impl SpdkNvmeController { cb: spdk_nvme_cmd_cb, cb_arg: *mut c_void, ) -> i32 { - unsafe { - spdk_nvme_ctrlr_cmd_abort(self.0.as_ptr(), qpair, cid, cb, cb_arg) - } + unsafe { spdk_nvme_ctrlr_cmd_abort(self.0.as_ptr(), qpair, cid, cb, cb_arg) } } /// Returns a pointer to the underlying SPDK struct. @@ -328,8 +308,7 @@ impl SpdkNvmeController { impl From<*mut spdk_nvme_ctrlr> for SpdkNvmeController { fn from(ctrlr: *mut spdk_nvme_ctrlr) -> Self { - Self::from_ptr(ctrlr) - .expect("nullptr dereference while accessing NVME controller") + Self::from_ptr(ctrlr).expect("nullptr dereference while accessing NVME controller") } } @@ -341,10 +320,7 @@ impl DeviceIoController for NvmeController<'_> { } /// Set current I/O timeout action. - fn set_timeout_action( - &mut self, - action: DeviceTimeoutAction, - ) -> Result<(), CoreError> { + fn set_timeout_action(&mut self, action: DeviceTimeoutAction) -> Result<(), CoreError> { unsafe { self.timeout_config.as_mut().set_timeout_action(action); }; @@ -355,10 +331,7 @@ impl DeviceIoController for NvmeController<'_> { // I/O timeout handling for NVMe controller. 
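The `TryFrom` mapping reformatted above turns a raw config value into a `DeviceTimeoutAction`, rejecting anything unrecognized with a descriptive error rather than panicking at startup. A compilable sketch of the same shape; the numeric values for `None`/`Reset`/`Abort` stand in for the `SPDK_BDEV_NVME_TIMEOUT_ACTION_*` constants (only `4 => HotRemove` appears literally in the diff):

```rust
use std::convert::TryFrom;

#[derive(Debug, PartialEq)]
enum DeviceTimeoutAction {
    None,
    Reset,
    Abort,
    HotRemove,
}

impl TryFrom<u32> for DeviceTimeoutAction {
    type Error = String;

    fn try_from(action: u32) -> Result<Self, Self::Error> {
        Ok(match action {
            0 => DeviceTimeoutAction::None,
            1 => DeviceTimeoutAction::Reset,
            2 => DeviceTimeoutAction::Abort,
            4 => DeviceTimeoutAction::HotRemove,
            _ => return Err(format!("Invalid timeout action in config: {action}")),
        })
    }
}

fn main() {
    assert_eq!(DeviceTimeoutAction::try_from(2), Ok(DeviceTimeoutAction::Abort));
    assert!(DeviceTimeoutAction::try_from(9).is_err());
}
```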
impl NvmeController<'_> { - extern "C" fn command_abort_handler( - ctx: *mut c_void, - cpl: *const spdk_nvme_cpl, - ) { + extern "C" fn command_abort_handler(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { let timeout_ctx = TimeoutConfig::from_ptr(ctx as *mut TimeoutConfig); if nvme_cpl_succeeded(cpl) { @@ -415,10 +388,7 @@ impl NvmeController<'_> { cb_arg, ); if rc == 0 { - info!( - "{}: initiated abort for CID {}", - timeout_cfg.name, cid - ); + info!("{}: initiated abort for CID {}", timeout_cfg.name, cid); return; } error!( @@ -467,9 +437,7 @@ impl NvmeController<'_> { return; } - let action = match DeviceTimeoutAction::try_from( - device_defaults.action_on_timeout, - ) { + let action = match DeviceTimeoutAction::try_from(device_defaults.action_on_timeout) { Ok(action) => action, Err(e) => { error!( diff --git a/io-engine/src/bdev/nvmx/controller_state.rs b/io-engine/src/bdev/nvmx/controller_state.rs index 7cf457181..f2538286c 100644 --- a/io-engine/src/bdev/nvmx/controller_state.rs +++ b/io-engine/src/bdev/nvmx/controller_state.rs @@ -64,11 +64,7 @@ pub struct ControllerStateMachine { #[derive(Debug, Snafu, Clone)] #[snafu(visibility(pub(crate)), context(suffix(false)))] pub enum ControllerStateMachineError { - #[snafu(display( - "invalid transition from {:?} to {:?}", - current_state, - new_state - ))] + #[snafu(display("invalid transition from {:?} to {:?}", current_state, new_state))] ControllerStateTransitionError { current_state: NvmeControllerState, new_state: NvmeControllerState, @@ -88,10 +84,7 @@ pub enum ControllerStateMachineError { /// Check if a transition exists between two given states. /// Initial state: New, final state: Unconfigured. -fn check_transition( - from: NvmeControllerState, - to: NvmeControllerState, -) -> bool { +fn check_transition(from: NvmeControllerState, to: NvmeControllerState) -> bool { use NvmeControllerState::*; match from { New => matches!(to, Initializing), diff --git a/io-engine/src/bdev/nvmx/device.rs b/io-engine/src/bdev/nvmx/device.rs index 7ff985e01..7425ebe64 100644 --- a/io-engine/src/bdev/nvmx/device.rs +++ b/io-engine/src/bdev/nvmx/device.rs @@ -7,23 +7,12 @@ use uuid::Uuid; use crate::{ bdev::nvmx::{ - controller_inner::SpdkNvmeController, - NvmeController, - NvmeControllerState, - NvmeDeviceHandle, - NvmeNamespace, - NVME_CONTROLLERS, + controller_inner::SpdkNvmeController, NvmeController, NvmeControllerState, + NvmeDeviceHandle, NvmeNamespace, NVME_CONTROLLERS, }, core::{ - BlockDevice, - BlockDeviceDescriptor, - BlockDeviceHandle, - BlockDeviceIoStats, - CoreError, - DeviceEventSink, - DeviceIoController, - DeviceTimeoutAction, - IoType, + BlockDevice, BlockDeviceDescriptor, BlockDeviceHandle, BlockDeviceIoStats, CoreError, + DeviceEventSink, DeviceIoController, DeviceTimeoutAction, IoType, }, ffihelper::{cb_arg, done_cb}, }; @@ -48,9 +37,7 @@ unsafe impl Send for NvmeDeviceDescriptor {} impl NvmeDeviceDescriptor { /// TODO - fn create( - controller: &NvmeController, - ) -> Result, CoreError> { + fn create(controller: &NvmeController) -> Result, CoreError> { if let Some(ns) = controller.namespace() { Ok(Box::new(NvmeDeviceDescriptor { ns, @@ -77,9 +64,7 @@ impl BlockDeviceDescriptor for NvmeDeviceDescriptor { self.name.clone() } - fn into_handle( - self: Box, - ) -> Result, CoreError> { + fn into_handle(self: Box) -> Result, CoreError> { Ok(Box::new(NvmeDeviceHandle::create( &self.name, self.io_device_id, @@ -99,9 +84,7 @@ impl BlockDeviceDescriptor for NvmeDeviceDescriptor { )?)) } - async fn get_io_handle_nonblock( - &self, - ) -> 
Result, CoreError> { + async fn get_io_handle_nonblock(&self) -> Result, CoreError> { let h = NvmeDeviceHandle::create_async( &self.name, self.io_device_id, @@ -128,11 +111,11 @@ impl NvmeBlockDevice { warn!("read-only mode is not supported in NvmeBlockDevice::open_by_name()"); } - let controller = NVME_CONTROLLERS.lookup_by_name(name).ok_or( - CoreError::BdevNotFound { + let controller = NVME_CONTROLLERS + .lookup_by_name(name) + .ok_or(CoreError::BdevNotFound { name: name.to_string(), - }, - )?; + })?; let controller = controller.lock(); @@ -209,14 +192,13 @@ impl BlockDevice for NvmeBlockDevice { } async fn io_stats(&self) -> Result { - let carc = NVME_CONTROLLERS.lookup_by_name(&self.name).ok_or( - CoreError::BdevNotFound { + let carc = NVME_CONTROLLERS + .lookup_by_name(&self.name) + .ok_or(CoreError::BdevNotFound { name: self.name.to_string(), - }, - )?; + })?; - let (s, r) = - oneshot::channel::>(); + let (s, r) = oneshot::channel::>(); // Schedule async I/O stats collection and wait for the result. { let controller = carc.lock(); @@ -232,10 +214,7 @@ impl BlockDevice for NvmeBlockDevice { r.await.expect("Failed awaiting at io_stats") } - fn open( - &self, - read_write: bool, - ) -> Result, CoreError> { + fn open(&self, read_write: bool) -> Result, CoreError> { NvmeBlockDevice::open_by_name(&self.name, read_write) } @@ -243,15 +222,13 @@ impl BlockDevice for NvmeBlockDevice { Some(Box::new(NvmeDeviceIoController::new(self.name.to_string()))) } - fn add_event_listener( - &self, - listener: DeviceEventSink, - ) -> Result<(), CoreError> { - let controller = NVME_CONTROLLERS.lookup_by_name(&self.name).ok_or( - CoreError::BdevNotFound { - name: self.name.clone(), - }, - )?; + fn add_event_listener(&self, listener: DeviceEventSink) -> Result<(), CoreError> { + let controller = + NVME_CONTROLLERS + .lookup_by_name(&self.name) + .ok_or(CoreError::BdevNotFound { + name: self.name.clone(), + })?; let controller = controller.lock(); controller.register_device_listener(listener) } @@ -263,19 +240,16 @@ struct NvmeDeviceIoController { impl NvmeDeviceIoController { pub fn new(name: String) -> Self { - Self { - name, - } + Self { name } } - fn lookup_controller( - &self, - ) -> Result>>, CoreError> { - let controller = NVME_CONTROLLERS.lookup_by_name(&self.name).ok_or( - CoreError::BdevNotFound { - name: self.name.to_string(), - }, - )?; + fn lookup_controller(&self) -> Result>>, CoreError> { + let controller = + NVME_CONTROLLERS + .lookup_by_name(&self.name) + .ok_or(CoreError::BdevNotFound { + name: self.name.to_string(), + })?; Ok(controller) } } @@ -288,10 +262,7 @@ impl DeviceIoController for NvmeDeviceIoController { controller.get_timeout_action() } - fn set_timeout_action( - &mut self, - action: DeviceTimeoutAction, - ) -> Result<(), CoreError> { + fn set_timeout_action(&mut self, action: DeviceTimeoutAction) -> Result<(), CoreError> { let controller = self.lookup_controller()?; let mut controller = controller.lock(); diff --git a/io-engine/src/bdev/nvmx/handle.rs b/io-engine/src/bdev/nvmx/handle.rs index 9277338ab..472eaae47 100644 --- a/io-engine/src/bdev/nvmx/handle.rs +++ b/io-engine/src/bdev/nvmx/handle.rs @@ -12,34 +12,15 @@ use once_cell::sync::OnceCell; use spdk_rs::{ libspdk::{ - iovec, - nvme_cmd_cdw10_get, - spdk_get_io_channel, - spdk_io_channel, - spdk_nvme_cmd, - spdk_nvme_cpl, - spdk_nvme_ctrlr_cmd_admin_raw, - spdk_nvme_ctrlr_cmd_io_raw, - spdk_nvme_dsm_range, - spdk_nvme_ns_cmd_compare, - spdk_nvme_ns_cmd_comparev, - spdk_nvme_ns_cmd_dataset_management, - 
spdk_nvme_ns_cmd_flush, - spdk_nvme_ns_cmd_read, - spdk_nvme_ns_cmd_readv, - spdk_nvme_ns_cmd_write, - spdk_nvme_ns_cmd_write_zeroes, - spdk_nvme_ns_cmd_writev, - SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, + iovec, nvme_cmd_cdw10_get, spdk_get_io_channel, spdk_io_channel, spdk_nvme_cmd, + spdk_nvme_cpl, spdk_nvme_ctrlr_cmd_admin_raw, spdk_nvme_ctrlr_cmd_io_raw, + spdk_nvme_dsm_range, spdk_nvme_ns_cmd_compare, spdk_nvme_ns_cmd_comparev, + spdk_nvme_ns_cmd_dataset_management, spdk_nvme_ns_cmd_flush, spdk_nvme_ns_cmd_read, + spdk_nvme_ns_cmd_readv, spdk_nvme_ns_cmd_write, spdk_nvme_ns_cmd_write_zeroes, + spdk_nvme_ns_cmd_writev, SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, }, - nvme_admin_opc, - nvme_nvm_opcode, - nvme_reservation_register_action, - AsIoVecPtr, - DmaBuf, - DmaError, - IoVec, - NvmeStatus, + nvme_admin_opc, nvme_nvm_opcode, nvme_reservation_register_action, AsIoVecPtr, DmaBuf, + DmaError, IoVec, NvmeStatus, }; use crate::{ @@ -48,25 +29,12 @@ use crate::{ controller_inner::SpdkNvmeController, utils, utils::{nvme_cpl_is_pi_error, nvme_cpl_succeeded}, - NvmeBlockDevice, - NvmeIoChannel, - NvmeNamespace, - NvmeSnapshotMessage, - NvmeSnapshotMessageV1, + NvmeBlockDevice, NvmeIoChannel, NvmeNamespace, NvmeSnapshotMessage, NvmeSnapshotMessageV1, NVME_CONTROLLERS, }, core::{ - mempool::MemoryPool, - BlockDevice, - BlockDeviceHandle, - CoreError, - IoCompletionCallback, - IoCompletionCallbackArg, - IoCompletionStatus, - IoType, - Reactors, - ReadOptions, - SnapshotParams, + mempool::MemoryPool, BlockDevice, BlockDeviceHandle, CoreError, IoCompletionCallback, + IoCompletionCallbackArg, IoCompletionStatus, IoType, Reactors, ReadOptions, SnapshotParams, }, ffihelper::{cb_arg, done_cb, FfiResult}, subsys, @@ -74,10 +42,7 @@ use crate::{ #[cfg(feature = "fault-injection")] use crate::core::fault_injection::{ - inject_completion_error, - inject_submission_error, - FaultDomain, - InjectIoCtx, + inject_completion_error, inject_submission_error, FaultDomain, InjectIoCtx, }; use super::NvmeIoChannelInner; @@ -227,33 +192,22 @@ impl NvmeDeviceHandle { } } - fn get_nvme_device( - name: &str, - ns: &Arc, - ) -> Box { + fn get_nvme_device(name: &str, ns: &Arc) -> Box { Box::new(NvmeBlockDevice::from_ns(name, ns.clone())) } #[inline] - fn bytes_to_blocks( - &self, - offset_bytes: u64, - num_bytes: u64, - ) -> (bool, u64, u64) { + fn bytes_to_blocks(&self, offset_bytes: u64, num_bytes: u64) -> (bool, u64, u64) { let offset_blocks = offset_bytes / self.block_len; let num_blocks = num_bytes / self.block_len; - let alignment = - (offset_bytes % self.block_len) | (num_bytes % self.block_len); + let alignment = (offset_bytes % self.block_len) | (num_bytes % self.block_len); // TODO: Optimize for ^2. (alignment == 0, offset_blocks, num_blocks) } } -extern "C" fn nvme_admin_passthru_done( - ctx: *mut c_void, - cpl: *const spdk_nvme_cpl, -) { +extern "C" fn nvme_admin_passthru_done(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { // In case of admin command failures error code is transferred via cdw0. 
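The `bytes_to_blocks` helper reformatted above validates alignment with a bitwise OR of the two remainders: the OR is zero only if both the offset and the length are multiples of the block size. The same function in isolation, with a couple of checks:

```rust
/// Convert a byte offset/length pair into block units, flagging unaligned
/// requests, following the diff's
/// `(offset_bytes % block_len) | (num_bytes % block_len)` trick.
fn bytes_to_blocks(block_len: u64, offset_bytes: u64, num_bytes: u64) -> (bool, u64, u64) {
    let offset_blocks = offset_bytes / block_len;
    let num_blocks = num_bytes / block_len;
    let alignment = (offset_bytes % block_len) | (num_bytes % block_len);
    (alignment == 0, offset_blocks, num_blocks)
}

fn main() {
    // 4096-byte I/O at offset 8192 on a 512-byte-block device: aligned.
    assert_eq!(bytes_to_blocks(512, 8192, 4096), (true, 16, 8));
    // Offset 100 is not a multiple of 512, so the caller rejects the request.
    assert_eq!(bytes_to_blocks(512, 100, 4096), (false, 0, 8));
}
```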
let status = unsafe { (*cpl).cdw0 }; @@ -395,26 +349,17 @@ extern "C" fn nvme_io_done(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { complete_nvme_command(nvme_io_ctx, cpl); } -extern "C" fn nvme_async_io_completion( - ctx: *mut c_void, - cpl: *const spdk_nvme_cpl, -) { +extern "C" fn nvme_async_io_completion(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { done_cb(ctx, NvmeStatus::from(cpl)); } -extern "C" fn nvme_unmap_completion( - ctx: *mut c_void, - cpl: *const spdk_nvme_cpl, -) { +extern "C" fn nvme_unmap_completion(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { let nvme_io_ctx = ctx as *mut NvmeIoCtx; trace!("Async unmap completed"); complete_nvme_command(nvme_io_ctx, cpl); } -extern "C" fn nvme_flush_completion( - ctx: *mut c_void, - cpl: *const spdk_nvme_cpl, -) { +extern "C" fn nvme_flush_completion(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { let nvme_io_ctx = ctx as *mut NvmeIoCtx; trace!("Async flush completed"); complete_nvme_command(nvme_io_ctx, cpl); @@ -430,33 +375,18 @@ fn check_io_args( // As of now, we assume that I/O vector is fully prepared by the caller. if iovs.is_empty() { error!("empty I/O vector"); - return Err(io_type_to_err( - op, - libc::EINVAL, - offset_blocks, - num_blocks, - )); + return Err(io_type_to_err(op, libc::EINVAL, offset_blocks, num_blocks)); } if !iovs[0].is_initialized() { error!("I/O vector is not initialized"); - return Err(io_type_to_err( - op, - libc::EINVAL, - offset_blocks, - num_blocks, - )); + return Err(io_type_to_err(op, libc::EINVAL, offset_blocks, num_blocks)); } Ok(()) } -fn io_type_to_err( - op: IoType, - errno: i32, - offset_blocks: u64, - num_blocks: u64, -) -> CoreError { +fn io_type_to_err(op: IoType, errno: i32, offset_blocks: u64, num_blocks: u64) -> CoreError { assert!(errno > 0, "Errno code must be provided"); let source = Errno::from_raw(errno); @@ -487,9 +417,7 @@ fn io_type_to_err( }, _ => { warn!("Unsupported I/O operation: {:?}", op); - CoreError::NotSupported { - source, - } + CoreError::NotSupported { source } } } } @@ -498,8 +426,9 @@ fn io_type_to_err( /// This must be called before the first I/O operations take place. pub fn nvme_io_ctx_pool_init(size: u64) { NVME_IOCTX_POOL.get_or_init(|| { - MemoryPool::::create("nvme_ctrl_io_ctx", size) - .expect("Failed to create memory pool [nvme_ctrl_io_ctx] for NVMe controller I/O contexts") + MemoryPool::::create("nvme_ctrl_io_ctx", size).expect( + "Failed to create memory pool [nvme_ctrl_io_ctx] for NVMe controller I/O contexts", + ) }); } @@ -511,9 +440,8 @@ fn alloc_nvme_io_ctx( num_blocks: u64, ) -> Result<*mut NvmeIoCtx, CoreError> { let pool = NVME_IOCTX_POOL.get().unwrap(); - pool.get(ctx).ok_or_else(|| { - io_type_to_err(op, libc::ENOMEM, offset_blocks, num_blocks) - }) + pool.get(ctx) + .ok_or_else(|| io_type_to_err(op, libc::ENOMEM, offset_blocks, num_blocks)) } /// Release the memory used by the NVMe controller I/O context back to the pool. 
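The I/O-context pool above is created once, up front, so the I/O hot path never allocates. A sketch of the init-once pattern using `std::sync::OnceLock` in place of the `once_cell::sync::OnceCell` the diff uses; the `MemoryPool` here is a plain stand-in for the real DPDK-backed pool, not its API:

```rust
use std::sync::OnceLock;

/// Stand-in for the diff's `MemoryPool`: a real pool hands out fixed-size
/// chunks on the I/O path without touching the global allocator.
struct MemoryPool {
    name: &'static str,
    size: u64,
}

impl MemoryPool {
    fn create(name: &'static str, size: u64) -> Option<MemoryPool> {
        Some(MemoryPool { name, size })
    }
}

// Global pool, initialized once before the first I/O is issued.
static IOCTX_POOL: OnceLock<MemoryPool> = OnceLock::new();

fn pool_init(size: u64) {
    IOCTX_POOL.get_or_init(|| {
        MemoryPool::create("nvme_ctrl_io_ctx", size)
            .expect("Failed to create memory pool for NVMe controller I/O contexts")
    });
}

fn main() {
    pool_init(1024);
    let pool = IOCTX_POOL.get().unwrap();
    println!("pool {} holds {} contexts", pool.name, pool.size);
}
```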
@@ -555,9 +483,7 @@ fn reset_callback(success: bool, arg: *mut c_void) { let status = if success { IoCompletionStatus::Success } else { - IoCompletionStatus::NvmeError(NvmeStatus::Generic( - SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, - )) + IoCompletionStatus::NvmeError(NvmeStatus::Generic(SPDK_NVME_SC_INTERNAL_DEVICE_ERROR)) }; (ctx.cb)(&*ctx.device, status, ctx.cb_arg); @@ -573,13 +499,8 @@ impl BlockDeviceHandle for NvmeDeviceHandle { DmaBuf::new(size, self.ns.alignment()) } - async fn read_at( - &self, - offset: u64, - buffer: &mut DmaBuf, - ) -> Result { - let (valid, offset_blocks, num_blocks) = - self.bytes_to_blocks(offset, buffer.len()); + async fn read_at(&self, offset: u64, buffer: &mut DmaBuf) -> Result { + let (valid, offset_blocks, num_blocks) = self.bytes_to_blocks(offset, buffer.len()); trace!( "{} read(offset={}, size={})", @@ -595,9 +516,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { offset, buffer.len() ); - return Err(CoreError::InvalidOffset { - offset, - }); + return Err(CoreError::InvalidOffset { offset }); } let inner = NvmeIoChannel::inner_from_channel(self.io_channel.as_ptr()); @@ -632,19 +551,15 @@ impl BlockDeviceHandle for NvmeDeviceHandle { inner.account_io(); let ret = match r.await.expect("Failed awaiting at read_at()") { NvmeStatus::SUCCESS => { - inner.get_io_stats_controller().account_block_io( - IoType::Read, - 1, - num_blocks, - ); + inner + .get_io_stats_controller() + .account_block_io(IoType::Read, 1, num_blocks); Ok(buffer.len()) } - NvmeStatus::UNWRITTEN_BLOCK => { - Err(CoreError::ReadingUnallocatedBlock { - offset, - len: buffer.len(), - }) - } + NvmeStatus::UNWRITTEN_BLOCK => Err(CoreError::ReadingUnallocatedBlock { + offset, + len: buffer.len(), + }), status => Err(CoreError::ReadFailed { status: IoCompletionStatus::NvmeError(status), offset, @@ -655,13 +570,8 @@ impl BlockDeviceHandle for NvmeDeviceHandle { ret } - async fn write_at( - &self, - offset: u64, - buffer: &DmaBuf, - ) -> Result { - let (valid, offset_blocks, num_blocks) = - self.bytes_to_blocks(offset, buffer.len()); + async fn write_at(&self, offset: u64, buffer: &DmaBuf) -> Result { + let (valid, offset_blocks, num_blocks) = self.bytes_to_blocks(offset, buffer.len()); trace!( "{} write(offset={}, size={})", @@ -677,9 +587,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { offset, buffer.len() ); - return Err(CoreError::InvalidOffset { - offset, - }); + return Err(CoreError::InvalidOffset { offset }); } let inner = NvmeIoChannel::inner_from_channel(self.io_channel.as_ptr()); @@ -714,11 +622,9 @@ impl BlockDeviceHandle for NvmeDeviceHandle { inner.account_io(); let ret = match r.await.expect("Failed awaiting at write_at()") { NvmeStatus::SUCCESS => { - inner.get_io_stats_controller().account_block_io( - IoType::Write, - 1, - num_blocks, - ); + inner + .get_io_stats_controller() + .account_block_io(IoType::Write, 1, num_blocks); Ok(buffer.len()) } status => Err(CoreError::WriteFailed { @@ -922,12 +828,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { let inner = NvmeIoChannel::inner_from_channel(channel); // Make sure channel allows I/O. 
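The `read_at` hunk below maps the NVMe completion status into a typed result, with a dedicated error for reads of unwritten, thin-provisioned blocks. A simplified sketch of that mapping; the real `NvmeStatus` and `CoreError` carry considerably more detail:

```rust
#[derive(Debug)]
enum NvmeStatus {
    Success,
    UnwrittenBlock,
    Other(u32),
}

#[derive(Debug)]
enum CoreError {
    ReadingUnallocatedBlock { offset: u64, len: u64 },
    ReadFailed { offset: u64, len: u64 },
}

/// Fold a completion status into the read result: success returns the byte
/// count, unwritten blocks get a dedicated error, everything else is a
/// generic read failure.
fn read_result(status: NvmeStatus, offset: u64, len: u64) -> Result<u64, CoreError> {
    match status {
        NvmeStatus::Success => Ok(len),
        NvmeStatus::UnwrittenBlock => Err(CoreError::ReadingUnallocatedBlock { offset, len }),
        NvmeStatus::Other(_) => Err(CoreError::ReadFailed { offset, len }),
    }
}

fn main() {
    assert!(matches!(read_result(NvmeStatus::Success, 0, 4096), Ok(4096)));
    assert!(read_result(NvmeStatus::UnwrittenBlock, 0, 4096).is_err());
}
```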
- check_channel_for_io( - IoType::Compare, - inner, - offset_blocks, - num_blocks, - )?; + check_channel_for_io(IoType::Compare, inner, offset_blocks, num_blocks)?; let bio = alloc_nvme_io_ctx( IoType::Compare, @@ -994,11 +895,12 @@ impl BlockDeviceHandle for NvmeDeviceHandle { cb: IoCompletionCallback, cb_arg: IoCompletionCallbackArg, ) -> Result<(), CoreError> { - let controller = NVME_CONTROLLERS.lookup_by_name(&self.name).ok_or( - CoreError::BdevNotFound { - name: self.name.to_string(), - }, - )?; + let controller = + NVME_CONTROLLERS + .lookup_by_name(&self.name) + .ok_or(CoreError::BdevNotFound { + name: self.name.to_string(), + })?; let mut controller = controller.lock(); let ctx = Box::new(ResetCtx { @@ -1008,11 +910,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { }); // Schedule asynchronous controller reset. - controller.reset( - reset_callback, - Box::into_raw(ctx) as *mut c_void, - false, - ) + controller.reset(reset_callback, Box::into_raw(ctx) as *mut c_void, false) } fn flush_io( @@ -1072,8 +970,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { cb: IoCompletionCallback, cb_arg: IoCompletionCallbackArg, ) -> Result<(), CoreError> { - let num_ranges = - num_blocks.div_ceil(SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS); + let num_ranges = num_blocks.div_ceil(SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS); if num_ranges > SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES { return Err(CoreError::UnmapDispatch { @@ -1108,12 +1005,10 @@ impl BlockDeviceHandle for NvmeDeviceHandle { num_blocks, )?; - let l = Layout::array::( - SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES as usize, - ) - .unwrap(); - let dsm_ranges = - unsafe { std::alloc::alloc(l) as *mut spdk_nvme_dsm_range }; + let l = + Layout::array::(SPDK_NVME_DATASET_MANAGEMENT_MAX_RANGES as usize) + .unwrap(); + let dsm_ranges = unsafe { std::alloc::alloc(l) as *mut spdk_nvme_dsm_range }; let mut remaining = num_blocks; let mut offset = offset_blocks; @@ -1124,8 +1019,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { unsafe { let range = spdk_nvme_dsm_range { attributes: zeroed(), - length: SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS - as u32, + length: SPDK_NVME_DATASET_MANAGEMENT_RANGE_MAX_BLOCKS as u32, starting_lba: offset, }; @@ -1181,12 +1075,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { let inner = NvmeIoChannel::inner_from_channel(channel); // Make sure channel allows I/O - check_channel_for_io( - IoType::WriteZeros, - inner, - offset_blocks, - num_blocks, - )?; + check_channel_for_io(IoType::WriteZeros, inner, offset_blocks, num_blocks)?; let bio = alloc_nvme_io_ctx( IoType::WriteZeros, @@ -1231,24 +1120,19 @@ impl BlockDeviceHandle for NvmeDeviceHandle { } } - async fn create_snapshot( - &self, - snapshot: SnapshotParams, - ) -> Result { + async fn create_snapshot(&self, snapshot: SnapshotParams) -> Result { let mut cmd = spdk_nvme_cmd::default(); cmd.set_opc(nvme_admin_opc::CREATE_SNAPSHOT.into()); let now = subsys::set_snapshot_time(&mut cmd); let msg = NvmeSnapshotMessage::V1(NvmeSnapshotMessageV1::new(snapshot)); - let encoded_msg = bincode::serialize(&msg) - .expect("Failed to serialize snapshot message"); + let encoded_msg = bincode::serialize(&msg).expect("Failed to serialize snapshot message"); - let mut payload = - self.dma_malloc(encoded_msg.len() as u64).map_err(|_| { - CoreError::DmaAllocationFailed { - size: encoded_msg.len() as u64, - } - })?; + let mut payload = self.dma_malloc(encoded_msg.len() as u64).map_err(|_| { + CoreError::DmaAllocationFailed { + size: encoded_msg.len() as u64, + } + })?; payload 
.as_mut_slice() @@ -1323,11 +1207,8 @@ impl BlockDeviceHandle for NvmeDeviceHandle { } async fn nvme_identify_ctrlr(&self) -> Result { - let mut buf = DmaBuf::new(4096, 8).map_err(|_e| { - CoreError::DmaAllocationFailed { - size: 4096, - } - })?; + let mut buf = + DmaBuf::new(4096, 8).map_err(|_e| CoreError::DmaAllocationFailed { size: 4096 })?; let mut cmd = spdk_nvme_cmd::default(); cmd.set_opc(nvme_admin_opc::IDENTIFY.into()); @@ -1359,8 +1240,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { .cdw10_bits .resv_register .set_cptpl(cptpl.into()); - if register_action == nvme_reservation_register_action::REPLACE_KEY - { + if register_action == nvme_reservation_register_action::REPLACE_KEY { cmd.__bindgen_anon_1.cdw10_bits.resv_register.set_iekey(1); } } @@ -1427,11 +1307,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { /// NVMe Reservation Report /// cdw11: bit 0- Extended Data Structure - async fn nvme_resv_report( - &self, - cdw11: u32, - buffer: &mut DmaBuf, - ) -> Result<(), CoreError> { + async fn nvme_resv_report(&self, cdw11: u32, buffer: &mut DmaBuf) -> Result<(), CoreError> { let mut cmd = spdk_nvme_cmd::default(); cmd.set_opc(nvme_nvm_opcode::RESERVATION_REPORT.into()); cmd.nsid = 0x1; @@ -1447,10 +1323,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { nvme_cmd: &spdk_nvme_cmd, buffer: Option<&mut DmaBuf>, ) -> Result<(), CoreError> { - extern "C" fn nvme_io_passthru_done( - ctx: *mut c_void, - cpl: *const spdk_nvme_cpl, - ) { + extern "C" fn nvme_io_passthru_done(ctx: *mut c_void, cpl: *const spdk_nvme_cpl) { debug!( "IO passthrough completed, succeeded={}", nvme_cpl_succeeded(cpl) @@ -1494,8 +1367,7 @@ impl BlockDeviceHandle for NvmeDeviceHandle { })?; inner.account_io(); - let ret = if r.await.expect("Failed awaiting NVMe IO passthru command") - { + let ret = if r.await.expect("Failed awaiting NVMe IO passthru command") { debug!("io_passthru() done"); Ok(()) } else { @@ -1509,11 +1381,12 @@ impl BlockDeviceHandle for NvmeDeviceHandle { /// Returns NVMe extended host identifier async fn host_id(&self) -> Result<[u8; 16], CoreError> { - let controller = NVME_CONTROLLERS.lookup_by_name(&self.name).ok_or( - CoreError::BdevNotFound { - name: self.name.to_string(), - }, - )?; + let controller = + NVME_CONTROLLERS + .lookup_by_name(&self.name) + .ok_or(CoreError::BdevNotFound { + name: self.name.to_string(), + })?; let controller = controller.lock(); let inner = controller.controller().ok_or(CoreError::BdevNotFound { name: self.name.to_string(), diff --git a/io-engine/src/bdev/nvmx/mod.rs b/io-engine/src/bdev/nvmx/mod.rs index 238a5da2f..f50362c66 100644 --- a/io-engine/src/bdev/nvmx/mod.rs +++ b/io-engine/src/bdev/nvmx/mod.rs @@ -40,15 +40,11 @@ pub struct NVMeCtlrList<'a> { } impl<'a> NVMeCtlrList<'a> { - fn write_lock( - &self, - ) -> RwLockWriteGuard>>>> { + fn write_lock(&self) -> RwLockWriteGuard>>>> { self.entries.write() } - fn read_lock( - &self, - ) -> RwLockReadGuard>>>> { + fn read_lock(&self) -> RwLockReadGuard>>>> { self.entries.read() } @@ -63,16 +59,11 @@ impl<'a> NVMeCtlrList<'a> { /// remove a NVMe controller from the list, when the last reference to the /// controller is dropped, the controller will be freed. 
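The snapshot path above serializes a versioned message with `bincode` before copying the bytes into a DMA buffer sized `encoded_msg.len()` and shipping it in a vendor-specific admin command. A round-trip sketch assuming `serde` with the derive feature and `bincode` 1.x (the crate the diff itself calls); the field names are hypothetical:

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize, Debug)]
struct SnapshotParamsV1 {
    name: String,
    txn_id: String,
}

/// Versioned envelope, so older peers can keep decoding newer payloads.
#[derive(Serialize, Deserialize, Debug)]
enum SnapshotMessage {
    V1(SnapshotParamsV1),
}

fn main() {
    let msg = SnapshotMessage::V1(SnapshotParamsV1 {
        name: "snap-1".into(),
        txn_id: "txn-42".into(),
    });

    // Encode before sizing and filling the (here omitted) DMA buffer.
    let encoded: Vec<u8> =
        bincode::serialize(&msg).expect("Failed to serialize snapshot message");
    println!("payload is {} bytes", encoded.len());

    let decoded: SnapshotMessage = bincode::deserialize(&encoded).unwrap();
    println!("round-tripped: {decoded:?}");
}
```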
- pub fn remove_by_name + Display>( - &self, - name: T, - ) -> Result { + pub fn remove_by_name + Display>(&self, name: T) -> Result { let mut entries = self.write_lock(); if !entries.contains_key(&name.to_string()) { - return Err(CoreError::BdevNotFound { - name: name.into(), - }); + return Err(CoreError::BdevNotFound { name: name.into() }); } // Remove 'controller name -> controller' mapping. @@ -89,11 +80,7 @@ impl<'a> NVMeCtlrList<'a> { /// insert a controller into the list using the key, note that different /// keys may refer to the same controller - pub fn insert_controller( - &self, - cid: String, - ctl: Arc>>, - ) { + pub fn insert_controller(&self, cid: String, ctl: Arc>>) { let mut entries = self.write_lock(); entries.insert(cid, ctl); } @@ -112,15 +99,12 @@ impl<'a> NVMeCtlrList<'a> { impl Default for NVMeCtlrList<'_> { fn default() -> Self { Self { - entries: RwLock::new( - HashMap::>>::new(), - ), + entries: RwLock::new(HashMap::>>::new()), } } } -pub static NVME_CONTROLLERS: Lazy = - Lazy::new(NVMeCtlrList::default); +pub static NVME_CONTROLLERS: Lazy = Lazy::new(NVMeCtlrList::default); pub fn nvme_bdev_running_config() -> &'static NvmeBdevOpts { &Config::get().nvme_bdev_opts diff --git a/io-engine/src/bdev/nvmx/namespace.rs b/io-engine/src/bdev/nvmx/namespace.rs index 8123d6a4c..b3bdd855d 100644 --- a/io-engine/src/bdev/nvmx/namespace.rs +++ b/io-engine/src/bdev/nvmx/namespace.rs @@ -1,17 +1,10 @@ use std::ptr::NonNull; use spdk_rs::libspdk::{ - spdk_nvme_ns, - spdk_nvme_ns_get_extended_sector_size, - spdk_nvme_ns_get_flags, - spdk_nvme_ns_get_md_size, - spdk_nvme_ns_get_num_sectors, - spdk_nvme_ns_get_optimal_io_boundary, - spdk_nvme_ns_get_size, - spdk_nvme_ns_get_uuid, - spdk_nvme_ns_supports_compare, - SPDK_NVME_NS_DEALLOCATE_SUPPORTED, - SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED, + spdk_nvme_ns, spdk_nvme_ns_get_extended_sector_size, spdk_nvme_ns_get_flags, + spdk_nvme_ns_get_md_size, spdk_nvme_ns_get_num_sectors, spdk_nvme_ns_get_optimal_io_boundary, + spdk_nvme_ns_get_size, spdk_nvme_ns_get_uuid, spdk_nvme_ns_supports_compare, + SPDK_NVME_NS_DEALLOCATE_SUPPORTED, SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED, }; #[derive(Debug)] @@ -35,10 +28,7 @@ impl NvmeNamespace { } pub fn uuid(&self) -> uuid::Uuid { - spdk_rs::Uuid::legacy_from_ptr(unsafe { - spdk_nvme_ns_get_uuid(self.0.as_ptr()) - }) - .into() + spdk_rs::Uuid::legacy_from_ptr(unsafe { spdk_nvme_ns_get_uuid(self.0.as_ptr()) }).into() } pub fn supports_compare(&self) -> bool { @@ -46,19 +36,11 @@ impl NvmeNamespace { } pub fn supports_deallocate(&self) -> bool { - unsafe { - spdk_nvme_ns_get_flags(self.0.as_ptr()) - & SPDK_NVME_NS_DEALLOCATE_SUPPORTED - > 0 - } + unsafe { spdk_nvme_ns_get_flags(self.0.as_ptr()) & SPDK_NVME_NS_DEALLOCATE_SUPPORTED > 0 } } pub fn supports_write_zeroes(&self) -> bool { - unsafe { - spdk_nvme_ns_get_flags(self.0.as_ptr()) - & SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED - > 0 - } + unsafe { spdk_nvme_ns_get_flags(self.0.as_ptr()) & SPDK_NVME_NS_WRITE_ZEROES_SUPPORTED > 0 } } pub fn alignment(&self) -> u64 { diff --git a/io-engine/src/bdev/nvmx/poll_group.rs b/io-engine/src/bdev/nvmx/poll_group.rs index a03200680..d31e1ba08 100644 --- a/io-engine/src/bdev/nvmx/poll_group.rs +++ b/io-engine/src/bdev/nvmx/poll_group.rs @@ -1,11 +1,8 @@ use std::{os::raw::c_void, ptr::NonNull}; use spdk_rs::libspdk::{ - spdk_nvme_poll_group, - spdk_nvme_poll_group_add, - spdk_nvme_poll_group_create, - spdk_nvme_poll_group_destroy, - spdk_nvme_poll_group_remove, + spdk_nvme_poll_group, spdk_nvme_poll_group_add, 
spdk_nvme_poll_group_create, + spdk_nvme_poll_group_destroy, spdk_nvme_poll_group_remove, }; use crate::core::CoreError; @@ -17,10 +14,7 @@ pub(super) struct PollGroup(NonNull); impl PollGroup { /// Creates a poll group. - pub(super) fn create( - ctx: *mut c_void, - ctrlr_name: &str, - ) -> Result { + pub(super) fn create(ctx: *mut c_void, ctrlr_name: &str) -> Result { let poll_group: *mut spdk_nvme_poll_group = unsafe { spdk_nvme_poll_group_create(ctx, std::ptr::null_mut()) }; diff --git a/io-engine/src/bdev/nvmx/qpair.rs b/io-engine/src/bdev/nvmx/qpair.rs index a9653300e..d82d7be46 100644 --- a/io-engine/src/bdev/nvmx/qpair.rs +++ b/io-engine/src/bdev/nvmx/qpair.rs @@ -9,16 +9,10 @@ use std::{ use futures::channel::oneshot; use spdk_rs::libspdk::{ - nvme_qpair_abort_all_queued_reqs, - nvme_transport_qpair_abort_reqs, - spdk_nvme_ctrlr, - spdk_nvme_ctrlr_alloc_io_qpair, - spdk_nvme_ctrlr_connect_io_qpair, - spdk_nvme_ctrlr_disconnect_io_qpair, - spdk_nvme_ctrlr_free_io_qpair, - spdk_nvme_ctrlr_get_default_io_qpair_opts, - spdk_nvme_io_qpair_opts, - spdk_nvme_qpair, + nvme_qpair_abort_all_queued_reqs, nvme_transport_qpair_abort_reqs, spdk_nvme_ctrlr, + spdk_nvme_ctrlr_alloc_io_qpair, spdk_nvme_ctrlr_connect_io_qpair, + spdk_nvme_ctrlr_disconnect_io_qpair, spdk_nvme_ctrlr_free_io_qpair, + spdk_nvme_ctrlr_get_default_io_qpair_opts, spdk_nvme_io_qpair_opts, spdk_nvme_qpair, spdk_nvme_qpair_set_abort_dnr, }; @@ -29,13 +23,10 @@ use std::{os::raw::c_void, time::Duration}; #[cfg(feature = "spdk-async-qpair-connect")] use spdk_rs::{ libspdk::{ - spdk_nvme_ctrlr_connect_io_qpair_async, - spdk_nvme_ctrlr_io_qpair_connect_poll_async, + spdk_nvme_ctrlr_connect_io_qpair_async, spdk_nvme_ctrlr_io_qpair_connect_poll_async, spdk_nvme_io_qpair_connect_ctx, }, - Poller, - PollerBuilder, - UnsafeRef, + Poller, PollerBuilder, UnsafeRef, }; #[cfg(feature = "spdk-async-qpair-connect")] @@ -46,9 +37,7 @@ use crate::core::CoreError; use super::{nvme_bdev_running_config, SpdkNvmeController}; /// I/O QPair state. -#[derive( - Debug, Serialize, Clone, Copy, PartialEq, PartialOrd, strum_macros::Display, -)] +#[derive(Debug, Serialize, Clone, Copy, PartialEq, PartialOrd, strum_macros::Display)] pub enum QPairState { /// QPair is not connected. Disconnected, @@ -97,9 +86,7 @@ impl Drop for QPair { } /// Returns default qpair options. -fn get_default_options( - ctrlr_handle: SpdkNvmeController, -) -> spdk_nvme_io_qpair_opts { +fn get_default_options(ctrlr_handle: SpdkNvmeController) -> spdk_nvme_io_qpair_opts { let mut opts: spdk_nvme_io_qpair_opts = unsafe { zeroed() }; let default_opts = nvme_bdev_running_config(); @@ -111,8 +98,7 @@ fn get_default_options( ) }; - opts.io_queue_requests = - max(opts.io_queue_requests, default_opts.io_queue_requests); + opts.io_queue_requests = max(opts.io_queue_requests, default_opts.io_queue_requests); opts.create_only = true; // Always assume async_mode is enabled instread of @@ -414,9 +400,7 @@ impl Connection<'_> { // Error occured, so SPDK won't call connection callback. // Notify about failure and stop the poller. let conn = unsafe { Box::from_raw(arg.as_ptr()) }; - conn.complete(Err(CoreError::OpenBdev { - source: e, - })); + conn.complete(Err(CoreError::OpenBdev { source: e })); 1 // stop the poller } } @@ -443,12 +427,7 @@ impl Connection<'_> { // Poll the probe. In the case of a success or an error, the probe will // be freed by SPDK. 
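`get_default_options` below asks the driver for its defaults, then raises `io_queue_requests` to the configured value and sets `create_only` so the qpair can be connected later (possibly asynchronously). The merge step in miniature; the struct is illustrative, not `spdk_nvme_io_qpair_opts`, and the numbers are examples:

```rust
/// Illustrative options struct; the real values come from
/// `spdk_nvme_ctrlr_get_default_io_qpair_opts` plus the bdev config.
#[derive(Debug)]
struct IoQpairOpts {
    io_queue_requests: u32,
    create_only: bool,
}

fn qpair_options(driver_default: u32, configured_min: u32) -> IoQpairOpts {
    IoQpairOpts {
        // Take whichever queue depth is larger, as the diff does with
        // `max(opts.io_queue_requests, default_opts.io_queue_requests)`.
        io_queue_requests: driver_default.max(configured_min),
        // Create the qpair without connecting it; connection is deferred.
        create_only: true,
    }
}

fn main() {
    let opts = qpair_options(128, 512);
    println!("{opts:?}"); // io_queue_requests = 512
}
```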
- let res = unsafe { - spdk_nvme_ctrlr_io_qpair_connect_poll_async( - self.qpair(), - self.probe, - ) - }; + let res = unsafe { spdk_nvme_ctrlr_io_qpair_connect_poll_async(self.qpair(), self.probe) }; match res { // Connection is complete, callback has been called and this @@ -531,10 +510,7 @@ impl Connection<'_> { /// Async connection callback. #[cfg(feature = "spdk-async-qpair-connect")] -extern "C" fn qpair_connect_cb( - _qpair: *mut spdk_nvme_qpair, - cb_arg: *mut c_void, -) { +extern "C" fn qpair_connect_cb(_qpair: *mut spdk_nvme_qpair, cb_arg: *mut c_void) { let ctx = unsafe { Box::from_raw(cb_arg as *mut Connection) }; ctx.complete(Ok(())); } diff --git a/io-engine/src/bdev/nvmx/snapshot.rs b/io-engine/src/bdev/nvmx/snapshot.rs index 7e6f83ded..196f0740a 100755 --- a/io-engine/src/bdev/nvmx/snapshot.rs +++ b/io-engine/src/bdev/nvmx/snapshot.rs @@ -10,9 +10,7 @@ pub struct NvmeSnapshotMessageV1 { impl NvmeSnapshotMessageV1 { /// Create a V1 snapshot creation message. pub fn new(params: SnapshotParams) -> Self { - Self { - params, - } + Self { params } } /// Get snapshot params payload. diff --git a/io-engine/src/bdev/nvmx/uri.rs b/io-engine/src/bdev/nvmx/uri.rs index 1a9fcd513..dadb5dfd5 100644 --- a/io-engine/src/bdev/nvmx/uri.rs +++ b/io-engine/src/bdev/nvmx/uri.rs @@ -24,27 +24,19 @@ use controller::options::NvmeControllerOpts; use spdk_rs::{ libspdk::{ - spdk_nvme_connect_async, - spdk_nvme_ctrlr, - spdk_nvme_ctrlr_opts, - spdk_nvme_probe_poll_async, + spdk_nvme_connect_async, spdk_nvme_ctrlr, spdk_nvme_ctrlr_opts, spdk_nvme_probe_poll_async, spdk_nvme_transport_id, }, - Poller, - PollerBuilder, + Poller, PollerBuilder, }; use crate::{ bdev::{ nvmx::{ - controller, - controller_inner::SpdkNvmeController, - NvmeControllerState, - NVME_CONTROLLERS, + controller, controller_inner::SpdkNvmeController, NvmeControllerState, NVME_CONTROLLERS, }, util::uri, - CreateDestroy, - GetName, + CreateDestroy, GetName, }, bdev_api::{self, BdevError}, constants::NVME_NQN_PREFIX, @@ -63,8 +55,7 @@ extern "C" fn connect_attach_cb( ctrlr: *mut spdk_nvme_ctrlr, _opts: *const spdk_nvme_ctrlr_opts, ) { - let context = - unsafe { &mut *(_cb_ctx as *const _ as *mut NvmeControllerContext) }; + let context = unsafe { &mut *(_cb_ctx as *const _ as *mut NvmeControllerContext) }; // Normally, the attach handler is called by the poller after // the controller is connected. In such a case 'spdk_nvme_probe_poll_async' @@ -87,8 +78,7 @@ extern "C" fn connect_attach_cb( // Instantiate the controller in case attach succeeded. controller::connected_attached_cb( context, - SpdkNvmeController::from_ptr(ctrlr) - .expect("probe callback with NULL ptr"), + SpdkNvmeController::from_ptr(ctrlr).expect("probe callback with NULL ptr"), ); } } @@ -140,47 +130,39 @@ impl TryFrom<&Url> for NvmfDeviceTemplate { }); } - let mut parameters: HashMap = - url.query_pairs().into_owned().collect(); + let mut parameters: HashMap = url.query_pairs().into_owned().collect(); let mut prchk_flags: u32 = 0; if let Some(value) = parameters.remove("reftag") { - if uri::boolean(&value, true).context( - bdev_api::BoolParamParseFailed { - uri: url.to_string(), - parameter: String::from("reftag"), - value: value.to_string(), - }, - )? { - prchk_flags |= - spdk_rs::libspdk::SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; + if uri::boolean(&value, true).context(bdev_api::BoolParamParseFailed { + uri: url.to_string(), + parameter: String::from("reftag"), + value: value.to_string(), + })? 
{ + prchk_flags |= spdk_rs::libspdk::SPDK_NVME_IO_FLAGS_PRCHK_REFTAG; } } if let Some(value) = parameters.remove("guard") { - if uri::boolean(&value, true).context( - bdev_api::BoolParamParseFailed { - uri: url.to_string(), - parameter: String::from("guard"), - value: value.to_string(), - }, - )? { + if uri::boolean(&value, true).context(bdev_api::BoolParamParseFailed { + uri: url.to_string(), + parameter: String::from("guard"), + value: value.to_string(), + })? { prchk_flags |= spdk_rs::libspdk::SPDK_NVME_IO_FLAGS_PRCHK_GUARD; } } - let uuid = uri::uuid(parameters.remove("uuid")).context( - bdev_api::UuidParamParseFailed { + let uuid = + uri::uuid(parameters.remove("uuid")).context(bdev_api::UuidParamParseFailed { uri: url.to_string(), - }, - )?; + })?; let hostnqn = parameters.remove("hostnqn"); Ok(NvmfDeviceTemplate { - name: url[url::Position::BeforeHost .. url::Position::AfterPath] - .to_string(), + name: url[url::Position::BeforeHost..url::Position::AfterPath].to_string(), alias: url.to_string(), host: host.to_string(), port: url.port().unwrap_or(DEFAULT_NVMF_PORT), @@ -222,29 +204,23 @@ impl NvmeControllerContext<'_> { // HOSTNQN is provided. let mut opts = controller::options::Builder::new() - .with_keep_alive_timeout_ms( - Config::get().nvme_bdev_opts.keep_alive_timeout_ms, - ) - .with_transport_retry_count( - Config::get().nvme_bdev_opts.transport_retry_count as u8, - ) - .with_fabrics_connect_timeout_us( - crate::subsys::config::opts::try_from_env( - "NVMF_FABRICS_CONNECT_TIMEOUT", - 1_000_000, - ), - ); - - let hostnqn = template.hostnqn.clone().or_else(|| { - MayastorEnvironment::global_or_default().make_hostnqn() - }); + .with_keep_alive_timeout_ms(Config::get().nvme_bdev_opts.keep_alive_timeout_ms) + .with_transport_retry_count(Config::get().nvme_bdev_opts.transport_retry_count as u8) + .with_fabrics_connect_timeout_us(crate::subsys::config::opts::try_from_env( + "NVMF_FABRICS_CONNECT_TIMEOUT", + 1_000_000, + )); + + let hostnqn = template + .hostnqn + .clone() + .or_else(|| MayastorEnvironment::global_or_default().make_hostnqn()); if let Ok(ext_host_id) = std::env::var("MAYASTOR_NVMF_HOSTID") { if let Ok(uuid) = Uuid::parse_str(&ext_host_id) { opts = opts.with_ext_host_id(*uuid.as_bytes()); if hostnqn.is_none() { - opts = opts - .with_hostnqn(format!("{NVME_NQN_PREFIX}:uuid:{uuid}")); + opts = opts.with_hostnqn(format!("{NVME_NQN_PREFIX}:uuid:{uuid}")); } } } @@ -288,9 +264,7 @@ impl CreateDestroy for NvmfDeviceTemplate { info!("::create() {}", self.get_name()); let cname = self.get_name(); if NVME_CONTROLLERS.lookup_by_name(&cname).is_some() { - return Err(BdevError::BdevExists { - name: cname, - }); + return Err(BdevError::BdevExists { name: cname }); } // Insert a new controller instance (uninitialized) as a guard, and diff --git a/io-engine/src/bdev/nvmx/utils.rs b/io-engine/src/bdev/nvmx/utils.rs index 8c7bd163c..3a0247f34 100644 --- a/io-engine/src/bdev/nvmx/utils.rs +++ b/io-engine/src/bdev/nvmx/utils.rs @@ -77,8 +77,7 @@ pub(crate) fn nvme_cpl_succeeded(cpl: *const spdk_nvme_cpl) -> bool { sc = cplr.__bindgen_anon_1.status.sc(); } - sct == NvmeStatusCodeType::Generic as u16 - && sc == NvmeGenericCommandStatusCode::Success as u16 + sct == NvmeStatusCodeType::Generic as u16 && sc == NvmeGenericCommandStatusCode::Success as u16 } /* Bit set of attributes for DATASET MANAGEMENT commands. 
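The URI handling above drains every recognized query parameter out of a map so that whatever is left over can be rejected, which is what `reject_unknown_parameters` does. A runnable sketch using the `url` crate the diff already depends on; the flag bits are illustrative rather than SPDK's constants, and unlike the real `uri::boolean` helper only the literal `true` is accepted here:

```rust
use std::collections::HashMap;
use url::Url;

const FLAG_PRCHK_REFTAG: u32 = 1 << 0; // illustrative bit values
const FLAG_PRCHK_GUARD: u32 = 1 << 1;

fn main() {
    let url = Url::parse("nvmf://10.0.0.2:8420/nqn.2019-05.io:node1?reftag=true&guard=false")
        .unwrap();

    // Drain the query string into a map, removing each recognized key.
    let mut parameters: HashMap<String, String> = url.query_pairs().into_owned().collect();

    let mut prchk_flags: u32 = 0;
    if parameters.remove("reftag").as_deref() == Some("true") {
        prchk_flags |= FLAG_PRCHK_REFTAG;
    }
    if parameters.remove("guard").as_deref() == Some("true") {
        prchk_flags |= FLAG_PRCHK_GUARD;
    }

    // Anything still in the map is an unknown parameter: reject it.
    assert!(parameters.is_empty(), "unknown parameters: {parameters:?}");
    println!("protection check flags: {prchk_flags:#b}");
}
```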
*/ diff --git a/io-engine/src/bdev/nx.rs b/io-engine/src/bdev/nx.rs index c959990a7..06d7fb29d 100644 --- a/io-engine/src/bdev/nx.rs +++ b/io-engine/src/bdev/nx.rs @@ -63,8 +63,7 @@ impl TryFrom<&Url> for Nexus { }); } - let mut parameters: HashMap = - uri.query_pairs().into_owned().collect(); + let mut parameters: HashMap = uri.query_pairs().into_owned().collect(); let size: u64 = if let Some(value) = parameters.remove("size") { byte_unit::Byte::parse_str(&value, true) @@ -80,20 +79,19 @@ impl TryFrom<&Url> for Nexus { }); }; - let children: Vec = - if let Some(value) = parameters.remove("children") { - value.split(',').map(|s| s.to_string()).collect::>() - } else { - return Err(BdevError::InvalidUri { - uri: uri.to_string(), - message: "'children' must be specified".to_string(), - }); - }; + let children: Vec = if let Some(value) = parameters.remove("children") { + value.split(',').map(|s| s.to_string()).collect::>() + } else { + return Err(BdevError::InvalidUri { + uri: uri.to_string(), + message: "'children' must be specified".to_string(), + }); + }; reject_unknown_parameters(uri, parameters)?; Ok(Self { - name: uri.path()[1 ..].into(), + name: uri.path()[1..].into(), size, children, }) @@ -111,25 +109,19 @@ impl CreateDestroy for Nexus { type Error = BdevError; async fn create(&self) -> Result { - crate::bdev::nexus::nexus_create( - &self.name, - self.size, - None, - &self.children, - ) - .await - .map_err(|error| BdevError::CreateBdevFailedStr { - error: error.to_string(), - name: self.name.to_owned(), - })?; + crate::bdev::nexus::nexus_create(&self.name, self.size, None, &self.children) + .await + .map_err(|error| BdevError::CreateBdevFailedStr { + error: error.to_string(), + name: self.name.to_owned(), + })?; Ok(self.name.to_owned()) } async fn destroy(self: Box) -> Result<(), Self::Error> { debug!("{:?}: deleting", self); - let Some(nexus) = crate::bdev::nexus::nexus_lookup_mut(&self.name) - else { + let Some(nexus) = crate::bdev::nexus::nexus_lookup_mut(&self.name) else { return Err(BdevError::BdevNotFound { name: self.name.to_owned(), }); diff --git a/io-engine/src/bdev/uring.rs b/io-engine/src/bdev/uring.rs index 6a1c19b03..f95744baa 100644 --- a/io-engine/src/bdev/uring.rs +++ b/io-engine/src/bdev/uring.rs @@ -1,9 +1,4 @@ -use std::{ - collections::HashMap, - convert::TryFrom, - ffi::CString, - os::unix::fs::FileTypeExt, -}; +use std::{collections::HashMap, convert::TryFrom, ffi::CString, os::unix::fs::FileTypeExt}; use async_trait::async_trait; use futures::channel::oneshot; @@ -45,17 +40,14 @@ impl TryFrom<&Url> for Uring { .ok() .map_or(false, |meta| meta.file_type().is_block_device()); - let mut parameters: HashMap = - url.query_pairs().into_owned().collect(); + let mut parameters: HashMap = url.query_pairs().into_owned().collect(); let blk_size: u32 = match parameters.remove("blk_size") { - Some(value) => { - value.parse().context(bdev_api::IntParamParseFailed { - uri: url.to_string(), - parameter: String::from("blk_size"), - value: value.clone(), - })? 
- } + Some(value) => value.parse().context(bdev_api::IntParamParseFailed { + uri: url.to_string(), + parameter: String::from("blk_size"), + value: value.clone(), + })?, None => { if path_is_blockdev { 0 @@ -65,11 +57,10 @@ impl TryFrom<&Url> for Uring { } }; - let uuid = uri::uuid(parameters.remove("uuid")).context( - bdev_api::UuidParamParseFailed { + let uuid = + uri::uuid(parameters.remove("uuid")).context(bdev_api::UuidParamParseFailed { uri: url.to_string(), - }, - )?; + })?; reject_unknown_parameters(url, parameters)?; @@ -108,9 +99,7 @@ impl CreateDestroy for Uring { uuid: spdk_rs::Uuid::generate().into_raw(), }; - if let Some(mut bdev) = - UntypedBdev::checked_from_ptr(unsafe { create_uring_bdev(&opts) }) - { + if let Some(mut bdev) = UntypedBdev::checked_from_ptr(unsafe { create_uring_bdev(&opts) }) { if let Some(uuid) = self.uuid { unsafe { bdev.set_raw_uuid(uuid.into()) }; } diff --git a/io-engine/src/bdev/util/uri.rs b/io-engine/src/bdev/util/uri.rs index 322641e36..c5139eb4c 100644 --- a/io-engine/src/bdev/util/uri.rs +++ b/io-engine/src/bdev/util/uri.rs @@ -22,10 +22,7 @@ pub(crate) fn segments(url: &Url) -> Vec<&str> { /// Acceptable values are: true, false, yes, no, on, off /// Also accept an (unsigned) integer, where 0 represents false /// and any other value represents true -pub(crate) fn boolean( - value: &str, - empty: bool, -) -> Result { +pub(crate) fn boolean(value: &str, empty: bool) -> Result { if value.is_empty() { return Ok(empty); } @@ -41,8 +38,6 @@ pub(crate) fn boolean( value.parse::() } -pub(crate) fn uuid( - value: Option, -) -> Result, uuid::Error> { +pub(crate) fn uuid(value: Option) -> Result, uuid::Error> { value.map(|uuid| uuid::Uuid::parse_str(&uuid)).transpose() } diff --git a/io-engine/src/bdev_api.rs b/io-engine/src/bdev_api.rs index 4838c5bf7..297a6d0e4 100644 --- a/io-engine/src/bdev_api.rs +++ b/io-engine/src/bdev_api.rs @@ -17,11 +17,7 @@ pub enum BdevError { #[snafu(display("Error parsing URI '{}'", uri))] UriParseFailed { source: ParseError, uri: String }, // No matching URI error. - #[snafu(display( - "No matching URI found for BDEV '{}' in aliases {:?}", - name, - aliases - ))] + #[snafu(display("No matching URI found for BDEV '{}' in aliases {:?}", name, aliases))] BdevNoMatchingUri { name: String, aliases: Vec }, // Unsupported URI scheme. #[snafu(display("Unsupported URI scheme: '{}'", scheme))] @@ -58,16 +54,10 @@ pub enum BdevError { value: String, }, // Bad value of a UUID parameter. - #[snafu(display( - "Invalid URI '{}': could not parse value of UUID parameter", - uri - ))] + #[snafu(display("Invalid URI '{}': could not parse value of UUID parameter", uri))] UuidParamParseFailed { source: uuid::Error, uri: String }, // BDEV name already exists. - #[snafu(display( - "Failed to create a BDEV: name '{}' already exists", - name - ))] + #[snafu(display("Failed to create a BDEV: name '{}' already exists", name))] BdevExists { name: String }, // Creating a BDEV with a different UUID. 
#[snafu(display( diff --git a/io-engine/src/bin/casperf.rs b/io-engine/src/bin/casperf.rs index 164b58a30..549295a60 100644 --- a/io-engine/src/bin/casperf.rs +++ b/io-engine/src/bin/casperf.rs @@ -6,30 +6,18 @@ use rand::Rng; use io_engine::{ bdev_api::bdev_create, core::{ - mayastor_env_stop, - Cores, - MayastorCliArgs, - MayastorEnvironment, - Mthread, - Reactors, - UntypedBdev, - UntypedDescriptorGuard, + mayastor_env_stop, Cores, MayastorCliArgs, MayastorEnvironment, Mthread, Reactors, + UntypedBdev, UntypedDescriptorGuard, }, logger, subsys::Config, }; use spdk_rs::{ libspdk::{ - spdk_bdev_free_io, - spdk_bdev_io, - spdk_bdev_read, - spdk_bdev_write, - spdk_poller, - spdk_poller_register, - spdk_poller_unregister, + spdk_bdev_free_io, spdk_bdev_io, spdk_bdev_read, spdk_bdev_write, spdk_poller, + spdk_poller_register, spdk_poller_unregister, }, - DmaBuf, - IoChannelGuard, + DmaBuf, IoChannelGuard, }; use version_info::version_info_str; @@ -102,11 +90,7 @@ impl Job { let job = unsafe { ioq.job.as_mut() }; if !success { - eprintln!( - "IO error for bdev {}, LBA {}", - job.bdev.name(), - ioq.offset - ); + eprintln!("IO error for bdev {}, LBA {}", job.bdev.name(), ioq.offset); } job.n_io += 1; @@ -130,7 +114,7 @@ impl Job { return; } - let offset = job.rng.gen_range(0 .. job.io_blocks) * job.io_size; + let offset = job.rng.gen_range(0..job.io_blocks) * job.io_size; ioq.next(offset); } @@ -155,7 +139,7 @@ impl Job { let mut queue = Vec::new(); - (0 ..= qd).for_each(|offset| { + (0..=qd).for_each(|offset| { queue.push(Io { buf: DmaBuf::new(size, bdev.alignment()).unwrap(), iot: io_type, @@ -379,9 +363,7 @@ fn main() { .collect::>(); let io_size = match matches.get_one::("io-size") { - Some(io_size) => { - byte_unit::Byte::parse_str(io_size, true).unwrap().as_u64() - } + Some(io_size) => byte_unit::Byte::parse_str(io_size, true).unwrap().as_u64(), None => IO_SIZE, }; let io_type = match matches @@ -414,9 +396,7 @@ fn main() { for j in jobs { let job = j.await; - let thread = - Mthread::new(job.bdev.name().to_string(), Cores::current()) - .unwrap(); + let thread = Mthread::new(job.bdev.name().to_string(), Cores::current()).unwrap(); thread.send_msg(job, |job| { job.run(); }); diff --git a/io-engine/src/bin/initiator.rs b/io-engine/src/bin/initiator.rs index 2ff05060c..389b6608e 100644 --- a/io-engine/src/bin/initiator.rs +++ b/io-engine/src/bin/initiator.rs @@ -2,8 +2,7 @@ //! target type understood by the nexus. use std::{ - fmt, - fs, + fmt, fs, io::{self, Write}, }; @@ -17,17 +16,11 @@ use io_engine::{ bdev::{device_create, device_open}, bdev_api::{bdev_create, BdevError}, core::{ - mayastor_env_stop, - CoreError, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - SnapshotParams, - UntypedBdev, + mayastor_env_stop, CoreError, MayastorCliArgs, MayastorEnvironment, Reactor, + SnapshotParams, UntypedBdev, }, jsonrpc::print_error_chain, - logger, - subsys, + logger, subsys, subsys::Config, }; use spdk_rs::DmaError; @@ -84,8 +77,7 @@ type Result = std::result::Result; /// Create initiator bdev. 
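The casperf job loop above picks each I/O offset as a random block index multiplied by the I/O size, so every request stays aligned to its own length. The pattern in isolation, assuming the `rand` crate the tool itself uses (`Job` is pared down to the two fields involved):

```rust
use rand::Rng;

/// Pared-down job descriptor: total blocks to cover and bytes per I/O.
struct Job {
    io_blocks: u64,
    io_size: u64,
}

fn main() {
    let job = Job { io_blocks: 1 << 20, io_size: 4096 };
    let mut rng = rand::thread_rng();

    for _ in 0..4 {
        // Mirrors `rng.gen_range(0..job.io_blocks) * job.io_size` in the diff.
        let offset = rng.gen_range(0..job.io_blocks) * job.io_size;
        // Every offset is a multiple of the I/O size, so requests never
        // straddle their own length.
        assert_eq!(offset % job.io_size, 0);
        println!("next I/O at byte offset {offset}");
    }
}
```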
async fn create_bdev(uri: &str) -> Result { let bdev_name = bdev_create(uri).await?; - let bdev = UntypedBdev::lookup_by_name(&bdev_name) - .expect("Failed to lookup the created bdev"); + let bdev = UntypedBdev::lookup_by_name(&bdev_name).expect("Failed to lookup the created bdev"); Ok(bdev) } @@ -167,44 +159,64 @@ fn main() { let matches = Command::new("Test initiator for nexus replica") .version(version_info_str!()) .about("Connect, read or write a block to a nexus replica using its URI") - .arg(Arg::new("URI") - .help("URI of the replica to connect to") - .required(true) - .index(1)) - .arg(Arg::new("offset") - .short('o') - .long("offset") - .value_name("NUMBER") - .help("Offset of IO operation on the replica in bytes (default 0)")) - .subcommand(Command::new("connect") - .about("Connect to and disconnect from the replica")) - .subcommand(Command::new("read") - .about("Read bytes from the replica") - .arg(Arg::new("FILE") - .help("File to write data that were read from the replica") + .arg( + Arg::new("URI") + .help("URI of the replica to connect to") .required(true) - .index(1))) - .subcommand(Command::new("write") - .about("Write bytes to the replica") - .arg(Arg::new("FILE") - .help("File to read data from that will be written to the replica") - .required(true) - .index(1))) - .subcommand(Command::new("nvme-admin") - .about("Send a custom NVMe Admin command") - .arg(Arg::new("opcode") - .help("Admin command opcode to send") - .required(true) - .index(1))) - .subcommand(Command::new("id-ctrlr") - .about("Send NVMe Admin identify controller command") - .arg(Arg::new("FILE") - .help("File to write output of identify controller command") - .required(true) - .index(1))) - .subcommand(Command::new("create-snapshot") - .about("Create a snapshot on the replica")) - .subcommand_required(true).subcommand_required(true).get_matches(); + .index(1), + ) + .arg( + Arg::new("offset") + .short('o') + .long("offset") + .value_name("NUMBER") + .help("Offset of IO operation on the replica in bytes (default 0)"), + ) + .subcommand(Command::new("connect").about("Connect to and disconnect from the replica")) + .subcommand( + Command::new("read") + .about("Read bytes from the replica") + .arg( + Arg::new("FILE") + .help("File to write data that were read from the replica") + .required(true) + .index(1), + ), + ) + .subcommand( + Command::new("write") + .about("Write bytes to the replica") + .arg( + Arg::new("FILE") + .help("File to read data from that will be written to the replica") + .required(true) + .index(1), + ), + ) + .subcommand( + Command::new("nvme-admin") + .about("Send a custom NVMe Admin command") + .arg( + Arg::new("opcode") + .help("Admin command opcode to send") + .required(true) + .index(1), + ), + ) + .subcommand( + Command::new("id-ctrlr") + .about("Send NVMe Admin identify controller command") + .arg( + Arg::new("FILE") + .help("File to write output of identify controller command") + .required(true) + .index(1), + ), + ) + .subcommand(Command::new("create-snapshot").about("Create a snapshot on the replica")) + .subcommand_required(true) + .subcommand_required(true) + .get_matches(); logger::init("INFO"); @@ -228,8 +240,7 @@ fn main() { let res = if let Some(matches) = matches.subcommand_matches("read") { read(&uri, offset, matches.get_one::("FILE").unwrap()).await } else if let Some(matches) = matches.subcommand_matches("write") { - write(&uri, offset, matches.get_one::("FILE").unwrap()) - .await + write(&uri, offset, matches.get_one::("FILE").unwrap()).await } else if let 
Some(matches) = matches.subcommand_matches("nvme-admin") { let opcode: u8 = match matches.get_one::("opcode") { Some(val) => val.parse().expect("Opcode must be a number"), @@ -237,8 +248,7 @@ fn main() { }; nvme_admin(&uri, opcode).await } else if let Some(matches) = matches.subcommand_matches("id-ctrlr") { - identify_ctrlr(&uri, matches.get_one::("FILE").unwrap()) - .await + identify_ctrlr(&uri, matches.get_one::("FILE").unwrap()).await } else if matches.subcommand_matches("create-snapshot").is_some() { create_snapshot(&uri).await } else { diff --git a/io-engine/src/bin/io-engine-client/context.rs b/io-engine/src/bin/io-engine-client/context.rs index ab4459986..e819f6121 100644 --- a/io-engine/src/bin/io-engine-client/context.rs +++ b/io-engine/src/bin/io-engine-client/context.rs @@ -66,8 +66,7 @@ mod v1 { pub type HostRpcClient = host::HostRpcClient; pub type NexusRpcClient = nexus::NexusRpcClient; pub type SnapshotRpcClient = snapshot::SnapshotRpcClient; - pub type SnapshotRebuildRpcClient = - snapshot_rebuild::SnapshotRebuildRpcClient; + pub type SnapshotRebuildRpcClient = snapshot_rebuild::SnapshotRebuildRpcClient; pub type TestRpcClient = test::TestRpcClient; pub type StatsRpcClient = stats::StatsRpcClient; @@ -93,8 +92,7 @@ mod v1 { let host = HostRpcClient::connect(h.clone()).await.unwrap(); let nexus = NexusRpcClient::connect(h.clone()).await.unwrap(); let snapshot = SnapshotRpcClient::connect(h.clone()).await.unwrap(); - let snapshot_rebuild = - SnapshotRebuildRpcClient::connect(h.clone()).await.unwrap(); + let snapshot_rebuild = SnapshotRebuildRpcClient::connect(h.clone()).await.unwrap(); let test = TestRpcClient::connect(h.clone()).await.unwrap(); let stats = StatsRpcClient::connect(h).await.unwrap(); @@ -144,9 +142,11 @@ impl Context { } if let Some(ref mut authority) = parts.authority { if authority.port().is_none() { - parts.authority = Authority::from_maybe_shared(Bytes::from( - format!("{}:{}", authority.host(), 10124), - )) + parts.authority = Authority::from_maybe_shared(Bytes::from(format!( + "{}:{}", + authority.host(), + 10124 + ))) .ok() } } @@ -163,11 +163,12 @@ impl Context { println!("Connecting to {:?}", host.uri()); } - let output = matches.get_one::("output").ok_or_else(|| { - Error::OutputFormatInvalid { - format: "".to_string(), - } - })?; + let output = + matches + .get_one::("output") + .ok_or_else(|| Error::OutputFormatInvalid { + format: "".to_string(), + })?; let output = output.parse()?; let client = MayaClient::connect(host.clone()).await.unwrap(); @@ -211,11 +212,7 @@ impl Context { } } - pub(crate) fn print_list( - &self, - headers: Vec<&str>, - mut data: Vec>, - ) { + pub(crate) fn print_list(&self, headers: Vec<&str>, mut data: Vec>) { assert_ne!(data.len(), 0); let ncols = data.first().unwrap().len(); assert_eq!(headers.len(), ncols); @@ -271,9 +268,7 @@ impl Context { pub(crate) async fn print_streamed_list( &self, headers: Vec<&str>, - mut recv: tokio::sync::mpsc::Receiver< - Result, tonic::Status>, - >, + mut recv: tokio::sync::mpsc::Receiver, tonic::Status>>, ) -> Result<(), tonic::Status> { let Some(data) = recv.recv().await else { return Ok(()); diff --git a/io-engine/src/bin/io-engine-client/main.rs b/io-engine/src/bin/io-engine-client/main.rs index 15f3a5e5c..21d817a76 100644 --- a/io-engine/src/bin/io-engine-client/main.rs +++ b/io-engine/src/bin/io-engine-client/main.rs @@ -4,9 +4,7 @@ use strum::ParseError; use tonic::transport::Channel; use io_engine_api::v0::{ - bdev_rpc_client::BdevRpcClient, - json_rpc_client::JsonRpcClient, - 
mayastor_client::MayastorClient, + bdev_rpc_client::BdevRpcClient, json_rpc_client::JsonRpcClient, mayastor_client::MayastorClient, }; pub(crate) mod context; mod v0; @@ -47,8 +45,7 @@ type Result<T> = std::result::Result<T, ClientError>; #[tokio::main(worker_threads = 2)] async fn main() { env_logger::init(); - let result = match std::env::var("API_VERSION").unwrap_or_default().as_str() - { + let result = match std::env::var("API_VERSION").unwrap_or_default().as_str() { "v0" => v0::main_().await, "v1" => v1::main_().await, "" => v1::main_().await, diff --git a/io-engine/src/bin/io-engine-client/v0/bdev_cli.rs b/io-engine/src/bin/io-engine-client/v0/bdev_cli.rs index 899794c6e..ad2669eae 100644 --- a/io-engine/src/bin/io-engine-client/v0/bdev_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/bdev_cli.rs @@ -3,8 +3,7 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::prelude::*; @@ -20,8 +19,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("destroy", args) => destroy(ctx, args).await, ("unshare", args) => unshare(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -52,9 +50,7 @@ .long("allowed-host") .action(clap::ArgAction::Append) .required(false) - .help( - "NQN of hosts which are allowed to connect to the target", - ), + .help("NQN of hosts which are allowed to connect to the target"), ); let unshare = Command::new("unshare") @@ -127,13 +123,7 @@ async fn create(mut ctx: Context, args: &ArgMatches) -> crate::Result<()> { })?
.to_owned(); - let response = ctx - .bdev - .create(BdevUri { - uri, - }) - .await - .context(GrpcStatus)?; + let response = ctx.bdev.create(BdevUri { uri }).await.context(GrpcStatus)?; match ctx.output { OutputFormat::Json => { @@ -177,9 +167,7 @@ async fn destroy(mut ctx: Context, args: &ArgMatches) -> crate::Result<()> { // un share the bdev let _ = ctx .bdev - .unshare(CreateReply { - name, - }) + .unshare(CreateReply { name }) .await .context(GrpcStatus)?; @@ -265,9 +253,7 @@ async fn unshare(mut ctx: Context, args: &ArgMatches) -> crate::Result<()> { let response = ctx .bdev - .unshare(CreateReply { - name: name.clone(), - }) + .unshare(CreateReply { name: name.clone() }) .await .context(GrpcStatus)?; diff --git a/io-engine/src/bin/io-engine-client/v0/controller_cli.rs b/io-engine/src/bin/io-engine-client/v0/controller_cli.rs index da47fc421..402cb9643 100755 --- a/io-engine/src/bin/io-engine-client/v0/controller_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/controller_cli.rs @@ -12,8 +12,7 @@ use tonic::Status; pub fn subcommands() -> Command { let list = Command::new("list").about("List existing NVMe controllers"); - let stats = Command::new("stats") - .about("Display I/O statistics for NVMe controllers"); + let stats = Command::new("stats").about("Display I/O statistics for NVMe controllers"); Command::new("controller") .subcommand_required(true) @@ -28,8 +27,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("list", args) => list_controllers(ctx, args).await, ("stats", args) => controller_stats(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -46,10 +44,7 @@ fn controller_state_to_str(idx: i32) -> String { .to_string() } -async fn controller_stats( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn controller_stats(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client .stat_nvme_controllers(rpc::Null {}) @@ -101,10 +96,7 @@ async fn controller_stats( Ok(()) } -async fn list_controllers( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn list_controllers(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client .list_nvme_controllers(rpc::Null {}) diff --git a/io-engine/src/bin/io-engine-client/v0/device_cli.rs b/io-engine/src/bin/io-engine-client/v0/device_cli.rs index b56cb5e51..8c2b9f23d 100644 --- a/io-engine/src/bin/io-engine-client/v0/device_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/device_cli.rs @@ -10,15 +10,15 @@ use snafu::ResultExt; use tonic::Status; pub fn subcommands() -> Command { - let list = - Command::new("list").about("List available block devices") - .arg( - Arg::new("all") - .short('a') - .long("all") - .action(clap::ArgAction::SetTrue) - .help("List all block devices (ie. also include devices currently in use)"), - ); + let list = Command::new("list") + .about("List available block devices") + .arg( + Arg::new("all") + .short('a') + .long("all") + .action(clap::ArgAction::SetTrue) + .help("List all block devices (ie. 
also include devices currently in use)"), + ); Command::new("device") .subcommand_required(true) @@ -31,8 +31,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { match matches.subcommand().unwrap() { ("list", args) => list_block_devices(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -45,17 +44,12 @@ fn get_partition_type(device: &rpc::BlockDevice) -> String { } } -async fn list_block_devices( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn list_block_devices(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let all = matches.get_flag("all"); let response = ctx .client - .list_block_devices(rpc::ListBlockDevicesRequest { - all, - }) + .list_block_devices(rpc::ListBlockDevicesRequest { all }) .await .context(GrpcStatus)?; @@ -100,11 +94,7 @@ async fn list_block_devices( device.devmajor.to_string(), device.devminor.to_string(), device.size.to_string(), - String::from(if device.available { - "yes" - } else { - "no" - }), + String::from(if device.available { "yes" } else { "no" }), device.model.clone(), get_partition_type(device), fstype, diff --git a/io-engine/src/bin/io-engine-client/v0/jsonrpc_cli.rs b/io-engine/src/bin/io-engine-client/v0/jsonrpc_cli.rs index 4dabd371a..fd1c78b0f 100644 --- a/io-engine/src/bin/io-engine-client/v0/jsonrpc_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/jsonrpc_cli.rs @@ -1,7 +1,6 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::ToColoredJson; @@ -27,10 +26,7 @@ pub fn subcommands() -> Command { ) } -pub async fn json_rpc_call( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +pub async fn json_rpc_call(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let method = matches .get_one::("method") .ok_or_else(|| ClientError::MissingValue { @@ -46,10 +42,7 @@ pub async fn json_rpc_call( let response = ctx .json - .json_rpc_call(rpc::JsonRpcRequest { - method, - params, - }) + .json_rpc_call(rpc::JsonRpcRequest { method, params }) .await .context(GrpcStatus)?; diff --git a/io-engine/src/bin/io-engine-client/v0/mod.rs b/io-engine/src/bin/io-engine-client/v0/mod.rs index 6b51961b8..6aec42d52 100644 --- a/io-engine/src/bin/io-engine-client/v0/mod.rs +++ b/io-engine/src/bin/io-engine-client/v0/mod.rs @@ -28,14 +28,16 @@ pub(super) async fn main_() -> crate::Result<()> { .default_value("http://127.0.0.1:10124") .value_name("HOST") .help("The URI of mayastor instance") - .global(true)) + .global(true), + ) .arg( Arg::new("quiet") .short('q') .long("quiet") .action(clap::ArgAction::SetTrue) .global(true) - .help("Do not print any output except for list records")) + .help("Do not print any output except for list records"), + ) .arg( Arg::new("verbose") .short('v') @@ -43,7 +45,8 @@ pub(super) async fn main_() -> crate::Result<()> { .action(clap::ArgAction::Count) .help("Verbose output") .conflicts_with("quiet") - .global(true)) + .global(true), + ) .arg( Arg::new("units") .short('u') @@ -52,7 +55,8 @@ pub(super) async fn main_() -> crate::Result<()> { .value_parser(["i", "d"]) .hide_possible_values(true) .next_line_help(true) - .help("Output with large units: i for kiB, etc. or d for kB, etc.")) + .help("Output with large units: i for kiB, etc. 
or d for kB, etc."), + ) .arg( Arg::new("output") .short('o') @@ -61,7 +65,7 @@ pub(super) async fn main_() -> crate::Result<()> { .default_value("default") .value_parser(["default", "json"]) .global(true) - .help("Output format.") + .help("Output format."), ) .subcommand(pool_cli::subcommands()) .subcommand(nexus_cli::subcommands()) @@ -73,7 +77,8 @@ pub(super) async fn main_() -> crate::Result<()> { .subcommand(snapshot_cli::subcommands()) .subcommand(jsonrpc_cli::subcommands()) .subcommand(controller_cli::subcommands()) - .subcommand_required(true).arg_required_else_help(true) + .subcommand_required(true) + .arg_required_else_help(true) .get_matches(); let ctx = context::Context::new(&matches) diff --git a/io-engine/src/bin/io-engine-client/v0/nexus_child_cli.rs b/io-engine/src/bin/io-engine-client/v0/nexus_child_cli.rs index 027cd0fe6..f78e82bc4 100644 --- a/io-engine/src/bin/io-engine-client/v0/nexus_child_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/nexus_child_cli.rs @@ -3,8 +3,7 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::ToColoredJson; @@ -19,8 +18,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("online", args) => child_operation(ctx, args, 1).await, ("retire", args) => child_operation(ctx, args, 2).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -137,11 +135,7 @@ async fn fault(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { Ok(()) } -async fn child_operation( - mut ctx: Context, - matches: &ArgMatches, - action: i32, -) -> crate::Result<()> { +async fn child_operation(mut ctx: Context, matches: &ArgMatches, action: i32) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { diff --git a/io-engine/src/bin/io-engine-client/v0/nexus_cli.rs b/io-engine/src/bin/io-engine-client/v0/nexus_cli.rs index dc123762d..8c1085d14 100644 --- a/io-engine/src/bin/io-engine-client/v0/nexus_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/nexus_cli.rs @@ -1,9 +1,7 @@ use super::nexus_child_cli; use crate::{ context::{Context, OutputFormat}, - parse_size, - ClientError, - GrpcStatus, + parse_size, ClientError, GrpcStatus, }; use byte_unit::Byte; use clap::{Arg, ArgMatches, Command}; @@ -15,81 +13,78 @@ use tonic::{Code, Status}; use uuid::Uuid; pub fn subcommands() -> Command { - let create = Command::new("create") - .about("Create a new nexus device") - .arg( - Arg::new("uuid") - .required(true) - .index(1) - .help("uuid for the nexus, if uuid is not known please provide \"\" to autogenerate"), - ) - .arg( - Arg::new("size") - .required(true) - .index(2) - .help("size with optional unit suffix"), - ) - .arg( - Arg::new("children") - .required(true) - .action(clap::ArgAction::Append) - .index(3) - .help("list of children to add"), - ); + let create = + Command::new("create") + .about("Create a new nexus device") + .arg(Arg::new("uuid").required(true).index(1).help( + "uuid for the nexus, if uuid is not known please provide \"\" to autogenerate", + )) + .arg( + Arg::new("size") + .required(true) + .index(2) + .help("size with optional unit suffix"), + ) + .arg( + Arg::new("children") + .required(true) + .action(clap::ArgAction::Append) + .index(3) + .help("list of children to add"), + ); - let create_v2 = 
Command::new("create2") - .about("Create a new nexus device with NVMe options") - .arg( - Arg::new("name") - .required(true) - .index(1) - .help("name of the nexus"), - ) - .arg( - Arg::new("uuid") - .required(true) - .help("uuid for the nexus, if uuid is not known please provide \"\" to autogenerate"), - ) - .arg( - Arg::new("size") - .required(true) - .help("size with optional unit suffix"), - ) - .arg( - Arg::new("min-cntlid") - .required(true) - .value_parser(clap::value_parser!(u32)) - .help("minimum NVMe controller ID for sharing over NVMf"), - ) - .arg( - Arg::new("max-cntlid") - .required(true) - .value_parser(clap::value_parser!(u32)) - .help("maximum NVMe controller ID"), - ) - .arg( - Arg::new("resv-key") - .required(true) - .value_parser(clap::value_parser!(u64)) - .help("NVMe reservation key for children"), - ) - .arg( - Arg::new("preempt-key") - .required(true) - .value_parser(clap::value_parser!(u64)) - .help("NVMe preempt key for children, 0 for no preemption"), - ) - .arg( - Arg::new("nexus-info-key") - .required(true) - .help("Key used to persist the NexusInfo structure to the persistent store"), - ) - .arg( - Arg::new("children") - .required(true) - .action(clap::ArgAction::Append) - .help("list of children to add"), - ); + let create_v2 = + Command::new("create2") + .about("Create a new nexus device with NVMe options") + .arg( + Arg::new("name") + .required(true) + .index(1) + .help("name of the nexus"), + ) + .arg(Arg::new("uuid").required(true).help( + "uuid for the nexus, if uuid is not known please provide \"\" to autogenerate", + )) + .arg( + Arg::new("size") + .required(true) + .help("size with optional unit suffix"), + ) + .arg( + Arg::new("min-cntlid") + .required(true) + .value_parser(clap::value_parser!(u32)) + .help("minimum NVMe controller ID for sharing over NVMf"), + ) + .arg( + Arg::new("max-cntlid") + .required(true) + .value_parser(clap::value_parser!(u32)) + .help("maximum NVMe controller ID"), + ) + .arg( + Arg::new("resv-key") + .required(true) + .value_parser(clap::value_parser!(u64)) + .help("NVMe reservation key for children"), + ) + .arg( + Arg::new("preempt-key") + .required(true) + .value_parser(clap::value_parser!(u64)) + .help("NVMe preempt key for children, 0 for no preemption"), + ) + .arg( + Arg::new("nexus-info-key") + .required(true) + .help("Key used to persist the NexusInfo structure to the persistent store"), + ) + .arg( + Arg::new("children") + .required(true) + .action(clap::ArgAction::Append) + .help("list of children to add"), + ); let destroy = Command::new("destroy") .about("destroy the nexus with given name") @@ -111,19 +106,32 @@ pub fn subcommands() -> Command { let publish = Command::new("publish") .about("publish the nexus") - .arg(Arg::new("protocol").short('p').long("protocol").value_name("PROTOCOL") - .help("Name of a protocol (nvmf) used for publishing the nexus remotely")) - .arg(Arg::new("uuid").required(true).index(1) - .help("uuid for the nexus")) - .arg(Arg::new("key").required(false).index(2) - .help("crypto key to use")) + .arg( + Arg::new("protocol") + .short('p') + .long("protocol") + .value_name("PROTOCOL") + .help("Name of a protocol (nvmf) used for publishing the nexus remotely"), + ) + .arg( + Arg::new("uuid") + .required(true) + .index(1) + .help("uuid for the nexus"), + ) + .arg( + Arg::new("key") + .required(false) + .index(2) + .help("crypto key to use"), + ) .arg( Arg::new("allowed-host") .long("allowed-host") - .action(clap::ArgAction::Append) .required(false) - .help("NQN of hosts which are allowed 
to connect to the target")); + .help("NQN of hosts which are allowed to connect to the target"), + ); let unpublish = Command::new("unpublish").about("unpublish the nexus").arg( Arg::new("uuid") @@ -207,13 +215,12 @@ pub fn subcommands() -> Command { .help("uuid of nexus"), ); - let children_2 = - Command::new("children2").about("list nexus children").arg( - Arg::new("uuid") - .required(true) - .index(1) - .help("uuid of nexus"), - ); + let children_2 = Command::new("children2").about("list nexus children").arg( + Arg::new("uuid") + .required(true) + .index(1) + .help("uuid of nexus"), + ); Command::new("nexus") .subcommand_required(true) @@ -252,8 +259,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("remove", args) => nexus_remove(ctx, args).await, ("child", args) => nexus_child_cli::handler(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -271,11 +277,13 @@ fn nexus_create_parse( uuid = Uuid::new_v4().to_string() } let size = - parse_size(matches.get_one::<String>("size").ok_or_else(|| { - ClientError::MissingValue { - field: "size".to_string(), - } - })?) + parse_size( + matches + .get_one::<String>("size") + .ok_or_else(|| ClientError::MissingValue { + field: "size".to_string(), + })?, + ) .map_err(|s| Status::invalid_argument(format!("Bad size '{s}'"))) .context(GrpcStatus)?; let children = matches @@ -289,10 +297,7 @@ fn nexus_create_parse( Ok((uuid, size, children)) } -async fn nexus_create( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let (uuid, size, children) = nexus_create_parse(matches)?; let response = ctx @@ -323,10 +328,7 @@ async fn nexus_create( Ok(()) } -async fn nexus_create_v2( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_create_v2(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let (uuid, size, children) = nexus_create_parse(matches)?; let name = matches.get_one::<String>("name").unwrap().to_string(); let min_cntl_id = *matches.get_one::<u32>("min-cntlid").unwrap(); @@ -374,17 +376,12 @@ async fn nexus_create_v2( Ok(()) } -async fn nexus_shutdown( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_shutdown(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches.get_one::<String>("uuid").unwrap().to_string(); let response = ctx .client - .shutdown_nexus(v0::ShutdownNexusRequest { - uuid: uuid.clone(), - }) + .shutdown_nexus(v0::ShutdownNexusRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; @@ -406,17 +403,12 @@ async fn nexus_shutdown( Ok(()) } -async fn nexus_destroy( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_destroy(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches.get_one::<String>("uuid").unwrap().to_string(); let response = ctx .client - .destroy_nexus(v0::DestroyNexusRequest { - uuid: uuid.clone(), - }) + .destroy_nexus(v0::DestroyNexusRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; @@ -438,10 +430,7 @@ async fn nexus_destroy( Ok(()) } -async fn nexus_list( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_list(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client .list_nexus(v0::Null {}) @@ -503,10 +492,7 @@
async fn nexus_list( Ok(()) } -async fn nexus_list_v2( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_list_v2(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client .list_nexus_v2(v0::Null {}) @@ -558,8 +544,7 @@ async fn nexus_list_v2( row }) .collect(); - let mut hdr = - vec!["NAME", "UUID", ">SIZE", "STATE", ">REBUILDS", "PATH"]; + let mut hdr = vec!["NAME", "UUID", ">SIZE", "STATE", ">REBUILDS", "PATH"]; if show_child { hdr.push("CHILDREN"); } @@ -570,10 +555,7 @@ async fn nexus_list_v2( Ok(()) } -async fn nexus_children( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_children(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::<String>("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -615,9 +597,7 @@ async fn nexus_children( .children .iter() .map(|c| { - let state = child_state_to_str_v0( - v0::ChildState::try_from(c.state).unwrap(), - ); + let state = child_state_to_str_v0(v0::ChildState::try_from(c.state).unwrap()); vec![c.uri.clone(), state.to_string()] }) .collect(); @@ -628,10 +608,7 @@ async fn nexus_children( Ok(()) } -async fn nexus_children_2( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_children_2(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::<String>("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -677,12 +654,10 @@ async fn nexus_children_2( .children .iter() .map(|c| { - let state = child_state_to_str_v1( - v1::nexus::ChildState::try_from(c.state).unwrap(), - ); + let state = + child_state_to_str_v1(v1::nexus::ChildState::try_from(c.state).unwrap()); let reason = child_reason_to_str_v1( - v1::nexus::ChildStateReason::try_from(c.state_reason) - .unwrap(), + v1::nexus::ChildStateReason::try_from(c.state_reason).unwrap(), ); vec![c.uri.clone(), state.to_string(), reason.to_string()] }) @@ -694,10 +669,7 @@ async fn nexus_children_2( Ok(()) } -async fn nexus_publish( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_publish(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::<String>("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -708,18 +680,17 @@ async fn nexus_publish( .get_one::<String>("key") .cloned() .unwrap_or_default(); - let protocol = - match matches.get_one::<String>("protocol").map(|s| s.as_str()) { - None => v0::ShareProtocolNexus::NexusNbd, - Some("nvmf") => v0::ShareProtocolNexus::NexusNvmf, - Some(_) => { - return Err(Status::new( - Code::Internal, - "Invalid value of share protocol".to_owned(), - )) - .context(GrpcStatus); - } - }; + let protocol = match matches.get_one::<String>("protocol").map(|s| s.as_str()) { + None => v0::ShareProtocolNexus::NexusNbd, + Some("nvmf") => v0::ShareProtocolNexus::NexusNvmf, + Some(_) => { + return Err(Status::new( + Code::Internal, + "Invalid value of share protocol".to_owned(), + )) + .context(GrpcStatus); + } + }; let allowed_hosts = matches .get_many::<String>("allowed-host") .unwrap_or_default() .cloned() .collect(); @@ -755,10 +726,7 @@ async fn nexus_publish( Ok(()) } -async fn nexus_unpublish( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_unpublish(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::<String>("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -768,9 +736,7 @@ async fn nexus_unpublish( let response = ctx .client - .unpublish_nexus(v0::UnpublishNexusRequest { - uuid:
uuid.clone(), - }) + .unpublish_nexus(v0::UnpublishNexusRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; @@ -792,10 +758,7 @@ Ok(()) } -async fn nexus_nvme_ana_state( - ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_nvme_ana_state(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches.get_one::<String>("uuid").unwrap().to_string(); let ana_state = matches .get_one::<String>("state") @@ -808,15 +771,10 @@ } } -async fn nexus_get_nvme_ana_state( - mut ctx: Context, - uuid: String, -) -> crate::Result<()> { +async fn nexus_get_nvme_ana_state(mut ctx: Context, uuid: String) -> crate::Result<()> { let resp = ctx .client - .get_nvme_ana_state(v0::GetNvmeAnaStateRequest { - uuid: uuid.clone(), - }) + .get_nvme_ana_state(v0::GetNvmeAnaStateRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; ctx.v1(ana_state_idx_to_str(resp.get_ref().ana_state)); @@ -850,10 +808,7 @@ async fn nexus_set_nvme_ana_state( Ok(()) } -async fn nexus_add( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_add(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::<String>("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -900,10 +855,7 @@ async fn nexus_add( Ok(()) } -async fn nexus_remove( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_remove(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::<String>("uuid") .ok_or_else(|| ClientError::MissingValue { diff --git a/io-engine/src/bin/io-engine-client/v0/perf_cli.rs b/io-engine/src/bin/io-engine-client/v0/perf_cli.rs index aab689c2f..9a7494087 100644 --- a/io-engine/src/bin/io-engine-client/v0/perf_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/perf_cli.rs @@ -28,16 +28,12 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { match matches.subcommand().unwrap() { ("resource", args) => get_resource_usage(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } -async fn get_resource_usage( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn get_resource_usage(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { ctx.v2("Requesting resource usage statistics"); let mut table: Vec<Vec<String>> = Vec::new(); diff --git a/io-engine/src/bin/io-engine-client/v0/pool_cli.rs b/io-engine/src/bin/io-engine-client/v0/pool_cli.rs index 5cafecd09..5ad784921 100644 --- a/io-engine/src/bin/io-engine-client/v0/pool_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/pool_cli.rs @@ -1,7 +1,6 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use byte_unit::Byte; use clap::{Arg, ArgMatches, Command}; @@ -48,8 +47,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("destroy", args) => destroy(ctx, args).await, ("list", args) => list(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -106,9 +104,7 @@ async fn destroy(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client - .destroy_pool(rpc::DestroyPoolRequest { - name: name.clone(), - }) +
.destroy_pool(rpc::DestroyPoolRequest { name: name.clone() }) .await .context(GrpcStatus)?; @@ -171,10 +167,7 @@ async fn list(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { ] }) .collect(); - ctx.print_list( - vec!["NAME", "STATE", ">CAPACITY", ">USED", "DISKS"], - table, - ); + ctx.print_list(vec!["NAME", "STATE", ">CAPACITY", ">USED", "DISKS"], table); } }; diff --git a/io-engine/src/bin/io-engine-client/v0/rebuild_cli.rs b/io-engine/src/bin/io-engine-client/v0/rebuild_cli.rs index f902e9ab7..fbd9ca453 100644 --- a/io-engine/src/bin/io-engine-client/v0/rebuild_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/rebuild_cli.rs @@ -3,8 +3,7 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::ToColoredJson; @@ -22,8 +21,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("stats", args) => stats(ctx, args).await, ("progress", args) => progress(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -345,10 +343,7 @@ async fn state(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ); } OutputFormat::Default => { - ctx.print_list( - vec!["state"], - vec![vec![response.get_ref().state.clone()]], - ); + ctx.print_list(vec!["state"], vec![vec![response.get_ref().state.clone()]]); } }; diff --git a/io-engine/src/bin/io-engine-client/v0/replica_cli.rs b/io-engine/src/bin/io-engine-client/v0/replica_cli.rs index cde4bc694..e3c10292d 100644 --- a/io-engine/src/bin/io-engine-client/v0/replica_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/replica_cli.rs @@ -1,8 +1,6 @@ use crate::{ context::{Context, OutputFormat}, - parse_size, - ClientError, - GrpcStatus, + parse_size, ClientError, GrpcStatus, }; use byte_unit::Byte; use clap::{Arg, ArgMatches, Command}; @@ -19,42 +17,42 @@ pub fn subcommands() -> Command { Arg::new("pool") .required(true) .index(1) - .help("Storage pool name")) + .help("Storage pool name"), + ) .arg( Arg::new("name") - .required(true).index(2) - .help("Replica name")) - + .required(true) + .index(2) + .help("Replica name"), + ) .arg( Arg::new("protocol") .short('p') .long("protocol") - .value_name("PROTOCOL") - .help("Name of a protocol (nvmf) used for sharing the replica (default none)")) + .help("Name of a protocol (nvmf) used for sharing the replica (default none)"), + ) .arg( Arg::new("size") .short('s') .long("size") - .required(true) .value_name("NUMBER") - .help("Size of the replica")) + .help("Size of the replica"), + ) .arg( Arg::new("thin") .short('t') .long("thin") .action(clap::ArgAction::SetTrue) - .help("Whether replica is thin provisioned (default false)")) + .help("Whether replica is thin provisioned (default false)"), + ) .arg( Arg::new("allowed-host") .long("allowed-host") - .action(clap::ArgAction::Append) .required(false) - .help( - "NQN of hosts which are allowed to connect to the target", - ), + .help("NQN of hosts which are allowed to connect to the target"), ); let create_v2 = Command::new("create2") @@ -63,46 +61,48 @@ pub fn subcommands() -> Command { Arg::new("pool") .required(true) .index(1) - .help("Storage pool name")) + .help("Storage pool name"), + ) .arg( Arg::new("name") .required(true) .index(2) - .help("Replica name")) + .help("Replica name"), + ) .arg( Arg::new("uuid") - .required(true).index(3) - .help("Unique 
replica uuid")) + .required(true) + .index(3) + .help("Unique replica uuid"), + ) .arg( Arg::new("protocol") .short('p') .long("protocol") - .value_name("PROTOCOL") - .help("Name of a protocol (nvmf) used for sharing the replica (default none)")) + .help("Name of a protocol (nvmf) used for sharing the replica (default none)"), + ) .arg( Arg::new("size") .short('s') .long("size") - .required(true) .value_name("NUMBER") - .help("Size of the replica")) + .help("Size of the replica"), + ) .arg( Arg::new("thin") .short('t') .long("thin") .action(clap::ArgAction::SetTrue) - .help("Whether replica is thin provisioned (default false)")) + .help("Whether replica is thin provisioned (default false)"), + ) .arg( Arg::new("allowed-host") .long("allowed-host") - .action(clap::ArgAction::Append) .required(false) - .help( - "NQN of hosts which are allowed to connect to the target", - ), + .help("NQN of hosts which are allowed to connect to the target"), ); let destroy = Command::new("destroy").about("Destroy replica").arg( Arg::new("uuid") @@ -112,24 +112,25 @@ pub fn subcommands() -> Command { .help("Replica uuid"), ); - let share = Command::new("share").about("Share or unshare replica") - .arg( - Arg::new("name") - .required(true) - .index(1) - .help("Replica name")) - .arg( - Arg::new("protocol") - .required(true) - .index(2) - .help("Name of a protocol (nvmf) used for sharing or \"none\" to unshare the replica")) - .arg( - Arg::new("allowed-host") - .long("allowed-host") - - .action(clap::ArgAction::Append) - .required(false) - .help("NQN of hosts which are allowed to connect to the target")); + let share = + Command::new("share") + .about("Share or unshare replica") + .arg( + Arg::new("name") + .required(true) + .index(1) + .help("Replica name"), + ) + .arg(Arg::new("protocol").required(true).index(2).help( + "Name of a protocol (nvmf) used for sharing or \"none\" to unshare the replica", + )) + .arg( + Arg::new("allowed-host") + .long("allowed-host") + .action(clap::ArgAction::Append) + .required(false) + .help("NQN of hosts which are allowed to connect to the target"), + ); Command::new("replica") .subcommand_required(true) @@ -154,16 +155,12 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("share", args) => replica_share(ctx, args).await, ("stats", args) => replica_stat(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } -async fn replica_create( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let pool = matches .get_one::<String>("pool") .ok_or_else(|| ClientError::MissingValue { @@ -177,16 +174,18 @@ async fn replica_create( })? .to_owned(); let size = - parse_size(matches.get_one::<String>("size").ok_or_else(|| { - ClientError::MissingValue { - field: "size".to_string(), - } - })?
+ parse_size( + matches + .get_one::<String>("size") + .ok_or_else(|| ClientError::MissingValue { + field: "size".to_string(), + })?, + ) .map_err(|s| Status::invalid_argument(format!("Bad size '{s}'"))) .context(GrpcStatus)?; let thin = matches.get_flag("thin"); - let share = parse_replica_protocol(matches.get_one::<String>("protocol")) - .context(GrpcStatus)?; + let share = + parse_replica_protocol(matches.get_one::<String>("protocol")).context(GrpcStatus)?; let allowed_hosts = matches .get_many::<String>("allowed-host") .unwrap_or_default() @@ -221,10 +220,7 @@ Ok(()) } -async fn replica_create_v2( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_create_v2(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let pool = matches .get_one::<String>("pool") .ok_or_else(|| ClientError::MissingValue { @@ -244,16 +240,18 @@ })? .to_owned(); let size = - parse_size(matches.get_one::<String>("size").ok_or_else(|| { - ClientError::MissingValue { - field: "size".to_string(), - } - })?) + parse_size( + matches + .get_one::<String>("size") + .ok_or_else(|| ClientError::MissingValue { + field: "size".to_string(), + })?, + ) .map_err(|s| Status::invalid_argument(format!("Bad size '{s}'"))) .context(GrpcStatus)?; let thin = matches.get_flag("thin"); - let share = parse_replica_protocol(matches.get_one::<String>("protocol")) - .context(GrpcStatus)?; + let share = + parse_replica_protocol(matches.get_one::<String>("protocol")).context(GrpcStatus)?; let allowed_hosts = matches .get_many::<String>("allowed-host") .unwrap_or_default() @@ -269,8 +267,7 @@ size: size.as_u64(), allowed_hosts, }; - let response = - ctx.client.create_replica_v2(rq).await.context(GrpcStatus)?; + let response = ctx.client.create_replica_v2(rq).await.context(GrpcStatus)?; match ctx.output { @@ -290,10 +287,7 @@ Ok(()) } -async fn replica_destroy( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_destroy(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::<String>("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -303,9 +297,7 @@ let response = ctx .client - .destroy_replica(rpc::DestroyReplicaRequest { - uuid: uuid.clone(), - }) + .destroy_replica(rpc::DestroyReplicaRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; @@ -327,10 +319,7 @@ Ok(()) } -async fn replica_list( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_list(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client .list_replicas(rpc::Null {}) @@ -379,10 +368,7 @@ Ok(()) } -async fn replica_list2( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_list2(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client .list_replicas_v2(rpc::Null {}) @@ -432,13 +418,10 @@ Ok(()) } -async fn replica_share( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_share(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let name = matches.get_one::<String>("name").unwrap().to_owned(); - let share = parse_replica_protocol(matches.get_one::<String>("protocol")) - .context(GrpcStatus)?; + let share = + parse_replica_protocol(matches.get_one::<String>("protocol")).context(GrpcStatus)?; let allowed_hosts = matches .get_many::<String>("allowed-host")
.unwrap_or_default() @@ -473,10 +456,7 @@ async fn replica_share( Ok(()) } -async fn replica_stat( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_stat(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client .stat_replicas(rpc::Null {}) @@ -500,15 +480,13 @@ async fn replica_stat( return Ok(()); } - let header = - vec!["POOL", "NAME", "RDCNT", "WRCNT", "RDBYTES", "WRBYTES"]; + let header = vec!["POOL", "NAME", "RDCNT", "WRCNT", "RDBYTES", "WRBYTES"]; let table = replicas .iter() .map(|replica| { let stats = replica.stats.as_ref().unwrap(); let read = ctx.units(Byte::from_u64(stats.bytes_read)); - let written = - ctx.units(Byte::from_u64(stats.bytes_written)); + let written = ctx.units(Byte::from_u64(stats.bytes_written)); vec![ replica.pool.clone(), replica.uuid.clone(), diff --git a/io-engine/src/bin/io-engine-client/v0/snapshot_cli.rs b/io-engine/src/bin/io-engine-client/v0/snapshot_cli.rs index 0a8b80a8e..cfb2d7c7a 100644 --- a/io-engine/src/bin/io-engine-client/v0/snapshot_cli.rs +++ b/io-engine/src/bin/io-engine-client/v0/snapshot_cli.rs @@ -3,8 +3,7 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::ToColoredJson; @@ -16,8 +15,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { match matches.subcommand().unwrap() { ("create", args) => create(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -47,9 +45,7 @@ async fn create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client - .create_snapshot(rpc::CreateSnapshotRequest { - uuid: uuid.clone(), - }) + .create_snapshot(rpc::CreateSnapshotRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; diff --git a/io-engine/src/bin/io-engine-client/v1/bdev_cli.rs b/io-engine/src/bin/io-engine-client/v1/bdev_cli.rs index 236dffaf7..f9f2d4838 100644 --- a/io-engine/src/bin/io-engine-client/v1/bdev_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/bdev_cli.rs @@ -3,8 +3,7 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use byte_unit::Byte; use clap::{Arg, ArgMatches, Command}; @@ -21,8 +20,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("destroy", args) => destroy(ctx, args).await, ("unshare", args) => unshare(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -54,9 +52,7 @@ pub fn subcommands() -> Command { .long("allowed-host") .action(clap::ArgAction::Append) .required(false) - .help( - "NQN of hosts which are allowed to connect to the target", - ), + .help("NQN of hosts which are allowed to connect to the target"), ); let unshare = Command::new("unshare") @@ -78,9 +74,7 @@ async fn list(mut ctx: Context, _args: &ArgMatches) -> crate::Result<()> { let response = ctx .v1 .bdev - .list(v1rpc::bdev::ListBdevOptions { - name: None, - }) + .list(v1rpc::bdev::ListBdevOptions { name: None }) .await .context(GrpcStatus)?; @@ -112,8 +106,7 @@ async fn list(mut ctx: Context, _args: &ArgMatches) -> crate::Result<()> { let table = bdevs .iter() .map(|bdev| { - let cap = - 
Byte::from_u64(bdev.num_blocks * bdev.blk_size as u64); + let cap = Byte::from_u64(bdev.num_blocks * bdev.blk_size as u64); vec![ bdev.uuid.to_string(), bdev.num_blocks.to_string(), @@ -143,9 +136,7 @@ async fn create(mut ctx: Context, args: &ArgMatches) -> crate::Result<()> { let response = ctx .v1 .bdev - .create(v1rpc::bdev::CreateBdevRequest { - uri, - }) + .create(v1rpc::bdev::CreateBdevRequest { uri }) .await .context(GrpcStatus)?; @@ -178,9 +169,7 @@ async fn destroy(mut ctx: Context, args: &ArgMatches) -> crate::Result<()> { let bdevs = ctx .v1 .bdev - .list(v1rpc::bdev::ListBdevOptions { - name: None, - }) + .list(v1rpc::bdev::ListBdevOptions { name: None }) .await .context(GrpcStatus)? .into_inner(); @@ -196,9 +185,7 @@ async fn destroy(mut ctx: Context, args: &ArgMatches) -> crate::Result<()> { let _ = ctx .v1 .bdev - .unshare(v1rpc::bdev::BdevUnshareRequest { - name, - }) + .unshare(v1rpc::bdev::BdevUnshareRequest { name }) .await .context(GrpcStatus)?; @@ -293,9 +280,7 @@ async fn unshare(mut ctx: Context, args: &ArgMatches) -> crate::Result<()> { let response = ctx .v1 .bdev - .unshare(v1rpc::bdev::BdevUnshareRequest { - name: name.clone(), - }) + .unshare(v1rpc::bdev::BdevUnshareRequest { name: name.clone() }) .await .context(GrpcStatus)?; diff --git a/io-engine/src/bin/io-engine-client/v1/controller_cli.rs b/io-engine/src/bin/io-engine-client/v1/controller_cli.rs index ae8358280..5436def0a 100755 --- a/io-engine/src/bin/io-engine-client/v1/controller_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/controller_cli.rs @@ -33,8 +33,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("list", args) => list_controllers(ctx, args).await, ("stats", args) => controller_stats(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -51,10 +50,7 @@ fn controller_state_to_str(idx: i32) -> String { .to_string() } -async fn controller_stats( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn controller_stats(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let name = matches .get_one::("name") .cloned() @@ -125,10 +121,7 @@ async fn controller_stats( Ok(()) } -async fn list_controllers( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn list_controllers(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { let response = ctx .v1 .host diff --git a/io-engine/src/bin/io-engine-client/v1/device_cli.rs b/io-engine/src/bin/io-engine-client/v1/device_cli.rs index e0820251c..de86d7091 100644 --- a/io-engine/src/bin/io-engine-client/v1/device_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/device_cli.rs @@ -10,15 +10,15 @@ use snafu::ResultExt; use tonic::Status; pub fn subcommands() -> Command { - let list = - Command::new("list").about("List available block devices") - .arg( - Arg::new("all") - .short('a') - .long("all") - .action(clap::ArgAction::SetTrue) - .help("List all block devices (ie. also include devices currently in use)"), - ); + let list = Command::new("list") + .about("List available block devices") + .arg( + Arg::new("all") + .short('a') + .long("all") + .action(clap::ArgAction::SetTrue) + .help("List all block devices (ie. 
also include devices currently in use)"), + ); Command::new("device") .subcommand_required(true) @@ -31,8 +31,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { match matches.subcommand().unwrap() { ("list", args) => list_block_devices(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -45,17 +44,12 @@ fn get_partition_type(device: &v1rpc::host::BlockDevice) -> String { } } -async fn list_block_devices( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn list_block_devices(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let all = matches.get_flag("all"); let response = ctx .v1 .host - .list_block_devices(v1rpc::host::ListBlockDevicesRequest { - all, - }) + .list_block_devices(v1rpc::host::ListBlockDevicesRequest { all }) .await .context(GrpcStatus)?; @@ -101,11 +95,7 @@ async fn list_block_devices( device.devmajor.to_string(), device.devminor.to_string(), device.size.to_string(), - String::from(if device.available { - "yes" - } else { - "no" - }), + String::from(if device.available { "yes" } else { "no" }), device.model, part_type, fstype, diff --git a/io-engine/src/bin/io-engine-client/v1/jsonrpc_cli.rs b/io-engine/src/bin/io-engine-client/v1/jsonrpc_cli.rs index 8ca5ed5e4..4031d8e23 100644 --- a/io-engine/src/bin/io-engine-client/v1/jsonrpc_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/jsonrpc_cli.rs @@ -1,7 +1,6 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::ToColoredJson; @@ -27,10 +26,7 @@ pub fn subcommands() -> Command { .arg_required_else_help(true) } -pub async fn json_rpc_call( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +pub async fn json_rpc_call(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let method = matches .get_one::("method") .ok_or_else(|| ClientError::MissingValue { @@ -47,10 +43,7 @@ pub async fn json_rpc_call( let response = ctx .v1 .json - .json_rpc_call(v1rpc::json::JsonRpcRequest { - method, - params, - }) + .json_rpc_call(v1rpc::json::JsonRpcRequest { method, params }) .await .context(GrpcStatus)?; diff --git a/io-engine/src/bin/io-engine-client/v1/mod.rs b/io-engine/src/bin/io-engine-client/v1/mod.rs index 870090cd5..81c09d6c3 100644 --- a/io-engine/src/bin/io-engine-client/v1/mod.rs +++ b/io-engine/src/bin/io-engine-client/v1/mod.rs @@ -32,14 +32,16 @@ pub(super) async fn main_() -> crate::Result<()> { .default_value("http://127.0.0.1:10124") .value_name("HOST") .help("The URI of mayastor instance") - .global(true)) + .global(true), + ) .arg( Arg::new("quiet") .short('q') .action(clap::ArgAction::SetTrue) .global(true) .long("quiet") - .help("Do not print any output except for list records")) + .help("Do not print any output except for list records"), + ) .arg( Arg::new("verbose") .short('v') @@ -47,7 +49,8 @@ pub(super) async fn main_() -> crate::Result<()> { .action(clap::ArgAction::Count) .help("Verbose output") .conflicts_with("quiet") - .global(true)) + .global(true), + ) .arg( Arg::new("units") .short('u') @@ -56,7 +59,8 @@ pub(super) async fn main_() -> crate::Result<()> { .value_parser(["i", "d"]) .hide_possible_values(true) .next_line_help(true) - .help("Output with large units: i for kiB, etc. 
or d for kB, etc.")) + .help("Output with large units: i for kiB, etc. or d for kB, etc."), + ) .arg( Arg::new("output") .short('o') @@ -65,7 +69,7 @@ pub(super) async fn main_() -> crate::Result<()> { .default_value("default") .value_parser(["default", "json"]) .global(true) - .help("Output format.") + .help("Output format."), ) .subcommand(pool_cli::subcommands()) .subcommand(nexus_cli::subcommands()) @@ -96,9 +100,7 @@ pub(super) async fn main_() -> crate::Result<()> { ("pool", args) => pool_cli::handler(ctx, args).await, ("replica", args) => replica_cli::handler(ctx, args).await, ("rebuild", args) => rebuild_cli::handler(ctx, args).await, - ("snapshot-rebuild", args) => { - snapshot_rebuild_cli::handler(ctx, args).await - } + ("snapshot-rebuild", args) => snapshot_rebuild_cli::handler(ctx, args).await, ("snapshot", args) => snapshot_cli::handler(ctx, args).await, ("stats", args) => stats_cli::handler(ctx, args).await, ("controller", args) => controller_cli::handler(ctx, args).await, diff --git a/io-engine/src/bin/io-engine-client/v1/nexus_child_cli.rs b/io-engine/src/bin/io-engine-client/v1/nexus_child_cli.rs index 50fa323e1..0d3b08b53 100644 --- a/io-engine/src/bin/io-engine-client/v1/nexus_child_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/nexus_child_cli.rs @@ -3,8 +3,7 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::ToColoredJson; @@ -19,8 +18,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("online", args) => child_operation(ctx, args, 1).await, ("retire", args) => child_operation(ctx, args, 2).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -138,11 +136,7 @@ async fn fault(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { Ok(()) } -async fn child_operation( - mut ctx: Context, - matches: &ArgMatches, - action: i32, -) -> crate::Result<()> { +async fn child_operation(mut ctx: Context, matches: &ArgMatches, action: i32) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { diff --git a/io-engine/src/bin/io-engine-client/v1/nexus_cli.rs b/io-engine/src/bin/io-engine-client/v1/nexus_cli.rs index 58e4e592c..56bec120a 100644 --- a/io-engine/src/bin/io-engine-client/v1/nexus_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/nexus_cli.rs @@ -1,9 +1,7 @@ use super::nexus_child_cli; use crate::{ context::{Context, OutputFormat}, - parse_size, - ClientError, - GrpcStatus, + parse_size, ClientError, GrpcStatus, }; use byte_unit::Byte; use clap::{Arg, ArgMatches, Command}; @@ -15,78 +13,77 @@ use tonic::{Code, Status}; use uuid::Uuid; pub fn subcommands() -> Command { - let create = Command::new("create") - .about("Create a new nexus device") - .arg( - Arg::new("uuid") - .required(true) - .index(1) - .help("uuid for the nexus, if uuid is not known please provide \"\" to autogenerate"), - ) - .arg( - Arg::new("size") - .required(true) - .index(2) - .help("size with optional unit suffix"), - ) - .arg( - Arg::new("children") - .required(true) - .index(3) - .action(clap::ArgAction::Append) - .help("list of children to add"), - ) - .arg( - Arg::new("name") - .required(false) - .long("name") - .help("name of the nexus"), - ) - .arg( - Arg::new("min-cntlid") - .required(false) - .default_value("1") - 
.value_parser(clap::value_parser!(u32)) - .long("min-cntlid") - .help("minimum NVMe controller ID for sharing over NVMf"), - ) - .arg( - Arg::new("max-cntlid") - .required(false) - .value_parser(clap::value_parser!(u32)) - .default_value("65519") - .long("max-cntlid") - .help("maximum NVMe controller ID"), - ) - .arg( - Arg::new("resv-key") - .required(false) - .value_parser(clap::value_parser!(u64)) - .default_value("0") - .long("resv-key") - .help("NVMe reservation key for children"), - ) - .arg( - Arg::new("preempt-key") - .required(false) - .value_parser(clap::value_parser!(u64)) - .default_value("0") - .long("preempt-key") - .help("NVMe preempt key for children, 0 for no preemption"), - ) - .arg(Arg::new("resv-type") - .required(false) - .default_value("") - .long("resv-type") - .help("Defines Nvme reservation type.") - ) - .arg( - Arg::new("nexus-info-key") - .required(false) - .default_value("") - .long("nexus-info-key") - .help("Key used to persist the NexusInfo structure to the persistent store"), - ); + let create = + Command::new("create") + .about("Create a new nexus device") + .arg(Arg::new("uuid").required(true).index(1).help( + "uuid for the nexus, if uuid is not known please provide \"\" to autogenerate", + )) + .arg( + Arg::new("size") + .required(true) + .index(2) + .help("size with optional unit suffix"), + ) + .arg( + Arg::new("children") + .required(true) + .index(3) + .action(clap::ArgAction::Append) + .help("list of children to add"), + ) + .arg( + Arg::new("name") + .required(false) + .long("name") + .help("name of the nexus"), + ) + .arg( + Arg::new("min-cntlid") + .required(false) + .default_value("1") + .value_parser(clap::value_parser!(u32)) + .long("min-cntlid") + .help("minimum NVMe controller ID for sharing over NVMf"), + ) + .arg( + Arg::new("max-cntlid") + .required(false) + .value_parser(clap::value_parser!(u32)) + .default_value("65519") + .long("max-cntlid") + .help("maximum NVMe controller ID"), + ) + .arg( + Arg::new("resv-key") + .required(false) + .value_parser(clap::value_parser!(u64)) + .default_value("0") + .long("resv-key") + .help("NVMe reservation key for children"), + ) + .arg( + Arg::new("preempt-key") + .required(false) + .value_parser(clap::value_parser!(u64)) + .default_value("0") + .long("preempt-key") + .help("NVMe preempt key for children, 0 for no preemption"), + ) + .arg( + Arg::new("resv-type") + .required(false) + .default_value("") + .long("resv-type") + .help("Defines Nvme reservation type."), + ) + .arg( + Arg::new("nexus-info-key") + .required(false) + .default_value("") + .long("nexus-info-key") + .help("Key used to persist the NexusInfo structure to the persistent store"), + ); let destroy = Command::new("destroy") .about("destroy the nexus with given name") @@ -108,19 +105,32 @@ pub fn subcommands() -> Command { let publish = Command::new("publish") .about("publish the nexus") - .arg(Arg::new("uuid").required(true).index(1) - .help("uuid for the nexus")) - .arg(Arg::new("key").required(false).index(2) - .help("crypto key to use")) + .arg( + Arg::new("uuid") + .required(true) + .index(1) + .help("uuid for the nexus"), + ) + .arg( + Arg::new("key") + .required(false) + .index(2) + .help("crypto key to use"), + ) .arg( Arg::new("allowed-host") .long("allowed-host") - .action(clap::ArgAction::Append) .required(false) - .help("NQN of hosts which are allowed to connect to the target")) - .arg(Arg::new("protocol").short('p').long("protocol").value_name("PROTOCOL") - .help("Name of a protocol (nvmf) used for publishing the nexus 
remotely")); + .help("NQN of hosts which are allowed to connect to the target"), + ) + .arg( + Arg::new("protocol") + .short('p') + .long("protocol") + .value_name("PROTOCOL") + .help("Name of a protocol (nvmf) used for publishing the nexus remotely"), + ); let unpublish = Command::new("unpublish").about("unpublish the nexus").arg( Arg::new("uuid") @@ -239,8 +249,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("remove", args) => nexus_remove(ctx, args).await, ("child", args) => nexus_child_cli::handler(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -258,11 +267,13 @@ fn nexus_create_parse( uuid = Uuid::new_v4().to_string() } let size = - parse_size(matches.get_one::("size").ok_or_else(|| { - ClientError::MissingValue { - field: "size".to_string(), - } - })?) + parse_size( + matches + .get_one::("size") + .ok_or_else(|| ClientError::MissingValue { + field: "size".to_string(), + })?, + ) .map_err(|s| Status::invalid_argument(format!("Bad size '{s}'"))) .context(GrpcStatus)?; let children = matches @@ -276,10 +287,7 @@ fn nexus_create_parse( Ok((uuid, size, children)) } -async fn nexus_create( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { // let (uuid, size, children) = nexus_create_parse(matches)?; let (uuid, size, children) = nexus_create_parse(matches)?; let name = matches @@ -302,18 +310,10 @@ async fn nexus_create( let resv_type = match resv_type.as_str() { "Reserved" => Some(NvmeReservation::Reserved as i32), "WriteExclusive" => Some(NvmeReservation::WriteExclusive as i32), - "WriteExclusiveRegsOnly" => { - Some(NvmeReservation::WriteExclusiveRegsOnly as i32) - } - "ExclusiveAccessRegsOnly" => { - Some(NvmeReservation::ExclusiveAccessRegsOnly as i32) - } - "ExclusiveAccessAllRegs" => { - Some(NvmeReservation::ExclusiveAccessAllRegs as i32) - } - "WriteExclusiveAllRegs" => { - Some(NvmeReservation::WriteExclusiveAllRegs as i32) - } + "WriteExclusiveRegsOnly" => Some(NvmeReservation::WriteExclusiveRegsOnly as i32), + "ExclusiveAccessRegsOnly" => Some(NvmeReservation::ExclusiveAccessRegsOnly as i32), + "ExclusiveAccessAllRegs" => Some(NvmeReservation::ExclusiveAccessAllRegs as i32), + "WriteExclusiveAllRegs" => Some(NvmeReservation::WriteExclusiveAllRegs as i32), _ => None, }; @@ -354,17 +354,12 @@ async fn nexus_create( Ok(()) } -async fn nexus_shutdown( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_shutdown(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches.get_one::("uuid").unwrap().to_string(); let response = ctx .v1 .nexus - .shutdown_nexus(v1::nexus::ShutdownNexusRequest { - uuid: uuid.clone(), - }) + .shutdown_nexus(v1::nexus::ShutdownNexusRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; @@ -386,18 +381,13 @@ async fn nexus_shutdown( Ok(()) } -async fn nexus_destroy( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_destroy(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches.get_one::("uuid").unwrap().to_string(); let _response = ctx .v1 .nexus - .destroy_nexus(v1::nexus::DestroyNexusRequest { - uuid: uuid.clone(), - }) + .destroy_nexus(v1::nexus::DestroyNexusRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; @@ 
-428,10 +418,7 @@ async fn nexus_destroy( Ok(()) } -async fn nexus_list( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_list(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let response = ctx .v1 .nexus @@ -486,8 +473,7 @@ async fn nexus_list( row }) .collect(); - let mut hdr = - vec!["NAME", "UUID", ">SIZE", "STATE", ">REBUILDS", "PATH"]; + let mut hdr = vec!["NAME", "UUID", ">SIZE", "STATE", ">REBUILDS", "PATH"]; if show_child { hdr.push("CHILDREN"); } @@ -498,10 +484,7 @@ async fn nexus_list( Ok(()) } -async fn nexus_children_2( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_children_2(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -547,12 +530,10 @@ async fn nexus_children_2( .children .iter() .map(|c| { - let state = child_state_to_str_v1( - v1::nexus::ChildState::try_from(c.state).unwrap(), - ); + let state = + child_state_to_str_v1(v1::nexus::ChildState::try_from(c.state).unwrap()); let reason = child_reason_to_str_v1( - v1::nexus::ChildStateReason::try_from(c.state_reason) - .unwrap(), + v1::nexus::ChildStateReason::try_from(c.state_reason).unwrap(), ); let fault_timestamp = match &c.fault_timestamp { Some(d) => d.to_string(), @@ -566,20 +547,14 @@ async fn nexus_children_2( ] }) .collect(); - ctx.print_list( - vec!["NAME", "STATE", "REASON", "LAST_FAULTED_AT"], - table, - ); + ctx.print_list(vec!["NAME", "STATE", "REASON", "LAST_FAULTED_AT"], table); } }; Ok(()) } -async fn nexus_resize( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_resize(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -588,11 +563,13 @@ async fn nexus_resize( .to_owned(); let requested_size = - parse_size(matches.get_one::("size").ok_or_else(|| { - ClientError::MissingValue { - field: "size".to_string(), - } - })?) 
+ parse_size( + matches + .get_one::("size") + .ok_or_else(|| ClientError::MissingValue { + field: "size".to_string(), + })?, + ) .map_err(|s| Status::invalid_argument(format!("Bad size '{s}'"))) .context(GrpcStatus)?; @@ -624,10 +601,7 @@ async fn nexus_resize( Ok(()) } -async fn nexus_publish( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_publish(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -639,18 +613,17 @@ async fn nexus_publish( .cloned() .unwrap_or_default(); - let protocol = - match matches.get_one::("protocol").map(|s| s.as_str()) { - None => v1::common::ShareProtocol::Nvmf as i32, - Some("nvmf") => v1::common::ShareProtocol::Nvmf as i32, - Some(_) => { - return Err(Status::new( - Code::Internal, - "Invalid value of share protocol".to_owned(), - )) - .context(GrpcStatus); - } - }; + let protocol = match matches.get_one::("protocol").map(|s| s.as_str()) { + None => v1::common::ShareProtocol::Nvmf as i32, + Some("nvmf") => v1::common::ShareProtocol::Nvmf as i32, + Some(_) => { + return Err(Status::new( + Code::Internal, + "Invalid value of share protocol".to_owned(), + )) + .context(GrpcStatus); + } + }; let allowed_hosts = matches .get_many::("allowed-host") .unwrap_or_default() @@ -690,10 +663,7 @@ async fn nexus_publish( Ok(()) } -async fn nexus_unpublish( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_unpublish(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -703,9 +673,7 @@ async fn nexus_unpublish( let response = ctx .v1 .nexus - .unpublish_nexus(v1::nexus::UnpublishNexusRequest { - uuid: uuid.clone(), - }) + .unpublish_nexus(v1::nexus::UnpublishNexusRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; @@ -727,10 +695,7 @@ async fn nexus_unpublish( Ok(()) } -async fn nexus_nvme_ana_state( - ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_nvme_ana_state(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches.get_one::("uuid").unwrap().to_string(); let ana_state = matches .get_one::("state") @@ -743,16 +708,11 @@ async fn nexus_nvme_ana_state( } } -async fn nexus_get_nvme_ana_state( - mut ctx: Context, - uuid: String, -) -> crate::Result<()> { +async fn nexus_get_nvme_ana_state(mut ctx: Context, uuid: String) -> crate::Result<()> { let resp = ctx .v1 .nexus - .get_nvme_ana_state(v1::nexus::GetNvmeAnaStateRequest { - uuid: uuid.clone(), - }) + .get_nvme_ana_state(v1::nexus::GetNvmeAnaStateRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; ctx.v1(ana_state_idx_to_str(resp.get_ref().ana_state)); @@ -788,10 +748,7 @@ async fn nexus_set_nvme_ana_state( Ok(()) } -async fn nexus_add( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_add(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -839,10 +796,7 @@ async fn nexus_add( Ok(()) } -async fn nexus_remove( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn nexus_remove(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -890,9 +844,7 @@ fn ana_state_idx_to_str(idx: i32) -> &'static str { v1::nexus::NvmeAnaState::NvmeAnaOptimizedState => 
"optimized", v1::nexus::NvmeAnaState::NvmeAnaNonOptimizedState => "non_optimized", v1::nexus::NvmeAnaState::NvmeAnaInaccessibleState => "inaccessible", - v1::nexus::NvmeAnaState::NvmeAnaPersistentLossState => { - "persistent_loss" - } + v1::nexus::NvmeAnaState::NvmeAnaPersistentLossState => "persistent_loss", v1::nexus::NvmeAnaState::NvmeAnaChangeState => "change", } } diff --git a/io-engine/src/bin/io-engine-client/v1/perf_cli.rs b/io-engine/src/bin/io-engine-client/v1/perf_cli.rs index 4dcf20d45..7af55fa25 100644 --- a/io-engine/src/bin/io-engine-client/v1/perf_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/perf_cli.rs @@ -28,16 +28,12 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { match matches.subcommand().unwrap() { ("resource", args) => get_resource_usage(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } // TODO: There's no rpc for this API in v1. -async fn get_resource_usage( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn get_resource_usage(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { ctx.v2("Requesting resource usage statistics"); let mut table: Vec> = Vec::new(); diff --git a/io-engine/src/bin/io-engine-client/v1/pool_cli.rs b/io-engine/src/bin/io-engine-client/v1/pool_cli.rs index cfb7abb58..e68bef2ab 100644 --- a/io-engine/src/bin/io-engine-client/v1/pool_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/pool_cli.rs @@ -1,8 +1,6 @@ use crate::{ context::{Context, OutputFormat}, - parse_size, - ClientError, - GrpcStatus, + parse_size, ClientError, GrpcStatus, }; use byte_unit::Byte; use clap::{Arg, ArgMatches, Command}; @@ -199,8 +197,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("grow", args) => grow(ctx, args).await, ("list", args) => list(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -234,10 +231,8 @@ async fn create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { Some(s) => match parse_size(s) { Ok(s) => Some(s.as_u64() as u32), Err(err) => { - return Err(Status::invalid_argument(format!( - "Bad size '{err}'" - ))) - .context(GrpcStatus); + return Err(Status::invalid_argument(format!("Bad size '{err}'"))) + .context(GrpcStatus); } }, None => None, @@ -265,9 +260,7 @@ async fn create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { disks: disks_list, pooltype: v1rpc::pool::PoolType::from(pooltype) as i32, cluster_size, - md_args: Some(v1rpc::pool::PoolMetadataArgs { - md_resv_ratio, - }), + md_args: Some(v1rpc::pool::PoolMetadataArgs { md_resv_ratio }), }) .await .context(GrpcStatus)?; diff --git a/io-engine/src/bin/io-engine-client/v1/rebuild_cli.rs b/io-engine/src/bin/io-engine-client/v1/rebuild_cli.rs index c33086189..9c9f28573 100644 --- a/io-engine/src/bin/io-engine-client/v1/rebuild_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/rebuild_cli.rs @@ -3,8 +3,7 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::ToColoredJson; @@ -24,8 +23,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("progress", args) => progress(ctx, args).await, 
("history", args) => history(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -359,10 +357,7 @@ async fn state(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ); } OutputFormat::Default => { - ctx.print_list( - vec!["state"], - vec![vec![response.get_ref().state.clone()]], - ); + ctx.print_list(vec!["state"], vec![vec![response.get_ref().state.clone()]]); } }; @@ -379,9 +374,7 @@ async fn history(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let response = ctx .v1 .nexus - .get_rebuild_history(v1::nexus::RebuildHistoryRequest { - uuid: uuid.clone(), - }) + .get_rebuild_history(v1::nexus::RebuildHistoryRequest { uuid: uuid.clone() }) .await .context(GrpcStatus)?; diff --git a/io-engine/src/bin/io-engine-client/v1/replica_cli.rs b/io-engine/src/bin/io-engine-client/v1/replica_cli.rs index 3d180138c..47889678c 100644 --- a/io-engine/src/bin/io-engine-client/v1/replica_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/replica_cli.rs @@ -1,10 +1,7 @@ use super::pool_cli; use crate::{ context::{Context, OutputFormat}, - parse_size, - ClientError, - GrpcParseStatus, - GrpcStatus, + parse_size, ClientError, GrpcParseStatus, GrpcStatus, }; use byte_unit::Byte; use clap::{Arg, ArgMatches, Command}; @@ -19,45 +16,50 @@ pub fn subcommands() -> Command { .about("Create replica on pool") .arg( Arg::new("name") - .required(true).index(1) - .help("Replica name")) + .required(true) + .index(1) + .help("Replica name"), + ) .arg( Arg::new("uuid") - .required(true).index(2) - .help("Unique replica uuid")) + .required(true) + .index(2) + .help("Unique replica uuid"), + ) .arg( Arg::new("pooluuid") .required(true) .index(3) - .help("Storage pool name or UUID")) + .help("Storage pool name or UUID"), + ) .arg( Arg::new("size") .short('s') .long("size") .required(true) .value_name("NUMBER") - .help("Size of the replica")) + .help("Size of the replica"), + ) .arg( Arg::new("protocol") .short('p') .long("protocol") - .value_name("PROTOCOL") - .help("Name of a protocol (nvmf) used for sharing the replica (default none)")) + .help("Name of a protocol (nvmf) used for sharing the replica (default none)"), + ) .arg( Arg::new("thin") .short('t') .long("thin") .action(clap::ArgAction::SetTrue) - .help("Whether replica is thin provisioned (default false)")) + .help("Whether replica is thin provisioned (default false)"), + ) .arg( Arg::new("allowed-host") .long("allowed-host") .action(clap::ArgAction::Append) .required(false) - .help( - "NQN of hosts which are allowed to connect to the target", - ), + .help("NQN of hosts which are allowed to connect to the target"), ) .arg( Arg::new("type") @@ -91,24 +93,27 @@ pub fn subcommands() -> Command { .help("Name of the pool where replica resides"), ); - let share = Command::new("share").about("Share replica over specified protocol") + let share = Command::new("share") + .about("Share replica over specified protocol") .arg( Arg::new("uuid") .required(true) .index(1) - .help("Replica uuid")) + .help("Replica uuid"), + ) .arg( - Arg::new("protocol") - .required(true) - .index(2) - .help("Name of a protocol (nvmf) used for sharing or \"none\" to unshare the replica")) + Arg::new("protocol").required(true).index(2).help( + "Name of a protocol (nvmf) used for sharing or \"none\" to unshare the replica", + ), + ) .arg( Arg::new("allowed-host") .long("allowed-host") - 
.action(clap::ArgAction::Append) .required(false) - .help("Name of a protocol (nvmf) used for sharing or \"none\" to unshare the replica") + .help( + "Name of a protocol (nvmf) used for sharing or \"none\" to unshare the replica", + ), ); let unshare = Command::new("unshare").about("Unshare replica").arg( Arg::new("uuid") @@ -162,16 +167,12 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("resize", args) => replica_resize(ctx, args).await, ("stats", args) => replica_stat(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } -async fn replica_create( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let name = matches .get_one::("name") .ok_or_else(|| ClientError::MissingValue { @@ -191,16 +192,18 @@ async fn replica_create( })? .to_owned(); let size = - parse_size(matches.get_one::("size").ok_or_else(|| { - ClientError::MissingValue { - field: "size".to_string(), - } - })?) + parse_size( + matches + .get_one::("size") + .ok_or_else(|| ClientError::MissingValue { + field: "size".to_string(), + })?, + ) .map_err(|s| Status::invalid_argument(format!("Bad size '{s}'"))) .context(GrpcStatus)?; let thin = matches.get_flag("thin"); - let share = parse_replica_protocol(matches.get_one::("protocol")) - .context(GrpcStatus)?; + let share = + parse_replica_protocol(matches.get_one::("protocol")).context(GrpcStatus)?; let allowed_hosts = matches .get_many::("allowed-host") .unwrap_or_default() @@ -243,10 +246,7 @@ async fn replica_create( Ok(()) } -async fn replica_destroy( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_destroy(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -255,16 +255,12 @@ async fn replica_destroy( .to_owned(); let pool = match matches.get_one::("pool-uuid") { - Some(uuid) => { - Some(v1_rpc::replica::destroy_replica_request::Pool::PoolUuid( - uuid.to_string(), - )) - } - None => matches.get_one::("pool-name").map(|name| { - v1_rpc::replica::destroy_replica_request::Pool::PoolName( - name.to_string(), - ) - }), + Some(uuid) => Some(v1_rpc::replica::destroy_replica_request::Pool::PoolUuid( + uuid.to_string(), + )), + None => matches + .get_one::("pool-name") + .map(|name| v1_rpc::replica::destroy_replica_request::Pool::PoolName(name.to_string())), }; let _ = ctx @@ -287,10 +283,7 @@ async fn replica_destroy( Ok(()) } -async fn replica_list( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_list(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let pooltype = matches .get_many::("type") .unwrap_or_default() @@ -339,10 +332,8 @@ async fn replica_list( let usage = r.usage.as_ref().unwrap(); let proto = replica_protocol_to_str(r.share); let size = ctx.units(Byte::from_u64(r.size)); - let capacity = - ctx.units(Byte::from_u64(usage.capacity_bytes)); - let allocated = - ctx.units(Byte::from_u64(usage.allocated_bytes)); + let capacity = ctx.units(Byte::from_u64(usage.capacity_bytes)); + let allocated = ctx.units(Byte::from_u64(usage.allocated_bytes)); vec![ r.poolname.clone(), r.name.clone(), @@ -387,13 +378,10 @@ async fn replica_list( Ok(()) } -async fn replica_share( - mut ctx: Context, - matches: 
&ArgMatches, -) -> crate::Result<()> { +async fn replica_share(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches.get_one::("uuid").unwrap().to_owned(); - let share = parse_replica_protocol(matches.get_one::("protocol")) - .context(GrpcStatus)?; + let share = + parse_replica_protocol(matches.get_one::("protocol")).context(GrpcStatus)?; let allowed_hosts = matches .get_many::("allowed-host") .unwrap_or_default() @@ -427,18 +415,13 @@ async fn replica_share( Ok(()) } -async fn replica_unshare( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_unshare(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches.get_one::("uuid").unwrap().to_owned(); let response = ctx .v1 .replica - .unshare_replica(v1_rpc::replica::UnshareReplicaRequest { - uuid, - }) + .unshare_replica(v1_rpc::replica::UnshareReplicaRequest { uuid }) .await .context(GrpcStatus)?; @@ -460,10 +443,7 @@ async fn replica_unshare( Ok(()) } -async fn replica_resize( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_resize(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -472,11 +452,13 @@ async fn replica_resize( .to_owned(); let requested_size = - parse_size(matches.get_one::("size").ok_or_else(|| { - ClientError::MissingValue { - field: "size".to_string(), - } - })?) + parse_size( + matches + .get_one::("size") + .ok_or_else(|| ClientError::MissingValue { + field: "size".to_string(), + })?, + ) .map_err(|s| Status::invalid_argument(format!("Bad size '{s}'"))) .context(GrpcStatus)?; @@ -501,10 +483,7 @@ async fn replica_resize( } // TODO : There's no v1 rpc for stat. 
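// Until one is added, replica_stat below falls back to the v0 client
// (ctx.client.stat_replicas) instead of ctx.v1.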
-async fn replica_stat( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_stat(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { let response = ctx .client .stat_replicas(rpc::Null {}) @@ -528,15 +507,13 @@ async fn replica_stat( return Ok(()); } - let header = - vec!["POOL", "NAME", "RDCNT", "WRCNT", "RDBYTES", "WRBYTES"]; + let header = vec!["POOL", "NAME", "RDCNT", "WRCNT", "RDBYTES", "WRBYTES"]; let table = replicas .iter() .map(|replica| { let stats = replica.stats.as_ref().unwrap(); let read = ctx.units(Byte::from_u64(stats.bytes_read)); - let written = - ctx.units(Byte::from_u64(stats.bytes_written)); + let written = ctx.units(Byte::from_u64(stats.bytes_written)); vec![ replica.pool.clone(), replica.uuid.clone(), diff --git a/io-engine/src/bin/io-engine-client/v1/snapshot_cli.rs b/io-engine/src/bin/io-engine-client/v1/snapshot_cli.rs index 07d4de208..8d473a77b 100644 --- a/io-engine/src/bin/io-engine-client/v1/snapshot_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/snapshot_cli.rs @@ -3,8 +3,7 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::ToColoredJson; @@ -21,8 +20,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("create_clone", args) => create_clone(ctx, args).await, ("list_clone", args) => list_clone(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -154,13 +152,12 @@ pub fn subcommands() -> Command { .index(3) .help("Clone uuid"), ); - let list_clone = - Command::new("list_clone").about("List clones details").arg( - Arg::new("snapshot_uuid") - .required(false) - .index(1) - .help("Snapshot uuid"), - ); + let list_clone = Command::new("list_clone").about("List clones details").arg( + Arg::new("snapshot_uuid") + .required(false) + .index(1) + .help("Snapshot uuid"), + ); Command::new("snapshot") .subcommand_required(true) .arg_required_else_help(true) @@ -175,10 +172,7 @@ pub fn subcommands() -> Command { /// For multiple replicas, replica_uuid will be given in a single string, /// separated by comma. Same for snapshot_uuid. replica_uuid and snapshot_uuid /// will be matched by index. 
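/// For example (illustrative UUIDs), replica_uuid "r-1,r-2" paired with
/// snapshot_uuid "s-1,s-2" takes snapshot "s-1" of replica "r-1" and
/// snapshot "s-2" of replica "r-2".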
-async fn create_for_nexus( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> {
+async fn create_for_nexus(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> {
let nexus_uuid = matches .get_one::<String>("nexus_uuid") .ok_or_else(|| ClientError::MissingValue {
@@ -226,22 +220,22 @@ async fn create_for_nexus( .collect(); if replica_uuid.len() != snapshot_uuid.len() { return Err(ClientError::MissingValue {
- field: "Parameter count doesn't match between replica_uuid and snapshot_uuid".to_string() - });
+ field: "Parameter count doesn't match between replica_uuid and snapshot_uuid" + .to_string(), + });
}
- let replicas: Vec<v1_rpc::snapshot::NexusCreateSnapshotReplicaDescriptor> = - replica_uuid - .into_iter() - .zip(snapshot_uuid) - .map(|(a, b)| { - v1_rpc::snapshot::NexusCreateSnapshotReplicaDescriptor { - replica_uuid: a, - snapshot_uuid: Some(b), - skip: false, - } - }) - .collect();
+ let replicas: Vec<v1_rpc::snapshot::NexusCreateSnapshotReplicaDescriptor> = replica_uuid + .into_iter() + .zip(snapshot_uuid) + .map( + |(a, b)| v1_rpc::snapshot::NexusCreateSnapshotReplicaDescriptor { + replica_uuid: a, + snapshot_uuid: Some(b), + skip: false, + }, + ) + .collect();
let request = v1_rpc::snapshot::NexusCreateSnapshotRequest { nexus_uuid: nexus_uuid.clone(),
@@ -300,10 +294,7 @@ async fn create_for_nexus( } /// Replica Snapshot Create CLI Function.
-async fn create_for_replica( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> {
+async fn create_for_replica(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> {
let replica_uuid = matches .get_one::<String>("replica_uuid") .ok_or_else(|| ClientError::MissingValue {
@@ -492,15 +483,11 @@ async fn destroy(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { })? .to_owned(); let pool = match matches.get_one::<String>("pool-uuid") {
- Some(uuid) => { - Some(v1_rpc::snapshot::destroy_snapshot_request::Pool::PoolUuid( - uuid.to_string(), - )) - }
+ Some(uuid) => Some(v1_rpc::snapshot::destroy_snapshot_request::Pool::PoolUuid( + uuid.to_string(), + )),
None => matches.get_one::<String>("pool-name").map(|name| { v1_rpc::snapshot::destroy_snapshot_request::Pool::PoolName(name.to_string()) }), }; let _ = ctx
@@ -523,10 +510,7 @@ async fn destroy(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { Ok(()) } /// CLI to create snapshot clone.
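/// Illustrative invocation (hypothetical UUIDs and clone name):
/// io-engine-client snapshot create_clone <snapshot-uuid> clone-1 <clone-uuid>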
-async fn create_clone( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn create_clone(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let snapshot_uuid = matches .get_one::("snapshot_uuid") .ok_or_else(|| ClientError::MissingValue { @@ -599,16 +583,11 @@ async fn create_clone( Ok(()) } -async fn list_clone( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn list_clone(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let snapshot_uuid = matches .get_one::("snapshot_uuid") .map(|s| s.to_owned()); - let request = v1_rpc::snapshot::ListSnapshotCloneRequest { - snapshot_uuid, - }; + let request = v1_rpc::snapshot::ListSnapshotCloneRequest { snapshot_uuid }; let response = ctx .v1 diff --git a/io-engine/src/bin/io-engine-client/v1/snapshot_rebuild_cli.rs b/io-engine/src/bin/io-engine-client/v1/snapshot_rebuild_cli.rs index d6d36fec8..d1a142721 100644 --- a/io-engine/src/bin/io-engine-client/v1/snapshot_rebuild_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/snapshot_rebuild_cli.rs @@ -3,8 +3,7 @@ use crate::{ context::{Context, OutputFormat}, - ClientError, - GrpcStatus, + ClientError, GrpcStatus, }; use clap::{Arg, ArgMatches, Command}; use colored_json::ToColoredJson; @@ -18,8 +17,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("destroy", args) => destroy(ctx, args).await, ("list", args) => list(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -84,19 +82,17 @@ async fn create(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let response = ctx .v1 .snapshot_rebuild - .create_snapshot_rebuild( - v1::snapshot_rebuild::CreateSnapshotRebuildRequest { - replica_uuid: uuid.to_string(), - uuid, - snapshot_uuid: "".to_string(), - replica_uri: "".to_string(), - snapshot_uri: uri, - resume: false, - bitmap: None, - use_bitmap: false, - error_policy: None, - }, - ) + .create_snapshot_rebuild(v1::snapshot_rebuild::CreateSnapshotRebuildRequest { + replica_uuid: uuid.to_string(), + uuid, + snapshot_uuid: "".to_string(), + replica_uri: "".to_string(), + snapshot_uri: uri, + resume: false, + bitmap: None, + use_bitmap: false, + error_policy: None, + }) .await .context(GrpcStatus)?; match ctx.output { @@ -129,11 +125,9 @@ async fn destroy(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let _response = ctx .v1 .snapshot_rebuild - .destroy_snapshot_rebuild( - v1::snapshot_rebuild::DestroySnapshotRebuildRequest { - uuid: uuid.to_string(), - }, - ) + .destroy_snapshot_rebuild(v1::snapshot_rebuild::DestroySnapshotRebuildRequest { + uuid: uuid.to_string(), + }) .await .context(GrpcStatus)?; println!("Snapshot Rebuild {uuid} deleted"); @@ -147,13 +141,11 @@ async fn list(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let response = ctx .v1 .snapshot_rebuild - .list_snapshot_rebuild( - v1::snapshot_rebuild::ListSnapshotRebuildRequest { - uuid: replica_uuid, - replica_uuid: None, - snapshot_uuid: None, - }, - ) + .list_snapshot_rebuild(v1::snapshot_rebuild::ListSnapshotRebuildRequest { + uuid: replica_uuid, + replica_uuid: None, + snapshot_uuid: None, + }) .await .context(GrpcStatus)?; match ctx.output { @@ -183,12 +175,8 @@ async fn list(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { r.total.to_string(), r.rebuilt.to_string(), r.remaining.to_string(), - 
r.start_timestamp - .map(|s| s.to_string()) - .unwrap_or_default(), - r.end_timestamp - .map(|s| s.to_string()) - .unwrap_or_default(), + r.start_timestamp.map(|s| s.to_string()).unwrap_or_default(), + r.end_timestamp.map(|s| s.to_string()).unwrap_or_default(), ] }) .collect(); diff --git a/io-engine/src/bin/io-engine-client/v1/stats_cli.rs b/io-engine/src/bin/io-engine-client/v1/stats_cli.rs index 96fe4e038..f3741e53b 100644 --- a/io-engine/src/bin/io-engine-client/v1/stats_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/stats_cli.rs @@ -50,8 +50,7 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("replica", args) => replica(ctx, args).await, ("reset", _) => reset(ctx).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } @@ -95,9 +94,8 @@ async fn pool(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { .iter() .map(|stats| { let tick_rate = stats.tick_rate; - let ticks_time = |ticks| -> String { - ticks_to_time(ticks, tick_rate).to_string() - }; + let ticks_time = + |ticks| -> String { ticks_to_time(ticks, tick_rate).to_string() }; vec![ stats.name.clone(), stats.num_read_ops.to_string(), @@ -179,9 +177,8 @@ async fn nexus(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { .iter() .map(|stats| { let tick_rate = stats.tick_rate; - let ticks_time = |ticks| -> String { - ticks_to_time(ticks, tick_rate).to_string() - }; + let ticks_time = + |ticks| -> String { ticks_to_time(ticks, tick_rate).to_string() }; vec![ stats.name.clone(), stats.num_read_ops.to_string(), @@ -246,8 +243,7 @@ async fn replica(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ); } OutputFormat::Default => { - let stats: &Vec = - &response.get_ref().stats; + let stats: &Vec = &response.get_ref().stats; if stats.is_empty() { if let Some(name) = replica_name { ctx.v1(&format!( @@ -265,9 +261,8 @@ async fn replica(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { .map(|p| { let io_stat = p.stats.as_ref().unwrap(); let tick_rate = io_stat.tick_rate; - let ticks_time = |ticks| -> String { - ticks_to_time(ticks, tick_rate).to_string() - }; + let ticks_time = + |ticks| -> String { ticks_to_time(ticks, tick_rate).to_string() }; vec![ io_stat.name.clone(), io_stat.num_read_ops.to_string(), diff --git a/io-engine/src/bin/io-engine-client/v1/test_cli.rs b/io-engine/src/bin/io-engine-client/v1/test_cli.rs index 46a02744d..586aa307c 100644 --- a/io-engine/src/bin/io-engine-client/v1/test_cli.rs +++ b/io-engine/src/bin/io-engine-client/v1/test_cli.rs @@ -1,8 +1,6 @@ use crate::{ context::{Context, OutputFormat}, - parse_size, - ClientError, - GrpcStatus, + parse_size, ClientError, GrpcStatus, }; use byte_unit::Byte; use clap::{Arg, ArgMatches, Command}; @@ -150,16 +148,12 @@ pub async fn handler(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { ("features", args) => features(ctx, args).await, ("wipe", args) => wipe(ctx, args).await, (cmd, _) => { - Err(Status::not_found(format!("command {cmd} does not exist"))) - .context(GrpcStatus) + Err(Status::not_found(format!("command {cmd} does not exist"))).context(GrpcStatus) } } } -async fn features( - mut ctx: Context, - _matches: &ArgMatches, -) -> crate::Result<()> { +async fn features(mut ctx: Context, _matches: &ArgMatches) -> crate::Result<()> { let response = ctx.v1.test.get_features(()).await.context(GrpcStatus)?; let features = 
response.into_inner(); match ctx.output { @@ -188,10 +182,7 @@ async fn wipe(ctx: Context, matches: &ArgMatches) -> crate::Result<()> { } } -async fn replica_wipe( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn replica_wipe(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let uuid = matches .get_one::("uuid") .ok_or_else(|| ClientError::MissingValue { @@ -203,16 +194,17 @@ async fn replica_wipe( Some(uuid) => Some(v1_rpc::test::wipe_replica_request::Pool::PoolUuid( uuid.to_string(), )), - None => matches.get_one::("pool-name").map(|name| { - v1_rpc::test::wipe_replica_request::Pool::PoolName(name.to_string()) - }), + None => matches + .get_one::("pool-name") + .map(|name| v1_rpc::test::wipe_replica_request::Pool::PoolName(name.to_string())), }; - let method_str = matches.get_one::("method").ok_or_else(|| { - ClientError::MissingValue { - field: "method".to_string(), - } - })?; + let method_str = + matches + .get_one::("method") + .ok_or_else(|| ClientError::MissingValue { + field: "method".to_string(), + })?; let method = WipeMethod::from_str(method_str) .map_err(|e| Status::invalid_argument(e.to_string())) .context(GrpcStatus)?; @@ -234,14 +226,9 @@ async fn replica_wipe( pool, wipe_options: Some(v1_rpc::test::StreamWipeOptions { options: Some(v1_rpc::test::WipeOptions { - wipe_method: v1_rpc::test::wipe_options::WipeMethod::from( - method, - ) as i32, + wipe_method: v1_rpc::test::wipe_options::WipeMethod::from(method) as i32, write_pattern: None, - cksum_alg: - v1_rpc::test::wipe_options::CheckSumAlgorithm::from( - method, - ) as i32, + cksum_alg: v1_rpc::test::wipe_options::CheckSumAlgorithm::from(method) as i32, }), chunk_size: chunk_size.as_u64(), }), @@ -253,9 +240,7 @@ async fn replica_wipe( fn bandwidth(response: &v1_rpc::test::WipeReplicaResponse) -> String { let unknown = String::new(); - let Some(Ok(elapsed)) = - response.since.map(TryInto::::try_into) - else { + let Some(Ok(elapsed)) = response.since.map(TryInto::::try_into) else { return unknown; }; let elapsed_f = elapsed.as_secs_f64(); @@ -266,8 +251,7 @@ async fn replica_wipe( let bandwidth = (response.wiped_bytes as f64 / elapsed_f) as u64; format!( "{:.2}/s", - Byte::from_u64(bandwidth) - .get_appropriate_unit(byte_unit::UnitType::Binary) + Byte::from_u64(bandwidth).get_appropriate_unit(byte_unit::UnitType::Binary) ) } @@ -315,8 +299,7 @@ async fn replica_wipe( let response = response.map(|response| { // back fill with spaces with ensure checksum aligns // with its header - let bandwidth = - format!("{: <12}", bandwidth(&response)); + let bandwidth = format!("{: <12}", bandwidth(&response)); let checksum = checksum(&response); vec![ response.uuid, @@ -349,10 +332,7 @@ fn adjust_bytes(bytes: u64) -> String { format!("{adjusted_byte:.2}") } -async fn injections( - mut ctx: Context, - matches: &ArgMatches, -) -> crate::Result<()> { +async fn injections(mut ctx: Context, matches: &ArgMatches) -> crate::Result<()> { let inj_add = matches.get_many::("add"); let inj_remove = matches.get_many::("remove"); if inj_add.is_none() && inj_remove.is_none() { @@ -377,11 +357,9 @@ async fn injections( println!("Removing injected fault: {uri}"); ctx.v1 .test - .remove_fault_injection( - v1_rpc::test::RemoveFaultInjectionRequest { - uri: uri.to_owned(), - }, - ) + .remove_fault_injection(v1_rpc::test::RemoveFaultInjectionRequest { + uri: uri.to_owned(), + }) .await .context(GrpcStatus)?; } diff --git a/io-engine/src/bin/io-engine.rs b/io-engine/src/bin/io-engine.rs index 794b38b2d..935a78872 
100644 --- a/io-engine/src/bin/io-engine.rs +++ b/io-engine/src/bin/io-engine.rs @@ -9,31 +9,17 @@ use futures::future::FutureExt; use io_engine::{ bdev::{ - nexus::{ - ENABLE_NEXUS_CHANNEL_DEBUG, - ENABLE_NEXUS_RESET, - ENABLE_PARTIAL_REBUILD, - }, + nexus::{ENABLE_NEXUS_CHANNEL_DEBUG, ENABLE_NEXUS_RESET, ENABLE_PARTIAL_REBUILD}, util::uring, }, core::{ device_monitor_loop, diagnostics::process_diagnostics_cli, - lock::{ - ProtectedSubsystems, - ResourceLockManager, - ResourceLockManagerConfig, - }, - reactor_monitor_loop, - runtime, - MayastorCliArgs, - MayastorEnvironment, - Mthread, - Reactors, + lock::{ProtectedSubsystems, ResourceLockManager, ResourceLockManagerConfig}, + reactor_monitor_loop, runtime, MayastorCliArgs, MayastorEnvironment, Mthread, Reactors, }, eventing::Event, - grpc, - logger, + grpc, logger, persistent_store::PersistentStoreBuilder, subsys::Registration, }; @@ -216,9 +202,8 @@ fn hugepage_get_nr(hugepage_path: &Path) -> (u32, u32) { fn hugepage_check() { let (nr_pages, free_pages) = hugepage_get_nr(Path::new("/sys/kernel/mm/hugepages/hugepages-2048kB")); - let (nr_1g_pages, free_1g_pages) = hugepage_get_nr(Path::new( - "/sys/kernel/mm/hugepages/hugepages-1048576kB", - )); + let (nr_1g_pages, free_1g_pages) = + hugepage_get_nr(Path::new("/sys/kernel/mm/hugepages/hugepages-1048576kB")); if nr_pages + nr_1g_pages * 512 < PAGES_NEEDED { error!( @@ -277,25 +262,24 @@ fn main() -> Result<(), Box> { hugepage_check(); let nvme_core_path = Path::new("/sys/module/nvme_core/parameters"); - let nvme_mp: String = - match sysfs::parse_value::(nvme_core_path, "multipath") { - Ok(s) => match s.as_str() { - "Y" => "yes".to_string(), - "N" => "disabled".to_string(), - u => format!("unknown value {u}"), - }, - Err(e) => { - if e.kind() == std::io::ErrorKind::NotFound { - if nvme_core_path.exists() { - "not built".to_string() - } else { - "nvme not loaded".to_string() - } + let nvme_mp: String = match sysfs::parse_value::(nvme_core_path, "multipath") { + Ok(s) => match s.as_str() { + "Y" => "yes".to_string(), + "N" => "disabled".to_string(), + u => format!("unknown value {u}"), + }, + Err(e) => { + if e.kind() == std::io::ErrorKind::NotFound { + if nvme_core_path.exists() { + "not built".to_string() } else { - format!("unknown error: {e}") + "nvme not loaded".to_string() } + } else { + format!("unknown error: {e}") } - }; + } + }; info!( "kernel io_uring support: {}", diff --git a/io-engine/src/bin/jsonrpc.rs b/io-engine/src/bin/jsonrpc.rs index 8b3a56ea8..97501cfeb 100644 --- a/io-engine/src/bin/jsonrpc.rs +++ b/io-engine/src/bin/jsonrpc.rs @@ -43,15 +43,11 @@ async fn main() -> std::result::Result<(), Box> { let opt = Opt::parse(); let fut = match opt.cmd { - Sub::Raw { - method, - arg, - } => { + Sub::Raw { method, arg } => { if let Some(arg) = arg { let args: serde_json::Value = serde_json::from_str(&arg)?; - let out: serde_json::Value = - call(&opt.socket, &method, Some(args)).await?; + let out: serde_json::Value = call(&opt.socket, &method, Some(args)).await?; // we don't always get valid json back which is a bug in the RPC // method really. if let Ok(json) = serde_json::to_string_pretty(&out) { @@ -62,8 +58,7 @@ async fn main() -> std::result::Result<(), Box> { } } else { serde_json::to_string_pretty( - &call::<(), serde_json::Value>(&opt.socket, &method, None) - .await?, + &call::<(), serde_json::Value>(&opt.socket, &method, None).await?, )? 
} } diff --git a/io-engine/src/bin/nvmet.rs b/io-engine/src/bin/nvmet.rs index 3f5c33871..b5dad73f9 100644 --- a/io-engine/src/bin/nvmet.rs +++ b/io-engine/src/bin/nvmet.rs @@ -15,8 +15,7 @@ use futures::FutureExt; use io_engine::{ bdev::nexus::{nexus_create, nexus_lookup_mut}, core::{MayastorCliArgs, MayastorEnvironment, Mthread, Reactors, Share}, - grpc, - logger, + grpc, logger, }; use version_info::version_info_str; @@ -77,18 +76,20 @@ fn main() { let matches = Command::new("NVMeT CLI") .version(version_info_str!()) .about("NVMe test utility to quickly create a nexus over existing nvme targets") - .arg(Arg::new("size") - .default_value("64") - .short('s') - .long("size") - .help("Size of the nexus to create in MB") + .arg( + Arg::new("size") + .default_value("64") + .short('s') + .long("size") + .help("Size of the nexus to create in MB"), ) .arg( Arg::new("uri") .short('u') .required(true) .long("uris") - .help("NVMe-OF TCP targets to connect to")) + .help("NVMe-OF TCP targets to connect to"), + ) .get_matches(); let margs = MayastorCliArgs { @@ -101,8 +102,7 @@ fn main() { let ms = MayastorEnvironment::new(margs.clone()).init(); start_tokio_runtime(&margs); - Reactors::current() - .send_future(async move { create_nexus(&matches).await }); + Reactors::current().send_future(async move { create_nexus(&matches).await }); Reactors::current().running(); Reactors::current().poll_reactor(); diff --git a/io-engine/src/bin/spdk.rs b/io-engine/src/bin/spdk.rs index 8862ecf78..0db6193a0 100644 --- a/io-engine/src/bin/spdk.rs +++ b/io-engine/src/bin/spdk.rs @@ -18,11 +18,7 @@ use std::{ use io_engine::delay; use spdk_rs::libspdk::{ - spdk_app_fini, - spdk_app_opts, - spdk_app_opts_init, - spdk_app_parse_args, - spdk_app_start, + spdk_app_fini, spdk_app_opts, spdk_app_opts_init, spdk_app_parse_args, spdk_app_start, spdk_app_stop, }; @@ -54,10 +50,7 @@ fn main() -> Result<(), std::io::Error> { None, // usage ) != spdk_rs::libspdk::SPDK_APP_PARSE_ARGS_SUCCESS { - return Err(Error::new( - ErrorKind::Other, - "Parsing arguments failed", - )); + return Err(Error::new(ErrorKind::Other, "Parsing arguments failed")); } } diff --git a/io-engine/src/core/bdev.rs b/io-engine/src/core/bdev.rs index 8d2814076..548a62b3f 100644 --- a/io-engine/src/core/bdev.rs +++ b/io-engine/src/core/bdev.rs @@ -15,12 +15,7 @@ use crate::{ bdev_api::bdev_uri_eq, core::{ share::{NvmfShareProps, Protocol, Share, UpdateProps}, - BlockDeviceIoStats, - CoreError, - DescriptorGuard, - PtplProps, - ShareNvmf, - UnshareNvmf, + BlockDeviceIoStats, CoreError, DescriptorGuard, PtplProps, ShareNvmf, UnshareNvmf, }, subsys::NvmfSubsystem, target::nvmf, @@ -69,9 +64,7 @@ where { /// TODO pub(crate) fn new(b: spdk_rs::Bdev) -> Self { - Self { - inner: b, - } + Self { inner: b } } /// Constructs a Bdev from a raw SPDK pointer. @@ -79,17 +72,12 @@ where if bdev.is_null() { None } else { - unsafe { - Some(Self::new(spdk_rs::Bdev::unsafe_from_inner_ptr(bdev))) - } + unsafe { Some(Self::new(spdk_rs::Bdev::unsafe_from_inner_ptr(bdev))) } } } /// Opens a Bdev by its name in read_write mode. - pub fn open_by_name( - name: &str, - read_write: bool, - ) -> Result, CoreError> { + pub fn open_by_name(name: &str, read_write: bool) -> Result, CoreError> { if let Some(bdev) = Self::lookup_by_name(name) { bdev.open(read_write) } else { @@ -102,19 +90,10 @@ where /// Opens the current Bdev. /// A Bdev can be opened multiple times resulting in a new descriptor for /// each call. 
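/// A minimal usage sketch (assumes a bdev named "malloc0" already exists):
///
/// let bdev = UntypedBdev::lookup_by_name("malloc0").unwrap();
/// let rw = bdev.open(true)?; // read-write descriptor
/// let ro = bdev.open(false)?; // a second, independent read-only descriptor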
- pub fn open( - &self, - read_write: bool, - ) -> Result, CoreError> { - match spdk_rs::BdevDesc::::open( - self.name(), - read_write, - bdev_event_callback, - ) { + pub fn open(&self, read_write: bool) -> Result, CoreError> { + match spdk_rs::BdevDesc::::open(self.name(), read_write, bdev_event_callback) { Ok(d) => Ok(DescriptorGuard::new(d)), - Err(err) => Err(CoreError::OpenBdev { - source: err, - }), + Err(err) => Err(CoreError::OpenBdev { source: err }), } } @@ -179,19 +158,16 @@ where unmap_latency_ticks: stat.unmap_latency_ticks, tick_rate: self.get_tick_rate(), }), - Err(err) => Err(CoreError::DeviceStatisticsFailed { - source: err, - }), + Err(err) => Err(CoreError::DeviceStatisticsFailed { source: err }), } } /// Resets io stats for a given Bdev. pub async fn reset_bdev_io_stats(&self) -> Result<(), CoreError> { - self.inner.stats_reset_async().await.map_err(|err| { - CoreError::DeviceStatisticsFailed { - source: err, - } - }) + self.inner + .stats_reset_async() + .await + .map_err(|err| CoreError::DeviceStatisticsFailed { source: err }) } } @@ -216,8 +192,7 @@ where // todo: add option to use uuid here, will allow for the replica uuid to // be used! - let subsystem = - NvmfSubsystem::try_from_with(me, ptpl).context(ShareNvmf {})?; + let subsystem = NvmfSubsystem::try_from_with(me, ptpl).context(ShareNvmf {})?; if let Some((cntlid_min, cntlid_max)) = props.cntlid_range() { subsystem @@ -246,8 +221,7 @@ where ) -> Result<(), Self::Error> { match self.shared() { Some(Protocol::Nvmf) => { - if let Some(subsystem) = NvmfSubsystem::nqn_lookup(self.name()) - { + if let Some(subsystem) = NvmfSubsystem::nqn_lookup(self.name()) { let props = UpdateProps::from(props.into()); subsystem.allow_any(props.host_any()); subsystem @@ -300,12 +274,10 @@ where /// TODO fn allowed_hosts(&self) -> Vec { match self.shared() { - Some(Protocol::Nvmf) => { - match NvmfSubsystem::nqn_lookup(self.name()) { - Some(subsystem) => subsystem.allowed_hosts(), - None => vec![], - } - } + Some(Protocol::Nvmf) => match NvmfSubsystem::nqn_lookup(self.name()) { + Some(subsystem) => subsystem.allowed_hosts(), + None => vec![], + }, _ => vec![], } } @@ -313,9 +285,7 @@ where /// return the URI that was used to construct the bdev fn bdev_uri(&self) -> Option { self.bdev_uri_original().map(|mut uri| { - if !uri.query_pairs().any(|e| e.0 == "uuid") - && !self.uuid().is_nil() - { + if !uri.query_pairs().any(|e| e.0 == "uuid") && !self.uuid().is_nil() { uri.query_pairs_mut() .append_pair("uuid", &self.uuid_as_string()); } @@ -436,11 +406,7 @@ pub struct BdevStats { impl BdevStats { /// Create a new `Self` from the given parts. pub fn new(name: String, uuid: String, stats: BlockDeviceIoStats) -> Self { - Self { - name, - uuid, - stats, - } + Self { name, uuid, stats } } } diff --git a/io-engine/src/core/block_device.rs b/io-engine/src/core/block_device.rs index d0146a226..a07a37644 100644 --- a/io-engine/src/core/block_device.rs +++ b/io-engine/src/core/block_device.rs @@ -1,10 +1,4 @@ -use super::{ - CoreError, - DeviceEventSink, - IoCompletionStatus, - IoType, - SnapshotParams, -}; +use super::{CoreError, DeviceEventSink, IoCompletionStatus, IoType, SnapshotParams}; use spdk_rs::{DmaBuf, DmaError, IoVec}; @@ -88,19 +82,13 @@ pub trait BlockDevice { async fn io_stats(&self) -> Result; /// Open device and obtain a descriptor. - fn open( - &self, - read_write: bool, - ) -> Result, CoreError>; + fn open(&self, read_write: bool) -> Result, CoreError>; /// Obtain I/O controller for device. 
fn get_io_controller(&self) -> Option>; /// Register device event listener. - fn add_event_listener( - &self, - listener: DeviceEventSink, - ) -> Result<(), CoreError>; + fn add_event_listener(&self, listener: DeviceEventSink) -> Result<(), CoreError>; } /// Core trait that represents a descriptor for an opened block device. @@ -114,9 +102,7 @@ pub trait BlockDeviceDescriptor { fn device_name(&self) -> String; /// Consumes BlockDeviceDescriptor and returns a BlockDeviceHandle. - fn into_handle( - self: Box, - ) -> Result, CoreError>; + fn into_handle(self: Box) -> Result, CoreError>; /// Returns a BlockDeviceHandle for this descriptor without consuming it. fn get_io_handle(&self) -> Result, CoreError>; @@ -125,9 +111,7 @@ pub trait BlockDeviceDescriptor { fn unclaim(&self); /// TODO - async fn get_io_handle_nonblock( - &self, - ) -> Result, CoreError>; + async fn get_io_handle_nonblock(&self) -> Result, CoreError>; } /// TODO @@ -167,19 +151,11 @@ pub trait BlockDeviceHandle { /// TODO #[deprecated(note = "use read_buf_blocks_async()")] - async fn read_at( - &self, - offset: u64, - buffer: &mut DmaBuf, - ) -> Result; + async fn read_at(&self, offset: u64, buffer: &mut DmaBuf) -> Result; /// TODO #[deprecated(note = "use write_buf_blocks_async()")] - async fn write_at( - &self, - offset: u64, - buffer: &DmaBuf, - ) -> Result; + async fn write_at(&self, offset: u64, buffer: &DmaBuf) -> Result; /// Reads the given number of blocks into the list of buffers from the /// device, starting at the given offset. @@ -250,13 +226,8 @@ pub trait BlockDeviceHandle { num_blocks: u64, opts: ReadOptions, ) -> Result<(), CoreError> { - self.readv_blocks_async( - &mut [buf.to_io_vec()], - offset_blocks, - num_blocks, - opts, - ) - .await + self.readv_blocks_async(&mut [buf.to_io_vec()], offset_blocks, num_blocks, opts) + .await } /// Writes the given number of blocks from the list of buffers to the @@ -422,10 +393,7 @@ pub trait BlockDeviceHandle { async fn nvme_identify_ctrlr(&self) -> Result; /// TODO - async fn create_snapshot( - &self, - params: SnapshotParams, - ) -> Result; + async fn create_snapshot(&self, params: SnapshotParams) -> Result; /// TODO async fn nvme_resv_register( @@ -466,11 +434,7 @@ pub trait BlockDeviceHandle { } /// TODO - async fn nvme_resv_report( - &self, - _cdw11: u32, - _buffer: &mut DmaBuf, - ) -> Result<(), CoreError> { + async fn nvme_resv_report(&self, _cdw11: u32, _buffer: &mut DmaBuf) -> Result<(), CoreError> { Err(CoreError::NotSupported { source: Errno::EOPNOTSUPP, }) @@ -536,8 +500,5 @@ pub trait DeviceIoController { fn get_timeout_action(&self) -> Result; /// TODO - fn set_timeout_action( - &mut self, - action: DeviceTimeoutAction, - ) -> Result<(), CoreError>; + fn set_timeout_action(&mut self, action: DeviceTimeoutAction) -> Result<(), CoreError>; } diff --git a/io-engine/src/core/device_events.rs b/io-engine/src/core/device_events.rs index 196a04b35..1390830a3 100644 --- a/io-engine/src/core/device_events.rs +++ b/io-engine/src/core/device_events.rs @@ -62,10 +62,7 @@ impl SinkInner { /// TODO fn new(p: &dyn DeviceEventListener) -> Self { let p = unsafe { - std::mem::transmute::< - &dyn DeviceEventListener, - &'static dyn DeviceEventListener, - >(p) + std::mem::transmute::<&dyn DeviceEventListener, &'static dyn DeviceEventListener>(p) }; Self { @@ -140,11 +137,7 @@ impl DeviceEventDispatcher { /// Dispatches an event to all registered listeners. /// Returns the number of listeners notified about target event. 
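/// Listeners are collected first and their callbacks are invoked only after
/// the internal listener lock is released, so callbacks never run under the
/// lock (see the comment in the body below).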
- pub fn dispatch_event( - &self, - evt: DeviceEventType, - dev_name: &str, - ) -> usize { + pub fn dispatch_event(&self, evt: DeviceEventType, dev_name: &str) -> usize { let mut listeners = Vec::new(); // To avoid potential deadlocks we never call the listeners with the @@ -169,13 +162,16 @@ impl DeviceEventDispatcher { /// Returns the number of registered listeners. pub fn count(&self) -> usize { - self.listeners.lock().iter().fold(0, |acc, x| { - if x.strong_count() > 0 { - acc + 1 - } else { - acc - } - }) + self.listeners.lock().iter().fold( + 0, + |acc, x| { + if x.strong_count() > 0 { + acc + 1 + } else { + acc + } + }, + ) } /// Removes all dropped listeners. diff --git a/io-engine/src/core/device_monitor.rs b/io-engine/src/core/device_monitor.rs index 2012929f4..e8f76fd5c 100644 --- a/io-engine/src/core/device_monitor.rs +++ b/io-engine/src/core/device_monitor.rs @@ -55,14 +55,12 @@ pub async fn device_monitor_loop() { let child_device = child_device.clone(); async move { if let Some(n) = nexus_lookup(&nexus_name) { - if let Err(e) = - n.close_child(&child_device).await - { + if let Err(e) = n.close_child(&child_device).await { error!( - "Nexus '{nexus_name}': failed to close \ + "Nexus '{nexus_name}': failed to close \ retired child '{child_device}': {e}", - e = e.verbose() - ); + e = e.verbose() + ); } } } diff --git a/io-engine/src/core/env.rs b/io-engine/src/core/env.rs index 21ad0c3ed..3c88c1823 100644 --- a/io-engine/src/core/env.rs +++ b/io-engine/src/core/env.rs @@ -7,8 +7,7 @@ use std::{ str::FromStr, sync::{ atomic::{AtomicBool, Ordering::SeqCst}, - Arc, - Mutex, + Arc, Mutex, }, time::Duration, }; @@ -25,31 +24,12 @@ use version_info::{package_description, version_info_str}; use spdk_rs::{ libspdk::{ - spdk_app_shutdown_cb, - spdk_env_dpdk_post_init, - spdk_env_dpdk_rte_eal_init, - spdk_env_fini, - spdk_log_close, - spdk_log_level, - spdk_log_open, - spdk_log_set_flag, - spdk_log_set_level, - spdk_log_set_print_level, - spdk_pci_addr, - spdk_rpc_finish, - spdk_rpc_initialize, - spdk_rpc_set_state, - spdk_subsystem_fini, - spdk_subsystem_init, - spdk_thread_lib_fini, - spdk_thread_send_critical_msg, - spdk_trace_cleanup, - spdk_trace_create_tpoint_group_mask, - spdk_trace_init, - spdk_trace_set_tpoints, - SPDK_LOG_DEBUG, - SPDK_LOG_INFO, - SPDK_RPC_RUNTIME, + spdk_app_shutdown_cb, spdk_env_dpdk_post_init, spdk_env_dpdk_rte_eal_init, spdk_env_fini, + spdk_log_close, spdk_log_level, spdk_log_open, spdk_log_set_flag, spdk_log_set_level, + spdk_log_set_print_level, spdk_pci_addr, spdk_rpc_finish, spdk_rpc_initialize, + spdk_rpc_set_state, spdk_subsystem_fini, spdk_subsystem_init, spdk_thread_lib_fini, + spdk_thread_send_critical_msg, spdk_trace_cleanup, spdk_trace_create_tpoint_group_mask, + spdk_trace_init, spdk_trace_set_tpoints, SPDK_LOG_DEBUG, SPDK_LOG_INFO, SPDK_RPC_RUNTIME, }, spdk_rs_log, }; @@ -60,26 +40,16 @@ use crate::{ core::{ nic, reactor::{Reactor, ReactorState, Reactors}, - Cores, - MayastorFeatures, - Mthread, - }, - eventing::{ - io_engine_events::io_engine_stop_event_meta, - Event, - EventWithMeta, + Cores, MayastorFeatures, Mthread, }, + eventing::{io_engine_events::io_engine_stop_event_meta, Event, EventWithMeta}, grpc, grpc::MayastorGrpcServer, logger, persistent_store::PersistentStoreBuilder, subsys::{ - self, - config::opts::TARGET_CRDT_LEN, - registration::registration_grpc::ApiVersion, - Config, - PoolConfig, - Registration, + self, config::opts::TARGET_CRDT_LEN, registration::registration_grpc::ApiVersion, Config, + PoolConfig, Registration, }, }; 
@@ -124,9 +94,7 @@ fn parse_crdt(src: &str) -> Result<[u16; TARGET_CRDT_LEN], String> { let items = src.split(',').collect::<Vec<&str>>(); match items.as_slice() { [one] => Ok([parse_val(one)?, 0, 0]),
- [one, two, three] => {
- Ok([parse_val(one)?, parse_val(two)?, parse_val(three)?])
- }
+ [one, two, three] => Ok([parse_val(one)?, parse_val(two)?, parse_val(three)?]),
_ => Err("Command Retry Delay argument must be an integer or \ a comma-separated list of three integers" .to_string()), }
@@ -300,10 +268,8 @@ impl MayastorFeatures { fn init_features() -> MayastorFeatures { let ana = env::var("NEXUS_NVMF_ANA_ENABLE").as_deref() == Ok("1"); let lvm = env::var("ENABLE_LVM").as_deref() == Ok("true");
- let snapshot_rebuild = - env::var("ENABLE_SNAPSHOT_REBUILD").as_deref() == Ok("true");
- let rdma_capable_io_engine = - env::var("ENABLE_RDMA").as_deref() == Ok("true");
+ let snapshot_rebuild = env::var("ENABLE_SNAPSHOT_REBUILD").as_deref() == Ok("true");
+ let rdma_capable_io_engine = env::var("ENABLE_RDMA").as_deref() == Ok("true");
MayastorFeatures { asymmetric_namespace_access: ana, logical_volume_manager: lvm,
@@ -381,12 +347,10 @@ impl MayastorCliArgs { /// Global exit code of the program, initially set to -1 to capture double /// shutdown during test cases
-pub static GLOBAL_RC: Lazy<Arc<Mutex<i32>>> = - Lazy::new(|| Arc::new(Mutex::new(-1)));
+pub static GLOBAL_RC: Lazy<Arc<Mutex<i32>>> = Lazy::new(|| Arc::new(Mutex::new(-1)));
/// keep track if we have received a signal already
-pub static SIG_RECEIVED: Lazy<AtomicBool> = - Lazy::new(|| AtomicBool::new(false));
+pub static SIG_RECEIVED: Lazy<AtomicBool> = Lazy::new(|| AtomicBool::new(false));
#[derive(Debug, Snafu)] pub enum EnvError {
@@ -588,10 +552,7 @@ extern "C" fn mayastor_signal_handler(signo: i32) { SIG_RECEIVED.store(true, SeqCst); unsafe { if let Some(mth) = Mthread::primary_safe() {
- spdk_thread_send_critical_msg( - mth.as_ptr(), - Some(signal_trampoline), - );
+ spdk_thread_send_critical_msg(mth.as_ptr(), Some(signal_trampoline));
} }; }
@@ -604,8 +565,7 @@ struct SubsystemCtx { static MAYASTOR_FEATURES: OnceCell<MayastorFeatures> = OnceCell::new();
-static MAYASTOR_DEFAULT_ENV: OnceCell<parking_lot::Mutex<MayastorEnvironment>> = - OnceCell::new();
+static MAYASTOR_DEFAULT_ENV: OnceCell<parking_lot::Mutex<MayastorEnvironment>> = OnceCell::new();
impl MayastorEnvironment { pub fn new(args: MayastorCliArgs) -> Self {
@@ -615,9 +575,7 @@ impl MayastorEnvironment { ps_endpoint: args.ps_endpoint, ps_timeout: args.ps_timeout, ps_retries: args.ps_retries,
- node_name: args.node_name.clone().unwrap_or_else(|| { - env::var("HOSTNAME").unwrap_or_else(|_| "mayastor-node".into()) - }),
+ node_name: args + .node_name + .clone() + .unwrap_or_else(|| env::var("HOSTNAME").unwrap_or_else(|_| "mayastor-node".into())),
node_nqn: make_hostnqn( args.node_name .or_else(|| env::var("HOSTNAME").ok())
@@ -643,8 +604,7 @@ impl MayastorEnvironment { developer_delay: args.developer_delay, rdma: args.rdma, bs_cluster_unmap: args.bs_cluster_unmap,
- enable_io_all_thrd_nexus_channels: args - .enable_io_all_thrd_nexus_channels,
+ enable_io_all_thrd_nexus_channels: args.enable_io_all_thrd_nexus_channels,
..Default::default() } .setup_static()
@@ -658,8 +618,7 @@ impl MayastorEnvironment { fn setup_static(self) -> Self { match MAYASTOR_DEFAULT_ENV.get() { None => {
- MAYASTOR_DEFAULT_ENV - .get_or_init(|| parking_lot::Mutex::new(self.clone()));
+ MAYASTOR_DEFAULT_ENV.get_or_init(|| parking_lot::Mutex::new(self.clone()));
} Some(some) => { *some.lock() = self.clone();
@@ -680,18 +639,16 @@ impl MayastorEnvironment { /// configure signal handling fn install_signal_handlers(&self) { unsafe { -
signal_hook::low_level::register( - signal_hook::consts::SIGTERM, - || mayastor_signal_handler(1), - ) + signal_hook::low_level::register(signal_hook::consts::SIGTERM, || { + mayastor_signal_handler(1) + }) } .unwrap(); unsafe { - signal_hook::low_level::register( - signal_hook::consts::SIGINT, - || mayastor_signal_handler(1), - ) + signal_hook::low_level::register(signal_hook::consts::SIGINT, || { + mayastor_signal_handler(1) + }) } .unwrap(); } @@ -701,9 +658,7 @@ impl MayastorEnvironment { let mut args = vec![CString::new(self.name.clone()).unwrap()]; if self.mem_channel > 0 { - args.push( - CString::new(format!("-n {}", self.mem_channel)).unwrap(), - ); + args.push(CString::new(format!("-n {}", self.mem_channel)).unwrap()); } if self.shm_id < 0 { @@ -715,10 +670,7 @@ impl MayastorEnvironment { } if self.master_core > 0 { - args.push( - CString::new(format!("--master-lcore={}", self.master_core)) - .unwrap(), - ); + args.push(CString::new(format!("--master-lcore={}", self.master_core)).unwrap()); } if self.no_pci { @@ -752,13 +704,7 @@ impl MayastorEnvironment { .unwrap(), ); } else { - args.push( - CString::new(format!( - "--file-prefix=mayastor_pid{}", - self.shm_id - )) - .unwrap(), - ); + args.push(CString::new(format!("--file-prefix=mayastor_pid{}", self.shm_id)).unwrap()); args.push(CString::new("--proc-type=auto").unwrap()); } @@ -792,9 +738,7 @@ impl MayastorEnvironment { if let Some(list) = &self.core_list { args.push(CString::new(format!("-l {list}")).unwrap()); } else { - args.push( - CString::new(format!("-c {}", self.reactor_mask)).unwrap(), - ) + args.push(CString::new(format!("-c {}", self.reactor_mask)).unwrap()) } let mut cargs = args @@ -849,11 +793,9 @@ impl MayastorEnvironment { pub(crate) fn get_nvmf_tgt_ip() -> Result { static TGT_IP: OnceCell = OnceCell::new(); TGT_IP - .get_or_try_init(|| { - match Self::global_or_default().nvmf_tgt_interface { - Some(ref iface) => Self::detect_nvmf_tgt_iface_ip(iface), - None => Self::detect_pod_ip(), - } + .get_or_try_init(|| match Self::global_or_default().nvmf_tgt_interface { + Some(ref iface) => Self::detect_nvmf_tgt_iface_ip(iface), + None => Self::detect_pod_ip(), }) .cloned() } @@ -892,19 +834,14 @@ impl MayastorEnvironment { Box::new(move |n| n.ipv4_subnet_eq(subnet, mask)) } _ => { - return Err(format!( - "Invalid NVMF target interface: '{iface}'", - )); + return Err(format!("Invalid NVMF target interface: '{iface}'",)); } }; - let mut nics: Vec<_> = - nic::find_all_nics().into_iter().filter(pred).collect(); + let mut nics: Vec<_> = nic::find_all_nics().into_iter().filter(pred).collect(); if nics.is_empty() { - return Err(format!( - "Network interface matching '{iface}' not found", - )); + return Err(format!("Network interface matching '{iface}' not found",)); } if nics.len() > 1 { @@ -962,10 +899,7 @@ impl MayastorEnvironment { } else { info!("RPC server listening at: {}", ctx.rpc.to_str().unwrap()); unsafe { - spdk_rpc_initialize( - ctx.rpc.as_ptr() as *mut c_char, - std::ptr::null_mut(), - ); + spdk_rpc_initialize(ctx.rpc.as_ptr() as *mut c_char, std::ptr::null_mut()); spdk_rpc_set_state(SPDK_RPC_RUNTIME); }; @@ -1027,33 +961,21 @@ impl MayastorEnvironment { const MAX_GROUP_IDS: u32 = 16; const NUM_THREADS: u32 = 1; let cshm_name = if self.shm_id >= 0 { - CString::new( - format!("/{}_trace.{}", self.name, self.shm_id).as_str(), - ) - .unwrap() + CString::new(format!("/{}_trace.{}", self.name, self.shm_id).as_str()).unwrap() } else { - CString::new( - format!("/{}_trace.pid{}", self.name, std::process::id()) - 
.as_str(), - ) - .unwrap() + CString::new(format!("/{}_trace.pid{}", self.name, std::process::id()).as_str()) + .unwrap() }; unsafe { - if spdk_trace_init( - cshm_name.as_ptr(), - self.num_entries, - NUM_THREADS, - ) != 0 - { + if spdk_trace_init(cshm_name.as_ptr(), self.num_entries, NUM_THREADS) != 0 { error!("SPDK tracing init error"); } } let tpoint_group_name = CString::new("all").unwrap(); - let tpoint_group_mask = unsafe { - spdk_trace_create_tpoint_group_mask(tpoint_group_name.as_ptr()) - }; + let tpoint_group_mask = + unsafe { spdk_trace_create_tpoint_group_mask(tpoint_group_name.as_ptr()) }; - for group_id in 0 .. MAX_GROUP_IDS { + for group_id in 0..MAX_GROUP_IDS { if (tpoint_group_mask & (1 << group_id) as u64) > 0 { unsafe { spdk_trace_set_tpoints(group_id, u64::MAX); @@ -1119,9 +1041,9 @@ impl MayastorEnvironment { // but when using more then 16 cores, I saw some "weirdness" // which could be related purely to logging. - while Reactors::iter().any(|r| { - r.get_state() == ReactorState::Init && r.core() != Cores::current() - }) { + while Reactors::iter() + .any(|r| r.get_state() == ReactorState::Init && r.core() != Cores::current()) + { std::thread::sleep(Duration::from_millis(1)); } @@ -1141,10 +1063,7 @@ impl MayastorEnvironment { unsafe { spdk_subsystem_init( Some(Self::start_rpc), - Box::into_raw(Box::new(SubsystemCtx { - rpc, - sender, - })) as *mut _, + Box::into_raw(Box::new(SubsystemCtx { rpc, sender })) as *mut _, ); } @@ -1201,9 +1120,7 @@ impl MayastorEnvironment { let master = Reactors::current(); master.send_future(async { f() }); - let mut futures: Vec< - Pin>>, - > = Vec::new(); + let mut futures: Vec>>> = Vec::new(); if let Some(grpc_endpoint) = grpc_endpoint { futures.push(Box::pin(grpc::MayastorGrpcServer::run( &node_name, @@ -1230,9 +1147,9 @@ impl MayastorEnvironment { } fn make_hostnqn(node_name: Option<&String>) -> Option { - std::env::var("HOSTNQN").ok().or_else(|| { - node_name.map(|n| format!("{NVME_NQN_PREFIX}:node-name:{n}")) - }) + std::env::var("HOSTNQN") + .ok() + .or_else(|| node_name.map(|n| format!("{NVME_NQN_PREFIX}:node-name:{n}"))) } fn print_asan_env() { diff --git a/io-engine/src/core/fault_injection/bdev_io_injection.rs b/io-engine/src/core/fault_injection/bdev_io_injection.rs index 638fb0f8f..93f871b08 100644 --- a/io-engine/src/core/fault_injection/bdev_io_injection.rs +++ b/io-engine/src/core/fault_injection/bdev_io_injection.rs @@ -4,25 +4,14 @@ use std::collections::HashMap; use spdk_rs::{ libspdk::{ - spdk_bdev_fn_table, - spdk_bdev_io, - spdk_bdev_io_complete_nvme_status, - spdk_io_channel, + spdk_bdev_fn_table, spdk_bdev_io, spdk_bdev_io_complete_nvme_status, spdk_io_channel, }, - BdevIo, - UntypedBdev, + BdevIo, UntypedBdev, }; use crate::core::IoCompletionStatus; -use super::{ - FaultDomain, - FaultInjectionError, - FaultIoStage, - FaultMethod, - InjectIoCtx, - Injection, -}; +use super::{FaultDomain, FaultInjectionError, FaultIoStage, FaultMethod, InjectIoCtx, Injection}; /// TODO struct BdevInfo { @@ -44,10 +33,7 @@ fn get_bdevs<'a>() -> MutexGuard<'a, Bdevs> { } /// TODO -unsafe extern "C" fn inject_submit_request( - chan: *mut spdk_io_channel, - io_ptr: *mut spdk_bdev_io, -) { +unsafe extern "C" fn inject_submit_request(chan: *mut spdk_io_channel, io_ptr: *mut spdk_bdev_io) { let mut g = get_bdevs(); let io = BdevIo::<()>::legacy_from_ptr(io_ptr); @@ -89,9 +75,7 @@ unsafe extern "C" fn inject_submit_request( } /// TODO -pub(super) fn add_bdev_io_injection( - inj: &Injection, -) -> Result<(), FaultInjectionError> { +pub(super) 
fn add_bdev_io_injection(inj: &Injection) -> Result<(), FaultInjectionError> { if inj.io_stage != FaultIoStage::Submission { return Err(FaultInjectionError::InvalidInjection { name: inj.device_name.clone(), diff --git a/io-engine/src/core/fault_injection/fault_method.rs b/io-engine/src/core/fault_injection/fault_method.rs index a53719edc..980de93b2 100644 --- a/io-engine/src/core/fault_injection/fault_method.rs +++ b/io-engine/src/core/fault_injection/fault_method.rs @@ -33,9 +33,7 @@ impl Display for FaultMethod { use IoCompletionStatus::*; match self { - Self::Status(NvmeError(NvmeStatus::DATA_TRANSFER_ERROR)) => { - f.write_str("status") - } + Self::Status(NvmeError(NvmeStatus::DATA_TRANSFER_ERROR)) => f.write_str("status"), Self::Status(NvmeError(s)) => { let (sct, sc) = s.as_sct_sc_codes(); write!(f, "status-nvme-{sct:x}-{sc:x}",) @@ -65,9 +63,9 @@ impl Display for FaultMethod { impl FaultMethod { /// A shorthand for a generic data transfer error. - pub const DATA_TRANSFER_ERROR: Self = Self::Status( - IoCompletionStatus::NvmeError(NvmeStatus::DATA_TRANSFER_ERROR), - ); + pub const DATA_TRANSFER_ERROR: Self = Self::Status(IoCompletionStatus::NvmeError( + NvmeStatus::DATA_TRANSFER_ERROR, + )); /// TODO pub(super) fn inject( @@ -91,7 +89,7 @@ impl FaultMethod { }; for iov in iovs { - for i in 0 .. iov.len() { + for i in 0..iov.len() { iov[i] = s.rng.next_u32() as u8; } } @@ -118,15 +116,13 @@ impl FaultMethod { } let r = match s { - "status-lvol-nospace" => { - IoCompletionStatus::LvolError(LvolFailure::NoSpace) - } + "status-lvol-nospace" => IoCompletionStatus::LvolError(LvolFailure::NoSpace), "status-submit-read" => { IoCompletionStatus::IoSubmissionError(IoSubmissionFailure::Read) } - "status-submit-write" => IoCompletionStatus::IoSubmissionError( - IoSubmissionFailure::Write, - ), + "status-submit-write" => { + IoCompletionStatus::IoSubmissionError(IoSubmissionFailure::Write) + } "status-admin" => IoCompletionStatus::AdminCommandError, _ => return None, }; diff --git a/io-engine/src/core/fault_injection/inject_io_ctx.rs b/io-engine/src/core/fault_injection/inject_io_ctx.rs index d1f161b0f..7d4d03935 100644 --- a/io-engine/src/core/fault_injection/inject_io_ctx.rs +++ b/io-engine/src/core/fault_injection/inject_io_ctx.rs @@ -43,7 +43,7 @@ impl InjectIoCtx { Self { domain, dev: InjectIoDevice::None, - range: 0 .. 0, + range: 0..0, io_type: IoType::Invalid, iovs: std::ptr::null_mut(), iovs_len: 0, @@ -63,7 +63,7 @@ impl InjectIoCtx { Self { domain, dev: dev.into(), - range: offset .. 
offset + num_blocks, + range: offset..offset + num_blocks, io_type, iovs: iovs.as_ptr() as *mut _, iovs_len: iovs.len(), @@ -88,9 +88,7 @@ impl InjectIoCtx { unsafe { match self.dev { InjectIoDevice::None => false, - InjectIoDevice::BlockDevice(dev) => { - (*dev).device_name() == name - } + InjectIoDevice::BlockDevice(dev) => (*dev).device_name() == name, InjectIoDevice::DeviceName(pname) => &*pname == name, } } diff --git a/io-engine/src/core/fault_injection/injection.rs b/io-engine/src/core/fault_injection/injection.rs index f45f71ce7..a26f64d4f 100644 --- a/io-engine/src/core/fault_injection/injection.rs +++ b/io-engine/src/core/fault_injection/injection.rs @@ -12,12 +12,7 @@ use url::Url; use crate::core::IoCompletionStatus; use super::{ - FaultDomain, - FaultInjectionError, - FaultIoOperation, - FaultIoStage, - FaultMethod, - InjectIoCtx, + FaultDomain, FaultInjectionError, FaultIoOperation, FaultIoStage, FaultMethod, InjectIoCtx, InjectionState, }; @@ -54,14 +49,13 @@ pub struct Injection { impl InjectionBuilder { /// TODO pub fn with_offset(&mut self, offset: u64, num_blocks: u64) -> &mut Self { - self.block_range = Some(offset .. offset + num_blocks); + self.block_range = Some(offset..offset + num_blocks); self } /// TODO pub fn with_method_nvme_error(&mut self, err: NvmeStatus) -> &mut Self { - self.method = - Some(FaultMethod::Status(IoCompletionStatus::NvmeError(err))); + self.method = Some(FaultMethod::Status(IoCompletionStatus::NvmeError(err))); self } @@ -126,8 +120,7 @@ impl Debug for Injection { ft = self.method, ); - let timed = if !self.time_range.start.is_zero() - || self.time_range.end != Duration::MAX + let timed = if !self.time_range.start.is_zero() || self.time_range.end != Duration::MAX { format!( " for period {b} -> {e} ({t})", @@ -139,9 +132,7 @@ impl Debug for Injection { String::default() }; - let range = if self.block_range.start != 0 - || self.block_range.end != u64::MAX - { + let range = if self.block_range.start != 0 || self.block_range.end != u64::MAX { format!( " at blocks {rs}..{re}", rs = self.block_range.start, @@ -179,8 +170,8 @@ impl Default for Injection { io_operation: FaultIoOperation::ReadWrite, io_stage: FaultIoStage::Submission, method: FaultMethod::DATA_TRANSFER_ERROR, - time_range: Duration::ZERO .. Duration::MAX, - block_range: 0 .. u64::MAX, + time_range: Duration::ZERO..Duration::MAX, + block_range: 0..u64::MAX, retries: u64::MAX, state: Default::default(), } @@ -196,11 +187,10 @@ impl Injection { }); } - let p = - Url::parse(uri).map_err(|e| FaultInjectionError::InvalidUri { - source: e, - uri: uri.to_owned(), - })?; + let p = Url::parse(uri).map_err(|e| FaultInjectionError::InvalidUri { + source: e, + uri: uri.to_owned(), + })?; let mut r = Self { uri: Some(uri.to_owned()), @@ -226,9 +216,7 @@ impl Injection { "begin_at" => r.time_range.start = parse_timer(&k, &v)?, "end_at" => r.time_range.end = parse_timer(&k, &v)?, "offset" => r.block_range.start = parse_num(&k, &v)?, - "num_blk" | "num_blocks" => { - r.block_range.end = parse_num(&k, &v)? 
- } + "num_blk" | "num_blocks" => r.block_range.end = parse_num(&k, &v)?, "retries" => r.retries = parse_num(&k, &v)?, _ => { return Err(FaultInjectionError::UnknownParameter { @@ -239,8 +227,7 @@ impl Injection { }; } - r.block_range.end = - r.block_range.start.saturating_add(r.block_range.end); + r.block_range.end = r.block_range.start.saturating_add(r.block_range.end); if r.time_range.start > r.time_range.end { return Err(FaultInjectionError::BadDurations { @@ -276,10 +263,7 @@ impl Injection { } if self.time_range.start != d.time_range.start { - opts.push(format!( - "begin_at={:?}", - self.time_range.start.as_millis() - )); + opts.push(format!("begin_at={:?}", self.time_range.start.as_millis())); } if self.time_range.end != d.time_range.end { @@ -331,11 +315,7 @@ impl Injection { /// Otherwise, returns an operation status to be returned by the calling I/O /// routine. #[inline] - pub fn inject( - &self, - stage: FaultIoStage, - ctx: &InjectIoCtx, - ) -> Option { + pub fn inject(&self, stage: FaultIoStage, ctx: &InjectIoCtx) -> Option { if !ctx.is_valid() || !ctx.domain_ok(self.domain) || stage != self.io_stage @@ -374,10 +354,7 @@ fn parse_domain(k: &str, v: &str) -> Result { } /// TODO -fn parse_fault_io_type( - k: &str, - v: &str, -) -> Result { +fn parse_fault_io_type(k: &str, v: &str) -> Result { let res = match v { "read" | "r" | "Read" => FaultIoOperation::Read, "write" | "w" | "Write" => FaultIoOperation::Write, @@ -393,14 +370,9 @@ fn parse_fault_io_type( } /// TODO -fn parse_fault_io_stage( - k: &str, - v: &str, -) -> Result { +fn parse_fault_io_stage(k: &str, v: &str) -> Result { let res = match v { - "submit" | "s" | "submission" | "Submission" => { - FaultIoStage::Submission - } + "submit" | "s" | "submission" | "Submission" => FaultIoStage::Submission, "compl" | "c" | "completion" | "Completion" => FaultIoStage::Completion, _ => { return Err(FaultInjectionError::UnknownParameter { @@ -418,23 +390,21 @@ fn parse_method(k: &str, v: &str) -> Result { "status" | "Status" => Ok(FaultMethod::DATA_TRANSFER_ERROR), // TODO: add data corruption methods. "data" | "Data" => Ok(FaultMethod::Data), - _ => FaultMethod::parse(v).ok_or_else(|| { - FaultInjectionError::UnknownParameter { - name: k.to_string(), - value: v.to_string(), - } + _ => FaultMethod::parse(v).ok_or_else(|| FaultInjectionError::UnknownParameter { + name: k.to_string(), + value: v.to_string(), }), } } /// TODO fn parse_timer(k: &str, v: &str) -> Result { - let b = v.parse::().map_err(|_| { - FaultInjectionError::BadParameterValue { + let b = v + .parse::() + .map_err(|_| FaultInjectionError::BadParameterValue { name: k.to_string(), value: v.to_string(), - } - })?; + })?; Ok(Duration::from_millis(b)) } diff --git a/io-engine/src/core/fault_injection/injection_api.rs b/io-engine/src/core/fault_injection/injection_api.rs index 2a6786b5b..692de1d83 100644 --- a/io-engine/src/core/fault_injection/injection_api.rs +++ b/io-engine/src/core/fault_injection/injection_api.rs @@ -7,12 +7,7 @@ use std::sync::atomic::{AtomicBool, Ordering}; use crate::core::{CoreError, IoCompletionStatus}; use super::{ - add_bdev_io_injection, - FaultDomain, - FaultInjectionError, - FaultIoStage, - InjectIoCtx, - Injection, + add_bdev_io_injection, FaultDomain, FaultInjectionError, FaultIoStage, InjectIoCtx, Injection, }; /// A list of fault injections. 
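Note: the injection URI parameters handled above combine as follows. `offset` and `num_blk` count blocks, and the parser turns `num_blk` into an absolute end bound with saturating_add; `begin_at` and `end_at` are milliseconds fed through parse_timer. A minimal sketch of that arithmetic, illustration only with invented values, not code from the patch:

    use std::time::Duration;

    fn main() {
        let (offset, num_blk): (u64, u64) = (128, 64); // "offset=128&num_blk=64"
        let block_range = offset..offset.saturating_add(num_blk);
        assert_eq!(block_range, 128..192);

        let begin_at = Duration::from_millis(500); // "begin_at=500"
        let end_at = Duration::from_millis(2_000); // "end_at=2000"
        // begin_at > end_at would be rejected with FaultInjectionError::BadDurations.
        assert!(begin_at <= end_at);
    }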
@@ -24,9 +19,7 @@ static INJECTIONS: OnceCell> = OnceCell::new(); impl Injections { fn new() -> Self { - Self { - items: Vec::new(), - } + Self { items: Vec::new() } } #[inline(always)] @@ -62,11 +55,7 @@ impl Injections { /// TODO #[inline(always)] - fn inject( - &self, - stage: FaultIoStage, - op: &InjectIoCtx, - ) -> Option { + fn inject(&self, stage: FaultIoStage, op: &InjectIoCtx) -> Option { self.items.iter().find_map(|inj| inj.inject(stage, op)) } } @@ -138,10 +127,7 @@ pub fn inject_completion_error( ctx: &InjectIoCtx, status: IoCompletionStatus, ) -> IoCompletionStatus { - if !injections_enabled() - || !ctx.is_valid() - || status != IoCompletionStatus::Success - { + if !injections_enabled() || !ctx.is_valid() || status != IoCompletionStatus::Success { return status; } diff --git a/io-engine/src/core/fault_injection/injection_state.rs b/io-engine/src/core/fault_injection/injection_state.rs index 83ff90220..10d766424 100644 --- a/io-engine/src/core/fault_injection/injection_state.rs +++ b/io-engine/src/core/fault_injection/injection_state.rs @@ -15,8 +15,8 @@ pub struct InjectionState { impl Default for InjectionState { fn default() -> Self { let seed = [ - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, + 0, 0, 0, ]; Self { started: None, diff --git a/io-engine/src/core/fault_injection/mod.rs b/io-engine/src/core/fault_injection/mod.rs index 5f9dac674..6b3c84a70 100644 --- a/io-engine/src/core/fault_injection/mod.rs +++ b/io-engine/src/core/fault_injection/mod.rs @@ -19,10 +19,7 @@ pub use fault_method::FaultMethod; pub use inject_io_ctx::{InjectIoCtx, InjectIoDevice}; pub use injection::{Injection, InjectionBuilder, InjectionBuilderError}; pub use injection_api::{ - add_fault_injection, - inject_completion_error, - inject_submission_error, - list_fault_injections, + add_fault_injection, inject_completion_error, inject_submission_error, list_fault_injections, remove_fault_injection, }; pub use injection_state::InjectionState; @@ -95,12 +92,7 @@ pub enum FaultInjectionError { UnknownParameter { name: String, value: String }, #[snafu(display("Bad injection parameter value: '{}={}'", name, value))] BadParameterValue { name: String, value: String }, - #[snafu(display( - "Bad injection '{}' timer durations: {:?}, {:?}", - name, - begin, - end - ))] + #[snafu(display("Bad injection '{}' timer durations: {:?}, {:?}", name, begin, end))] BadDurations { name: String, begin: Duration, diff --git a/io-engine/src/core/handle.rs b/io-engine/src/core/handle.rs index 3e69e9d4f..8bfc905ea 100644 --- a/io-engine/src/core/handle.rs +++ b/io-engine/src/core/handle.rs @@ -10,33 +10,15 @@ use nix::errno::Errno; use spdk_rs::{ libspdk::{ - spdk_bdev_desc, - spdk_bdev_free_io, - spdk_bdev_io, - spdk_bdev_nvme_admin_passthru_ro, - spdk_bdev_read, - spdk_bdev_reset, - spdk_bdev_write, - spdk_bdev_write_zeroes, - spdk_io_channel, + spdk_bdev_desc, spdk_bdev_free_io, spdk_bdev_io, spdk_bdev_nvme_admin_passthru_ro, + spdk_bdev_read, spdk_bdev_reset, spdk_bdev_write, spdk_bdev_write_zeroes, spdk_io_channel, spdk_nvme_cmd, }, - nvme_admin_opc, - BdevOps, - DmaBuf, - DmaError, - IoChannelGuard, - NvmeStatus, + nvme_admin_opc, BdevOps, DmaBuf, DmaError, IoChannelGuard, NvmeStatus, }; use crate::{ - core::{ - Bdev, - CoreError, - DescriptorGuard, - IoCompletionStatus, - SnapshotParams, - }, + core::{Bdev, CoreError, DescriptorGuard, IoCompletionStatus, SnapshotParams}, 
ffihelper::cb_arg, subsys, }; @@ -57,30 +39,19 @@ pub type UntypedBdevHandle = BdevHandle<()>; impl BdevHandle { /// Opens a new bdev handle allocating a new descriptor as well as a new /// I/O channel. - pub fn open( - name: &str, - read_write: bool, - claim: bool, - ) -> Result, CoreError> { + pub fn open(name: &str, read_write: bool, claim: bool) -> Result, CoreError> { if let Ok(desc) = Bdev::::open_by_name(name, read_write) { if claim && !desc.claim() { - return Err(CoreError::BdevNotFound { - name: name.into(), - }); + return Err(CoreError::BdevNotFound { name: name.into() }); } return BdevHandle::try_from(Arc::new(desc)); } - Err(CoreError::BdevNotFound { - name: name.into(), - }) + Err(CoreError::BdevNotFound { name: name.into() }) } /// Opens a new bdev handle given a bdev. - pub fn open_with_bdev( - bdev: &Bdev, - read_write: bool, - ) -> Result, CoreError> { + pub fn open_with_bdev(bdev: &Bdev, read_write: bool) -> Result, CoreError> { let desc = bdev.open(read_write)?; BdevHandle::try_from(Arc::new(desc)) } @@ -109,14 +80,8 @@ impl BdevHandle { /// private io completion callback that sends back the success status of the /// IO. When the IO is freed, it is returned to the memory pool. The /// buffer is not freed. - extern "C" fn io_completion_cb( - io: *mut spdk_bdev_io, - success: bool, - arg: *mut c_void, - ) { - let sender = unsafe { - Box::from_raw(arg as *const _ as *mut oneshot::Sender) - }; + extern "C" fn io_completion_cb(io: *mut spdk_bdev_io, success: bool, arg: *mut c_void) { + let sender = unsafe { Box::from_raw(arg as *const _ as *mut oneshot::Sender) }; unsafe { spdk_bdev_free_io(io); @@ -133,11 +98,7 @@ impl BdevHandle { /// write the ['DmaBuf'] to the given offset. This function is implemented /// using a ['Future'] and is not intended for non-internal IO. 
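Note: io_completion_cb above is one half of a callback-to-future bridge; the read/write paths that follow box a oneshot sender, smuggle it through SPDK's void* context argument, and await the receiver. A self-contained sketch of the same pattern, with names local to the example:

    use futures::channel::oneshot;
    use std::os::raw::c_void;

    // C-style completion callback: rebox the sender exactly once and fire it.
    extern "C" fn done_cb(arg: *mut c_void) {
        let sender = unsafe { Box::from_raw(arg as *mut oneshot::Sender<bool>) };
        sender.send(true).ok();
    }

    // Submit an operation, handing the boxed sender to the callback as *mut c_void.
    async fn submit_and_wait(submit: impl FnOnce(*mut c_void)) -> bool {
        let (s, r) = oneshot::channel::<bool>();
        submit(Box::into_raw(Box::new(s)) as *mut c_void);
        r.await.unwrap_or(false)
    }

    fn main() {
        // Here the "device" completes synchronously by invoking the callback itself.
        let ok = futures::executor::block_on(submit_and_wait(|arg| done_cb(arg)));
        assert!(ok);
    }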
- pub async fn write_at( - &self, - offset: u64, - buffer: &DmaBuf, - ) -> Result { + pub async fn write_at(&self, offset: u64, buffer: &DmaBuf) -> Result { let (s, r) = oneshot::channel::(); let errno = unsafe { spdk_bdev_write( @@ -170,11 +131,7 @@ impl BdevHandle { } /// read at given offset into the ['DmaBuf'] - pub async fn read_at( - &self, - offset: u64, - buffer: &mut DmaBuf, - ) -> Result { + pub async fn read_at(&self, offset: u64, buffer: &mut DmaBuf) -> Result { let (s, r) = oneshot::channel::(); let errno = unsafe { spdk_bdev_read( @@ -198,12 +155,10 @@ impl BdevHandle { match r.await.expect("Failed awaiting read IO") { NvmeStatus::SUCCESS => Ok(buffer.len()), - NvmeStatus::UNWRITTEN_BLOCK => { - Err(CoreError::ReadingUnallocatedBlock { - offset, - len: buffer.len(), - }) - } + NvmeStatus::UNWRITTEN_BLOCK => Err(CoreError::ReadingUnallocatedBlock { + offset, + len: buffer.len(), + }), status => Err(CoreError::ReadFailed { status: IoCompletionStatus::NvmeError(status), offset, @@ -236,11 +191,7 @@ impl BdevHandle { } } - pub async fn write_zeroes_at( - &self, - offset: u64, - len: u64, - ) -> Result<(), CoreError> { + pub async fn write_zeroes_at(&self, offset: u64, len: u64) -> Result<(), CoreError> { let (s, r) = oneshot::channel::(); let errno = unsafe { spdk_bdev_write_zeroes( @@ -267,19 +218,13 @@ impl BdevHandle { { Ok(()) } else { - Err(CoreError::WriteZeroesFailed { - offset, - len, - }) + Err(CoreError::WriteZeroesFailed { offset, len }) } } /// create a snapshot, only works for nvme bdev /// returns snapshot time as u64 seconds since Unix epoch - pub async fn create_snapshot( - &self, - _snapshot: SnapshotParams, - ) -> Result { + pub async fn create_snapshot(&self, _snapshot: SnapshotParams) -> Result { let mut cmd = spdk_nvme_cmd::default(); cmd.set_opc(nvme_admin_opc::CREATE_SNAPSHOT.into()); let now = subsys::set_snapshot_time(&mut cmd); @@ -290,10 +235,7 @@ impl BdevHandle { /// identify controller /// buffer must be at least 4096B - pub async fn nvme_identify_ctrlr( - &self, - buffer: &mut DmaBuf, - ) -> Result<(), CoreError> { + pub async fn nvme_identify_ctrlr(&self, buffer: &mut DmaBuf) -> Result<(), CoreError> { let mut cmd = spdk_nvme_cmd::default(); cmd.set_opc(nvme_admin_opc::IDENTIFY.into()); cmd.nsid = 0xffffffff; @@ -374,10 +316,7 @@ impl TryFrom>> for BdevHandle { fn try_from(desc: Arc>) -> Result { if let Ok(channel) = desc.io_channel() { - return Ok(Self { - channel, - desc, - }); + return Ok(Self { channel, desc }); } Err(CoreError::GetIoChannel { diff --git a/io-engine/src/core/io_device.rs b/io-engine/src/core/io_device.rs index 4a3c7417c..766024494 100644 --- a/io-engine/src/core/io_device.rs +++ b/io-engine/src/core/io_device.rs @@ -2,13 +2,8 @@ use std::{os::raw::c_void, ptr::NonNull}; use crate::ffihelper::IntoCString; use spdk_rs::libspdk::{ - spdk_for_each_channel, - spdk_for_each_channel_continue, - spdk_io_channel, - spdk_io_channel_iter, - spdk_io_channel_iter_get_channel, - spdk_io_channel_iter_get_ctx, - spdk_io_device_register, + spdk_for_each_channel, spdk_for_each_channel_continue, spdk_io_channel, spdk_io_channel_iter, + spdk_io_channel_iter_get_channel, spdk_io_channel_iter_get_ctx, spdk_io_device_register, spdk_io_device_unregister, }; @@ -65,8 +60,7 @@ impl IoDevice { struct TraverseCtx { channel_cb: Box i32 + 'static>, done_cb: Box, - ctx_getter: - Box &'static mut C + 'static>, + ctx_getter: Box &'static mut C + 'static>, ctx: N, } @@ -83,12 +77,9 @@ impl IoDevice { /// Low-level per-channel visitor to be invoked by SPDK I/O 
channel /// enumeration logic. - extern "C" fn _visit_channel( - i: *mut spdk_io_channel_iter, - ) { + extern "C" fn _visit_channel(i: *mut spdk_io_channel_iter) { let traverse_ctx = unsafe { - let p = - spdk_io_channel_iter_get_ctx(i) as *mut TraverseCtx; + let p = spdk_io_channel_iter_get_ctx(i) as *mut TraverseCtx; &mut *p }; let io_channel = unsafe { @@ -96,8 +87,7 @@ impl IoDevice { (traverse_ctx.ctx_getter)(ch) }; - let rc = - (traverse_ctx.channel_cb)(io_channel, &mut traverse_ctx.ctx); + let rc = (traverse_ctx.channel_cb)(io_channel, &mut traverse_ctx.ctx); unsafe { spdk_for_each_channel_continue(i, rc); @@ -114,7 +104,7 @@ impl IoDevice { // dropped. let mut traverse_ctx = unsafe { Box::>::from_raw( - spdk_io_channel_iter_get_ctx(i) as *mut TraverseCtx, + spdk_io_channel_iter_get_ctx(i) as *mut TraverseCtx ) }; diff --git a/io-engine/src/core/io_driver.rs b/io-engine/src/core/io_driver.rs index 9cd40c359..d41ea6125 100644 --- a/io-engine/src/core/io_driver.rs +++ b/io-engine/src/core/io_driver.rs @@ -4,11 +4,7 @@ use rand::Rng; use std::{ptr::NonNull, sync::Mutex}; use spdk_rs::libspdk::{ - spdk_bdev_free_io, - spdk_bdev_io, - spdk_bdev_read, - spdk_bdev_reset, - spdk_bdev_write, + spdk_bdev_free_io, spdk_bdev_io, spdk_bdev_read, spdk_bdev_reset, spdk_bdev_write, }; use crate::{ @@ -237,9 +233,7 @@ impl Job { } /// start the job that will dispatch an IO up to the provided queue depth fn start(mut self) -> Box { - let thread = - Thread::new(format!("job_{}", self.bdev.name()), self.core) - .unwrap(); + let thread = Thread::new(format!("job_{}", self.bdev.name()), self.core).unwrap(); thread.with(|| { self.ch = self.desc.io_channel().ok(); let mut boxed = Box::new(self); @@ -325,7 +319,7 @@ impl Builder { let mut queue = Vec::new(); - (0 .. self.qd).for_each(|offset| { + (0..self.qd).for_each(|offset| { queue.push(Io { buf: DmaBuf::new(self.io_size, bdev.alignment()).unwrap(), iot: self.iot, diff --git a/io-engine/src/core/lock.rs b/io-engine/src/core/lock.rs index 29bbc1f9f..17785f0bf 100755 --- a/io-engine/src/core/lock.rs +++ b/io-engine/src/core/lock.rs @@ -26,11 +26,7 @@ pub struct ResourceLockManagerConfig { impl ResourceLockManagerConfig { /// Add resource subsystem to the config. /// Panics if another subsystem with the same id already exists. - pub fn with_subsystem>( - mut self, - id: T, - num_objects: usize, - ) -> Self { + pub fn with_subsystem>(mut self, id: T, num_objects: usize) -> Self { let ids = id.as_ref(); if self.subsystems.iter().any(|(i, _)| ids.eq(i)) { @@ -54,10 +50,9 @@ impl ResourceSubsystem { /// Create a new resource subsystem with target id and maximum number of /// objects. 
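Note: the constructor below fills its per-object lock table with repeat_with rather than the vec! macro, because vec![value; n] requires Clone and mutexes are not cloneable. A small illustration of the idiom, using std's Mutex for brevity:

    use std::sync::Mutex;

    fn main() {
        let num_objects = 4;
        // repeat_with re-runs the closure, yielding n independent mutexes.
        let object_locks: Vec<Mutex<u64>> = std::iter::repeat_with(|| Mutex::new(0))
            .take(num_objects)
            .collect();
        assert_eq!(object_locks.len(), num_objects);
        // vec![Mutex::new(0); num_objects] would not compile: Mutex is not Clone.
    }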
fn new(id: String, num_objects: usize) -> Self { - let object_locks = - std::iter::repeat_with(|| Mutex::new(LockStats::default())) - .take(num_objects) - .collect::>(); + let object_locks = std::iter::repeat_with(|| Mutex::new(LockStats::default())) + .take(num_objects) + .collect::>(); Self { id, diff --git a/io-engine/src/core/mempool.rs b/io-engine/src/core/mempool.rs index a25cd961f..265de4f66 100644 --- a/io-engine/src/core/mempool.rs +++ b/io-engine/src/core/mempool.rs @@ -7,13 +7,8 @@ use std::{marker::PhantomData, mem::size_of, os::raw::c_void, ptr::NonNull}; use spdk_rs::libspdk::{ - spdk_mempool, - spdk_mempool_count, - spdk_mempool_create, - spdk_mempool_free, - spdk_mempool_get, - spdk_mempool_put, - SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, + spdk_mempool, spdk_mempool_count, spdk_mempool_create, spdk_mempool_free, spdk_mempool_get, + spdk_mempool_put, SPDK_MEMPOOL_DEFAULT_CACHE_SIZE, }; use crate::ffihelper::IntoCString; @@ -52,7 +47,9 @@ impl MemoryPool { info!( "Memory pool '{}' with {} elements ({} bytes size) successfully created", - name, size, size_of::() + name, + size, + size_of::() ); Some(Self { pool: NonNull::new(pool).unwrap(), @@ -65,8 +62,7 @@ impl MemoryPool { /// Get free element from memory pool and initialize memory with target /// object. pub fn get(&self, val: T) -> Option<*mut T> { - let ptr: *mut T = - unsafe { spdk_mempool_get(self.pool.as_ptr()) } as *mut T; + let ptr: *mut T = unsafe { spdk_mempool_get(self.pool.as_ptr()) } as *mut T; if ptr.is_null() { return None; diff --git a/io-engine/src/core/mod.rs b/io-engine/src/core/mod.rs index f6e997ce7..0cf3fa4d1 100644 --- a/io-engine/src/core/mod.rs +++ b/io-engine/src/core/mod.rs @@ -10,80 +10,37 @@ use snafu::Snafu; pub use bdev::{Bdev, BdevIter, BdevStater, BdevStats, UntypedBdev}; pub use block_device::{ - BlockDevice, - BlockDeviceDescriptor, - BlockDeviceHandle, - BlockDeviceIoStats, - DeviceIoController, - DeviceTimeoutAction, - IoCompletionCallback, - IoCompletionCallbackArg, - LbaRangeController, - OpCompletionCallback, - OpCompletionCallbackArg, - ReadOptions, + BlockDevice, BlockDeviceDescriptor, BlockDeviceHandle, BlockDeviceIoStats, DeviceIoController, + DeviceTimeoutAction, IoCompletionCallback, IoCompletionCallbackArg, LbaRangeController, + OpCompletionCallback, OpCompletionCallbackArg, ReadOptions, }; pub use cpu_cores::{Core, Cores}; pub use descriptor::{DescriptorGuard, UntypedDescriptorGuard}; pub use device_events::{ - DeviceEventDispatcher, - DeviceEventListener, - DeviceEventSink, - DeviceEventType, -}; -pub use device_monitor::{ - device_cmd_queue, - device_monitor_loop, - DeviceCommand, -}; -pub use env::{ - mayastor_env_stop, - MayastorCliArgs, - MayastorEnvironment, - GLOBAL_RC, - SIG_RECEIVED, + DeviceEventDispatcher, DeviceEventListener, DeviceEventSink, DeviceEventType, }; +pub use device_monitor::{device_cmd_queue, device_monitor_loop, DeviceCommand}; +pub use env::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, GLOBAL_RC, SIG_RECEIVED}; pub use handle::{BdevHandle, UntypedBdevHandle}; pub use io_device::IoDevice; pub use logical_volume::LogicalVolume; -pub use reactor::{ - reactor_monitor_loop, - Reactor, - ReactorState, - Reactors, - REACTOR_LIST, -}; +pub use reactor::{reactor_monitor_loop, Reactor, ReactorState, Reactors, REACTOR_LIST}; pub use lock::{ - ProtectedSubsystems, - ResourceLockGuard, - ResourceLockManager, - ResourceLockManagerConfig, + ProtectedSubsystems, ResourceLockGuard, ResourceLockManager, ResourceLockManagerConfig, ResourceSubsystem, }; pub use 
runtime::spawn; pub(crate) use segment_map::SegmentMap; -pub use share::{ - NvmfShareProps, - Protocol, - PtplProps, - Share, - ShareProps, - UpdateProps, -}; +pub use share::{NvmfShareProps, Protocol, PtplProps, Share, ShareProps, UpdateProps}; pub use spdk_rs::{cpu_cores, IoStatus, IoType, NvmeStatus}; pub use thread::Mthread; use crate::subsys::NvmfError; pub use snapshot::{ - CloneParams, - CloneXattrs, - ISnapshotDescriptor, - LvolSnapshotOps, - SnapshotDescriptor, - SnapshotParams, - SnapshotXattrs, + CloneParams, CloneXattrs, ISnapshotDescriptor, LvolSnapshotOps, SnapshotDescriptor, + SnapshotParams, SnapshotXattrs, }; use spdk_rs::libspdk::SPDK_NVME_SC_CAPACITY_EXCEEDED; @@ -157,31 +114,19 @@ pub enum CoreError { InvalidOffset { offset: u64, }, - #[snafu(display( - "Failed to dispatch write at offset {} length {}", - offset, - len - ))] + #[snafu(display("Failed to dispatch write at offset {} length {}", offset, len))] WriteDispatch { source: Errno, offset: u64, len: u64, }, - #[snafu(display( - "Failed to dispatch compare at offset {} length {}", - offset, - len - ))] + #[snafu(display("Failed to dispatch compare at offset {} length {}", offset, len))] CompareDispatch { source: Errno, offset: u64, len: u64, }, - #[snafu(display( - "Failed to dispatch read at offset {} length {}", - offset, - len - ))] + #[snafu(display("Failed to dispatch read at offset {} length {}", offset, len))] ReadDispatch { source: Errno, offset: u64, @@ -195,30 +140,18 @@ pub enum CoreError { FlushDispatch { source: Errno, }, - #[snafu(display( - "Failed to dispatch NVMe Admin command {:x}h: {}", - opcode, - source - ))] + #[snafu(display("Failed to dispatch NVMe Admin command {:x}h: {}", opcode, source))] NvmeAdminDispatch { source: Errno, opcode: u16, }, - #[snafu(display( - "Failed to dispatch unmap at offset {} length {}", - offset, - len - ))] + #[snafu(display("Failed to dispatch unmap at offset {} length {}", offset, len))] UnmapDispatch { source: Errno, offset: u64, len: u64, }, - #[snafu(display( - "Failed to dispatch write-zeroes at offset {} length {}", - offset, - len - ))] + #[snafu(display("Failed to dispatch write-zeroes at offset {} length {}", offset, len))] WriteZeroesDispatch { source: Errno, offset: u64, @@ -277,11 +210,7 @@ pub enum CoreError { }, #[snafu(display("Reset failed"))] ResetFailed {}, - #[snafu(display( - "Write zeroes failed at offset {} length {}", - offset, - len - ))] + #[snafu(display("Write zeroes failed at offset {} length {}", offset, len))] WriteZeroesFailed { offset: u64, len: u64, @@ -331,10 +260,7 @@ pub enum CoreError { source: Errno, name: String, }, - #[snafu(display( - "NVMe persistence through power-loss failure: {}", - reason - ))] + #[snafu(display("NVMe persistence through power-loss failure: {}", reason))] Ptpl { reason: String, }, @@ -358,108 +284,40 @@ pub trait ToErrno { impl ToErrno for CoreError { fn to_errno(self) -> Errno { match self { - Self::BdevNotFound { - .. - } => Errno::ENODEV, - Self::OpenBdev { - source, - } => source, - Self::InvalidDescriptor { - .. - } => Errno::ENODEV, - Self::GetIoChannel { - .. - } => Errno::ENXIO, - Self::InvalidOffset { - .. - } => Errno::EINVAL, - Self::WriteDispatch { - source, .. - } => source, - Self::ReadDispatch { - source, .. - } => source, - Self::CompareDispatch { - source, .. - } => source, - Self::ResetDispatch { - source, .. - } => source, - Self::FlushDispatch { - source, .. - } => source, - Self::NvmeAdminDispatch { - source, .. - } => source, - Self::UnmapDispatch { - source, .. 
- } => source, - Self::WriteZeroesDispatch { - source, .. - } => source, - Self::NvmeIoPassthruDispatch { - source, .. - } => source, - Self::WriteFailed { - .. - } - | Self::ReadFailed { - .. - } - | Self::CompareFailed { - .. - } - | Self::ReadingUnallocatedBlock { - .. - } - | Self::ResetFailed { - .. - } - | Self::WriteZeroesFailed { - .. - } - | Self::NvmeIoPassthruFailed { - .. - } - | Self::ShareNvmf { - .. - } - | Self::UnshareNvmf { - .. - } => Errno::EIO, - Self::NvmeAdminFailed { - source, .. - } => source, - Self::NotSupported { - source, .. - } => source, - Self::ReactorConfigureFailed { - source, .. - } => source, - Self::DmaAllocationFailed { - .. - } => Errno::ENOMEM, - Self::DeviceStatisticsFailed { - source, .. - } => source, - Self::NoDevicesAvailable { - .. - } => Errno::ENODEV, - Self::InvalidNvmeDeviceHandle { - .. - } => Errno::EINVAL, - Self::DeviceFlush { - source, .. - } => source, - Self::Ptpl { - .. - } => Errno::EIO, - Self::SnapshotCreate { - source, .. - } => source, - Self::WipeFailed { - .. - } => Errno::EIO, + Self::BdevNotFound { .. } => Errno::ENODEV, + Self::OpenBdev { source } => source, + Self::InvalidDescriptor { .. } => Errno::ENODEV, + Self::GetIoChannel { .. } => Errno::ENXIO, + Self::InvalidOffset { .. } => Errno::EINVAL, + Self::WriteDispatch { source, .. } => source, + Self::ReadDispatch { source, .. } => source, + Self::CompareDispatch { source, .. } => source, + Self::ResetDispatch { source, .. } => source, + Self::FlushDispatch { source, .. } => source, + Self::NvmeAdminDispatch { source, .. } => source, + Self::UnmapDispatch { source, .. } => source, + Self::WriteZeroesDispatch { source, .. } => source, + Self::NvmeIoPassthruDispatch { source, .. } => source, + Self::WriteFailed { .. } + | Self::ReadFailed { .. } + | Self::CompareFailed { .. } + | Self::ReadingUnallocatedBlock { .. } + | Self::ResetFailed { .. } + | Self::WriteZeroesFailed { .. } + | Self::NvmeIoPassthruFailed { .. } + | Self::ShareNvmf { .. } + | Self::UnshareNvmf { .. } => Errno::EIO, + Self::NvmeAdminFailed { source, .. } => source, + Self::NotSupported { source, .. } => source, + Self::ReactorConfigureFailed { source, .. } => source, + Self::DmaAllocationFailed { .. } => Errno::ENOMEM, + Self::DeviceStatisticsFailed { source, .. } => source, + Self::NoDevicesAvailable { .. } => Errno::ENODEV, + Self::InvalidNvmeDeviceHandle { .. } => Errno::EINVAL, + Self::DeviceFlush { source, .. } => source, + Self::Ptpl { .. } => Errno::EIO, + Self::SnapshotCreate { source, .. } => source, + Self::WipeFailed { .. } => Errno::EIO, } } } @@ -509,8 +367,7 @@ impl Debug for IoCompletionStatus { impl From for IoCompletionStatus { fn from(s: NvmeStatus) -> Self { match s { - NvmeStatus::NO_SPACE - | NvmeStatus::Generic(SPDK_NVME_SC_CAPACITY_EXCEEDED) => { + NvmeStatus::NO_SPACE | NvmeStatus::Generic(SPDK_NVME_SC_CAPACITY_EXCEEDED) => { Self::LvolError(LvolFailure::NoSpace) } _ => Self::NvmeError(s), diff --git a/io-engine/src/core/nic.rs b/io-engine/src/core/nic.rs index a20061155..8d243f4e9 100644 --- a/io-engine/src/core/nic.rs +++ b/io-engine/src/core/nic.rs @@ -61,12 +61,7 @@ impl fmt::Display for MacAddr { write!( f, "{:02x}:{:02x}:{:02x}:{:02x}:{:02x}:{:02x}", - self.addr[0], - self.addr[1], - self.addr[2], - self.addr[3], - self.addr[4], - self.addr[5] + self.addr[0], self.addr[1], self.addr[2], self.addr[3], self.addr[4], self.addr[5] ) } } @@ -84,9 +79,7 @@ impl FromStr for MacAddr { impl MacAddr { /// Creates a new MAC address instance from address bytes. 
pub fn new(addr: [u8; 6]) -> Self { - Self { - addr, - } + Self { addr } } /// Parses MAC address string: six 2-digit hex numbers separated by commas. diff --git a/io-engine/src/core/partition.rs b/io-engine/src/core/partition.rs index 98337ca65..c569a5cc0 100644 --- a/io-engine/src/core/partition.rs +++ b/io-engine/src/core/partition.rs @@ -10,8 +10,7 @@ pub const METADATA_RESERVATION_OFFSET: u64 = 1024 * 1024; pub const METADATA_RESERVATION_SIZE: u64 = 4 * 1024 * 1024; /// Start of data partition, in bytes. -pub const DATA_PARTITION_OFFSET: u64 = - METADATA_RESERVATION_OFFSET + METADATA_RESERVATION_SIZE; +pub const DATA_PARTITION_OFFSET: u64 = METADATA_RESERVATION_OFFSET + METADATA_RESERVATION_SIZE; /// Calculates offsets of the first and last blocks of the data /// partition for the given device size and block size. @@ -55,15 +54,13 @@ pub fn calc_data_partition( let gpt_blocks = bytes_to_alinged_blocks(GPT_TABLE_SIZE, block_size); // First block of metadata reservation. - let lba_start = - bytes_to_alinged_blocks(METADATA_RESERVATION_OFFSET, block_size); + let lba_start = bytes_to_alinged_blocks(METADATA_RESERVATION_OFFSET, block_size); // Last usable device block. let lba_end = num_blocks - gpt_blocks - 2; // Blocks used by metadata reservation. - let meta_blocks = - bytes_to_alinged_blocks(METADATA_RESERVATION_SIZE, block_size); + let meta_blocks = bytes_to_alinged_blocks(METADATA_RESERVATION_SIZE, block_size); // First block of data. let data_start = lba_start + meta_blocks; diff --git a/io-engine/src/core/reactor.rs b/io-engine/src/core/reactor.rs index 5caa0f8a5..fdb74bb68 100644 --- a/io-engine/src/core/reactor.rs +++ b/io-engine/src/core/reactor.rs @@ -50,16 +50,9 @@ use futures::{ }; use spdk_rs::libspdk::{ - spdk_cpuset_get_cpu, - spdk_env_thread_launch_pinned, - spdk_env_thread_wait_all, - spdk_thread, - spdk_thread_get_cpumask, - spdk_thread_lib_init_ext, - spdk_thread_op, - spdk_thread_send_msg, - SPDK_DEFAULT_MSG_MEMPOOL_SIZE, - SPDK_THREAD_OP_NEW, + spdk_cpuset_get_cpu, spdk_env_thread_launch_pinned, spdk_env_thread_wait_all, spdk_thread, + spdk_thread_get_cpumask, spdk_thread_lib_init_ext, spdk_thread_op, spdk_thread_send_msg, + SPDK_DEFAULT_MSG_MEMPOOL_SIZE, SPDK_THREAD_OP_NEW, }; use crate::{ @@ -159,9 +152,7 @@ impl Reactors { // construct one main init thread, this thread is used to bootstrap // and should be used to teardown as well. - if let Some(t) = - spdk_rs::Thread::new("init_thread".into(), Cores::first()) - { + if let Some(t) = spdk_rs::Thread::new("init_thread".into(), Cores::first()) { info!("Init thread ID {}", t.id()); } } @@ -287,8 +278,7 @@ impl Reactor { /// create a new ['Reactor'] instance fn new(core: u32, developer_delay: bool) -> Self { // create a channel to receive futures on - let (sx, rx) = - unbounded:: + 'static>>>(); + let (sx, rx) = unbounded:: + 'static>>>(); Self { threads: RefCell::new(VecDeque::new()), @@ -516,7 +506,7 @@ impl Reactor { /// queues pub fn poll_times(&self, times: u32) { let threads = self.threads.borrow(); - for _ in 0 .. times { + for _ in 0..times { threads.iter().for_each(|t| { t.poll(); }); @@ -568,10 +558,7 @@ impl Reactor { /// Spawns a future on a core the current thread is running on returning a /// channel which can be awaited. This decouples the SPDK runtime from the /// future runtimes within Rust. 
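Note: the spawn_at decoupling described above is typically consumed as in the hedged sketch below. This is not code from the patch; it assumes, per the doc comment, that spawn_at returns a Result holding a oneshot receiver for the future's output.

    // Hypothetical usage of Reactor::spawn_at (sketch only): run a future on an
    // SPDK thread and await its result from a plain Rust async context.
    async fn answer_on(thread: &spdk_rs::Thread) -> Result<u32, CoreError> {
        let rx = Reactor::spawn_at(thread, async { 40 + 2 })?;
        let answer = rx.await.expect("spawned future was dropped");
        Ok(answer)
    }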
- pub fn spawn_at( - thread: &spdk_rs::Thread, - f: F, - ) -> Result, CoreError> + pub fn spawn_at(thread: &spdk_rs::Thread, f: F) -> Result, CoreError> where F: Future + 'static, F::Output: Send + Debug, @@ -632,9 +619,7 @@ impl Reactor { } /// TODO - pub fn spawn_at_primary( - f: F, - ) -> Result, CoreError> + pub fn spawn_at_primary(f: F) -> Result, CoreError> where F: Future + 'static, F::Output: Send + Debug, @@ -727,8 +712,7 @@ pub async fn reactor_monitor_loop(freeze_timeout: Option) { for (id, core) in Cores::count().into_iter().enumerate() { let reactor = Reactors::get_by_core(core) .unwrap_or_else(|| panic!("Can't get reactor for core {}", core)); - let reactor_tick = - heartbeat_ticks.get(id).expect("Failed to get tick item"); + let reactor_tick = heartbeat_ticks.get(id).expect("Failed to get tick item"); reactor_state.push(ReactorRecord { frozen: false, diff --git a/io-engine/src/core/runtime.rs b/io-engine/src/core/runtime.rs index f7de599cf..3703e18d0 100644 --- a/io-engine/src/core/runtime.rs +++ b/io-engine/src/core/runtime.rs @@ -59,16 +59,12 @@ static RUNTIME: Lazy = Lazy::new(|| { .build() .unwrap(); - Runtime { - rt, - } + Runtime { rt } }); impl Runtime { pub fn new(rt: tokio::runtime::Runtime) -> Self { - Self { - rt, - } + Self { rt } } fn block_on(&self, f: impl Future + Send + 'static) { self.rt.block_on(f); diff --git a/io-engine/src/core/segment_map.rs b/io-engine/src/core/segment_map.rs index ac9e75901..56d10be62 100644 --- a/io-engine/src/core/segment_map.rs +++ b/io-engine/src/core/segment_map.rs @@ -61,7 +61,7 @@ impl SegmentMap { let start_seg = self.lbn_to_seg(lbn); // when `lbn_cnt` is 1 means we write only the `lbn` blk, not `lbn` + 1 let end_seg = self.lbn_to_seg(lbn + lbn_cnt - 1); - for i in start_seg ..= end_seg { + for i in start_seg..=end_seg { self.segments.set(i, value); } } diff --git a/io-engine/src/core/share.rs b/io-engine/src/core/share.rs index 119c03e34..4ddeeedad 100644 --- a/io-engine/src/core/share.rs +++ b/io-engine/src/core/share.rs @@ -24,9 +24,7 @@ impl TryFrom for Protocol { // 2 was for iSCSI // the gRPC code does not validate enums so we have // to do it here - _ => Err(LvsError::ReplicaShareProtocol { - value, - }), + _ => Err(LvsError::ReplicaShareProtocol { value }), } } } @@ -70,9 +68,7 @@ pub struct PtplProps { impl PtplProps { /// Create a new `Self` with the given json file path. pub fn new(file: std::path::PathBuf) -> Self { - Self { - file, - } + Self { file } } /// Get the json file path. pub fn path(&self) -> &std::path::PathBuf { diff --git a/io-engine/src/core/snapshot.rs b/io-engine/src/core/snapshot.rs index 03b78bdf3..0c34faf1c 100644 --- a/io-engine/src/core/snapshot.rs +++ b/io-engine/src/core/snapshot.rs @@ -103,11 +103,7 @@ impl CloneParams { } } /// Validate the given arguments and prepare the clone parameters. - pub fn prepare( - clone_name: &str, - clone_uuid: &str, - source_uuid: &str, - ) -> Option { + pub fn prepare(clone_name: &str, clone_uuid: &str, source_uuid: &str) -> Option { let clone_name = if clone_name.is_empty() { return None; } else { @@ -237,10 +233,7 @@ pub struct SnapshotDescriptor { impl SnapshotDescriptor { /// Return a generic SnapshotDescriptor. 
-    pub fn new(
-        snapshot: impl SnapshotOps + 'static,
-        info: SnapshotInfo,
-    ) -> Self {
+    pub fn new(snapshot: impl SnapshotOps + 'static, info: SnapshotInfo) -> Self {
         Self {
             snapshot: Box::new(snapshot),
             info,
diff --git a/io-engine/src/core/wiper.rs b/io-engine/src/core/wiper.rs
index 37648ab14..ab55a632a 100644
--- a/io-engine/src/core/wiper.rs
+++ b/io-engine/src/core/wiper.rs
@@ -13,9 +13,7 @@ pub enum Error {
     TooManyChunks {},
     #[snafu(display("The chunk_size is larger than the bdev"))]
     ChunkTooLarge {},
-    #[snafu(display(
-        "The chunk_size is not a multiple of the bdev block size"
-    ))]
+    #[snafu(display("The chunk_size is not a multiple of the bdev block size"))]
     ChunkBlockSizeInvalid {},
     #[snafu(display("The bdev seems to have no size!"))]
     ZeroBdev {},
@@ -31,9 +29,7 @@ pub enum Error {

 impl From<Error> for CoreError {
     fn from(source: Error) -> Self {
-        Self::WipeFailed {
-            source,
-        }
+        Self::WipeFailed { source }
     }
 }
 impl From for Error {
@@ -178,10 +174,7 @@ impl WipeStats {
 impl Wiper {
     /// Return a new `Self` which can wipe the given bdev using the provided
     /// wipe method.
-    pub fn new(
-        bdev: UntypedBdevHandle,
-        wipe_method: WipeMethod,
-    ) -> Result<Self, Error> {
+    pub fn new(bdev: UntypedBdevHandle, wipe_method: WipeMethod) -> Result<Self, Error> {
         Ok(Self {
             bdev,
             wipe_method: Self::supported(wipe_method)?,
@@ -192,30 +185,22 @@ impl Wiper {
         match &mut self.wipe_method {
             WipeMethod::None => Ok(()),
             WipeMethod::WriteZeroes => {
-                self.bdev.write_zeroes_at(offset, size).await.map_err(
-                    |source| Error::WipeIoFailed {
+                self.bdev
+                    .write_zeroes_at(offset, size)
+                    .await
+                    .map_err(|source| Error::WipeIoFailed {
                         source: Box::new(source),
-                    },
-                )
+                    })
             }
-            WipeMethod::Unmap | WipeMethod::WritePattern(_) => {
-                Err(Error::MethodUnimplemented {
-                    method: self.wipe_method,
-                })
-            }
-            WipeMethod::CkSum(CkSumMethod::Crc32 {
-                crc32c,
-            }) => {
+            WipeMethod::Unmap | WipeMethod::WritePattern(_) => Err(Error::MethodUnimplemented {
+                method: self.wipe_method,
+            }),
+            WipeMethod::CkSum(CkSumMethod::Crc32 { crc32c }) => {
                 let mut buffer = self.bdev.dma_malloc(size).unwrap();
                 self.bdev.read_at(offset, &mut buffer).await?;

-                *crc32c = unsafe {
-                    spdk_rs::libspdk::spdk_crc32c_update(
-                        buffer.as_ptr(),
-                        size,
-                        *crc32c,
-                    )
-                };
+                *crc32c =
+                    unsafe { spdk_rs::libspdk::spdk_crc32c_update(buffer.as_ptr(), size, *crc32c) };

                 Ok(())
             }
@@ -223,16 +208,12 @@ impl Wiper {
         Ok(())
     }
     /// Check if the given method is supported.
-    pub(crate) fn supported(
-        wipe_method: WipeMethod,
-    ) -> Result<WipeMethod, Error> {
+    pub(crate) fn supported(wipe_method: WipeMethod) -> Result<WipeMethod, Error> {
         match wipe_method {
             WipeMethod::None | WipeMethod::WriteZeroes => Ok(wipe_method),
-            WipeMethod::Unmap | WipeMethod::WritePattern(_) => {
-                Err(Error::MethodUnimplemented {
-                    method: wipe_method,
-                })
-            }
+            WipeMethod::Unmap | WipeMethod::WritePattern(_) => Err(Error::MethodUnimplemented {
+                method: wipe_method,
+            }),
             WipeMethod::CkSum(_) => Ok(wipe_method),
         }
     }
@@ -252,10 +233,7 @@ impl StreamedWiper {
         snafu::ensure!(chunk_size_bytes <= size, ChunkTooLarge {});

         let iterator = WipeIterator::new(0, size, chunk_size_bytes, block_len)?;
-        snafu::ensure!(
-            iterator.total_chunks < max_chunks as u64,
-            TooManyChunks {}
-        );
+        snafu::ensure!(iterator.total_chunks < max_chunks as u64, TooManyChunks {});

         let stats = WipeStats {
             uuid: wiper.bdev.get_bdev().uuid(),
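Note: the chunk accounting enforced by WipeIterator, constructed above and iterated in the hunks below, gives the tail of a bdev its own shorter chunk when the size is not an exact multiple of the chunk size. A worked example of the same arithmetic, illustration only:

    fn main() {
        let (total_bytes, chunk_size_bytes): (u64, u64) = (10 << 20, 4 << 20); // 10 MiB, 4 MiB
        let mut chunks = total_bytes / chunk_size_bytes; // 2 full chunks
        let remainder = total_bytes % chunk_size_bytes; // 2 MiB left over
        let extra_chunk_size_bytes = (remainder > 0).then_some(remainder);
        if extra_chunk_size_bytes.is_some() {
            chunks += 1; // the last, shorter chunk
        }
        assert_eq!((chunks, extra_chunk_size_bytes), (3, Some(2 << 20)));
    }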
@@ -301,10 +279,7 @@ impl StreamedWiper {
     /// Complete the current chunk.
     fn complete_chunk(&mut self, start: std::time::Instant, size: u64) {
         self.stats.complete_chunk(start, size);
-        if let WipeMethod::CkSum(CkSumMethod::Crc32 {
-            crc32c,
-        }) = &mut self.wiper.wipe_method
-        {
+        if let WipeMethod::CkSum(CkSumMethod::Crc32 { crc32c }) = &mut self.wiper.wipe_method {
             // Finalize CRC by inverting all bits.
             if self.stats.remaining_chunks == 0 {
                 *crc32c ^= spdk_rs::libspdk::SPDK_CRC32C_XOR;
@@ -316,17 +291,12 @@ impl StreamedWiper {
     /// Wipe the bdev at the given byte offset and byte size.
     /// Uses the abort checker allowing us to stop early if a client disconnects
     /// or if the process is being shutdown.
-    async fn wipe_with_abort(
-        &mut self,
-        offset: u64,
-        size: u64,
-    ) -> Result<(), Error> {
+    async fn wipe_with_abort(&mut self, offset: u64, size: u64) -> Result<(), Error> {
         // todo: configurable?
         let max_io_size = 8 * 1024 * 1024;
         if size > max_io_size {
             let block_len = self.wiper.bdev.get_bdev().block_len() as u64;
-            let mut iterator =
-                WipeIterator::new(offset, size, max_io_size, block_len)?;
+            let mut iterator = WipeIterator::new(offset, size, max_io_size, block_len)?;
             while let Some((offset, size)) = iterator.next() {
                 self.wiper.wipe(offset, size).await?;
                 iterator.complete_chunk(size);
@@ -349,9 +319,7 @@ impl StreamedWiper {
     fn notify(&self) -> Result<(), Error> {
         if let Err(error) = self.stream.notify(&self.stats) {
             self.check_abort()?;
-            return Err(Error::ChunkNotifyFailed {
-                error,
-            });
+            return Err(Error::ChunkNotifyFailed { error });
         }
         Ok(())
     }
@@ -412,10 +380,7 @@ impl WipeIterator {
         };

         snafu::ensure!(chunk_size_bytes <= total_bytes, ChunkTooLarge {});
-        snafu::ensure!(
-            chunk_size_bytes % block_len == 0,
-            ChunkBlockSizeInvalid {}
-        );
+        snafu::ensure!(chunk_size_bytes % block_len == 0, ChunkBlockSizeInvalid {});

         let mut chunks = total_bytes / chunk_size_bytes;
         let remainder = total_bytes % chunk_size_bytes;
@@ -454,15 +419,12 @@ impl Iterator for WipeIterator {
         if self.wiped_chunks >= self.total_chunks {
             None
         } else {
-            let offset =
-                self.start_offset + (self.wiped_chunks * self.chunk_size_bytes);
+            let offset = self.start_offset + (self.wiped_chunks * self.chunk_size_bytes);
             match self.extra_chunk_size_bytes {
                 // the very last chunk might have a different size if the bdev
                 // size is not an exact multiple of the chunk
                 // size.
-                Some(size) if self.remaining_chunks == 1 => {
-                    Some((offset, size))
-                }
+                Some(size) if self.remaining_chunks == 1 => Some((offset, size)),
                 None | Some(_) => Some((offset, self.chunk_size_bytes)),
             }
         }
diff --git a/io-engine/src/delay.rs b/io-engine/src/delay.rs
index 63144ac82..91a8c5a32 100644
--- a/io-engine/src/delay.rs
+++ b/io-engine/src/delay.rs
@@ -1,10 +1,6 @@
 use std::{cell::RefCell, os::raw::c_void, time::Duration};

-use spdk_rs::libspdk::{
-    spdk_poller,
-    spdk_poller_register,
-    spdk_poller_unregister,
-};
+use spdk_rs::libspdk::{spdk_poller, spdk_poller_register, spdk_poller_unregister};

 thread_local! {
     /// Delay poller pointer for unregistering the poller at the end
@@ -22,9 +18,7 @@ extern "C" fn sleep(_ctx: *mut c_void) -> i32 {
 /// short moment so it is not able to perform any useful work when sleeping.
pub fn register() { warn!("*** Delaying reactor every 1ms by 1ms ***"); - let delay_poller = unsafe { - spdk_poller_register(Some(sleep), std::ptr::null_mut(), 1000) - }; + let delay_poller = unsafe { spdk_poller_register(Some(sleep), std::ptr::null_mut(), 1000) }; DELAY_POLLER.with(move |poller_cell| { let mut poller_maybe = poller_cell.try_borrow_mut().unwrap(); if poller_maybe.is_some() { diff --git a/io-engine/src/eventing/clone_events.rs b/io-engine/src/eventing/clone_events.rs index 448c76d30..68aa254f6 100644 --- a/io-engine/src/eventing/clone_events.rs +++ b/io-engine/src/eventing/clone_events.rs @@ -1,10 +1,4 @@ -use events_api::event::{ - EventAction, - EventCategory, - EventMessage, - EventMeta, - EventSource, -}; +use events_api::event::{EventAction, EventCategory, EventMessage, EventMeta, EventSource}; use crate::{ core::{snapshot::CloneParams, MayastorEnvironment}, @@ -13,13 +7,11 @@ use crate::{ impl Event for CloneParams { fn event(&self, event_action: EventAction) -> EventMessage { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_clone_data( - self.source_uuid().unwrap_or_default(), - self.clone_create_time().unwrap_or_default(), - ); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_clone_data( + self.source_uuid().unwrap_or_default(), + self.clone_create_time().unwrap_or_default(), + ); EventMessage { category: EventCategory::Clone as i32, diff --git a/io-engine/src/eventing/host_events.rs b/io-engine/src/eventing/host_events.rs index 640908c67..2c22bd4bc 100644 --- a/io-engine/src/eventing/host_events.rs +++ b/io-engine/src/eventing/host_events.rs @@ -1,10 +1,4 @@ -use events_api::event::{ - EventAction, - EventCategory, - EventMessage, - EventMeta, - EventSource, -}; +use events_api::event::{EventAction, EventCategory, EventMessage, EventMeta, EventSource}; use crate::{ bdev::Nexus, @@ -24,8 +18,7 @@ pub(crate) trait HostTargetMeta { impl HostTargetMeta for Nexus<'_> { fn host_target_meta(&self, mut meta: EventMeta) -> EventMeta { if let Some(source) = meta.source { - let event_source = - source.with_target_data("nexus", &self.uuid().to_string()); + let event_source = source.with_target_data("nexus", &self.uuid().to_string()); meta.source = Some(event_source); } meta @@ -45,21 +38,15 @@ impl HostTargetMeta for Lvol { impl EventMetaGen for NvmfSubsystem { fn meta(&self) -> EventMeta { let nqn = self.get_nqn(); - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_subsystem_data(&nqn); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_subsystem_data(&nqn); EventMeta::from_source(event_source) } } impl EventWithMeta for NvmfController { - fn event( - &self, - event_action: EventAction, - mut meta: EventMeta, - ) -> EventMessage { + fn event(&self, event_action: EventAction, mut meta: EventMeta) -> EventMessage { if let Some(source) = meta.source { let event_source = source.with_host_initiator_data(&self.hostnqn()); meta.source = Some(event_source); diff --git a/io-engine/src/eventing/io_engine_events.rs b/io-engine/src/eventing/io_engine_events.rs index 24662f602..85f1cbb02 100644 --- a/io-engine/src/eventing/io_engine_events.rs +++ b/io-engine/src/eventing/io_engine_events.rs @@ -1,10 +1,4 @@ -use events_api::event::{ - EventAction, - EventCategory, - EventMessage, - EventMeta, - EventSource, -}; +use events_api::event::{EventAction, EventCategory, EventMessage, 
EventMeta, EventSource}; use std::time::Duration; @@ -14,9 +8,8 @@ use crate::{ }; pub(crate) fn io_engine_stop_event_meta(total_time: Duration) -> EventMeta { - let event_source = - EventSource::new(MayastorEnvironment::global_or_default().node_name) - .with_event_action_duration_details(total_time); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_event_action_duration_details(total_time); EventMeta::from_source(event_source) } @@ -36,11 +29,7 @@ impl Event for MayastorEnvironment { // Io-engine event message with metadata from Mayastor env data. impl EventWithMeta for MayastorEnvironment { - fn event( - &self, - event_action: EventAction, - meta: EventMeta, - ) -> EventMessage { + fn event(&self, event_action: EventAction, meta: EventMeta) -> EventMessage { EventMessage { category: EventCategory::IoEngineCategory as i32, action: event_action as i32, @@ -53,13 +42,8 @@ impl EventWithMeta for MayastorEnvironment { // Reactor event message from Reactor data. impl Event for Reactor { fn event(&self, event_action: EventAction) -> EventMessage { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_reactor_details( - self.core().into(), - &self.get_state().to_string(), - ); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_reactor_details(self.core().into(), &self.get_state().to_string()); EventMessage { category: EventCategory::IoEngineCategory as i32, action: event_action as i32, diff --git a/io-engine/src/eventing/nexus_child_events.rs b/io-engine/src/eventing/nexus_child_events.rs index 1f0554887..2aeef2c7c 100644 --- a/io-engine/src/eventing/nexus_child_events.rs +++ b/io-engine/src/eventing/nexus_child_events.rs @@ -1,10 +1,4 @@ -use events_api::event::{ - EventAction, - EventCategory, - EventMessage, - EventMeta, - EventSource, -}; +use events_api::event::{EventAction, EventCategory, EventMessage, EventMeta, EventSource}; use io_engine_api::v1::nexus::{AddChildNexusRequest, RemoveChildNexusRequest}; @@ -13,10 +7,8 @@ use crate::{core::MayastorEnvironment, eventing::Event}; // Nexus child added event message. impl Event for AddChildNexusRequest { fn event(&self, event_action: EventAction) -> EventMessage { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_nexus_child_data(&self.uri); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_nexus_child_data(&self.uri); EventMessage { category: EventCategory::Nexus as i32, @@ -30,10 +22,8 @@ impl Event for AddChildNexusRequest { // Nexus child removed event message. 
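Note: the added-child impl above and the removed-child impl below share one shape: build an EventSource for the local node, attach the child URI, and wrap it in an EventMeta. A condensed sketch of that construction, with the helper name invented for illustration:

    fn nexus_child_event_meta(node_name: String, child_uri: &str) -> EventMeta {
        let source = EventSource::new(node_name).with_nexus_child_data(child_uri);
        EventMeta::from_source(source)
    }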
impl Event for RemoveChildNexusRequest { fn event(&self, event_action: EventAction) -> EventMessage { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_nexus_child_data(&self.uri); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_nexus_child_data(&self.uri); EventMessage { category: EventCategory::Nexus as i32, diff --git a/io-engine/src/eventing/nexus_events.rs b/io-engine/src/eventing/nexus_events.rs index ad1ae7444..43b5825f3 100644 --- a/io-engine/src/eventing/nexus_events.rs +++ b/io-engine/src/eventing/nexus_events.rs @@ -1,10 +1,5 @@ use events_api::event::{ - EventAction, - EventCategory, - EventMessage, - EventMeta, - EventSource, - RebuildStatus, + EventAction, EventCategory, EventMessage, EventMeta, EventSource, RebuildStatus, }; use std::time::Duration; @@ -22,24 +17,20 @@ use crate::{ impl EventMetaGen for NexusRebuildJob { fn meta(&self) -> EventMeta { let rebuild_status = match self.state() { - RebuildState::Init | RebuildState::Running => { - RebuildStatus::Started - } + RebuildState::Init | RebuildState::Running => RebuildStatus::Started, RebuildState::Stopped => RebuildStatus::Stopped, RebuildState::Failed => RebuildStatus::Failed, RebuildState::Completed => RebuildStatus::Completed, _ => RebuildStatus::Unknown, }; - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_rebuild_data( - rebuild_status, - self.src_uri(), - self.dst_uri(), - self.error().map(|e| e.verbose()), - ); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_rebuild_data( + rebuild_status, + self.src_uri(), + self.dst_uri(), + self.error().map(|e| e.verbose()), + ); EventMeta::from_source(event_source) } @@ -47,22 +38,16 @@ impl EventMetaGen for NexusRebuildJob { impl EventMetaGen for NexusChild<'_> { fn meta(&self) -> EventMeta { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_nexus_child_data(self.uri()); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_nexus_child_data(self.uri()); EventMeta::from_source(event_source) } } /// Nexus state change event meta. 
-pub(crate) fn state_change_event_meta( - previous: NexusState, - next: NexusState, -) -> EventMeta { - let event_source = - EventSource::new(MayastorEnvironment::global_or_default().node_name) - .with_state_change_data(previous.to_string(), next.to_string()); +pub(crate) fn state_change_event_meta(previous: NexusState, next: NexusState) -> EventMeta { + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_state_change_data(previous.to_string(), next.to_string()); EventMeta::from_source(event_source) } @@ -76,12 +61,10 @@ pub(crate) fn subsystem_pause_event_meta( Some(pause_state) => pause_state.to_string(), None => String::default(), }; - let mut event_source = - EventSource::new(MayastorEnvironment::global_or_default().node_name) - .with_subsystem_pause_details(nexus_pause_state); + let mut event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_subsystem_pause_details(nexus_pause_state); if let Some(total_time) = total_time_option { - event_source = - event_source.with_event_action_duration_details(total_time); + event_source = event_source.with_event_action_duration_details(total_time); } if let Some(err) = error_option { event_source = event_source.with_error_details(err.to_string()); @@ -91,9 +74,7 @@ pub(crate) fn subsystem_pause_event_meta( impl Event for nexus::Nexus<'_> { fn event(&self, event_action: EventAction) -> EventMessage { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name); EventMessage { category: EventCategory::Nexus as i32, action: event_action as i32, @@ -104,11 +85,7 @@ impl Event for nexus::Nexus<'_> { } impl EventWithMeta for nexus::Nexus<'_> { - fn event( - &self, - event_action: EventAction, - meta: EventMeta, - ) -> EventMessage { + fn event(&self, event_action: EventAction, meta: EventMeta) -> EventMessage { EventMessage { category: EventCategory::Nexus as i32, action: event_action as i32, @@ -120,10 +97,8 @@ impl EventWithMeta for nexus::Nexus<'_> { impl EventMetaGen for Error { fn meta(&self) -> EventMeta { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_error_details(self.to_string()); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_error_details(self.to_string()); EventMeta::from_source(event_source) } } diff --git a/io-engine/src/eventing/pool_events.rs b/io-engine/src/eventing/pool_events.rs index 1cb5b9b05..312a9c80e 100644 --- a/io-engine/src/eventing/pool_events.rs +++ b/io-engine/src/eventing/pool_events.rs @@ -1,19 +1,11 @@ -use events_api::event::{ - EventAction, - EventCategory, - EventMessage, - EventMeta, - EventSource, -}; +use events_api::event::{EventAction, EventCategory, EventMessage, EventMeta, EventSource}; use crate::{core::MayastorEnvironment, eventing::Event, lvs::Lvs}; // Pool event messages from Lvs data. 
impl Event for Lvs { fn event(&self, event_action: EventAction) -> EventMessage { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name); EventMessage { category: EventCategory::Pool as i32, action: event_action as i32, diff --git a/io-engine/src/eventing/replica_events.rs b/io-engine/src/eventing/replica_events.rs index dae5cf564..0fe6972e5 100644 --- a/io-engine/src/eventing/replica_events.rs +++ b/io-engine/src/eventing/replica_events.rs @@ -1,10 +1,4 @@ -use events_api::event::{ - EventAction, - EventCategory, - EventMessage, - EventMeta, - EventSource, -}; +use events_api::event::{EventAction, EventCategory, EventMessage, EventMeta, EventSource}; use crate::{ core::{logical_volume::LogicalVolume, MayastorEnvironment}, @@ -19,14 +13,8 @@ use crate::{ // Replica event messages from Lvol data. impl Event for Lvol { fn event(&self, event_action: EventAction) -> EventMessage { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_replica_data( - self.lvs().name(), - &self.lvs().uuid(), - &self.name(), - ); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_replica_data(self.lvs().name(), &self.lvs().uuid(), &self.name()); EventMessage { category: EventCategory::Replica as i32, @@ -38,23 +26,15 @@ impl Event for Lvol { } /// Replica state change event meta. -pub(crate) fn state_change_event_meta( - previous: ChildState, - next: ChildState, -) -> EventMeta { - let event_source = - EventSource::new(MayastorEnvironment::global_or_default().node_name) - .with_state_change_data(previous.to_string(), next.to_string()); +pub(crate) fn state_change_event_meta(previous: ChildState, next: ChildState) -> EventMeta { + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_state_change_data(previous.to_string(), next.to_string()); EventMeta::from_source(event_source) } /// Replica state change event. 
impl EventWithMeta for NexusChild<'_> { - fn event( - &self, - event_action: EventAction, - meta: EventMeta, - ) -> EventMessage { + fn event(&self, event_action: EventAction, meta: EventMeta) -> EventMessage { EventMessage { category: EventCategory::Replica as i32, action: event_action as i32, diff --git a/io-engine/src/eventing/snapshot_events.rs b/io-engine/src/eventing/snapshot_events.rs index d5718364b..14f843268 100644 --- a/io-engine/src/eventing/snapshot_events.rs +++ b/io-engine/src/eventing/snapshot_events.rs @@ -1,30 +1,18 @@ -use events_api::event::{ - EventAction, - EventCategory, - EventMessage, - EventMeta, - EventSource, -}; +use events_api::event::{EventAction, EventCategory, EventMessage, EventMeta, EventSource}; use crate::{ - core::{ - snapshot::ISnapshotDescriptor, - MayastorEnvironment, - SnapshotParams, - }, + core::{snapshot::ISnapshotDescriptor, MayastorEnvironment, SnapshotParams}, eventing::Event, }; impl Event for SnapshotParams { fn event(&self, event_action: EventAction) -> EventMessage { - let event_source = EventSource::new( - MayastorEnvironment::global_or_default().node_name, - ) - .with_snapshot_data( - self.parent_id().unwrap_or_default(), - self.create_time().unwrap_or_default(), - self.entity_id().unwrap_or_default(), - ); + let event_source = EventSource::new(MayastorEnvironment::global_or_default().node_name) + .with_snapshot_data( + self.parent_id().unwrap_or_default(), + self.create_time().unwrap_or_default(), + self.entity_id().unwrap_or_default(), + ); EventMessage { category: EventCategory::Snapshot as i32, diff --git a/io-engine/src/grpc/controller_grpc.rs b/io-engine/src/grpc/controller_grpc.rs index 71de9b608..75fef450a 100644 --- a/io-engine/src/grpc/controller_grpc.rs +++ b/io-engine/src/grpc/controller_grpc.rs @@ -29,12 +29,9 @@ impl NvmeController<'_> { } /// Returns the IO stats of the given NVMe Controller -pub async fn controller_stats( - controller_name: &str, -) -> Result { +pub async fn controller_stats(controller_name: &str) -> Result { if let Some(ctrlr) = NVME_CONTROLLERS.lookup_by_name(controller_name) { - let (s, r) = - oneshot::channel::>(); + let (s, r) = oneshot::channel::>(); { let ctrlr = ctrlr.lock(); if let Err(e) = ctrlr.get_io_stats( diff --git a/io-engine/src/grpc/mod.rs b/io-engine/src/grpc/mod.rs index 3baea9c66..b2727a2ff 100644 --- a/io-engine/src/grpc/mod.rs +++ b/io-engine/src/grpc/mod.rs @@ -12,54 +12,29 @@ use tonic::{Request, Response, Status}; use crate::{ bdev_api::BdevError, core::{ - CoreError, - MayastorFeatures, - Reactor, - ResourceLockGuard, - ResourceSubsystem, - VerboseError, + CoreError, MayastorFeatures, Reactor, ResourceLockGuard, ResourceSubsystem, VerboseError, }, }; impl From for tonic::Status { fn from(e: BdevError) -> Self { match e { - BdevError::UriParseFailed { - .. - } => Status::invalid_argument(e.to_string()), - BdevError::UriSchemeUnsupported { - .. - } => Status::invalid_argument(e.to_string()), - BdevError::InvalidUri { - .. - } => Status::invalid_argument(e.to_string()), - BdevError::IntParamParseFailed { - .. - } => Status::invalid_argument(e.to_string()), - BdevError::BoolParamParseFailed { - .. - } => Status::invalid_argument(e.to_string()), - BdevError::UuidParamParseFailed { - .. - } => Status::invalid_argument(e.to_string()), - BdevError::BdevWrongUuid { - .. - } => Status::invalid_argument(e.to_string()), - BdevError::CreateBdevFailed { - source, .. - } - | BdevError::CreateBdevInvalidParams { - source, .. - } => match source { + BdevError::UriParseFailed { .. 
} => Status::invalid_argument(e.to_string()), + BdevError::UriSchemeUnsupported { .. } => Status::invalid_argument(e.to_string()), + BdevError::InvalidUri { .. } => Status::invalid_argument(e.to_string()), + BdevError::IntParamParseFailed { .. } => Status::invalid_argument(e.to_string()), + BdevError::BoolParamParseFailed { .. } => Status::invalid_argument(e.to_string()), + BdevError::UuidParamParseFailed { .. } => Status::invalid_argument(e.to_string()), + BdevError::BdevWrongUuid { .. } => Status::invalid_argument(e.to_string()), + BdevError::CreateBdevFailed { source, .. } + | BdevError::CreateBdevInvalidParams { source, .. } => match source { Errno::EINVAL => Status::invalid_argument(e.verbose()), Errno::ENOENT => Status::not_found(e.verbose()), Errno::ENODEV => Status::not_found(e.verbose()), Errno::EEXIST => Status::already_exists(e.verbose()), _ => Status::invalid_argument(e.verbose()), }, - BdevError::BdevNotFound { - .. - } => Status::not_found(e.to_string()), + BdevError::BdevNotFound { .. } => Status::not_found(e.to_string()), e => Status::internal(e.verbose()), } } @@ -158,16 +133,13 @@ macro_rules! spdk_submit { pub type GrpcResult = std::result::Result, Status>; /// Submit rpc code to the primary reactor. -pub fn rpc_submit( - future: F, -) -> Result>, tonic::Status> +pub fn rpc_submit(future: F) -> Result>, tonic::Status> where E: Send + Debug + Display + 'static, F: Future> + 'static, R: Send + Debug + 'static, { - Reactor::spawn_at_primary(future) - .map_err(|_| Status::resource_exhausted("ENOMEM")) + Reactor::spawn_at_primary(future).map_err(|_| Status::resource_exhausted("ENOMEM")) } /// Submit rpc code to the primary reactor. /// Similar to `rpc_submit` but with a more generic response abstraction. @@ -176,22 +148,18 @@ where F: Future + 'static, R: Send + Debug + 'static, { - Reactor::spawn_at_primary(future) - .map_err(|_| Status::resource_exhausted("ENOMEM")) + Reactor::spawn_at_primary(future).map_err(|_| Status::resource_exhausted("ENOMEM")) } /// Submit rpc code to the primary reactor. /// Similar to `rpc_submit_ext` but specifying a result output with tonic /// Status as error. -pub fn rpc_submit_ext2( - future: F, -) -> Result>, tonic::Status> +pub fn rpc_submit_ext2(future: F) -> Result>, tonic::Status> where F: Future> + 'static, R: Send + Debug + 'static, { - Reactor::spawn_at_primary(future) - .map_err(|_| Status::resource_exhausted("ENOMEM")) + Reactor::spawn_at_primary(future).map_err(|_| Status::resource_exhausted("ENOMEM")) } /// Manage locks across multiple grpc services. @@ -200,7 +168,10 @@ pub async fn acquire_subsystem_lock<'a>( resource: Option<&str>, ) -> Result, Status> { if let Some(resource) = resource { - match subsystem.lock_resource(resource.to_string(), None, true).await { + match subsystem + .lock_resource(resource.to_string(), None, true) + .await + { Some(lock_guard) => Ok(lock_guard), None => Err(Status::aborted(format!( "Failed to acquire lock for the resource: {resource}, lock already held" @@ -232,9 +203,9 @@ pub fn endpoint_from_str(endpoint: &str, port: u16) -> std::net::SocketAddr { /// mismatch, for ex, in case of EKS clusters where hostname and /// node name differ volume wont be created, so we set it to hostname. 
pub fn node_name(node_name: &Option) -> String { - node_name.clone().unwrap_or_else(|| { - std::env::var("HOSTNAME").unwrap_or_else(|_| "mayastor-node".into()) - }) + node_name + .clone() + .unwrap_or_else(|| std::env::var("HOSTNAME").unwrap_or_else(|_| "mayastor-node".into())) } const SECONDS_IN_HOUR: u64 = 60 * 60; @@ -251,18 +222,13 @@ pub fn get_request_timeout(req: &Request) -> Duration { Ok(timeout) => { // At least one digit for the value + 1 character for unit. if timeout.len() >= 2 { - let (t_value, t_unit) = - timeout.split_at(timeout.len() - 1); + let (t_value, t_unit) = timeout.split_at(timeout.len() - 1); if let Ok(tv) = t_value.parse() { return match t_unit { // Hours - "H" => { - Duration::from_secs(tv * SECONDS_IN_HOUR) - } + "H" => Duration::from_secs(tv * SECONDS_IN_HOUR), // Minutes - "M" => { - Duration::from_secs(tv * SECONDS_IN_MINUTE) - } + "M" => Duration::from_secs(tv * SECONDS_IN_MINUTE), // Seconds "S" => Duration::from_secs(tv), // Milliseconds @@ -276,9 +242,7 @@ pub fn get_request_timeout(req: &Request) -> Duration { timeout, "Unsupported time unit in gRPC timeout, applying default gRPC timeout" ); - Duration::from_secs( - DEFAULT_GRPC_TIMEOUT_SEC, - ) + Duration::from_secs(DEFAULT_GRPC_TIMEOUT_SEC) } }; } diff --git a/io-engine/src/grpc/server.rs b/io-engine/src/grpc/server.rs index 2a908bd8f..8044849eb 100644 --- a/io-engine/src/grpc/server.rs +++ b/io-engine/src/grpc/server.rs @@ -1,27 +1,15 @@ use super::{ - v0::{ - bdev_grpc::BdevSvc, - json_grpc::JsonRpcSvc, - mayastor_grpc::MayastorSvc, - }, + v0::{bdev_grpc::BdevSvc, json_grpc::JsonRpcSvc, mayastor_grpc::MayastorSvc}, v1::{ - bdev::BdevService, - host::HostService, - json::JsonService, - nexus::NexusService, - pool::PoolService, - replica::ReplicaService, - snapshot::SnapshotService, - snapshot_rebuild::SnapshotRebuildService, - stats::StatsService, - test::TestService, + bdev::BdevService, host::HostService, json::JsonService, nexus::NexusService, + pool::PoolService, replica::ReplicaService, snapshot::SnapshotService, + snapshot_rebuild::SnapshotRebuildService, stats::StatsService, test::TestService, }, }; use io_engine_api::{ v0::{ - bdev_rpc_server::BdevRpcServer, - json_rpc_server::JsonRpcServer, + bdev_rpc_server::BdevRpcServer, json_rpc_server::JsonRpcServer, mayastor_server::MayastorServer as MayastorRpcServer, }, v1, @@ -82,34 +70,27 @@ impl MayastorGrpcServer { ); let svc = Server::builder() .add_optional_service( - enable_v1 - .map(|_| v1::bdev::BdevRpcServer::new(BdevService::new())), + enable_v1.map(|_| v1::bdev::BdevRpcServer::new(BdevService::new())), + ) + .add_optional_service( + enable_v1.map(|_| v1::json::JsonRpcServer::new(JsonService::new(address.clone()))), + ) + .add_optional_service(enable_v1.map(|_| v1::pool::PoolRpcServer::new(pool_v1.clone()))) + .add_optional_service( + enable_v1.map(|_| v1::replica::ReplicaRpcServer::new(replica_v1.clone())), ) - .add_optional_service(enable_v1.map(|_| { - v1::json::JsonRpcServer::new(JsonService::new(address.clone())) - })) .add_optional_service( enable_v1 - .map(|_| v1::pool::PoolRpcServer::new(pool_v1.clone())), + .map(|_| v1::test::TestRpcServer::new(TestService::new(replica_v1.clone()))), ) .add_optional_service(enable_v1.map(|_| { - v1::replica::ReplicaRpcServer::new(replica_v1.clone()) - })) - .add_optional_service(enable_v1.map(|_| { - v1::test::TestRpcServer::new(TestService::new( - replica_v1.clone(), - )) + v1::snapshot::SnapshotRpcServer::new(SnapshotService::new(replica_v1.clone())) })) .add_optional_service(enable_v1.map(|_| { 
- v1::snapshot::SnapshotRpcServer::new(SnapshotService::new( + v1::snapshot_rebuild::SnapshotRebuildRpcServer::new(SnapshotRebuildService::new( replica_v1.clone(), )) })) - .add_optional_service(enable_v1.map(|_| { - v1::snapshot_rebuild::SnapshotRebuildRpcServer::new( - SnapshotRebuildService::new(replica_v1.clone()), - ) - })) .add_optional_service(enable_v1.map(|_| { v1::host::HostRpcServer::new(HostService::new( node_name, @@ -118,29 +99,22 @@ impl MayastorGrpcServer { api_versions, )) })) + .add_optional_service( + enable_v1.map(|_| v1::nexus::NexusRpcServer::new(NexusService::new())), + ) .add_optional_service( enable_v1.map(|_| { - v1::nexus::NexusRpcServer::new(NexusService::new()) + v1::stats::StatsRpcServer::new(StatsService::new(pool_v1, replica_v1)) }), ) - .add_optional_service(enable_v1.map(|_| { - v1::stats::StatsRpcServer::new(StatsService::new( - pool_v1, replica_v1, - )) - })) - .add_optional_service(enable_v0.map(|_| { - MayastorRpcServer::new(MayastorSvc::new(Duration::from_millis( - 4, - ))) - })) .add_optional_service( - enable_v0.map(|_| { - JsonRpcServer::new(JsonRpcSvc::new(address.clone())) - }), + enable_v0 + .map(|_| MayastorRpcServer::new(MayastorSvc::new(Duration::from_millis(4)))), ) .add_optional_service( - enable_v0.map(|_| BdevRpcServer::new(BdevSvc::new())), + enable_v0.map(|_| JsonRpcServer::new(JsonRpcSvc::new(address.clone()))), ) + .add_optional_service(enable_v0.map(|_| BdevRpcServer::new(BdevSvc::new()))) .serve(endpoint); select! { diff --git a/io-engine/src/grpc/v0/bdev_grpc.rs b/io-engine/src/grpc/v0/bdev_grpc.rs index 86f35730d..c3cf0e539 100644 --- a/io-engine/src/grpc/v0/bdev_grpc.rs +++ b/io-engine/src/grpc/v0/bdev_grpc.rs @@ -5,14 +5,8 @@ use std::{convert::TryFrom, pin::Pin}; use url::Url; use io_engine_api::v0::{ - bdev_rpc_server::BdevRpc, - Bdev as RpcBdev, - BdevShareReply, - BdevShareRequest, - BdevUri, - Bdevs, - CreateReply, - Null, + bdev_rpc_server::BdevRpc, Bdev as RpcBdev, BdevShareReply, BdevShareRequest, BdevUri, Bdevs, + CreateReply, Null, }; use crate::{ @@ -65,9 +59,7 @@ impl BdevRpc for BdevSvc { bdev.into_iter().for_each(|bdev| list.push(bdev.into())) } - Ok(Bdevs { - bdevs: list, - }) + Ok(Bdevs { bdevs: list }) })?; rx.await @@ -77,10 +69,7 @@ impl BdevRpc for BdevSvc { } #[instrument(level = "debug", err)] - async fn create( - &self, - request: Request, - ) -> Result, Status> { + async fn create(&self, request: Request) -> Result, Status> { let uri = request.into_inner().uri; let rx = rpc_submit(async move { bdev_create(&uri).await })?; @@ -88,11 +77,7 @@ impl BdevRpc for BdevSvc { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) - .map(|name| { - Ok(Response::new(CreateReply { - name, - })) - })? + .map(|name| Ok(Response::new(CreateReply { name })))? } #[instrument(level = "debug", err)] @@ -123,8 +108,7 @@ impl BdevRpc for BdevSvc { let rx = match proto.as_str() { "nvmf" => rpc_submit::<_, String, CoreError>(async move { let mut bdev = UntypedBdev::get_by_name(&bdev_name)?; - let props = - NvmfShareProps::new().with_allowed_hosts(r.allowed_hosts); + let props = NvmfShareProps::new().with_allowed_hosts(r.allowed_hosts); let share = Pin::new(&mut bdev).share_nvmf(Some(props)).await?; let bdev = UntypedBdev::get_by_name(&bdev_name)?; Ok(bdev.share_uri().unwrap_or(share)) @@ -136,16 +120,10 @@ impl BdevRpc for BdevSvc { rx.await .map_err(|_| Status::cancelled("cancelled"))? 
.map_err(|e| match e { - CoreError::BdevNotFound { - name, - } => Status::not_found(name), + CoreError::BdevNotFound { name } => Status::not_found(name), e => Status::internal(e.to_string()), }) - .map(|uri| { - Ok(Response::new(BdevShareReply { - uri, - })) - })? + .map(|uri| Ok(Response::new(BdevShareReply { uri })))? } #[instrument(level = "debug", err)] diff --git a/io-engine/src/grpc/v0/json_grpc.rs b/io-engine/src/grpc/v0/json_grpc.rs index d2e451993..88b8907e8 100644 --- a/io-engine/src/grpc/v0/json_grpc.rs +++ b/io-engine/src/grpc/v0/json_grpc.rs @@ -2,11 +2,7 @@ //! gRPC method to proxy calls to (local) SPDK json-rpc service use crate::grpc::GrpcResult; -use io_engine_api::v0::{ - json_rpc_server::JsonRpc, - JsonRpcReply, - JsonRpcRequest, -}; +use io_engine_api::v0::{json_rpc_server::JsonRpc, JsonRpcReply, JsonRpcRequest}; use jsonrpc::error::Error; use std::borrow::Cow; use tonic::{Request, Response}; @@ -20,9 +16,7 @@ pub struct JsonRpcSvc { impl JsonRpcSvc { pub fn new(rpc_addr: Cow<'static, str>) -> Self { - Self { - rpc_addr, - } + Self { rpc_addr } } } @@ -30,19 +24,14 @@ impl JsonRpcSvc { impl JsonRpc for JsonRpcSvc { /// Invoke a json-rpc method and return the result #[instrument(level = "debug", err)] - async fn json_rpc_call( - &self, - request: Request, - ) -> GrpcResult { + async fn json_rpc_call(&self, request: Request) -> GrpcResult { let args = request.into_inner(); let result = self .spdk_jsonrpc_call(&args.method, empty_as_none(&args.params)) .await?; - Ok(Response::new(JsonRpcReply { - result, - })) + Ok(Response::new(JsonRpcReply { result })) } } @@ -55,16 +44,10 @@ fn empty_as_none(value: &str) -> Option<&str> { } impl JsonRpcSvc { - async fn spdk_jsonrpc_call( - &self, - method: &str, - arg: Option<&str>, - ) -> Result { - let params: Option = - arg.map(serde_json::from_str).transpose()?; + async fn spdk_jsonrpc_call(&self, method: &str, arg: Option<&str>) -> Result { + let params: Option = arg.map(serde_json::from_str).transpose()?; - let result: serde_json::Value = - jsonrpc::call(&self.rpc_addr, method, params).await?; + let result: serde_json::Value = jsonrpc::call(&self.rpc_addr, method, params).await?; serde_json::to_string_pretty(&result).map_err(Error::ParseError) } diff --git a/io-engine/src/grpc/v0/mayastor_grpc.rs b/io-engine/src/grpc/v0/mayastor_grpc.rs index 9483d1be2..b9520e62f 100644 --- a/io-engine/src/grpc/v0/mayastor_grpc.rs +++ b/io-engine/src/grpc/v0/mayastor_grpc.rs @@ -11,44 +11,21 @@ use crate::{ bdev::{ nexus, - nexus::{ - Error as NexusError, - FaultReason, - NexusReplicaSnapshotDescriptor, - }, - NvmeControllerState as ControllerState, - PtplFileOps, + nexus::{Error as NexusError, FaultReason, NexusReplicaSnapshotDescriptor}, + NvmeControllerState as ControllerState, PtplFileOps, }, bdev_api::BdevError, core::{ lock::{ProtectedSubsystems, ResourceLockManager}, logical_volume::{LogicalVolume, LvolSpaceUsage}, - BlockDeviceIoStats, - CoreError, - MayastorFeatures, - NvmfShareProps, - Protocol, - Share, - SnapshotParams, - ToErrno, - UntypedBdev, + BlockDeviceIoStats, CoreError, MayastorFeatures, NvmfShareProps, Protocol, Share, + SnapshotParams, ToErrno, UntypedBdev, }, grpc::{ - controller_grpc::{ - controller_stats, - list_controllers, - NvmeControllerInfo, - }, + controller_grpc::{controller_stats, list_controllers, NvmeControllerInfo}, rpc_submit, - v0::nexus_grpc::{ - nexus_add_child, - nexus_destroy, - nexus_lookup, - uuid_to_name, - }, - GrpcClientContext, - GrpcResult, - Serializer, + v0::nexus_grpc::{nexus_add_child, 
nexus_destroy, nexus_lookup, uuid_to_name}, + GrpcClientContext, GrpcResult, Serializer, }, host::{blk_device, resource}, lvs::{lvs_lvol::LvsLvol, BsError, Lvol, Lvs, LvsError}, @@ -155,10 +132,11 @@ impl MayastorSvc { let _global_guard = if global_operation { match lock_manager.lock(Some(ctx.timeout), false).await { Some(g) => Some(g), - None => return Err(Status::deadline_exceeded( - "Failed to acquire access to object within given timeout" - .to_string() - )), + None => { + return Err(Status::deadline_exceeded( + "Failed to acquire access to object within given timeout".to_string(), + )) + } } } else { None @@ -168,13 +146,15 @@ impl MayastorSvc { let _resource_guard = match lock_manager .get_subsystem(ProtectedSubsystems::NEXUS) .lock_resource(nexus_uuid, Some(ctx.timeout), false) - .await { - Some(g) => g, - None => return Err(Status::deadline_exceeded( - "Failed to acquire access to object within given timeout" - .to_string() - )), - }; + .await + { + Some(g) => g, + None => { + return Err(Status::deadline_exceeded( + "Failed to acquire access to object within given timeout".to_string(), + )) + } + }; let r = fut.await; match r { @@ -188,9 +168,10 @@ impl MayastorSvc { } } }) - .await { + .await + { Ok(r) => r, - Err(_) => Err(Status::cancelled("gRPC call cancelled")) + Err(_) => Err(Status::cancelled("gRPC call cancelled")), } } } @@ -215,73 +196,48 @@ impl TryFrom for PoolArgs { impl From for tonic::Status { fn from(e: LvsError) -> Self { match e { - LvsError::Import { - source, .. - } => match source.to_errno() { + LvsError::Import { source, .. } => match source.to_errno() { Errno::EINVAL => Status::invalid_argument(e.to_string()), Errno::EEXIST => Status::already_exists(e.to_string()), _ => Status::invalid_argument(e.to_string()), }, - LvsError::RepCreate { - source, .. - } => { + LvsError::RepCreate { source, .. } => { if source.to_errno() == Errno::ENOSPC { Status::resource_exhausted(e.to_string()) } else { Status::invalid_argument(e.to_string()) } } - LvsError::RepDestroy { - source, .. - } => match source.to_errno() { + LvsError::RepDestroy { source, .. } => match source.to_errno() { Errno::ENOENT => { let mut status = Status::not_found(e.to_string()); - status.metadata_mut().insert( - "gtm-602", - tonic::metadata::MetadataValue::from(0), - ); + status + .metadata_mut() + .insert("gtm-602", tonic::metadata::MetadataValue::from(0)); status } Errno::ENOMEDIUM => Status::failed_precondition(e.to_string()), Errno::EMEDIUMTYPE => Status::aborted(e.to_string()), _ => Status::internal(e.to_string()), }, - LvsError::RepResize { - source, .. - } => match source.to_errno() { - Errno::ENOSPC | Errno::ENOMEM => { - Status::resource_exhausted(e.to_string()) - } + LvsError::RepResize { source, .. } => match source.to_errno() { + Errno::ENOSPC | Errno::ENOMEM => Status::resource_exhausted(e.to_string()), Errno::EPERM => Status::permission_denied(e.to_string()), - Errno::EINVAL | Errno::ENOENT => { - Status::invalid_argument(e.to_string()) - } + Errno::EINVAL | Errno::ENOENT => Status::invalid_argument(e.to_string()), _ => Status::internal(e.to_string()), }, - LvsError::RepExists { - .. - } => Status::already_exists(e.to_string()), - LvsError::ReplicaShareProtocol { - .. - } => Status::invalid_argument(e.to_string()), - LvsError::Destroy { - source, .. - } => source.into(), - LvsError::Invalid { - source, .. - } => match source.to_errno() { + LvsError::RepExists { .. } => Status::already_exists(e.to_string()), + LvsError::ReplicaShareProtocol { .. 
} => Status::invalid_argument(e.to_string()), + LvsError::Destroy { source, .. } => source.into(), + LvsError::Invalid { source, .. } => match source.to_errno() { Errno::EINVAL => Status::invalid_argument(e.to_string()), Errno::ENOMEDIUM => Status::failed_precondition(e.to_string()), Errno::ENOENT => Status::not_found(e.to_string()), Errno::EEXIST => Status::already_exists(e.to_string()), _ => Status::invalid_argument(e.to_string()), }, - LvsError::PoolNotFound { - .. - } => Status::not_found(e.to_string()), - LvsError::PoolCreate { - source, .. - } => { + LvsError::PoolNotFound { .. } => Status::not_found(e.to_string()), + LvsError::PoolCreate { source, .. } => { if source.to_errno() == Errno::EEXIST { Status::already_exists(e.to_string()) } else if source.to_errno() == Errno::EINVAL { @@ -290,18 +246,10 @@ impl From for tonic::Status { Status::internal(e.to_string()) } } - LvsError::InvalidBdev { - source, .. - } => source.into(), - LvsError::SetProperty { - .. - } => Status::data_loss(e.to_string()), - LvsError::WipeFailed { - source, - } => source.into(), - LvsError::ResourceLockFailed { - .. - } => Status::aborted(e.to_string()), + LvsError::InvalidBdev { source, .. } => source.into(), + LvsError::SetProperty { .. } => Status::data_loss(e.to_string()), + LvsError::WipeFailed { source } => source.into(), + LvsError::ResourceLockFailed { .. } => Status::aborted(e.to_string()), _ => Status::internal(e.verbose()), } } @@ -320,10 +268,7 @@ impl From for Pool { fn from(l: Lvs) -> Self { Self { name: l.name().into(), - disks: vec![l - .base_bdev() - .bdev_uri_str() - .unwrap_or_else(|| "".into())], + disks: vec![l.base_bdev().bdev_uri_str().unwrap_or_else(|| "".into())], state: PoolState::PoolOnline.into(), capacity: l.capacity(), used: l.used(), @@ -494,9 +439,7 @@ impl From for NvmeControllerState { ControllerState::Initializing => NvmeControllerState::Initializing, ControllerState::Running => NvmeControllerState::Running, ControllerState::Faulted(_) => NvmeControllerState::Faulted, - ControllerState::Unconfiguring => { - NvmeControllerState::Unconfiguring - } + ControllerState::Unconfiguring => NvmeControllerState::Unconfiguring, ControllerState::Unconfigured => NvmeControllerState::Unconfigured, } } @@ -521,18 +464,10 @@ impl From for nexus::NvmeReservation { NvmeReservation::Reserved => Self::Reserved, NvmeReservation::WriteExclusive => Self::WriteExclusive, NvmeReservation::ExclusiveAccess => Self::ExclusiveAccess, - NvmeReservation::WriteExclusiveRegsOnly => { - Self::WriteExclusiveRegsOnly - } - NvmeReservation::ExclusiveAccessRegsOnly => { - Self::ExclusiveAccessRegsOnly - } - NvmeReservation::WriteExclusiveAllRegs => { - Self::WriteExclusiveAllRegs - } - NvmeReservation::ExclusiveAccessAllRegs => { - Self::ExclusiveAccessAllRegs - } + NvmeReservation::WriteExclusiveRegsOnly => Self::WriteExclusiveRegsOnly, + NvmeReservation::ExclusiveAccessRegsOnly => Self::ExclusiveAccessRegsOnly, + NvmeReservation::WriteExclusiveAllRegs => Self::WriteExclusiveAllRegs, + NvmeReservation::ExclusiveAccessAllRegs => Self::ExclusiveAccessAllRegs, } } } @@ -556,12 +491,8 @@ impl TryFrom for nexus::NexusNvmePreemption { type Error = tonic::Status; fn try_from(value: NvmePreemptionConv) -> Result { match NexusNvmePreemption::try_from(value.0) { - Ok(NexusNvmePreemption::ArgKey) => { - Ok(nexus::NexusNvmePreemption::ArgKey) - } - Ok(NexusNvmePreemption::Holder) => { - Ok(nexus::NexusNvmePreemption::Holder) - } + Ok(NexusNvmePreemption::ArgKey) => Ok(nexus::NexusNvmePreemption::ArgKey), + 
Ok(NexusNvmePreemption::Holder) => Ok(nexus::NexusNvmePreemption::Holder), Err(_) => Err(tonic::Status::invalid_argument(format!( "Invalid reservation preempt policy {}", value.0 @@ -573,10 +504,7 @@ impl TryFrom for nexus::NexusNvmePreemption { #[tonic::async_trait] impl mayastor_server::Mayastor for MayastorSvc { #[named] - async fn create_pool( - &self, - request: Request, - ) -> GrpcResult { + async fn create_pool(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { @@ -587,26 +515,18 @@ impl mayastor_server::Mayastor for MayastorSvc { } let rx = rpc_submit::<_, _, LvsError>(async move { - let pool = - match Lvs::create_or_import(PoolArgs::try_from(args)?) - .await + let pool = match Lvs::create_or_import(PoolArgs::try_from(args)?).await { + Ok(p) => p, + // this check is added specifically so that the + // create_pool is idempotent + Err(LvsError::PoolCreate { source, name }) + if source.to_errno() == Errno::EEXIST => { - Ok(p) => p, - // this check is added specifically so that the - // create_pool is idempotent - Err(LvsError::PoolCreate { - source, - name, - }) if source.to_errno() == Errno::EEXIST => { - info!( - "returning already created pool {}", - name, - ); - Lvs::lookup(name.as_str()) - .expect("Already exists") - } - Err(e) => return Err(e), - }; + info!("returning already created pool {}", name,); + Lvs::lookup(name.as_str()).expect("Already exists") + } + Err(e) => return Err(e), + }; // Capture current pool config and export to file. PoolConfig::capture().export().await; Ok(Pool::from(pool)) @@ -622,10 +542,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn destroy_pool( - &self, - request: Request, - ) -> GrpcResult { + async fn destroy_pool(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { @@ -654,18 +571,13 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn list_pools( - &self, - request: Request, - ) -> GrpcResult { + async fn list_pools(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { let rx = rpc_submit::<_, _, LvsError>(async move { Ok(ListPoolsReply { - pools: Lvs::iter() - .map(|l| l.into()) - .collect::>(), + pools: Lvs::iter().map(|l| l.into()).collect::>(), }) })?; @@ -679,10 +591,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn create_replica( - &self, - request: Request, - ) -> GrpcResult { + async fn create_replica(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { @@ -705,49 +614,36 @@ impl mayastor_server::Mayastor for MayastorSvc { Protocol::try_from(args.share)?, Protocol::Off | Protocol::Nvmf ) { - return Err(LvsError::ReplicaShareProtocol { - value: args.share, - }); + return Err(LvsError::ReplicaShareProtocol { value: args.share }); } let p = Lvs::lookup(&args.pool).unwrap(); match p - .create_lvol( - &args.uuid, args.size, None, args.thin, None, - ) + .create_lvol(&args.uuid, args.size, None, args.thin, None) .await { - Ok(mut lvol) - if Protocol::try_from(args.share)? - == Protocol::Nvmf => - { + Ok(mut lvol) if Protocol::try_from(args.share)? 
== Protocol::Nvmf => { let props = NvmfShareProps::new() .with_allowed_hosts(args.allowed_hosts) - .with_ptpl(lvol.ptpl().create().map_err( - |source| LvsError::LvolShare { + .with_ptpl(lvol.ptpl().create().map_err(|source| { + LvsError::LvolShare { source: crate::core::CoreError::Ptpl { reason: source.to_string(), }, name: lvol.name(), - }, - )?); - match Pin::new(&mut lvol) - .share_nvmf(Some(props)) - .await - { + } + })?); + match Pin::new(&mut lvol).share_nvmf(Some(props)).await { Ok(s) => { - debug!( - "Created and shared {:?} as {}", - lvol, s - ); + debug!("Created and shared {:?} as {}", lvol, s); Ok(Replica::from(lvol)) } Err(e) => { debug!( - "Failed to share created {:?}: {} (destroying)", - lvol, - e.to_string() - ); + "Failed to share created {:?}: {} (destroying)", + lvol, + e.to_string() + ); let _ = lvol.destroy().await; Err(e) } @@ -800,52 +696,35 @@ impl mayastor_server::Mayastor for MayastorSvc { Protocol::try_from(args.share)?, Protocol::Off | Protocol::Nvmf ) { - return Err(LvsError::ReplicaShareProtocol { - value: args.share, - }); + return Err(LvsError::ReplicaShareProtocol { value: args.share }); } match lvs - .create_lvol( - &args.name, - args.size, - Some(&args.uuid), - args.thin, - None, - ) + .create_lvol(&args.name, args.size, Some(&args.uuid), args.thin, None) .await { - Ok(mut lvol) - if Protocol::try_from(args.share)? - == Protocol::Nvmf => - { + Ok(mut lvol) if Protocol::try_from(args.share)? == Protocol::Nvmf => { let props = NvmfShareProps::new() .with_allowed_hosts(args.allowed_hosts) - .with_ptpl(lvol.ptpl().create().map_err( - |source| LvsError::LvolShare { + .with_ptpl(lvol.ptpl().create().map_err(|source| { + LvsError::LvolShare { source: crate::core::CoreError::Ptpl { reason: source.to_string(), }, name: lvol.name(), - }, - )?); - match Pin::new(&mut lvol) - .share_nvmf(Some(props)) - .await - { + } + })?); + match Pin::new(&mut lvol).share_nvmf(Some(props)).await { Ok(s) => { - debug!( - "Created and shared {:?} as {}", - lvol, s - ); + debug!("Created and shared {:?} as {}", lvol, s); Ok(ReplicaV2::from(lvol)) } Err(e) => { debug!( - "Failed to share created {:?}: {} (destroying)", - lvol, - e.to_string() - ); + "Failed to share created {:?}: {} (destroying)", + lvol, + e.to_string() + ); let _ = lvol.destroy().await; Err(e) } @@ -869,10 +748,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn destroy_replica( - &self, - request: Request, - ) -> GrpcResult { + async fn destroy_replica(&self, request: Request) -> GrpcResult { self.locked(GrpcClientContext::new(&request, function_name!()), async { let args = request.into_inner(); let rx = rpc_submit::<_, _, LvsError>(async move { @@ -892,10 +768,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn list_replicas( - &self, - request: Request, - ) -> GrpcResult { + async fn list_replicas(&self, request: Request) -> GrpcResult { self.locked(GrpcClientContext::new(&request, function_name!()), async { let rx = rpc_submit::<_, _, LvsError>(async move { let mut replicas = Vec::new(); @@ -907,9 +780,7 @@ impl mayastor_server::Mayastor for MayastorSvc { .collect(); } - Ok(ListReplicasReply { - replicas, - }) + Ok(ListReplicasReply { replicas }) })?; rx.await @@ -921,10 +792,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn list_replicas_v2( - &self, - request: Request, - ) -> GrpcResult { + async fn list_replicas_v2(&self, request: Request) -> GrpcResult { self.locked(GrpcClientContext::new(&request, function_name!()), async { let rx = 
rpc_submit::<_, _, LvsError>(async move { let mut replicas = Vec::new(); @@ -936,9 +804,7 @@ impl mayastor_server::Mayastor for MayastorSvc { .collect(); } - Ok(ListReplicasReplyV2 { - replicas, - }) + Ok(ListReplicasReplyV2 { replicas }) })?; rx.await @@ -950,10 +816,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } // TODO; lost track of what this is supposed to do - async fn stat_replicas( - &self, - _request: Request, - ) -> GrpcResult { + async fn stat_replicas(&self, _request: Request) -> GrpcResult { let rx = rpc_submit::<_, _, CoreError>(async { let mut lvols = Vec::new(); if let Some(bdev) = UntypedBdev::bdev_first() { @@ -976,9 +839,7 @@ impl mayastor_server::Mayastor for MayastorSvc { }); } - Ok(StatReplicasReply { - replicas, - }) + Ok(StatReplicasReply { replicas }) })?; rx.await @@ -1002,14 +863,10 @@ impl mayastor_server::Mayastor for MayastorSvc { let mut lvol = Lvol::try_from(bdev)?; // if we are already shared ... - if lvol.shared() - == Some(Protocol::try_from(args.share)?) - { + if lvol.shared() == Some(Protocol::try_from(args.share)?) { Pin::new(&mut lvol) .update_properties( - UpdateProps::new().with_allowed_hosts( - args.allowed_hosts, - ), + UpdateProps::new().with_allowed_hosts(args.allowed_hosts), ) .await?; return Ok(ShareReplicaReply { @@ -1025,14 +882,14 @@ impl mayastor_server::Mayastor for MayastorSvc { Protocol::Nvmf => { let props = NvmfShareProps::new() .with_allowed_hosts(args.allowed_hosts) - .with_ptpl(lvol.ptpl().create().map_err( - |source| LvsError::LvolShare { + .with_ptpl(lvol.ptpl().create().map_err(|source| { + LvsError::LvolShare { source: crate::core::CoreError::Ptpl { reason: source.to_string(), }, name: lvol.name(), - }, - )?); + } + })?); lvol.as_mut().share_nvmf(Some(props)).await?; } } @@ -1061,10 +918,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn create_nexus( - &self, - request: Request, - ) -> GrpcResult { + async fn create_nexus(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); @@ -1072,13 +926,7 @@ impl mayastor_server::Mayastor for MayastorSvc { let rx = rpc_submit::<_, _, nexus::Error>(async move { let uuid = args.uuid.clone(); let name = uuid_to_name(&args.uuid)?; - nexus::nexus_create( - &name, - args.size, - Some(&args.uuid), - &args.children, - ) - .await?; + nexus::nexus_create(&name, args.size, Some(&args.uuid), &args.children).await?; let nexus = nexus_lookup(&uuid)?; info!("Created nexus: '{}'", uuid); Ok(nexus.to_grpc().await) @@ -1092,17 +940,13 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn create_nexus_v2( - &self, - request: Request, - ) -> GrpcResult { + async fn create_nexus_v2(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); self.serialized(ctx, args.uuid.clone(), true, async move { let resv_type = NvmeReservationConv(args.resv_type).try_into()?; - let preempt_policy = - NvmePreemptionConv(args.preempt_policy).try_into()?; + let preempt_policy = NvmePreemptionConv(args.preempt_policy).try_into()?; // If the control plane has supplied a key, use it to store the // NexusInfo. 
let nexus_info_key = if args.nexus_info_key.is_empty() { @@ -1144,10 +988,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn destroy_nexus( - &self, - request: Request, - ) -> GrpcResult { + async fn destroy_nexus(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); @@ -1168,10 +1009,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn shutdown_nexus( - &self, - request: Request, - ) -> GrpcResult { + async fn shutdown_nexus(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); @@ -1191,18 +1029,14 @@ impl mayastor_server::Mayastor for MayastorSvc { .await } - async fn list_nexus( - &self, - request: Request, - ) -> GrpcResult { + async fn list_nexus(&self, request: Request) -> GrpcResult { let args = request.into_inner(); trace!("{:?}", args); let rx = rpc_submit::<_, _, nexus::Error>(async move { Ok(ListNexusReply { nexus_list: { - let mut nexus_list = - Vec::with_capacity(nexus::nexus_iter().count()); + let mut nexus_list = Vec::with_capacity(nexus::nexus_iter().count()); for n in nexus::nexus_iter() { if n.state.lock().deref() != &nexus::NexusState::Init { nexus_list.push(n.to_grpc().await); @@ -1219,10 +1053,7 @@ impl mayastor_server::Mayastor for MayastorSvc { .map(Response::new) } - async fn list_nexus_v2( - &self, - request: Request, - ) -> GrpcResult { + async fn list_nexus_v2(&self, request: Request) -> GrpcResult { let args = request.into_inner(); trace!("{:?}", args); @@ -1235,9 +1066,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } } - Ok(ListNexusV2Reply { - nexus_list, - }) + Ok(ListNexusV2Reply { nexus_list }) })?; rx.await @@ -1247,10 +1076,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn add_child_nexus( - &self, - request: Request, - ) -> GrpcResult { + async fn add_child_nexus(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); @@ -1368,9 +1194,7 @@ impl mayastor_server::Mayastor for MayastorSvc { "Published nexus {} under {} for {:?}", uuid, device_uri, args.allowed_hosts ); - Ok(PublishNexusReply { - device_uri, - }) + Ok(PublishNexusReply { device_uri }) })?; rx.await .map_err(|_| Status::cancelled("cancelled"))? 
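Note on the hunks above and below: every reformatted RPC handler funnels through the same reactor-submit/oneshot-await flow (`rpc_submit` hands the future to the primary reactor, the handler awaits the returned receiver, then maps the channel error and the domain error into a `tonic::Status`). A minimal, self-contained sketch of that flow follows — it is illustrative only, not part of this patch; `fake_submit` and `StatusLike` are hypothetical stand-ins for `rpc_submit` and `tonic::Status`, and the future is resolved inline rather than on a real reactor.

// Sketch only (assumes the `futures` crate); not part of the patch.
use futures::channel::oneshot;
use futures::executor::block_on;

#[derive(Debug)]
struct StatusLike(String); // stand-in for tonic::Status

// Stand-in for `rpc_submit`: run the future and deliver its result
// through a oneshot channel, as the reactor would.
fn fake_submit<F, R, E>(fut: F) -> oneshot::Receiver<Result<R, E>>
where
    F: std::future::Future<Output = Result<R, E>>,
{
    let (tx, rx) = oneshot::channel();
    // A real reactor polls `fut` on its own thread; here we resolve inline.
    let _ = tx.send(block_on(fut));
    rx
}

fn main() {
    let rx = fake_submit(async { Ok::<u32, String>(42) });
    // Mirrors the handler chain:
    // rx.await.map_err(|_| Status::cancelled("cancelled"))?.map_err(Status::from)
    let reply: Result<u32, StatusLike> = block_on(rx)
        .map_err(|_| StatusLike("cancelled".into()))
        .and_then(|inner| inner.map_err(StatusLike));
    println!("{reply:?}"); // Ok(42)
}

The real handlers differ only in spawning on the primary reactor (returning `resource_exhausted("ENOMEM")` if that fails) and in wrapping the success value in `Response::new`.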
@@ -1381,10 +1205,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn unpublish_nexus( - &self, - request: Request, - ) -> GrpcResult { + async fn unpublish_nexus(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); @@ -1418,8 +1239,7 @@ impl mayastor_server::Mayastor for MayastorSvc { let rx = rpc_submit::<_, _, nexus::Error>(async move { let uuid = args.uuid.clone(); debug!("Getting NVMe ANA state for nexus {} ...", uuid); - let ana_state = - nexus_lookup(&args.uuid)?.get_ana_state().await?; + let ana_state = nexus_lookup(&args.uuid)?.get_ana_state().await?; info!("Got nexus {} NVMe ANA state {:?}", uuid, ana_state); Ok(GetNvmeAnaStateReply { ana_state: ana_state as i32, @@ -1448,8 +1268,7 @@ impl mayastor_server::Mayastor for MayastorSvc { debug!("Setting NVMe ANA state for nexus {} ...", uuid); let ana_state = nexus::NvmeAnaState::from_i32(args.ana_state)?; - let ana_state = - nexus_lookup(&args.uuid)?.set_ana_state(ana_state).await?; + let ana_state = nexus_lookup(&args.uuid)?.set_ana_state(ana_state).await?; info!("Set nexus {} NVMe ANA state {:?}", uuid, ana_state); Ok(Null {}) })?; @@ -1463,10 +1282,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn child_operation( - &self, - request: Request, - ) -> GrpcResult { + async fn child_operation(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); @@ -1477,13 +1293,9 @@ impl mayastor_server::Mayastor for MayastorSvc { let nexus = nexus_lookup(&args.uuid)?; match args.action { - 0 => { - nexus.fault_child(&args.uri, FaultReason::Offline).await - } + 0 => nexus.fault_child(&args.uri, FaultReason::Offline).await, 1 => nexus.online_child(&args.uri).await, - 2 => { - nexus.fault_child(&args.uri, FaultReason::IoError).await - } + 2 => nexus.fault_child(&args.uri, FaultReason::IoError).await, _ => Err(nexus::Error::InvalidKey {}), }?; @@ -1499,10 +1311,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn start_rebuild( - &self, - request: Request, - ) -> GrpcResult { + async fn start_rebuild(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); @@ -1525,10 +1334,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn stop_rebuild( - &self, - request: Request, - ) -> GrpcResult { + async fn stop_rebuild(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); @@ -1549,10 +1355,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn pause_rebuild( - &self, - request: Request, - ) -> GrpcResult { + async fn pause_rebuild(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let msg = request.into_inner(); @@ -1572,10 +1375,7 @@ impl mayastor_server::Mayastor for MayastorSvc { } #[named] - async fn resume_rebuild( - &self, - request: Request, - ) -> GrpcResult { + async fn resume_rebuild(&self, request: Request) -> GrpcResult { let ctx = GrpcClientContext::new(&request, function_name!()); let msg = request.into_inner(); @@ -1655,9 +1455,7 @@ impl mayastor_server::Mayastor for MayastorSvc { nexus_lookup(&args.uuid)? 
.rebuild_progress(&args.uri) .await - .map(|p| RebuildProgressReply { - progress: p, - }) + .map(|p| RebuildProgressReply { progress: p }) })?; rx.await @@ -1705,17 +1503,13 @@ impl mayastor_server::Mayastor for MayastorSvc { }) } None => { - let emsg = - format!("Replica {} has no UUID", r.uri()); + let emsg = format!("Replica {} has no UUID", r.uri()); error!( nexus = uuid, error = emsg, "Failed to create a snapshot for nexus" ); - return Err(NexusError::FailedCreateSnapshot { - name, - reason: emsg, - }); + return Err(NexusError::FailedCreateSnapshot { name, reason: emsg }); } }; } @@ -1723,9 +1517,7 @@ impl mayastor_server::Mayastor for MayastorSvc { let reply = nexus .create_snapshot(snapshot, replicas) .await - .map(|_r| CreateSnapshotReply { - name, - })?; + .map(|_r| CreateSnapshotReply { name })?; info!("Created snapshot on nexus {}", uuid); trace!("{:?}", reply); Ok(reply) @@ -1780,9 +1572,7 @@ impl mayastor_server::Mayastor for MayastorSvc { .into_iter() .map(NvmeController::from) .collect(); - Ok(ListNvmeControllersReply { - controllers, - }) + Ok(ListNvmeControllersReply { controllers }) })?; rx.await @@ -1810,15 +1600,11 @@ impl mayastor_server::Mayastor for MayastorSvc { if stats.is_ok() { res.push(NvmeControllerStats { name: ctrl.name, - stats: stats - .ok() - .map(NvmeControllerIoStats::from), + stats: stats.ok().map(NvmeControllerIoStats::from), }); } } - Ok(StatNvmeControllersReply { - controllers: res, - }) + Ok(StatNvmeControllersReply { controllers: res }) })?; rx.await .map_err(|_| Status::cancelled("cancelled"))? @@ -1829,10 +1615,7 @@ impl mayastor_server::Mayastor for MayastorSvc { .await } - async fn get_mayastor_info( - &self, - _request: Request, - ) -> GrpcResult { + async fn get_mayastor_info(&self, _request: Request) -> GrpcResult { let features = MayastorFeatures::get().into(); let reply = MayastorInfoRequest { diff --git a/io-engine/src/grpc/v0/nexus_grpc.rs b/io-engine/src/grpc/v0/nexus_grpc.rs index 96e0a19b3..844054848 100644 --- a/io-engine/src/grpc/v0/nexus_grpc.rs +++ b/io-engine/src/grpc/v0/nexus_grpc.rs @@ -8,15 +8,8 @@ use crate::{ bdev::{ nexus, nexus::{ - nexus_lookup_mut, - nexus_lookup_uuid_mut, - ChildStateClient, - FaultReason, - Nexus, - NexusChild, - NexusPtpl, - NexusStatus, - NvmeAnaState, + nexus_lookup_mut, nexus_lookup_uuid_mut, ChildStateClient, FaultReason, Nexus, + NexusChild, NexusPtpl, NexusStatus, NvmeAnaState, }, PtplFileOps, }, @@ -42,9 +35,7 @@ fn map_fault_reason(r: FaultReason) -> ChildStateReason { fn map_child_state(child: &NexusChild) -> (ChildState, ChildStateReason) { use ChildState::{ - ChildDegraded as Degraded, - ChildFaulted as Faulted, - ChildOnline as Online, + ChildDegraded as Degraded, ChildFaulted as Faulted, ChildOnline as Online, ChildUnknown as Unknown, }; use ChildStateReason::*; @@ -82,21 +73,11 @@ impl From for rpc::NexusState { impl From for rpc::NvmeAnaState { fn from(state: NvmeAnaState) -> Self { match state { - NvmeAnaState::InvalidState => { - rpc::NvmeAnaState::NvmeAnaInvalidState - } - NvmeAnaState::OptimizedState => { - rpc::NvmeAnaState::NvmeAnaOptimizedState - } - NvmeAnaState::NonOptimizedState => { - rpc::NvmeAnaState::NvmeAnaNonOptimizedState - } - NvmeAnaState::InaccessibleState => { - rpc::NvmeAnaState::NvmeAnaInaccessibleState - } - NvmeAnaState::PersistentLossState => { - rpc::NvmeAnaState::NvmeAnaPersistentLossState - } + NvmeAnaState::InvalidState => rpc::NvmeAnaState::NvmeAnaInvalidState, + NvmeAnaState::OptimizedState => rpc::NvmeAnaState::NvmeAnaOptimizedState, + 
NvmeAnaState::NonOptimizedState => rpc::NvmeAnaState::NvmeAnaNonOptimizedState, + NvmeAnaState::InaccessibleState => rpc::NvmeAnaState::NvmeAnaInaccessibleState, + NvmeAnaState::PersistentLossState => rpc::NvmeAnaState::NvmeAnaPersistentLossState, NvmeAnaState::ChangeState => rpc::NvmeAnaState::NvmeAnaChangeState, } } @@ -199,9 +180,7 @@ pub fn uuid_to_name(uuid: &str) -> Result { /// Look up a nexus by name first (if created by nexus_create_v2) then by its /// uuid prepending "nexus-" prefix. /// Return error if nexus not found. -pub fn nexus_lookup<'n>( - uuid: &str, -) -> Result>, nexus::Error> { +pub fn nexus_lookup<'n>(uuid: &str) -> Result>, nexus::Error> { if let Some(nexus) = nexus_lookup_mut(uuid) { Ok(nexus) } else if let Some(nexus) = nexus_lookup_uuid_mut(uuid) { @@ -221,9 +200,7 @@ pub fn nexus_lookup<'n>( /// Add child to nexus. Normally this would have been part of grpc method /// implementation, however it is not allowed to use '?' in `locally` macro. /// So we implement it as a separate function. -pub async fn nexus_add_child( - args: rpc::AddChildNexusRequest, -) -> Result { +pub async fn nexus_add_child(args: rpc::AddChildNexusRequest) -> Result { let mut n = nexus_lookup(&args.uuid)?; // TODO: do not add child if it already exists (idempotency) // For that we need api to check existence of child by name (not uri that diff --git a/io-engine/src/grpc/v1/bdev.rs b/io-engine/src/grpc/v1/bdev.rs index a028f888a..21381ccb2 100644 --- a/io-engine/src/grpc/v1/bdev.rs +++ b/io-engine/src/grpc/v1/bdev.rs @@ -5,16 +5,8 @@ use crate::{ grpc::{rpc_submit, GrpcResult}, }; use io_engine_api::v1::bdev::{ - Bdev, - BdevRpc, - BdevShareRequest, - BdevShareResponse, - BdevUnshareRequest, - CreateBdevRequest, - CreateBdevResponse, - DestroyBdevRequest, - ListBdevOptions, - ListBdevResponse, + Bdev, BdevRpc, BdevShareRequest, BdevShareResponse, BdevUnshareRequest, CreateBdevRequest, + CreateBdevResponse, DestroyBdevRequest, ListBdevOptions, ListBdevResponse, }; use std::{convert::TryFrom, pin::Pin}; use tonic::{Request, Response, Status}; @@ -61,10 +53,7 @@ impl Default for BdevService { #[tonic::async_trait] impl BdevRpc for BdevService { #[tracing::instrument(skip(self))] - async fn list( - &self, - request: Request, - ) -> GrpcResult { + async fn list(&self, request: Request) -> GrpcResult { let rx = rpc_submit::<_, _, BdevError>(async { let mut bdevs = Vec::new(); let args = request.into_inner(); @@ -76,9 +65,7 @@ impl BdevRpc for BdevService { bdev.into_iter().for_each(|bdev| bdevs.push(bdev.into())) } - Ok(ListBdevResponse { - bdevs, - }) + Ok(ListBdevResponse { bdevs }) })?; rx.await @@ -102,9 +89,7 @@ impl BdevRpc for BdevService { bdev: Some(bdev.into()), })) } else { - Err(BdevError::BdevNotFound { - name, - }) + Err(BdevError::BdevNotFound { name }) } })?; @@ -114,10 +99,7 @@ impl BdevRpc for BdevService { } #[tracing::instrument(skip(self))] - async fn destroy( - &self, - request: Request, - ) -> GrpcResult<()> { + async fn destroy(&self, request: Request) -> GrpcResult<()> { let uri = request.into_inner().uri; let rx = rpc_submit(async move { bdev_destroy(&uri).await })?; @@ -138,16 +120,13 @@ impl BdevRpc for BdevService { let protocol = r.protocol; let rx = match Protocol::try_from(protocol) { - Ok(Protocol::Nvmf) => { - rpc_submit::<_, Bdev, CoreError>(async move { - let mut bdev = core::UntypedBdev::get_by_name(&bdev_name)?; - let props = NvmfShareProps::new() - .with_allowed_hosts(r.allowed_hosts); - Pin::new(&mut bdev).share_nvmf(Some(props)).await?; - let bdev = 
core::UntypedBdev::get_by_name(&bdev_name)?; - Ok(bdev.into()) - }) - } + Ok(Protocol::Nvmf) => rpc_submit::<_, Bdev, CoreError>(async move { + let mut bdev = core::UntypedBdev::get_by_name(&bdev_name)?; + let props = NvmfShareProps::new().with_allowed_hosts(r.allowed_hosts); + Pin::new(&mut bdev).share_nvmf(Some(props)).await?; + let bdev = core::UntypedBdev::get_by_name(&bdev_name)?; + Ok(bdev.into()) + }), _ => return Err(Status::invalid_argument(protocol.to_string())), }?; @@ -155,23 +134,14 @@ impl BdevRpc for BdevService { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(|e| match e { - CoreError::BdevNotFound { - name, - } => Status::not_found(name), + CoreError::BdevNotFound { name } => Status::not_found(name), e => Status::internal(e.to_string()), }) - .map(|bdev| { - Ok(Response::new(BdevShareResponse { - bdev: Some(bdev), - })) - })? + .map(|bdev| Ok(Response::new(BdevShareResponse { bdev: Some(bdev) })))? } #[tracing::instrument(skip(self))] - async fn unshare( - &self, - request: Request, - ) -> GrpcResult<()> { + async fn unshare(&self, request: Request) -> GrpcResult<()> { let rx = rpc_submit::<_, _, CoreError>(async { let name = request.into_inner().name; if let Some(mut bdev) = core::UntypedBdev::lookup_by_name(&name) { diff --git a/io-engine/src/grpc/v1/host.rs b/io-engine/src/grpc/v1/host.rs index d8b2f3bb6..83f363ef3 100644 --- a/io-engine/src/grpc/v1/host.rs +++ b/io-engine/src/grpc/v1/host.rs @@ -2,15 +2,8 @@ use crate::{ bdev::{nexus, NvmeControllerState}, core::{BlockDeviceIoStats, CoreError, MayastorBugFixes, MayastorFeatures}, grpc::{ - controller_grpc::{ - controller_stats, - list_controllers, - NvmeControllerInfo, - }, - rpc_submit, - GrpcClientContext, - GrpcResult, - Serializer, + controller_grpc::{controller_stats, list_controllers, NvmeControllerInfo}, + rpc_submit, GrpcClientContext, GrpcResult, Serializer, }, host::{blk_device, resource}, subsys::{registration::registration_grpc::ApiVersion, Registration}, @@ -201,21 +194,11 @@ impl From for host_rpc::NvmeControllerState { fn from(state: NvmeControllerState) -> Self { match state { NvmeControllerState::New => host_rpc::NvmeControllerState::New, - NvmeControllerState::Initializing => { - host_rpc::NvmeControllerState::Initializing - } - NvmeControllerState::Running => { - host_rpc::NvmeControllerState::Running - } - NvmeControllerState::Faulted(_) => { - host_rpc::NvmeControllerState::Faulted - } - NvmeControllerState::Unconfiguring => { - host_rpc::NvmeControllerState::Unconfiguring - } - NvmeControllerState::Unconfigured => { - host_rpc::NvmeControllerState::Unconfigured - } + NvmeControllerState::Initializing => host_rpc::NvmeControllerState::Initializing, + NvmeControllerState::Running => host_rpc::NvmeControllerState::Running, + NvmeControllerState::Faulted(_) => host_rpc::NvmeControllerState::Faulted, + NvmeControllerState::Unconfiguring => host_rpc::NvmeControllerState::Unconfiguring, + NvmeControllerState::Unconfigured => host_rpc::NvmeControllerState::Unconfigured, } } } @@ -251,8 +234,7 @@ impl host_rpc::HostRpc for HostService { registration_info: Some(RegisterRequest { id: self.node_name.clone(), grpc_endpoint: self.grpc_socket.to_string(), - instance_uuid: Registration::get() - .map(|r| r.instance_uuid().to_string()), + instance_uuid: Registration::get().map(|r| r.instance_uuid().to_string()), api_version: api_versions, hostnqn: self.node_nqn.clone(), features: Some(MayastorFeatures::get().into()), @@ -307,9 +289,7 @@ impl host_rpc::HostRpc for HostService { .into_iter() 
.map(host_rpc::NvmeController::from) .collect(); - Ok(host_rpc::ListNvmeControllersResponse { - controllers, - }) + Ok(host_rpc::ListNvmeControllersResponse { controllers }) })?; rx.await @@ -333,14 +313,8 @@ impl host_rpc::HostRpc for HostService { let rx = rpc_submit::<_, _, CoreError>(async move { controller_stats(&args.name) .await - .map(|blk_stat| { - Some(host_rpc::NvmeControllerIoStats::from( - blk_stat, - )) - }) - .map(|ctrl_stat| host_rpc::StatNvmeControllerResponse { - stats: ctrl_stat, - }) + .map(|blk_stat| Some(host_rpc::NvmeControllerIoStats::from(blk_stat))) + .map(|ctrl_stat| host_rpc::StatNvmeControllerResponse { stats: ctrl_stat }) })?; rx.await .map_err(|_| Status::cancelled("cancelled"))? diff --git a/io-engine/src/grpc/v1/json.rs b/io-engine/src/grpc/v1/json.rs index ec3a76338..73a0cdd00 100644 --- a/io-engine/src/grpc/v1/json.rs +++ b/io-engine/src/grpc/v1/json.rs @@ -16,9 +16,7 @@ pub struct JsonService { impl JsonService { pub fn new(rpc_addr: Cow<'static, str>) -> Self { - Self { - rpc_addr, - } + Self { rpc_addr } } } @@ -26,19 +24,14 @@ impl JsonService { impl JsonRpc for JsonService { /// Invoke a json-rpc method and return the result #[tracing::instrument(skip(self))] - async fn json_rpc_call( - &self, - request: Request, - ) -> GrpcResult { + async fn json_rpc_call(&self, request: Request) -> GrpcResult { let args = request.into_inner(); let result = self .spdk_jsonrpc_call(&args.method, empty_as_none(&args.params)) .await?; - Ok(Response::new(JsonRpcResponse { - result, - })) + Ok(Response::new(JsonRpcResponse { result })) } } @@ -51,16 +44,10 @@ fn empty_as_none(value: &str) -> Option<&str> { } impl JsonService { - async fn spdk_jsonrpc_call( - &self, - method: &str, - arg: Option<&str>, - ) -> Result { - let params: Option = - arg.map(serde_json::from_str).transpose()?; + async fn spdk_jsonrpc_call(&self, method: &str, arg: Option<&str>) -> Result { + let params: Option = arg.map(serde_json::from_str).transpose()?; - let result: serde_json::Value = - jsonrpc::call(&self.rpc_addr, method, params).await?; + let result: serde_json::Value = jsonrpc::call(&self.rpc_addr, method, params).await?; serde_json::to_string_pretty(&result).map_err(Error::ParseError) } diff --git a/io-engine/src/grpc/v1/lvm/mod.rs b/io-engine/src/grpc/v1/lvm/mod.rs index e32324319..4ad07e144 100644 --- a/io-engine/src/grpc/v1/lvm/mod.rs +++ b/io-engine/src/grpc/v1/lvm/mod.rs @@ -4,27 +4,14 @@ use tonic::Status; impl From for tonic::Status { fn from(e: LvmError) -> Self { match e { - LvmError::InvalidPoolType { - .. + LvmError::InvalidPoolType { .. } + | LvmError::VgUuidSet { .. } + | LvmError::DisksMismatch { .. } => Status::invalid_argument(e.to_string()), + LvmError::NotFound { .. } | LvmError::LvNotFound { .. } => { + Status::not_found(e.to_string()) } - | LvmError::VgUuidSet { - .. - } - | LvmError::DisksMismatch { - .. - } => Status::invalid_argument(e.to_string()), - LvmError::NotFound { - .. - } - | LvmError::LvNotFound { - .. - } => Status::not_found(e.to_string()), - LvmError::NoSpace { - .. - } => Status::resource_exhausted(e.to_string()), - LvmError::SnapshotNotSup { - .. - } => Status::failed_precondition(e.to_string()), + LvmError::NoSpace { .. } => Status::resource_exhausted(e.to_string()), + LvmError::SnapshotNotSup { .. 
} => Status::failed_precondition(e.to_string()), _ => Status::internal(e.to_string()), } } diff --git a/io-engine/src/grpc/v1/nexus.rs b/io-engine/src/grpc/v1/nexus.rs index a1c8629da..2040f9581 100644 --- a/io-engine/src/grpc/v1/nexus.rs +++ b/io-engine/src/grpc/v1/nexus.rs @@ -1,18 +1,11 @@ use crate::{ bdev::{ nexus, - nexus::{ - nexus_lookup_uuid_mut, - ChildStateClient, - FaultReason, - NexusChild, - NexusStatus, - }, + nexus::{nexus_lookup_uuid_mut, ChildStateClient, FaultReason, NexusChild, NexusStatus}, }, core::{ lock::{ProtectedSubsystems, ResourceLockManager}, - Protocol, - Share, + Protocol, Share, }, grpc::{rpc_submit, GrpcClientContext, GrpcResult}, rebuild::{HistoryRecord, RebuildState, RebuildStats}, @@ -80,10 +73,11 @@ impl NexusService { let _global_guard = if global_operation { match lock_manager.lock(Some(ctx.timeout), false).await { Some(g) => Some(g), - None => return Err(Status::deadline_exceeded( - "Failed to acquire access to object within given timeout" - .to_string() - )), + None => { + return Err(Status::deadline_exceeded( + "Failed to acquire access to object within given timeout".to_string(), + )) + } } } else { None @@ -93,13 +87,15 @@ impl NexusService { let _resource_guard = match lock_manager .get_subsystem(ProtectedSubsystems::NEXUS) .lock_resource(nexus_uuid, Some(ctx.timeout), false) - .await { - Some(g) => g, - None => return Err(Status::deadline_exceeded( - "Failed to acquire access to object within given timeout" - .to_string() - )), - }; + .await + { + Some(g) => g, + None => { + return Err(Status::deadline_exceeded( + "Failed to acquire access to object within given timeout".to_string(), + )) + } + }; let r = fut.await; match r { @@ -113,9 +109,10 @@ impl NexusService { } } }) - .await { + .await + { Ok(r) => r, - Err(_) => Err(Status::cancelled("gRPC call cancelled")) + Err(_) => Err(Status::cancelled("gRPC call cancelled")), } } } @@ -234,18 +231,10 @@ impl From for nexus::NvmeReservation { NvmeReservation::Reserved => Self::Reserved, NvmeReservation::WriteExclusive => Self::WriteExclusive, NvmeReservation::ExclusiveAccess => Self::ExclusiveAccess, - NvmeReservation::WriteExclusiveRegsOnly => { - Self::WriteExclusiveRegsOnly - } - NvmeReservation::ExclusiveAccessRegsOnly => { - Self::ExclusiveAccessRegsOnly - } - NvmeReservation::WriteExclusiveAllRegs => { - Self::WriteExclusiveAllRegs - } - NvmeReservation::ExclusiveAccessAllRegs => { - Self::ExclusiveAccessAllRegs - } + NvmeReservation::WriteExclusiveRegsOnly => Self::WriteExclusiveRegsOnly, + NvmeReservation::ExclusiveAccessRegsOnly => Self::ExclusiveAccessRegsOnly, + NvmeReservation::WriteExclusiveAllRegs => Self::WriteExclusiveAllRegs, + NvmeReservation::ExclusiveAccessAllRegs => Self::ExclusiveAccessAllRegs, } } } @@ -269,12 +258,8 @@ impl TryFrom for nexus::NexusNvmePreemption { type Error = tonic::Status; fn try_from(value: NvmePreemptionConv) -> Result { match NexusNvmePreemption::try_from(value.0) { - Ok(NexusNvmePreemption::ArgKey) => { - Ok(nexus::NexusNvmePreemption::ArgKey) - } - Ok(NexusNvmePreemption::Holder) => { - Ok(nexus::NexusNvmePreemption::Holder) - } + Ok(NexusNvmePreemption::ArgKey) => Ok(nexus::NexusNvmePreemption::ArgKey), + Ok(NexusNvmePreemption::Holder) => Ok(nexus::NexusNvmePreemption::Holder), Err(_) => Err(tonic::Status::invalid_argument(format!( "Invalid reservation preempt policy {}", value.0 @@ -284,9 +269,7 @@ impl TryFrom for nexus::NexusNvmePreemption { } /// Look up a nexus by uuid -pub fn nexus_lookup<'n>( - uuid: &str, -) -> Result>, nexus::Error> { +pub 
fn nexus_lookup<'n>(uuid: &str) -> Result>, nexus::Error> { if let Some(nexus) = nexus_lookup_uuid_mut(uuid) { Ok(nexus) } else { @@ -344,8 +327,7 @@ impl<'n> nexus::Nexus<'n> { state: NexusState::from(self.status()) as i32, device_uri: self.get_share_uri().unwrap_or_default(), children: { - let mut children = - Vec::with_capacity(self.children_iter().count()); + let mut children = Vec::with_capacity(self.children_iter().count()); for child in self.children_iter() { children.push(child.to_grpc_v1().await); } @@ -361,9 +343,7 @@ impl<'n> nexus::Nexus<'n> { /// Add child to nexus. Normally this would have been part of grpc method /// implementation, however it is not allowed to use '?' in `locally` macro. /// So we implement it as a separate function. -async fn nexus_add_child( - args: &AddChildNexusRequest, -) -> Result { +async fn nexus_add_child(args: &AddChildNexusRequest) -> Result { let mut n = nexus_lookup(&args.uuid)?; if n.contains_child_uri(&args.uri) || { match device_name(&args.uri) { @@ -396,8 +376,7 @@ impl NexusRpc for NexusService { self.serialized(ctx, args.uuid.clone(), true, async move { trace!("{:?}", args); let resv_type = NvmeReservationConv(args.resv_type).try_into()?; - let preempt_policy = - NvmePreemptionConv(args.preempt_policy).try_into()?; + let preempt_policy = NvmePreemptionConv(args.preempt_policy).try_into()?; let rx = rpc_submit::<_, _, nexus::Error>(async move { // check for nexus exists, uuid & name if let Some(_n) = nexus::nexus_lookup(&args.name) { @@ -447,20 +426,13 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) - .map(|nexus| { - Response::new(CreateNexusResponse { - nexus: Some(nexus), - }) - }) + .map(|nexus| Response::new(CreateNexusResponse { nexus: Some(nexus) })) }) .await } #[named] - async fn destroy_nexus( - &self, - request: Request, - ) -> GrpcResult<()> { + async fn destroy_nexus(&self, request: Request) -> GrpcResult<()> { let ctx = GrpcClientContext::new(&request, function_name!()); let args = request.into_inner(); @@ -555,14 +527,9 @@ impl NexusRpc for NexusService { } } - return Ok(ListNexusResponse { - nexus_list, - }); + return Ok(ListNexusResponse { nexus_list }); - async fn add_nexus( - nexus_list: &mut Vec, - nexus: &nexus::Nexus<'_>, - ) { + async fn add_nexus(nexus_list: &mut Vec, nexus: &nexus::Nexus<'_>) { if nexus.state.lock().deref() != &nexus::NexusState::Init { nexus_list.push(nexus.into_grpc().await); } @@ -597,11 +564,7 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) - .map(|nexus| { - Response::new(AddChildNexusResponse { - nexus: Some(nexus), - }) - }) + .map(|nexus| Response::new(AddChildNexusResponse { nexus: Some(nexus) })) }) .await } @@ -620,15 +583,9 @@ impl NexusRpc for NexusService { let rx = rpc_submit::<_, _, nexus::Error>(async move { trace!("{:?}", args); if nexus_lookup(&args.uuid)?.contains_child_uri(&args.uri) { - debug!( - "Removing child {} from nexus {} ...", - args.uri, args.uuid - ); + debug!("Removing child {} from nexus {} ...", args.uri, args.uuid); nexus_lookup(&args.uuid)?.remove_child(&args.uri).await?; - info!( - "Removed child {} from nexus {}", - args.uri, args.uuid - ); + info!("Removed child {} from nexus {}", args.uri, args.uuid); event.generate(); } Ok(nexus_lookup(&args.uuid)?.into_grpc().await) @@ -637,11 +594,7 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? 
.map_err(Status::from) - .map(|nexus| { - Response::new(RemoveChildNexusResponse { - nexus: Some(nexus), - }) - }) + .map(|nexus| Response::new(RemoveChildNexusResponse { nexus: Some(nexus) })) }) .await } @@ -725,9 +678,7 @@ impl NexusRpc for NexusService { let nexus = nexus_lookup(&args.uuid)?.into_grpc().await; - Ok(PublishNexusResponse { - nexus: Some(nexus), - }) + Ok(PublishNexusResponse { nexus: Some(nexus) }) })?; rx.await .map_err(|_| Status::cancelled("cancelled"))? @@ -758,11 +709,7 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) - .map(|nexus| { - Response::new(UnpublishNexusResponse { - nexus: Some(nexus), - }) - }) + .map(|nexus| Response::new(UnpublishNexusResponse { nexus: Some(nexus) })) }) .await } @@ -780,8 +727,7 @@ impl NexusRpc for NexusService { debug!("Getting NVMe ANA state for nexus {} ...", uuid); let rx = rpc_submit::<_, _, nexus::Error>(async move { - let ana_state = - nexus_lookup(&args.uuid)?.get_ana_state().await?; + let ana_state = nexus_lookup(&args.uuid)?.get_ana_state().await?; info!("Got nexus {} NVMe ANA state {:?}", uuid, ana_state); Ok(GetNvmeAnaStateResponse { ana_state: ana_state as i32, @@ -811,8 +757,7 @@ impl NexusRpc for NexusService { let rx = rpc_submit::<_, _, nexus::Error>(async move { let ana_state = nexus::NvmeAnaState::from_i32(args.ana_state)?; - let ana_state = - nexus_lookup(&args.uuid)?.set_ana_state(ana_state).await?; + let ana_state = nexus_lookup(&args.uuid)?.set_ana_state(ana_state).await?; info!("Set nexus {} NVMe ANA state {:?}", uuid, ana_state); Ok(nexus_lookup(&args.uuid)?.into_grpc().await) })?; @@ -820,11 +765,7 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) - .map(|nexus| { - Response::new(SetNvmeAnaStateResponse { - nexus: Some(nexus), - }) - }) + .map(|nexus| Response::new(SetNvmeAnaStateResponse { nexus: Some(nexus) })) }) .await } @@ -859,10 +800,7 @@ impl NexusRpc for NexusService { 3 => { nexus .as_mut() - .fault_child( - &args.uri, - FaultReason::OfflinePermanent, - ) + .fault_child(&args.uri, FaultReason::OfflinePermanent) .await } _ => Err(nexus::Error::InvalidKey {}), @@ -874,11 +812,7 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) - .map(|n| { - Response::new(ChildOperationResponse { - nexus: Some(n), - }) - }) + .map(|n| Response::new(ChildOperationResponse { nexus: Some(n) })) }) .await } @@ -905,11 +839,7 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) - .map(|n| { - Response::new(StartRebuildResponse { - nexus: Some(n), - }) - }) + .map(|n| Response::new(StartRebuildResponse { nexus: Some(n) })) }) .await } @@ -935,11 +865,7 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) - .map(|n| { - Response::new(StopRebuildResponse { - nexus: Some(n), - }) - }) + .map(|n| Response::new(StopRebuildResponse { nexus: Some(n) })) }) .await } @@ -965,11 +891,7 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? .map_err(Status::from) - .map(|n| { - Response::new(PauseRebuildResponse { - nexus: Some(n), - }) - }) + .map(|n| Response::new(PauseRebuildResponse { nexus: Some(n) })) }) .await } @@ -994,11 +916,7 @@ impl NexusRpc for NexusService { rx.await .map_err(|_| Status::cancelled("cancelled"))? 
.map_err(Status::from) - .map(|n| { - Response::new(ResumeRebuildResponse { - nexus: Some(n), - }) - }) + .map(|n| Response::new(ResumeRebuildResponse { nexus: Some(n) })) }) .await } @@ -1111,11 +1029,7 @@ impl NexusRpc for NexusService { newest_end_time = Some(record.end_time); } }) - .filter(|record| { - end_time - .map(|t| record.end_time > t) - .unwrap_or(true) - }) + .filter(|record| end_time.map(|t| record.end_time > t).unwrap_or(true)) .rev() .take(count) .cloned() @@ -1134,9 +1048,7 @@ impl NexusRpc for NexusService { Ok(ListRebuildHistoryResponse { histories, - end_time: Some( - newest_end_time.map_or(default_end_time.into(), Into::into), - ), + end_time: Some(newest_end_time.map_or(default_end_time.into(), Into::into)), }) })?; diff --git a/io-engine/src/grpc/v1/pool.rs b/io-engine/src/grpc/v1/pool.rs index 799ad0303..1bf94f2c6 100644 --- a/io-engine/src/grpc/v1/pool.rs +++ b/io-engine/src/grpc/v1/pool.rs @@ -1,38 +1,17 @@ pub use crate::pool_backend::FindPoolArgs as PoolIdProbe; use crate::{ - core::{ - NvmfShareProps, - ProtectedSubsystems, - Protocol, - ResourceLockGuard, - ResourceLockManager, - }, - grpc::{ - acquire_subsystem_lock, - GrpcClientContext, - GrpcResult, - RWLock, - RWSerializer, - }, + core::{NvmfShareProps, ProtectedSubsystems, Protocol, ResourceLockGuard, ResourceLockManager}, + grpc::{acquire_subsystem_lock, GrpcClientContext, GrpcResult, RWLock, RWSerializer}, lvs::{BsError, LvsError}, pool_backend::{ - self, - FindPoolArgs, - IPoolFactory, - ListPoolArgs, - PoolArgs, - PoolBackend, - PoolFactory, - PoolOps, - ReplicaArgs, + self, FindPoolArgs, IPoolFactory, ListPoolArgs, PoolArgs, PoolBackend, PoolFactory, + PoolOps, ReplicaArgs, }, }; use ::function_name::named; use futures::FutureExt; use io_engine_api::v1::{ - pool::*, - replica::destroy_replica_request, - snapshot::destroy_snapshot_request, + pool::*, replica::destroy_replica_request, snapshot::destroy_snapshot_request, }; use std::{convert::TryFrom, fmt::Debug, ops::Deref, panic::AssertUnwindSafe}; use tonic::{Request, Status}; @@ -45,10 +24,7 @@ impl From for FindPoolArgs { impl From<&destroy_replica_request::Pool> for FindPoolArgs { fn from(value: &destroy_replica_request::Pool) -> Self { match value.clone() { - destroy_replica_request::Pool::PoolName(name) => Self::NameUuid { - name, - uuid: None, - }, + destroy_replica_request::Pool::PoolName(name) => Self::NameUuid { name, uuid: None }, destroy_replica_request::Pool::PoolUuid(uuid) => Self::Uuid(uuid), } } @@ -56,10 +32,7 @@ impl From<&destroy_replica_request::Pool> for FindPoolArgs { impl From<&destroy_snapshot_request::Pool> for FindPoolArgs { fn from(value: &destroy_snapshot_request::Pool) -> Self { match value.clone() { - destroy_snapshot_request::Pool::PoolName(name) => Self::NameUuid { - name, - uuid: None, - }, + destroy_snapshot_request::Pool::PoolName(name) => Self::NameUuid { name, uuid: None }, destroy_snapshot_request::Pool::PoolUuid(uuid) => Self::Uuid(uuid), } } @@ -80,8 +53,7 @@ impl From for FindPoolArgs { #[allow(dead_code)] pub struct PoolService { name: String, - client_context: - std::sync::Arc>>, + client_context: std::sync::Arc>>, } #[async_trait::async_trait] @@ -164,19 +136,15 @@ impl TryFrom for PoolArgs { }); } - let backend = PoolType::try_from(args.pooltype).map_err(|_| { - LvsError::Invalid { - source: BsError::InvalidArgument {}, - msg: format!("invalid pooltype provided: {}", args.pooltype), - } + let backend = PoolType::try_from(args.pooltype).map_err(|_| LvsError::Invalid { + source: BsError::InvalidArgument {}, 
+ msg: format!("invalid pooltype provided: {}", args.pooltype), })?; if backend == PoolType::Lvs { if let Some(s) = args.uuid.clone() { - let _uuid = uuid::Uuid::parse_str(s.as_str()).map_err(|e| { - LvsError::Invalid { - source: BsError::InvalidArgument {}, - msg: format!("invalid uuid provided, {e}"), - } + let _uuid = uuid::Uuid::parse_str(s.as_str()).map_err(|e| LvsError::Invalid { + source: BsError::InvalidArgument {}, + msg: format!("invalid uuid provided, {e}"), })?; } } @@ -245,19 +213,15 @@ impl TryFrom for PoolArgs { }); } - let backend = PoolType::try_from(args.pooltype).map_err(|_| { - LvsError::Invalid { - source: BsError::InvalidArgument {}, - msg: format!("invalid pooltype provided: {}", args.pooltype), - } + let backend = PoolType::try_from(args.pooltype).map_err(|_| LvsError::Invalid { + source: BsError::InvalidArgument {}, + msg: format!("invalid pooltype provided: {}", args.pooltype), })?; if backend == PoolType::Lvs { if let Some(s) = args.uuid.clone() { - let _uuid = uuid::Uuid::parse_str(s.as_str()).map_err(|e| { - LvsError::Invalid { - source: BsError::InvalidArgument {}, - msg: format!("invalid uuid provided, {e}"), - } + let _uuid = uuid::Uuid::parse_str(s.as_str()).map_err(|e| LvsError::Invalid { + source: BsError::InvalidArgument {}, + msg: format!("invalid uuid provided, {e}"), })?; } } @@ -293,10 +257,7 @@ pub(crate) struct PoolGrpc { impl PoolGrpc { fn new(pool: Box, _guard: ResourceLockGuard<'static>) -> Self { - Self { - pool, - _guard, - } + Self { pool, _guard } } pub(crate) async fn create_replica( &self, @@ -322,14 +283,10 @@ impl PoolGrpc { match replica.share_nvmf(props).await { Ok(share_uri) => { debug!("created and shared {replica:?} as {share_uri}"); - Ok(io_engine_api::v1::replica::Replica::from( - replica.deref(), - )) + Ok(io_engine_api::v1::replica::Replica::from(replica.deref())) } Err(error) => { - warn!( - "failed to share created lvol {replica:?}: {error} (destroying)" - ); + warn!("failed to share created lvol {replica:?}: {error} (destroying)"); let _ = replica.destroy().await; Err(error.into()) } @@ -428,14 +385,11 @@ impl GrpcPoolFactory { } /// Probe backends for the given name and/or uuid and return the right one. 
- pub(crate) async fn finder>( - args: I, - ) -> Result { + pub(crate) async fn finder>(args: I) -> Result { let pool = PoolFactory::find(args).await?; - let pool_subsystem = ResourceLockManager::get_instance() - .get_subsystem(ProtectedSubsystems::POOL); - let lock_guard = - acquire_subsystem_lock(pool_subsystem, Some(pool.name())).await?; + let pool_subsystem = + ResourceLockManager::get_instance().get_subsystem(ProtectedSubsystems::POOL); + let lock_guard = acquire_subsystem_lock(pool_subsystem, Some(pool.name())).await?; Ok(PoolGrpc::new(pool, lock_guard)) } async fn list(&self, args: &ListPoolArgs) -> Result, Status> { @@ -473,12 +427,11 @@ impl GrpcPoolFactory { } } async fn create(&self, args: PoolArgs) -> Result { - let pool_subsystem = ResourceLockManager::get_instance() - .get_subsystem(ProtectedSubsystems::POOL); + let pool_subsystem = + ResourceLockManager::get_instance().get_subsystem(ProtectedSubsystems::POOL); // todo: missing lock by uuid as well, need to ensure also we don't // clash with a pool with != name but same uuid - let _lock_guard = - acquire_subsystem_lock(pool_subsystem, Some(&args.name)).await?; + let _lock_guard = acquire_subsystem_lock(pool_subsystem, Some(&args.name)).await?; let finder = FindPoolArgs::from(&args); for factory in Self::factories() { @@ -489,10 +442,9 @@ impl GrpcPoolFactory { Ok(pool.into()) } async fn import(&self, args: PoolArgs) -> Result { - let pool_subsystem = ResourceLockManager::get_instance() - .get_subsystem(ProtectedSubsystems::POOL); - let _lock_guard = - acquire_subsystem_lock(pool_subsystem, Some(&args.name)).await?; + let pool_subsystem = + ResourceLockManager::get_instance().get_subsystem(ProtectedSubsystems::POOL); + let _lock_guard = acquire_subsystem_lock(pool_subsystem, Some(&args.name)).await?; let finder = FindPoolArgs::from(&args); for factory in Self::factories() { @@ -509,19 +461,15 @@ impl GrpcPoolFactory { #[tonic::async_trait] impl PoolRpc for PoolService { #[named] - async fn create_pool( - &self, - request: Request, - ) -> GrpcResult { + async fn create_pool(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { crate::spdk_submit!(async move { info!("{:?}", request.get_ref()); - let factory = GrpcPoolFactory::new(PoolBackend::try_from( - request.get_ref().pooltype, - )?)?; + let factory = + GrpcPoolFactory::new(PoolBackend::try_from(request.get_ref().pooltype)?)?; factory .create(PoolArgs::try_from(request.into_inner())?) 
.await @@ -532,18 +480,14 @@ impl PoolRpc for PoolService { } #[named] - async fn destroy_pool( - &self, - request: Request, - ) -> GrpcResult<()> { + async fn destroy_pool(&self, request: Request) -> GrpcResult<()> { self.locked( GrpcClientContext::new(&request, function_name!()), async move { crate::spdk_submit!(async move { info!("{:?}", request.get_ref()); - let pool = - GrpcPoolFactory::finder(request.into_inner()).await?; + let pool = GrpcPoolFactory::finder(request.into_inner()).await?; pool.destroy().await.map_err(Into::into) }) }, @@ -552,18 +496,14 @@ impl PoolRpc for PoolService { } #[named] - async fn export_pool( - &self, - request: Request, - ) -> GrpcResult<()> { + async fn export_pool(&self, request: Request) -> GrpcResult<()> { self.locked( GrpcClientContext::new(&request, function_name!()), async move { crate::spdk_submit!(async move { info!("{:?}", request.get_ref()); - let pool = - GrpcPoolFactory::finder(request.into_inner()).await?; + let pool = GrpcPoolFactory::finder(request.into_inner()).await?; pool.export().await.map_err(Into::into) }) }, @@ -572,19 +512,15 @@ impl PoolRpc for PoolService { } #[named] - async fn import_pool( - &self, - request: Request, - ) -> GrpcResult { + async fn import_pool(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { crate::spdk_submit!(async move { info!("{:?}", request.get_ref()); - let factory = GrpcPoolFactory::new(PoolBackend::try_from( - request.get_ref().pooltype, - )?)?; + let factory = + GrpcPoolFactory::new(PoolBackend::try_from(request.get_ref().pooltype)?)?; factory .import(PoolArgs::try_from(request.into_inner())?) .await @@ -595,10 +531,7 @@ impl PoolRpc for PoolService { } #[named] - async fn list_pools( - &self, - request: Request, - ) -> GrpcResult { + async fn list_pools(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { @@ -614,9 +547,8 @@ impl PoolRpc for PoolService { let pool_type = match pool_type { None => None, Some(pool_type) => Some( - PoolType::try_from(pool_type).map_err(|_| { - Status::invalid_argument("Unknown pool type") - })?, + PoolType::try_from(pool_type) + .map_err(|_| Status::invalid_argument("Unknown pool type"))?, ), }; @@ -628,9 +560,7 @@ impl PoolRpc for PoolService { let mut pools = Vec::new(); for factory in GrpcPoolFactory::factories() { - if args.backend.is_some() - && args.backend != Some(factory.backend()) - { + if args.backend.is_some() && args.backend != Some(factory.backend()) { continue; } match factory.list(&args).await { @@ -639,14 +569,14 @@ impl PoolRpc for PoolService { } Err(error) => { let backend = factory.0.as_factory().backend(); - tracing::error!("Failed to list pools of type {backend:?}, error: {error}"); + tracing::error!( + "Failed to list pools of type {backend:?}, error: {error}" + ); } } } - Ok(ListPoolsResponse { - pools, - }) + Ok(ListPoolsResponse { pools }) }) }, ) @@ -654,18 +584,14 @@ impl PoolRpc for PoolService { } #[named] - async fn grow_pool( - &self, - request: Request, - ) -> GrpcResult { + async fn grow_pool(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { crate::spdk_submit!(async move { info!("{:?}", request.get_ref()); - let pool = - GrpcPoolFactory::finder(request.into_inner()).await?; + let pool = GrpcPoolFactory::finder(request.into_inner()).await?; let previous_pool = Pool::from(pool.as_ops()); pool.grow().await.map_err(Into::::into)?; @@ 
-677,7 +603,7 @@ impl PoolRpc for PoolService { p = current_pool.name, sz = current_pool.capacity, ); - } else{ + } else { info!( "Grow pool '{p}': pool capacity has changed from {a} to {b} bytes", p = current_pool.name, diff --git a/io-engine/src/grpc/v1/replica.rs b/io-engine/src/grpc/v1/replica.rs index d47c3fa05..acb6d8102 100644 --- a/io-engine/src/grpc/v1/replica.rs +++ b/io-engine/src/grpc/v1/replica.rs @@ -2,31 +2,18 @@ use crate::{ core::{ logical_volume::LvolSpaceUsage, wiper::{WipeMethod, Wiper}, - Bdev, - NvmfShareProps, - ProtectedSubsystems, - Protocol, - ResourceLockManager, - ToErrno, + Bdev, NvmfShareProps, ProtectedSubsystems, Protocol, ResourceLockManager, ToErrno, UpdateProps, }, grpc::{ acquire_subsystem_lock, v1::pool::{GrpcPoolFactory, PoolGrpc, PoolIdProbe}, - GrpcClientContext, - GrpcResult, - RWLock, - RWSerializer, + GrpcClientContext, GrpcResult, RWLock, RWSerializer, }, pool_backend::{FindPoolArgs, PoolBackend}, replica_backend::{ - FindReplicaArgs, - IReplicaFactory, - ListCloneArgs, - ListReplicaArgs, - ListSnapshotArgs, - ReplicaFactory, - ReplicaOps, + FindReplicaArgs, IReplicaFactory, ListCloneArgs, ListReplicaArgs, ListSnapshotArgs, + ReplicaFactory, ReplicaOps, }, }; use ::function_name::named; @@ -39,8 +26,7 @@ use tonic::{Request, Status}; pub struct ReplicaService { #[allow(unused)] name: String, - client_context: - std::sync::Arc>>, + client_context: std::sync::Arc>>, } #[async_trait::async_trait] @@ -124,9 +110,7 @@ impl Default for ReplicaService { impl From for PoolIdProbe { fn from(value: destroy_replica_request::Pool) -> Self { match value { - destroy_replica_request::Pool::PoolName(name) => { - Self::UuidOrName(name) - } + destroy_replica_request::Pool::PoolName(name) => Self::UuidOrName(name), destroy_replica_request::Pool::PoolUuid(uuid) => Self::Uuid(uuid), } } @@ -190,12 +174,12 @@ fn filter_replicas_by_replica_type( // ... 
add other fields here as needed ]; - query_fields.iter().any(|(query_field, replica_field)| { - match query_field { + query_fields + .iter() + .any(|(query_field, replica_field)| match query_field { true => *replica_field, false => false, - } - }) + }) }) .collect() } @@ -209,15 +193,11 @@ impl GrpcReplicaFactory { .map(Self) .collect::>() } - pub(crate) async fn finder( - args: &FindReplicaArgs, - ) -> Result { + pub(crate) async fn finder(args: &FindReplicaArgs) -> Result { let replica = ReplicaFactory::find(args).await?; Ok(ReplicaGrpc::new(replica)) } - pub(crate) async fn pool_finder>( - args: I, - ) -> Result { + pub(crate) async fn pool_finder>(args: I) -> Result { GrpcPoolFactory::finder(args).await.map_err(|error| { if error.code() == tonic::Code::NotFound { Status::failed_precondition(error.to_string()) @@ -226,10 +206,7 @@ impl GrpcReplicaFactory { } }) } - pub(crate) async fn list( - &self, - args: &ListReplicaArgs, - ) -> Result, Status> { + pub(crate) async fn list(&self, args: &ListReplicaArgs) -> Result, Status> { let replicas = self.as_factory().list(args).await?; Ok(replicas.into_iter().map(Into::into).collect::>()) } @@ -247,10 +224,7 @@ impl GrpcReplicaFactory { let snapshots = self.as_factory().list_snaps(args).await?; Ok(snapshots.into_iter().map(Into::into).collect::>()) } - pub(crate) async fn list_clones( - &self, - args: &ListCloneArgs, - ) -> Result, Status> { + pub(crate) async fn list_clones(&self, args: &ListCloneArgs) -> Result, Status> { let clones = self.as_factory().list_clones(args).await?; Ok(clones.into_iter().map(Into::into).collect::>()) } @@ -269,15 +243,10 @@ pub(crate) struct ReplicaGrpc { impl ReplicaGrpc { fn new(replica: Box) -> Self { - Self { - replica, - } + Self { replica } } /// Get a wiper for this replica. 
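// [Editorial aside; illustrative sketch, not part of the patch.] The
// filter_replicas_by_replica_type body reformatted above keeps an entry when
// at least one requested query field is also set on the replica. The same
// idiom on plain booleans (the pairs are stand-ins for the generated query
// fields):
fn any_requested_field_matches(pairs: &[(bool, bool)]) -> bool {
    pairs.iter().any(|(query_field, replica_field)| match query_field {
        true => *replica_field,
        false => false,
    })
}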
- pub(crate) fn wiper( - &self, - wipe_method: WipeMethod, - ) -> Result { + pub(crate) fn wiper(&self, wipe_method: WipeMethod) -> Result { let hdl = Bdev::open(&self.replica.try_as_bdev()?, true) .and_then(|desc| desc.into_handle()) .map_err(|e| crate::lvs::LvsError::Invalid { @@ -294,26 +263,21 @@ impl ReplicaGrpc { } async fn share(&mut self, args: ShareReplicaRequest) -> Result<(), Status> { let pool_name = self.replica.pool_name(); - let pool_subsystem = ResourceLockManager::get_instance() - .get_subsystem(ProtectedSubsystems::POOL); - let _lock_guard = - acquire_subsystem_lock(pool_subsystem, Some(&pool_name)).await?; + let pool_subsystem = + ResourceLockManager::get_instance().get_subsystem(ProtectedSubsystems::POOL); + let _lock_guard = acquire_subsystem_lock(pool_subsystem, Some(&pool_name)).await?; let protocol = Protocol::try_from(args.share)?; // if we are already shared with the same protocol if self.replica.shared() == Some(protocol) { self.replica - .update_properties( - UpdateProps::new().with_allowed_hosts(args.allowed_hosts), - ) + .update_properties(UpdateProps::new().with_allowed_hosts(args.allowed_hosts)) .await?; return Ok(()); } if let Protocol::Off = protocol { - return Err(Status::invalid_argument( - "Invalid share protocol NONE", - )); + return Err(Status::invalid_argument("Invalid share protocol NONE")); } let props = NvmfShareProps::new() @@ -324,10 +288,9 @@ impl ReplicaGrpc { } async fn unshare(&mut self) -> Result<(), Status> { let pool_name = self.replica.pool_name(); - let pool_subsystem = ResourceLockManager::get_instance() - .get_subsystem(ProtectedSubsystems::POOL); - let _lock_guard = - acquire_subsystem_lock(pool_subsystem, Some(&pool_name)).await?; + let pool_subsystem = + ResourceLockManager::get_instance().get_subsystem(ProtectedSubsystems::POOL); + let _lock_guard = acquire_subsystem_lock(pool_subsystem, Some(&pool_name)).await?; if self.replica.shared().is_some() { self.replica.unshare().await?; @@ -346,9 +309,7 @@ impl ReplicaGrpc { pub(crate) fn verify_pool(&self, pool: &PoolGrpc) -> Result<(), Status> { let pool = pool.as_ops(); let replica = &self.replica; - if pool.name() != replica.pool_name() - || pool.uuid() != replica.pool_uuid() - { + if pool.name() != replica.pool_name() || pool.uuid() != replica.pool_uuid() { let msg = format!( "Specified pool: {pool:?} does not match the target replica's pool: {replica:?}!" 
); @@ -369,10 +330,7 @@ impl From for Replica { #[tonic::async_trait] impl ReplicaRpc for ReplicaService { #[named] - async fn create_replica( - &self, - request: Request, - ) -> GrpcResult { + async fn create_replica(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { @@ -391,10 +349,9 @@ impl ReplicaRpc for ReplicaService { let args = request.into_inner(); - let pool = GrpcReplicaFactory::pool_finder( - FindPoolArgs::uuid_or_name(&args.pooluuid), - ) - .await?; + let pool = + GrpcReplicaFactory::pool_finder(FindPoolArgs::uuid_or_name(&args.pooluuid)) + .await?; pool.create_replica(args).await }) }, @@ -403,10 +360,7 @@ impl ReplicaRpc for ReplicaService { } #[named] - async fn destroy_replica( - &self, - request: Request, - ) -> GrpcResult<()> { + async fn destroy_replica(&self, request: Request) -> GrpcResult<()> { self.locked( GrpcClientContext::new(&request, function_name!()), async move { @@ -415,21 +369,15 @@ impl ReplicaRpc for ReplicaService { let args = request.into_inner(); let pool = match &args.pool { - Some(pool) => { - Some(GrpcReplicaFactory::pool_finder(pool).await?) - } + Some(pool) => Some(GrpcReplicaFactory::pool_finder(pool).await?), None => None, }; let probe = FindReplicaArgs::new(&args.uuid); - let replica = match GrpcReplicaFactory::finder(&probe).await - { - Err(mut status) - if status.code() == tonic::Code::NotFound => - { - status.metadata_mut().insert( - "gtm-602", - tonic::metadata::MetadataValue::from(0), - ); + let replica = match GrpcReplicaFactory::finder(&probe).await { + Err(mut status) if status.code() == tonic::Code::NotFound => { + status + .metadata_mut() + .insert("gtm-602", tonic::metadata::MetadataValue::from(0)); Err(status) } _else => _else, @@ -465,10 +413,9 @@ impl ReplicaRpc for ReplicaService { let query = args.query; let fargs = ListReplicaArgs::from(args); - for factory in - GrpcReplicaFactory::factories().into_iter().filter(|f| { - backends.is_empty() || backends.contains(&f.backend()) - }) + for factory in GrpcReplicaFactory::factories() + .into_iter() + .filter(|f| backends.is_empty() || backends.contains(&f.backend())) { if let Ok(freplicas) = factory.list(&fargs).await { replicas.extend(freplicas); @@ -484,10 +431,7 @@ impl ReplicaRpc for ReplicaService { } #[named] - async fn share_replica( - &self, - request: Request, - ) -> GrpcResult { + async fn share_replica(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { @@ -495,8 +439,7 @@ impl ReplicaRpc for ReplicaService { info!("{:?}", request.get_ref()); let probe = FindReplicaArgs::new(&request.get_ref().uuid); - let mut replica = - GrpcReplicaFactory::finder(&probe).await?; + let mut replica = GrpcReplicaFactory::finder(&probe).await?; replica.share(request.into_inner()).await?; Ok(replica.into()) }) @@ -517,8 +460,7 @@ impl ReplicaRpc for ReplicaService { info!("{:?}", request.get_ref()); let probe = FindReplicaArgs::new(&request.get_ref().uuid); - let mut replica = - GrpcReplicaFactory::finder(&probe).await?; + let mut replica = GrpcReplicaFactory::finder(&probe).await?; replica.unshare().await?; Ok(replica.into()) }) @@ -528,10 +470,7 @@ impl ReplicaRpc for ReplicaService { } #[named] - async fn resize_replica( - &self, - request: Request, - ) -> GrpcResult { + async fn resize_replica(&self, request: Request) -> GrpcResult { self.locked( GrpcClientContext::new(&request, function_name!()), async move { @@ -539,8 +478,7 @@ impl ReplicaRpc for 
ReplicaService { info!("{:?}", request.get_ref()); let probe = FindReplicaArgs::new(&request.get_ref().uuid); - let mut replica = - GrpcReplicaFactory::finder(&probe).await?; + let mut replica = GrpcReplicaFactory::finder(&probe).await?; replica.resize(request.into_inner().requested_size).await?; Ok(replica.into()) }) @@ -561,8 +499,7 @@ impl ReplicaRpc for ReplicaService { info!("{:?}", request.get_ref()); let probe = FindReplicaArgs::new(&request.get_ref().uuid); - let mut replica = - GrpcReplicaFactory::finder(&probe).await?; + let mut replica = GrpcReplicaFactory::finder(&probe).await?; replica .set_entity_id(request.into_inner().entity_id) .await?; @@ -583,10 +520,8 @@ impl From for ReplicaSpaceUsage { num_clusters: u.num_clusters, num_allocated_clusters: u.num_allocated_clusters, allocated_bytes_snapshots: u.allocated_bytes_snapshots, - num_allocated_clusters_snapshots: u - .num_allocated_clusters_snapshots, - allocated_bytes_snapshot_from_clone: u - .allocated_bytes_snapshot_from_clone, + num_allocated_clusters_snapshots: u.num_allocated_clusters_snapshots, + allocated_bytes_snapshot_from_clone: u.allocated_bytes_snapshot_from_clone, } } } diff --git a/io-engine/src/grpc/v1/snapshot.rs b/io-engine/src/grpc/v1/snapshot.rs index f8c6f464b..8c251a626 100644 --- a/io-engine/src/grpc/v1/snapshot.rs +++ b/io-engine/src/grpc/v1/snapshot.rs @@ -6,15 +6,12 @@ use crate::{ core::{ lock::ProtectedSubsystems, snapshot::{SnapshotDescriptor, SnapshotParams}, - ResourceLockManager, - UntypedBdev, + ResourceLockManager, UntypedBdev, }, grpc::{ rpc_submit, v1::{nexus::nexus_lookup, replica::ReplicaGrpc}, - GrpcClientContext, - GrpcResult, - RWSerializer, + GrpcClientContext, GrpcResult, RWSerializer, }, }; use ::function_name::named; @@ -35,9 +32,7 @@ pub struct SnapshotService { replica_svc: super::replica::ReplicaService, } -impl From - for NexusReplicaSnapshotDescriptor -{ +impl From for NexusReplicaSnapshotDescriptor { fn from(descr: NexusCreateSnapshotReplicaDescriptor) -> Self { NexusReplicaSnapshotDescriptor { replica_uuid: descr.replica_uuid, @@ -146,10 +141,11 @@ impl SnapshotService { let _global_guard = if global_operation { match lock_manager.lock(Some(ctx.timeout), false).await { Some(g) => Some(g), - None => return Err(Status::deadline_exceeded( - "Failed to acquire access to object within given timeout" - .to_string() - )), + None => { + return Err(Status::deadline_exceeded( + "Failed to acquire access to object within given timeout".to_string(), + )) + } } } else { None @@ -159,13 +155,15 @@ impl SnapshotService { let _resource_guard = match lock_manager .get_subsystem(ProtectedSubsystems::NEXUS) .lock_resource(nexus_uuid, Some(ctx.timeout), false) - .await { - Some(g) => g, - None => return Err(Status::deadline_exceeded( - "Failed to acquire access to object within given timeout" - .to_string() - )), - }; + .await + { + Some(g) => g, + None => { + return Err(Status::deadline_exceeded( + "Failed to acquire access to object within given timeout".to_string(), + )) + } + }; let r = fut.await; match r { @@ -179,9 +177,10 @@ impl SnapshotService { } } }) - .await { + .await + { Ok(r) => r, - Err(_) => Err(Status::cancelled("gRPC call cancelled")) + Err(_) => Err(Status::cancelled("gRPC call cancelled")), } } } @@ -207,13 +206,13 @@ fn filter_snapshots_by_snapshot_query_type( // ... 
add other fields here as needed ]; - query_fields.iter().all(|(query_field, snapshot_field)| { - match query_field { + query_fields + .iter() + .all(|(query_field, snapshot_field)| match query_field { Some(true) => *snapshot_field, Some(false) => !(*snapshot_field), None => true, - } - }) + }) }) .collect() } @@ -222,11 +221,7 @@ use crate::{ core::snapshot::ISnapshotDescriptor, grpc::v1::{pool::PoolGrpc, replica::GrpcReplicaFactory}, replica_backend::{ - FindReplicaArgs, - FindSnapshotArgs, - ListCloneArgs, - ListSnapshotArgs, - ReplicaFactory, + FindReplicaArgs, FindSnapshotArgs, ListCloneArgs, ListSnapshotArgs, ReplicaFactory, SnapshotOps, }, }; @@ -276,9 +271,7 @@ impl ReplicaGrpc { }) } Err(error) => { - error!( - "Create Snapshot Failed for lvol: {replica:?} with Error: {error:?}" - ); + error!("Create Snapshot Failed for lvol: {replica:?} with Error: {error:?}"); Err(error.into()) } } @@ -302,16 +295,12 @@ impl SnapshotGrpc { } } } - Err(error.unwrap_or_else(|| { - Status::not_found(format!("Snapshot {args:?} not found")) - })) + Err(error.unwrap_or_else(|| Status::not_found(format!("Snapshot {args:?} not found")))) } fn verify_pool(&self, pool: &PoolGrpc) -> Result<(), Status> { let snapshot = &self.0; let pool = pool.as_ops(); - if pool.name() != snapshot.pool_name() - || pool.uuid() != snapshot.pool_uuid() - { + if pool.name() != snapshot.pool_name() || pool.uuid() != snapshot.pool_uuid() { let msg = format!( "Specified pool: {pool:?} does not match the target snapshot's pool: {snapshot:?}!" ); @@ -394,8 +383,7 @@ impl SnapshotRpc for SnapshotService { info!("{:?}", args); let probe = FindReplicaArgs::new(&args.replica_uuid); - let mut replica = - GrpcReplicaFactory::finder(&probe).await?; + let mut replica = GrpcReplicaFactory::finder(&probe).await?; replica.create_snapshot(args).await }) }, @@ -417,16 +405,13 @@ impl SnapshotRpc for SnapshotService { let fargs = ListSnapshotArgs::from(args.clone()); let mut snapshots = vec![]; for factory in GrpcReplicaFactory::factories() { - if let Ok(fsnapshots) = factory.list_snaps(&fargs).await - { + if let Ok(fsnapshots) = factory.list_snaps(&fargs).await { snapshots.extend(fsnapshots); } } Ok(ListSnapshotsResponse { - snapshots: filter_snapshots_by_snapshot_query_type( - snapshots, args.query, - ), + snapshots: filter_snapshots_by_snapshot_query_type(snapshots, args.query), }) }) }, @@ -435,10 +420,7 @@ impl SnapshotRpc for SnapshotService { } #[named] - async fn destroy_snapshot( - &self, - request: Request, - ) -> GrpcResult<()> { + async fn destroy_snapshot(&self, request: Request) -> GrpcResult<()> { self.locked( GrpcClientContext::new(&request, function_name!()), async move { @@ -446,9 +428,7 @@ impl SnapshotRpc for SnapshotService { info!("{:?}", args); crate::spdk_submit!(async move { let pool = match &args.pool { - Some(pool) => { - Some(GrpcReplicaFactory::pool_finder(pool).await?) 
- } + Some(pool) => Some(GrpcReplicaFactory::pool_finder(pool).await?), None => None, }; let probe = FindSnapshotArgs::new(args.snapshot_uuid); @@ -477,11 +457,13 @@ impl SnapshotRpc for SnapshotService { info!("{:?}", args); crate::spdk_submit!(async move { if UntypedBdev::lookup_by_uuid_str(&args.clone_uuid).is_some() { - return Err(tonic::Status::already_exists(format!("clone uuid {} already exist", args.clone_uuid))); + return Err(tonic::Status::already_exists(format!( + "clone uuid {} already exist", + args.clone_uuid + ))); } let probe = FindSnapshotArgs::new(args.snapshot_uuid.clone()); - let snapshot = - SnapshotGrpc::finder(&probe).await?.0; + let snapshot = SnapshotGrpc::finder(&probe).await?.0; // reject clone creation if "discardedSnapshot" xattr is marked as true. // todo: should be part of create_clone? @@ -492,19 +474,17 @@ impl SnapshotRpc for SnapshotService { ))); } - let clone_config = - match snapshot.prepare_clone_config( - &args.clone_name, - &args.clone_uuid, - &args.snapshot_uuid - ) { - Some(clone_config) => Ok(clone_config), - None => Err(tonic::Status::invalid_argument(format!( - "Invalid parameters clone_uuid: {}, clone_name: {}", - args.clone_uuid, - args.clone_name - ))) - }?; + let clone_config = match snapshot.prepare_clone_config( + &args.clone_name, + &args.clone_uuid, + &args.snapshot_uuid, + ) { + Some(clone_config) => Ok(clone_config), + None => Err(tonic::Status::invalid_argument(format!( + "Invalid parameters clone_uuid: {}, clone_name: {}", + args.clone_uuid, args.clone_name + ))), + }?; match snapshot.create_clone(clone_config).await { Ok(clone_lvol) => { info!("Create Clone Success for {snapshot:?}, {clone_lvol:?}"); @@ -541,9 +521,7 @@ impl SnapshotRpc for SnapshotService { replicas.extend(clones); } } - Ok(ListSnapshotCloneResponse { - replicas, - }) + Ok(ListSnapshotCloneResponse { replicas }) }) }, ) diff --git a/io-engine/src/grpc/v1/snapshot_rebuild.rs b/io-engine/src/grpc/v1/snapshot_rebuild.rs index 85ad114d9..5155756b2 100644 --- a/io-engine/src/grpc/v1/snapshot_rebuild.rs +++ b/io-engine/src/grpc/v1/snapshot_rebuild.rs @@ -1,22 +1,12 @@ use crate::{ grpc::GrpcResult, - rebuild::{ - RebuildError, - RebuildState, - RebuildStats, - SnapshotRebuildError, - SnapshotRebuildJob, - }, + rebuild::{RebuildError, RebuildState, RebuildStats, SnapshotRebuildError, SnapshotRebuildJob}, }; use io_engine_api::v1::{ snapshot_rebuild, snapshot_rebuild::{ - CreateSnapshotRebuildRequest, - DestroySnapshotRebuildRequest, - ListSnapshotRebuildRequest, - ListSnapshotRebuildResponse, - SnapshotRebuild, - SnapshotRebuildRpc, + CreateSnapshotRebuildRequest, DestroySnapshotRebuildRequest, ListSnapshotRebuildRequest, + ListSnapshotRebuildResponse, SnapshotRebuild, SnapshotRebuildRpc, }, }; use std::sync::Arc; @@ -51,9 +41,7 @@ impl SnapshotRebuildRpc for SnapshotRebuildService { info!("{:?}", request); let None = request.bitmap else { - return Err(tonic::Status::invalid_argument( - "BitMap not supported", - )); + return Err(tonic::Status::invalid_argument("BitMap not supported")); }; if let Ok(job) = SnapshotRebuildJob::lookup(&request.uuid) { return Ok(SnapshotRebuild::from(SnapRebuild::from(job).await)); @@ -87,9 +75,7 @@ impl SnapshotRebuildRpc for SnapshotRebuildService { for job in jobs { rebuilds.push(SnapRebuild::from(job).await.into()); } - Ok(ListSnapshotRebuildResponse { - rebuilds, - }) + Ok(ListSnapshotRebuildResponse { rebuilds }) } Some(uuid) => { let job = SnapRebuild::lookup(&uuid).await?; @@ -128,10 +114,7 @@ struct SnapRebuild { impl SnapRebuild { 
async fn from(job: Arc) -> Self { let stats = job.stats().await; - Self { - stats, - job, - } + Self { stats, job } } async fn lookup(uuid: &str) -> Result { let job = SnapshotRebuildJob::lookup(uuid)?; @@ -183,52 +166,24 @@ impl From for tonic::Status { fn from(value: RebuildError) -> Self { let message = value.to_string(); match value { - RebuildError::JobAlreadyExists { - .. - } => tonic::Status::already_exists(message), - RebuildError::NoCopyBuffer { - .. - } => tonic::Status::internal(message), - RebuildError::InvalidSrcDstRange { - .. - } => tonic::Status::out_of_range(message), - RebuildError::InvalidMapRange { - .. - } => tonic::Status::out_of_range(message), - RebuildError::SameBdev { - .. - } => tonic::Status::invalid_argument(message), - RebuildError::NoBdevHandle { - .. - } => tonic::Status::failed_precondition(message), - RebuildError::BdevNotFound { - .. - } => tonic::Status::failed_precondition(message), - RebuildError::JobNotFound { - .. - } => tonic::Status::not_found(message), - RebuildError::BdevInvalidUri { - .. - } => tonic::Status::invalid_argument(message), - RebuildError::RebuildTasksChannel { - .. - } => tonic::Status::resource_exhausted(message), - RebuildError::SnapshotRebuild { - source, - } => match source { - SnapshotRebuildError::LocalBdevNotFound { - .. - } => tonic::Status::not_found(message), - SnapshotRebuildError::RemoteNoUri { - .. - } => tonic::Status::internal(message), - SnapshotRebuildError::NotAReplica { - .. - } => tonic::Status::invalid_argument(message), + RebuildError::JobAlreadyExists { .. } => tonic::Status::already_exists(message), + RebuildError::NoCopyBuffer { .. } => tonic::Status::internal(message), + RebuildError::InvalidSrcDstRange { .. } => tonic::Status::out_of_range(message), + RebuildError::InvalidMapRange { .. } => tonic::Status::out_of_range(message), + RebuildError::SameBdev { .. } => tonic::Status::invalid_argument(message), + RebuildError::NoBdevHandle { .. } => tonic::Status::failed_precondition(message), + RebuildError::BdevNotFound { .. } => tonic::Status::failed_precondition(message), + RebuildError::JobNotFound { .. } => tonic::Status::not_found(message), + RebuildError::BdevInvalidUri { .. } => tonic::Status::invalid_argument(message), + RebuildError::RebuildTasksChannel { .. } => tonic::Status::resource_exhausted(message), + RebuildError::SnapshotRebuild { source } => match source { + SnapshotRebuildError::LocalBdevNotFound { .. } => tonic::Status::not_found(message), + SnapshotRebuildError::RemoteNoUri { .. } => tonic::Status::internal(message), + SnapshotRebuildError::NotAReplica { .. } => { + tonic::Status::invalid_argument(message) + } // todo better error check here, what if bdev uri is invalid? - SnapshotRebuildError::UriBdevOpen { - .. - } => tonic::Status::not_found(message), + SnapshotRebuildError::UriBdevOpen { .. 
} => tonic::Status::not_found(message), }, _ => tonic::Status::internal(message), } diff --git a/io-engine/src/grpc/v1/stats.rs b/io-engine/src/grpc/v1/stats.rs index e6670c7e4..48d624dda 100644 --- a/io-engine/src/grpc/v1/stats.rs +++ b/io-engine/src/grpc/v1/stats.rs @@ -3,10 +3,7 @@ use crate::{ grpc::{ rpc_submit, v1::{pool::PoolService, replica::ReplicaService}, - GrpcClientContext, - GrpcResult, - RWLock, - Serializer, + GrpcClientContext, GrpcResult, RWLock, Serializer, }, }; use futures::{future::join_all, FutureExt}; @@ -28,8 +25,7 @@ use ::function_name::named; #[allow(dead_code)] pub struct StatsService { name: String, - client_context: - std::sync::Arc>>, + client_context: std::sync::Arc>>, pool_svc: PoolService, replica_svc: ReplicaService, } @@ -50,13 +46,14 @@ where let lock_manager = ResourceLockManager::get_instance(); // For nexus global lock. - let _global_guard = - match lock_manager.lock(Some(ctx.timeout), false).await { - Some(g) => Some(g), - None => return Err(Status::deadline_exceeded( + let _global_guard = match lock_manager.lock(Some(ctx.timeout), false).await { + Some(g) => Some(g), + None => { + return Err(Status::deadline_exceeded( "Failed to acquire access to object within given timeout", - )), - }; + )) + } + }; let fut = AssertUnwindSafe(f).catch_unwind(); let r = fut.await; r.unwrap_or_else(|_| { @@ -96,13 +93,14 @@ impl StatsService { let _stat_svc = self.client_context.read().await; let lock_manager = ResourceLockManager::get_instance(); // For nexus global lock. - let _global_guard = - match lock_manager.lock(Some(ctx.timeout), false).await { - Some(g) => Some(g), - None => return Err(Status::deadline_exceeded( + let _global_guard = match lock_manager.lock(Some(ctx.timeout), false).await { + Some(g) => Some(g), + None => { + return Err(Status::deadline_exceeded( "Failed to acquire access to object within given timeout", - )), - }; + )) + } + }; let fut = AssertUnwindSafe(f).catch_unwind(); let r = fut.await; r.unwrap_or_else(|_| { @@ -136,20 +134,15 @@ impl StatsRpc for StatsService { let mut pools = vec![]; let args = ListPoolArgs::new_named(args.name); for factory in GrpcPoolFactory::factories() { - pools.extend( - factory.list_ops(&args).await.unwrap_or_default(), - ); + pools.extend(factory.list_ops(&args).await.unwrap_or_default()); } let pools_stats_future = pools.iter().map(|r| r.stats()); - let pools_stats = - join_all(pools_stats_future).await.into_iter(); + let pools_stats = join_all(pools_stats_future).await.into_iter(); let stats = pools_stats .map(|d| d.map(Into::into)) .collect::, _>>()?; - Ok(PoolIoStatsResponse { - stats, - }) + Ok(PoolIoStatsResponse { stats }) }) }) .await @@ -177,9 +170,7 @@ impl StatsRpc for StatsService { .into_iter() .map(|d| d.map(Into::into)); let stats = nexus_stats.collect::, _>>()?; - Ok(NexusIoStatsResponse { - stats, - }) + Ok(NexusIoStatsResponse { stats }) }) }, ) @@ -195,19 +186,14 @@ impl StatsRpc for StatsService { let mut replicas = vec![]; let args = ListReplicaArgs::new_named(args.name); for factory in GrpcReplicaFactory::factories() { - replicas.extend( - factory.list_ops(&args).await.unwrap_or_default(), - ); + replicas.extend(factory.list_ops(&args).await.unwrap_or_default()); } let replica_stats_future = replicas.iter().map(|r| r.stats()); - let replica_stats = - join_all(replica_stats_future).await.into_iter(); + let replica_stats = join_all(replica_stats_future).await.into_iter(); let stats = replica_stats .map(|d| d.map(Into::into)) .collect::, _>>()?; - Ok(ReplicaIoStatsResponse { - stats, - 
}) + Ok(ReplicaIoStatsResponse { stats }) }) }) .await diff --git a/io-engine/src/grpc/v1/test.rs b/io-engine/src/grpc/v1/test.rs index e0b8a8460..ded0c6598 100644 --- a/io-engine/src/grpc/v1/test.rs +++ b/io-engine/src/grpc/v1/test.rs @@ -5,9 +5,7 @@ use crate::{ }, grpc::{ v1::replica::{GrpcReplicaFactory, ReplicaGrpc}, - GrpcClientContext, - GrpcResult, - RWSerializer, + GrpcClientContext, GrpcResult, RWSerializer, }, replica_backend::FindReplicaArgs, }; @@ -15,13 +13,8 @@ use ::function_name::named; use io_engine_api::{ v1, v1::test::{ - wipe_options::WipeMethod, - wipe_replica_request, - wipe_replica_response, - StreamWipeOptions, - TestRpc, - WipeReplicaRequest, - WipeReplicaResponse, + wipe_options::WipeMethod, wipe_replica_request, wipe_replica_response, StreamWipeOptions, + TestRpc, WipeReplicaRequest, WipeReplicaResponse, }, }; use std::convert::{TryFrom, TryInto}; @@ -32,10 +25,7 @@ use crate::grpc::v1::pool::PoolGrpc; #[cfg(feature = "fault-injection")] use crate::{ core::fault_injection::{ - add_fault_injection, - list_fault_injections, - remove_fault_injection, - FaultInjectionError, + add_fault_injection, list_fault_injections, remove_fault_injection, FaultInjectionError, Injection, }, grpc::rpc_submit, @@ -59,23 +49,17 @@ impl TestService { #[tonic::async_trait] impl TestRpc for TestService { - type WipeReplicaStream = - ReceiverStream>; + type WipeReplicaStream = ReceiverStream>; /// Get all the features supported by the test service. - async fn get_features( - &self, - _request: Request<()>, - ) -> GrpcResult { + async fn get_features(&self, _request: Request<()>) -> GrpcResult { GrpcResult::Ok(tonic::Response::new(v1::test::TestFeatures { wipe_methods: vec![ v1::test::wipe_options::WipeMethod::None as i32, v1::test::wipe_options::WipeMethod::WriteZeroes as i32, v1::test::wipe_options::WipeMethod::Checksum as i32, ], - cksum_algs: vec![ - v1::test::wipe_options::CheckSumAlgorithm::Crc32c as i32, - ], + cksum_algs: vec![v1::test::wipe_options::CheckSumAlgorithm::Crc32c as i32], })) } @@ -91,9 +75,8 @@ impl TestRpc for TestService { let replica_svc = self.replica_svc.clone(); let tx_cln = tx.clone(); - let options = crate::core::wiper::StreamWipeOptions::try_from( - &request.get_ref().wipe_options, - )?; + let options = + crate::core::wiper::StreamWipeOptions::try_from(&request.get_ref().wipe_options)?; let uuid = request.get_ref().uuid.clone(); crate::core::spawn(async move { @@ -105,10 +88,7 @@ impl TestRpc for TestService { info!("{:?}", args); crate::spdk_submit!(async move { let pool = match args.pool { - Some(pool) => Some( - GrpcReplicaFactory::pool_finder(pool) - .await?, - ), + Some(pool) => Some(GrpcReplicaFactory::pool_finder(pool).await?), None => None, }; let args = FindReplicaArgs::new(&args.uuid); @@ -221,9 +201,7 @@ impl TestRpc for TestService { .map(v1::test::FaultInjection::from) .collect(); - Ok(v1::test::ListFaultInjectionsReply { - injections, - }) + Ok(v1::test::ListFaultInjectionsReply { injections }) })?; rx.await @@ -239,18 +217,12 @@ impl TestRpc for TestService { } } -impl TryFrom<&Option> - for crate::core::wiper::StreamWipeOptions -{ +impl TryFrom<&Option> for crate::core::wiper::StreamWipeOptions { type Error = tonic::Status; - fn try_from( - value: &Option, - ) -> Result { + fn try_from(value: &Option) -> Result { let Some(wipe) = value else { - return Err(tonic::Status::invalid_argument( - "Missing StreamWipeOptions", - )); + return Err(tonic::Status::invalid_argument("Missing StreamWipeOptions")); }; let Some(options) = &wipe.options else { 
return Err(tonic::Status::invalid_argument("Missing WipeOptions")); @@ -260,29 +232,19 @@ impl TryFrom<&Option> chunk_size: wipe.chunk_size, wipe_method: { let method = WipeMethod::try_from(options.wipe_method) - .map_err(|_| { - tonic::Status::invalid_argument("Invalid Wipe Method") - })?; + .map_err(|_| tonic::Status::invalid_argument("Invalid Wipe Method"))?; Wiper::supported(match method { WipeMethod::None => crate::core::wiper::WipeMethod::None, - WipeMethod::WriteZeroes => { - crate::core::wiper::WipeMethod::WriteZeroes - } + WipeMethod::WriteZeroes => crate::core::wiper::WipeMethod::WriteZeroes, WipeMethod::Unmap => crate::core::wiper::WipeMethod::Unmap, - WipeMethod::WritePattern => { - crate::core::wiper::WipeMethod::WritePattern( - options.write_pattern.unwrap_or(0xdeadbeef), - ) - } - WipeMethod::Checksum => { - crate::core::wiper::WipeMethod::CkSum( - crate::core::wiper::CkSumMethod::default(), - ) - } + WipeMethod::WritePattern => crate::core::wiper::WipeMethod::WritePattern( + options.write_pattern.unwrap_or(0xdeadbeef), + ), + WipeMethod::Checksum => crate::core::wiper::WipeMethod::CkSum( + crate::core::wiper::CkSumMethod::default(), + ), }) - .map_err(|error| { - tonic::Status::invalid_argument(error.to_string()) - })? + .map_err(|error| tonic::Status::invalid_argument(error.to_string()))? }, }) } @@ -312,30 +274,14 @@ impl From<&WipeStats> for WipeReplicaResponse { impl From for tonic::Status { fn from(value: WipeError) -> Self { match value { - WipeError::TooManyChunks { - .. - } => Self::invalid_argument(value.to_string()), - WipeError::ChunkTooLarge { - .. - } => Self::invalid_argument(value.to_string()), - WipeError::ZeroBdev { - .. - } => Self::invalid_argument(value.to_string()), - WipeError::ChunkBlockSizeInvalid { - .. - } => Self::invalid_argument(value.to_string()), - WipeError::WipeIoFailed { - .. - } => Self::data_loss(value.verbose()), - WipeError::MethodUnimplemented { - .. - } => Self::invalid_argument(value.to_string()), - WipeError::ChunkNotifyFailed { - .. - } => Self::internal(value.to_string()), - WipeError::WipeAborted { - .. - } => Self::aborted(value.to_string()), + WipeError::TooManyChunks { .. } => Self::invalid_argument(value.to_string()), + WipeError::ChunkTooLarge { .. } => Self::invalid_argument(value.to_string()), + WipeError::ZeroBdev { .. } => Self::invalid_argument(value.to_string()), + WipeError::ChunkBlockSizeInvalid { .. } => Self::invalid_argument(value.to_string()), + WipeError::WipeIoFailed { .. } => Self::data_loss(value.verbose()), + WipeError::MethodUnimplemented { .. } => Self::invalid_argument(value.to_string()), + WipeError::ChunkNotifyFailed { .. } => Self::internal(value.to_string()), + WipeError::WipeAborted { .. } => Self::aborted(value.to_string()), } } } @@ -343,19 +289,14 @@ impl From for tonic::Status { impl From for crate::pool_backend::FindPoolArgs { fn from(value: wipe_replica_request::Pool) -> Self { match value { - wipe_replica_request::Pool::PoolName(name) => { - Self::name_uuid(name, &None) - } + wipe_replica_request::Pool::PoolName(name) => Self::name_uuid(name, &None), wipe_replica_request::Pool::PoolUuid(uuid) => Self::uuid(uuid), } } } /// Validate that the replica belongs to the specified pool. 
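// [Editorial aside; illustrative sketch, not part of the patch.] The
// validate_pool change below keeps its let-else guard: with no pool selector
// supplied the check passes trivially, otherwise the replica's pool must
// match. The same guard shape with stand-in types:
fn check_pool(selector: Option<String>, replica_pool: &str) -> Result<(), String> {
    let Some(pool) = selector else {
        return Ok(()); // no pool specified: nothing to validate against
    };
    if pool == replica_pool {
        Ok(())
    } else {
        Err(format!("replica does not belong to pool {pool}"))
    }
}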
-async fn validate_pool( - repl: &ReplicaGrpc, - pool: Option, -) -> Result<(), Status> { +async fn validate_pool(repl: &ReplicaGrpc, pool: Option) -> Result<(), Status> { let Some(pool) = pool else { return Ok(()); }; @@ -363,9 +304,7 @@ async fn validate_pool( repl.verify_pool(&pool) } -struct WiperStream( - tokio::sync::mpsc::Sender>, -); +struct WiperStream(tokio::sync::mpsc::Sender>); impl crate::core::wiper::NotifyStream for WiperStream { fn notify(&self, stats: &WipeStats) -> Result<(), String> { diff --git a/io-engine/src/host/blk_device.rs b/io-engine/src/host/blk_device.rs index 68bae2072..ff65dfa1a 100644 --- a/io-engine/src/host/blk_device.rs +++ b/io-engine/src/host/blk_device.rs @@ -115,8 +115,7 @@ fn usable_device(devmajor: &u32) -> bool { 8, // SCSI disk devices 43, // Network block devices // START 240-254 block - 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, - 254, // END + 240, 241, 242, 243, 244, 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, // END 255, // Reserved 259, // Block Extended Major ]; @@ -173,12 +172,9 @@ fn new_partition(parent: Option<&str>, device: &Device) -> Option { parent: String::from(parent.unwrap_or("")), number: Property(device.property_value("PARTN")).into(), name: Property(device.property_value("PARTNAME")).into(), - scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME")) - .into(), - typeid: Property(device.property_value("ID_PART_ENTRY_TYPE")) - .into(), - uuid: Property(device.property_value("ID_PART_ENTRY_UUID")) - .into(), + scheme: Property(device.property_value("ID_PART_ENTRY_SCHEME")).into(), + typeid: Property(device.property_value("ID_PART_ENTRY_TYPE")).into(), + uuid: Property(device.property_value("ID_PART_ENTRY_UUID")).into(), }); } } @@ -189,12 +185,8 @@ fn new_partition(parent: Option<&str>, device: &Device) -> Option { // and the list of current filesystem mounts. // Note that the result can be None if there is no filesystem // associated with this Device. -fn new_filesystem( - device: &Device, - mountinfo: &[MountInfo], -) -> Option { - let mut fstype: Option = - Property(device.property_value("ID_FS_TYPE")).into(); +fn new_filesystem(device: &Device, mountinfo: &[MountInfo]) -> Option { + let mut fstype: Option = Property(device.property_value("ID_FS_TYPE")).into(); if fstype.is_none() { fstype = if mountinfo.is_empty() { @@ -205,19 +197,13 @@ fn new_filesystem( } } - let label: Option = - Property(device.property_value("ID_FS_LABEL")).into(); + let label: Option = Property(device.property_value("ID_FS_LABEL")).into(); - let uuid: Option = - Property(device.property_value("ID_FS_UUID")).into(); + let uuid: Option = Property(device.property_value("ID_FS_UUID")).into(); // Do no return an actual object if none of the fields therein have actual // values. 
- if fstype.is_none() - && label.is_none() - && uuid.is_none() - && mountinfo.is_empty() - { + if fstype.is_none() && label.is_none() && uuid.is_none() && mountinfo.is_empty() { return None; } @@ -243,8 +229,7 @@ fn new_device( ) -> Option { if let Some(devname) = device.property_value("DEVNAME") { let partition = new_partition(parent, device); - let filesystem = - new_filesystem(device, mounts.get(devname).unwrap_or(&Vec::new())); + let filesystem = new_filesystem(device, mounts.get(devname).unwrap_or(&Vec::new())); let devmajor: u32 = Property(device.property_value("MAJOR")).into(); let size: u64 = Property(device.attribute_value("size")).into(); @@ -316,9 +301,7 @@ fn get_disks( if let Some(devname) = entry.property_value("DEVNAME") { let partitions = get_partitions(devname.to_str(), &entry, mounts)?; - if let Some(device) = - new_device(None, partitions.is_empty(), &entry, mounts) - { + if let Some(device) = new_device(None, partitions.is_empty(), &entry, mounts) { if all || device.available { list.push(device); } diff --git a/io-engine/src/jsonrpc.rs b/io-engine/src/jsonrpc.rs index 9f953159f..0590e446b 100644 --- a/io-engine/src/jsonrpc.rs +++ b/io-engine/src/jsonrpc.rs @@ -16,20 +16,11 @@ use nix::errno::Errno; use serde::{Deserialize, Serialize}; use spdk_rs::libspdk::{ - spdk_json_val, - spdk_json_write_val_raw, - spdk_jsonrpc_begin_result, - spdk_jsonrpc_end_result, - spdk_jsonrpc_request, - spdk_jsonrpc_send_error_response, - spdk_rpc_register_method, - SPDK_JSONRPC_ERROR_INTERNAL_ERROR, - SPDK_JSONRPC_ERROR_INVALID_PARAMS, - SPDK_JSONRPC_ERROR_INVALID_REQUEST, - SPDK_JSONRPC_ERROR_METHOD_NOT_FOUND, - SPDK_JSONRPC_ERROR_PARSE_ERROR, - SPDK_JSON_VAL_OBJECT_BEGIN, - SPDK_RPC_RUNTIME, + spdk_json_val, spdk_json_write_val_raw, spdk_jsonrpc_begin_result, spdk_jsonrpc_end_result, + spdk_jsonrpc_request, spdk_jsonrpc_send_error_response, spdk_rpc_register_method, + SPDK_JSONRPC_ERROR_INTERNAL_ERROR, SPDK_JSONRPC_ERROR_INVALID_PARAMS, + SPDK_JSONRPC_ERROR_INVALID_REQUEST, SPDK_JSONRPC_ERROR_METHOD_NOT_FOUND, + SPDK_JSONRPC_ERROR_PARSE_ERROR, SPDK_JSON_VAL_OBJECT_BEGIN, SPDK_RPC_RUNTIME, }; use crate::core::Reactors; @@ -129,9 +120,7 @@ pub fn print_error_chain(err: &dyn std::error::Error) -> String { /// Extract JSON object from text, trim any pending characters which follow /// the closing bracket of the object. 
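// [Editorial aside; illustrative sketch, not part of the patch.] The
// extract_json_object hunk below only reflows the slice syntax
// (text[0 ..= i] -> text[0..=i]); the logic is a brace-depth scan. A
// self-contained toy version of that scan (it counts braces only and does
// not account for braces inside JSON string literals):
fn extract_object(text: &str) -> Option<&str> {
    let mut level: usize = 0;
    for (i, c) in text.char_indices() {
        if c == '{' {
            level += 1;
        } else if c == '}' {
            level = level.checked_sub(1)?; // unbalanced input: bail out
            if level == 0 {
                return Some(&text[0..=i]); // include the closing brace
            }
        }
    }
    None
}
// e.g. extract_object(r#"{"a":1} trailing"#) == Some(r#"{"a":1}"#)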
-fn extract_json_object( - params: &spdk_json_val, -) -> std::result::Result { +fn extract_json_object(params: &spdk_json_val) -> std::result::Result { if params.type_ != SPDK_JSON_VAL_OBJECT_BEGIN { return Err("JSON parameters must be an object".to_owned()); } @@ -148,7 +137,7 @@ fn extract_json_object( } else if c == '}' { level -= 1; if level == 0 { - return Ok(text[0 ..= i].to_string()); + return Ok(text[0..=i].to_string()); } } } @@ -164,8 +153,7 @@ unsafe extern "C" fn jsonrpc_handler( params: *const spdk_json_val, arg: *mut c_void, ) where - H: 'static - + Fn(P) -> Pin>>>, + H: 'static + Fn(P) -> Pin>>>, P: 'static + for<'de> Deserialize<'de>, R: Serialize, E: RpcErrorCode + std::error::Error, @@ -206,9 +194,7 @@ unsafe extern "C" fn jsonrpc_handler( return; } // serialize result to string - let data = - CString::new(serde_json::to_string(&val).unwrap()) - .unwrap(); + let data = CString::new(serde_json::to_string(&val).unwrap()).unwrap(); spdk_json_write_val_raw( w_ctx, data.as_ptr() as *const c_void, @@ -221,11 +207,7 @@ unsafe extern "C" fn jsonrpc_handler( let msg = print_error_chain(&err); error!("{}", msg); let cerr = CString::new(msg).unwrap(); - spdk_jsonrpc_send_error_response( - request, - code.into(), - cerr.as_ptr(), - ); + spdk_jsonrpc_send_error_response(request, code.into(), cerr.as_ptr()); } } }; diff --git a/io-engine/src/logger.rs b/io-engine/src/logger.rs index 06f04a606..b6058ec48 100644 --- a/io-engine/src/logger.rs +++ b/io-engine/src/logger.rs @@ -23,8 +23,7 @@ use tracing_subscriber::{ filter::{filter_fn, Targets}, fmt::{ format::{FmtSpan, FormatEvent, FormatFields, Writer}, - FmtContext, - FormattedFields, + FmtContext, FormattedFields, }, layer::{Layer, SubscriberExt}, registry::LookupSpan, @@ -78,8 +77,7 @@ pub extern "C" fn log_impl( return; } - let arg = - unsafe { CStr::from_ptr(buf).to_string_lossy().trim_end().to_string() }; + let arg = unsafe { CStr::from_ptr(buf).to_string_lossy().trim_end().to_string() }; let filename = unsafe { CStr::from_ptr(file).to_str().unwrap() }; log::logger().log( @@ -101,10 +99,7 @@ struct FormatLevel<'a> { impl<'a> FormatLevel<'a> { fn new(level: &'a Level, ansi: bool) -> Self { - Self { - level, - ansi, - } + Self { level, ansi } } fn short(&self) -> &str { @@ -253,9 +248,7 @@ struct Location<'a> { impl<'a> Location<'a> { fn new(meta: &'a Metadata<'a>) -> Self { - Self { - meta, - } + Self { meta } } } @@ -348,7 +341,7 @@ fn ellipsis(s: &str, w: usize) -> String { if w < 8 || s.len() <= w { s.to_owned() } else { - format!("{}...", &s[.. w - 3]) + format!("{}...", &s[..w - 3]) } } @@ -374,9 +367,7 @@ impl Visit for StringVisitor<'_> { impl<'a> StringVisitor<'a> { pub fn new(string: &'a mut String) -> Self { - Self { - string, - } + Self { string } } } @@ -448,10 +439,9 @@ impl LogFormat { fmt.short(), )?; - let ctx = - CustomContext::new(context, event.parent(), false).to_string(); + let ctx = CustomContext::new(context, event.parent(), false).to_string(); if ctx.len() > 1 { - write!(buf, "{}: ", &ctx[1 ..])?; + write!(buf, "{}: ", &ctx[1..])?; } fmt.fmt_line(writer.by_ref(), &buf)?; @@ -540,8 +530,7 @@ pub fn init_ex(level: &str, format: LogFormat, events_url: Option) { // Get the optional eventing layer. 
let events_layer = match events_url { Some(url) => { - let events_filter = - Targets::new().with_target(EVENTING_TARGET, Level::INFO); + let events_filter = Targets::new().with_target(EVENTING_TARGET, Level::INFO); Some( EventHandle::init_ext(url.to_string(), SERVICE_NAME, spawn) .with_filter(events_filter), @@ -555,8 +544,7 @@ pub fn init_ex(level: &str, format: LogFormat, events_url: Option<Url>) { .with(Some(builder)) .with(events_layer); - tracing::subscriber::set_global_default(subscriber) - .expect("failed to set default subscriber"); + tracing::subscriber::set_global_default(subscriber).expect("failed to set default subscriber"); } pub fn init(level: &str) { diff --git a/io-engine/src/lvm/cli.rs b/io-engine/src/lvm/cli.rs index 7b8bc0cef..a2fa9fd08 100644 --- a/io-engine/src/lvm/cli.rs +++ b/io-engine/src/lvm/cli.rs @@ -198,17 +198,17 @@ impl LvmCmd { /// `Error::LvmBinErr` => Completed with an exit code. /// `Error::JsonParsing` => StdOut output is not a valid json for `T`. /// `Error::ReportMissing` => Output does not contain a report for `T`. - pub(super) async fn report<T: for<'a> Deserialize<'a>>( - self, - ) -> Result<T, Error> { + pub(super) async fn report<T: for<'a> Deserialize<'a>>(self) -> Result<T, Error> { let cmd = self.cmd; let json_output: LvReport<T> = self.output_json().await?; - let report: T = json_output.report.into_iter().next().ok_or( - Error::ReportMissing { + let report: T = json_output + .report + .into_iter() + .next() + .ok_or(Error::ReportMissing { command: cmd.to_string(), - }, - )?; + })?; Ok(report) } @@ -222,15 +222,14 @@ impl LvmCmd { /// `Error::LvmBinSpawnErr` => Failed to execute or await for completion. /// `Error::LvmBinErr` => Completed with an exit code. /// `Error::JsonParsing` => StdOut output is not a valid json for `T`. - pub(super) async fn output_json<T: for<'a> Deserialize<'a>>( - self, - ) -> Result<T, Error> { + pub(super) async fn output_json<T: for<'a> Deserialize<'a>>(self) -> Result<T, Error> { let cmd = self.cmd; let output = self.output().await?; - let json_output: T = serde_json::from_slice(output.stdout.as_slice()) - .map_err(|error| Error::JsonParsing { - command: cmd.to_string(), - error: error.to_string(), + let json_output: T = serde_json::from_slice(output.stdout.as_slice()).map_err(|error| { + Error::JsonParsing { + command: cmd.to_string(), + error: error.to_string(), + } })?; Ok(json_output) @@ -283,17 +282,17 @@ impl LvmCmd { /// /// `Error::LvmBinSpawnErr` => Failed to execute or await for completion. /// `Error::LvmBinErr` => Completed with an exit code. - pub(super) async fn output( - mut self, - ) -> Result<std::process::Output, Error> { + pub(super) async fn output(mut self) -> Result<std::process::Output, Error> { tracing::trace!("{:?}", self.cmder); crate::tokio_run!(async move { - let output = self.cmder.output().await.context( - error::LvmBinSpawnErrSnafu { + let output = self + .cmder + .output() + .await + .context(error::LvmBinSpawnErrSnafu { command: self.cmd.to_string(), - }, - )?; + })?; if !output.status.success() { let error = String::from_utf8_lossy(&output.stderr).to_string(); return Err(Error::LvmBinErr { @@ -309,17 +308,10 @@ impl LvmCmd { /// Serde deserializer helpers to help decode LVM json output from the cli. pub(super) mod de { use serde::de::{self, Deserialize, Deserializer, Visitor}; - use std::{ - fmt::Display, - iter::FromIterator, - marker::PhantomData, - str::FromStr, - }; + use std::{fmt::Display, iter::FromIterator, marker::PhantomData, str::FromStr}; /// Decode a number from a number as a string, example: "10".
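// [Editor's sketch, not part of the patch] How the `de` helpers below are
// typically wired into a report struct via serde's `deserialize_with`;
// `LvFields` and its field names are hypothetical:
#[derive(serde::Deserialize)]
struct LvFields {
    // LVM reports sizes as number-strings, e.g. "10737418240".
    #[serde(deserialize_with = "de::number_from_string")]
    lv_size: u64,
    // Tags arrive as one comma-separated string.
    #[serde(deserialize_with = "de::comma_separated")]
    lv_tags: Vec<String>,
}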
- pub(crate) fn number_from_string<'de, T, D>( - deserializer: D, - ) -> Result<T, D::Error> + pub(crate) fn number_from_string<'de, T, D>(deserializer: D) -> Result<T, D::Error> where T: FromStr, T::Err: Display, @@ -330,9 +322,7 @@ pub(super) mod de { } /// Decode a comma-separated string into a vector of strings. - pub(crate) fn comma_separated<'de, V, T, D>( - deserializer: D, - ) -> Result<V, D::Error> + pub(crate) fn comma_separated<'de, V, T, D>(deserializer: D) -> Result<V, D::Error> where V: FromIterator<T>, T: FromStr, @@ -349,12 +339,8 @@ pub(super) mod de { { type Value = V; - fn expecting( - &self, - formatter: &mut std::fmt::Formatter, - ) -> std::fmt::Result { - formatter - .write_str("string containing comma-separated elements") + fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result { + formatter.write_str("string containing comma-separated elements") } fn visit_str<E>(self, s: &str) -> Result<Self::Value, E> diff --git a/io-engine/src/lvm/error.rs b/io-engine/src/lvm/error.rs index 3a4b27066..a28086985 100644 --- a/io-engine/src/lvm/error.rs +++ b/io-engine/src/lvm/error.rs @@ -17,9 +17,7 @@ pub enum Error { command: String, source: std::io::Error, }, - #[snafu(display( - "LVM VolumeGroup disk mismatch, args:{args:?}, vg:{vg:?}" - ))] + #[snafu(display("LVM VolumeGroup disk mismatch, args:{args:?}, vg:{vg:?}"))] DisksMismatch { args: Vec<String>, vg: Vec<String> }, #[snafu(display("Invalid PoolType: {value}"))] InvalidPoolType { value: i32 }, @@ -58,84 +56,36 @@ pub enum Error { NoSpace { error: String }, #[snafu(display("Snapshots are not currently supported for LVM volumes"))] SnapshotNotSup {}, - #[snafu(display( - "Pool expansion is not currently supported for LVM volumes" - ))] + #[snafu(display("Pool expansion is not currently supported for LVM volumes"))] GrowNotSup {}, } impl ToErrno for Error { fn to_errno(self) -> Errno { match self { - Error::ReportMissing { - .. - } => Errno::EIO, - Error::JsonParsing { - .. - } => Errno::EIO, - Error::LvmBinErr { - .. - } => Errno::EIO, - Error::LvmBinSpawnErr { - .. - } => Errno::EIO, - Error::DisksMismatch { - .. - } => Errno::EINVAL, - Error::InvalidPoolType { - .. - } => Errno::EINVAL, - Error::NotFound { - .. - } => Errno::ENOENT, - Error::VgUuidSet { - .. - } => Errno::EINVAL, - Error::LvNotFound { - .. - } => Errno::ENOENT, - Error::ThinProv { - .. - } => Errno::ENOTSUP, - Error::ReactorSpawn { - .. - } => Errno::EXFULL, - Error::ReactorSpawnChannel { - .. - } => Errno::EPIPE, - Error::BdevImport { - .. - } => Errno::EIO, - Error::BdevExport { - .. - } => Errno::EIO, - Error::BdevOpen { - .. - } => Errno::EIO, - Error::BdevShare { - .. - } => Errno::EFAULT, - Error::BdevShareUri { - .. - } => Errno::EFAULT, - Error::BdevUnshare { - .. - } => Errno::EFAULT, - Error::BdevMissing { - .. - } => Errno::ENODEV, - Error::UpdateProps { - .. - } => Errno::EIO, - Error::NoSpace { - .. - } => Errno::ENOSPC, - Error::SnapshotNotSup { - .. - } => Errno::ENOTSUP, - Error::GrowNotSup { - .. - } => Errno::ENOTSUP, + Error::ReportMissing { .. } => Errno::EIO, + Error::JsonParsing { .. } => Errno::EIO, + Error::LvmBinErr { .. } => Errno::EIO, + Error::LvmBinSpawnErr { .. } => Errno::EIO, + Error::DisksMismatch { .. } => Errno::EINVAL, + Error::InvalidPoolType { .. } => Errno::EINVAL, + Error::NotFound { .. } => Errno::ENOENT, + Error::VgUuidSet { .. } => Errno::EINVAL, + Error::LvNotFound { .. } => Errno::ENOENT, + Error::ThinProv { .. } => Errno::ENOTSUP, + Error::ReactorSpawn { .. } => Errno::EXFULL, + Error::ReactorSpawnChannel { .. } => Errno::EPIPE, + Error::BdevImport { ..
} => Errno::EIO, + Error::BdevExport { .. } => Errno::EIO, + Error::BdevOpen { .. } => Errno::EIO, + Error::BdevShare { .. } => Errno::EFAULT, + Error::BdevShareUri { .. } => Errno::EFAULT, + Error::BdevUnshare { .. } => Errno::EFAULT, + Error::BdevMissing { .. } => Errno::ENODEV, + Error::UpdateProps { .. } => Errno::EIO, + Error::NoSpace { .. } => Errno::ENOSPC, + Error::SnapshotNotSup { .. } => Errno::ENOTSUP, + Error::GrowNotSup { .. } => Errno::ENOTSUP, } } } diff --git a/io-engine/src/lvm/lv_replica.rs b/io-engine/src/lvm/lv_replica.rs index e155e523d..ccba31256 100644 --- a/io-engine/src/lvm/lv_replica.rs +++ b/io-engine/src/lvm/lv_replica.rs @@ -2,14 +2,7 @@ use super::{cli::de, error::Error, vg_pool::VolumeGroup, CmnQueryArgs}; use crate::{ bdev::PtplFileOps, bdev_api::{bdev_create, BdevError}, - core::{ - NvmfShareProps, - Protocol, - PtplProps, - Share, - UntypedBdev, - UpdateProps, - }, + core::{NvmfShareProps, Protocol, PtplProps, Share, UntypedBdev, UpdateProps}, lvm::{ cli::LvmCmd, property::{Property, PropertyType}, @@ -47,10 +40,7 @@ impl QueryArgs { } /// Add the LV query args. pub(crate) fn with_lv(self, lv: CmnQueryArgs) -> Self { - Self { - lv, - ..self - } + Self { lv, ..self } } /// Get a comma-separated list of query selection args. /// todo: should be Display trait? @@ -213,8 +203,7 @@ impl LogicalVolume { entity_id: &Option, share: Protocol, ) -> Result { - let pool = - VolumeGroup::lookup(CmnQueryArgs::ours().uuid(vg_uuid)).await?; + let pool = VolumeGroup::lookup(CmnQueryArgs::ours().uuid(vg_uuid)).await?; pool.create_lvol(name, size, uuid, thin, entity_id, share) .await?; Self::lookup( @@ -235,9 +224,7 @@ impl LogicalVolume { /// List logical volumes using the provided options as query criteria. /// All lv's are imported and all imports must succeed. - pub(crate) async fn list( - opts: &QueryArgs, - ) -> Result, Error> { + pub(crate) async fn list(opts: &QueryArgs) -> Result, Error> { let mut g_error = Ok(()); let mut lvs = Self::fetch(opts).await?; for lv in &mut lvs { @@ -337,13 +324,8 @@ impl LogicalVolume { let bdev = crate::bdev::uri::parse(&uri).unwrap(); match bdev.destroy().await { - Ok(()) - | Err(BdevError::BdevNotFound { - .. - }) => Ok(()), - Err(source) => Err(Error::BdevExport { - source, - }), + Ok(()) | Err(BdevError::BdevNotFound { .. }) => Ok(()), + Err(source) => Err(Error::BdevExport { source }), } })?; self.bdev = None; @@ -372,12 +354,8 @@ impl LogicalVolume { { Ok(()) => Ok(()), // not great, but not sure how else to map the error otherwise... - Err(Error::LvmBinErr { - error, .. - }) if error.starts_with("Insufficient free space") => { - Err(Error::NoSpace { - error, - }) + Err(Error::LvmBinErr { error, .. 
}) if error.starts_with("Insufficient free space") => { + Err(Error::NoSpace { error }) } Err(error) => Err(error), } @@ -395,12 +373,8 @@ impl LogicalVolume { let blk_cnt = size / bdev.block_len() as u64; use spdk_rs::libspdk::spdk_bdev_notify_blockcnt_change; - let rc = unsafe { - spdk_bdev_notify_blockcnt_change( - bdev.unsafe_inner_mut_ptr(), - blk_cnt, - ) - }; + let rc = + unsafe { spdk_bdev_notify_blockcnt_change(bdev.unsafe_inner_mut_ptr(), blk_cnt) }; Ok((rc, bdev.size_in_bytes())) })?; if rc != 0 { @@ -422,18 +396,12 @@ impl LogicalVolume { self.set_properties(properties).await } - async fn sync_share_protocol( - &mut self, - protocol: Protocol, - ) -> Result<(), Error> { + async fn sync_share_protocol(&mut self, protocol: Protocol) -> Result<(), Error> { self.set_property(Property::LvShare(protocol)).await?; self.share = protocol; Ok(()) } - async fn build_set_properties_args( - &self, - properties: Vec, - ) -> Vec { + async fn build_set_properties_args(&self, properties: Vec) -> Vec { let mut args = Vec::new(); for property in properties { args.extend(self.build_set_property_args(property).await); @@ -458,10 +426,7 @@ impl LogicalVolume { } args } - async fn set_properties( - &mut self, - properties: Vec, - ) -> Result<(), Error> { + async fn set_properties(&mut self, properties: Vec) -> Result<(), Error> { let args = self.build_set_properties_args(properties).await; if args.is_empty() { return Ok(()); @@ -472,10 +437,7 @@ impl LogicalVolume { } result } - pub(crate) async fn set_property( - &mut self, - property: Property, - ) -> Result<(), Error> { + pub(crate) async fn set_property(&mut self, property: Property) -> Result<(), Error> { self.set_properties(vec![property]).await } @@ -489,12 +451,10 @@ impl LogicalVolume { Protocol::Nvmf => { let props = NvmfShareProps::new() .with_allowed_hosts(allowed_hosts) - .with_ptpl(ptpl.create().map_err(|source| { - Error::BdevShare { - source: crate::core::CoreError::Ptpl { - reason: source.to_string(), - }, - } + .with_ptpl(ptpl.create().map_err(|source| Error::BdevShare { + source: crate::core::CoreError::Ptpl { + reason: source.to_string(), + }, })?); Self::bdev_share_nvmf(bdev, Some(props)).await?; } @@ -522,16 +482,13 @@ impl LogicalVolume { let bdev = crate::spdk_run!(async move { if crate::core::UntypedBdev::lookup_by_name(&disk_uri).is_none() { - bdev_create(&disk_uri).await.map_err(|source| { - Error::BdevImport { - source, - } - })?; + bdev_create(&disk_uri) + .await + .map_err(|source| Error::BdevImport { source })?; } let mut bdev = Self::bdev(&disk_uri)?; - Self::bdev_sync_props(&mut bdev, share, ptpl, allowed_hosts) - .await?; + Self::bdev_sync_props(&mut bdev, share, ptpl, allowed_hosts).await?; Ok(BdevOpts::from(bdev)) })?; @@ -667,31 +624,23 @@ impl LogicalVolume { bdev.as_mut() .update_properties(props.map(Into::into)) .await - .map_err(|source| Error::BdevShare { - source, - })?; + .map_err(|source| Error::BdevShare { source })?; bdev.share_uri().ok_or(Error::BdevShareUri {}) } - Some(Protocol::Off) | None => { - bdev.share_nvmf(props).await.map_err(|source| { - Error::BdevShare { - source, - } - }) - } + Some(Protocol::Off) | None => bdev + .share_nvmf(props) + .await + .map_err(|source| Error::BdevShare { source }), } } - async fn bdev_unshare( - bdev: &mut UntypedBdev, - ) -> Result, Error> { + async fn bdev_unshare(bdev: &mut UntypedBdev) -> Result, Error> { let mut bdev = Pin::new(bdev); match bdev.shared() { Some(Protocol::Nvmf) => { - bdev.as_mut().unshare().await.map_err(|source| { - Error::BdevUnshare { - 
source, - } - })?; + bdev.as_mut() + .unshare() + .await + .map_err(|source| Error::BdevUnshare { source })?; } Some(Protocol::Off) | None => {} } @@ -734,12 +683,13 @@ impl LogicalVolume { let props = props.into(); let bdev_opts = crate::spdk_run!(async move { let mut bdev = Self::bdev(&uri)?; - Pin::new(&mut bdev).update_properties(props).await.map_err( - |e| Error::UpdateProps { + Pin::new(&mut bdev) + .update_properties(props) + .await + .map_err(|e| Error::UpdateProps { source: e, name: bdev.name().to_string(), - }, - )?; + })?; Ok(BdevOpts::from(bdev)) })?; bdev.update_from(bdev_opts); diff --git a/io-engine/src/lvm/mod.rs b/io-engine/src/lvm/mod.rs index 0cac60c15..8de739299 100644 --- a/io-engine/src/lvm/mod.rs +++ b/io-engine/src/lvm/mod.rs @@ -48,52 +48,28 @@ pub(crate) use lv_replica::{LogicalVolume, QueryArgs}; use crate::{ bdev::PtplFileOps, core::{ - snapshot::SnapshotDescriptor, - BdevStater, - BdevStats, - CloneParams, - CoreError, - NvmfShareProps, - Protocol, - PtplProps, - SnapshotParams, - UntypedBdev, - UpdateProps, + snapshot::SnapshotDescriptor, BdevStater, BdevStats, CloneParams, CoreError, + NvmfShareProps, Protocol, PtplProps, SnapshotParams, UntypedBdev, UpdateProps, }, lvm::property::Property, pool_backend::{ - FindPoolArgs, - IPoolFactory, - IPoolProps, - ListPoolArgs, - PoolArgs, - PoolBackend, - PoolMetadataInfo, - PoolOps, - ReplicaArgs, + FindPoolArgs, IPoolFactory, IPoolProps, ListPoolArgs, PoolArgs, PoolBackend, + PoolMetadataInfo, PoolOps, ReplicaArgs, }, replica_backend::{ - FindReplicaArgs, - FindSnapshotArgs, - IReplicaFactory, - ListCloneArgs, - ListReplicaArgs, - ListSnapshotArgs, - ReplicaBdevStats, - ReplicaOps, - SnapshotOps, + FindReplicaArgs, FindSnapshotArgs, IReplicaFactory, ListCloneArgs, ListReplicaArgs, + ListSnapshotArgs, ReplicaBdevStats, ReplicaOps, SnapshotOps, }, }; use futures::channel::oneshot::Receiver; pub(super) fn is_alphanumeric(name: &str, value: &str) -> Result<(), Error> { - if value.chars().any(|c| { - !(c.is_ascii_alphanumeric() || matches!(c, '_' | '-' | '.' | '+')) - }) { + if value + .chars() + .any(|c| !(c.is_ascii_alphanumeric() || matches!(c, '_' | '-' | '.' 
| '+'))) + { return Err(Error::NotFound { - query: format!( - "{name}('{value}') invalid: must be [a-zA-Z0-9.-_+]" - ), + query: format!("{name}('{value}') invalid: must be [a-zA-Z0-9.-_+]"), }); } Ok(()) @@ -152,16 +128,12 @@ impl PoolOps for VolumeGroup { Ok(Box::new(replica)) } - async fn destroy( - self: Box<Self>, - ) -> Result<(), crate::pool_backend::Error> { + async fn destroy(self: Box<Self>) -> Result<(), crate::pool_backend::Error> { (*self).destroy().await?; Ok(()) } - async fn export( - mut self: Box<Self>, - ) -> Result<(), crate::pool_backend::Error> { + async fn export(mut self: Box<Self>) -> Result<(), crate::pool_backend::Error> { VolumeGroup::export(&mut self).await?; Ok(()) } @@ -207,24 +179,16 @@ impl ReplicaOps for LogicalVolume { Ok(()) } - async fn set_entity_id( - &mut self, - id: String, - ) -> Result<(), crate::pool_backend::Error> { + async fn set_entity_id(&mut self, id: String) -> Result<(), crate::pool_backend::Error> { self.set_property(Property::LvEntityId(id)).await?; Ok(()) } - async fn resize( - &mut self, - size: u64, - ) -> Result<(), crate::pool_backend::Error> { + async fn resize(&mut self, size: u64) -> Result<(), crate::pool_backend::Error> { self.resize(size).await.map_err(Into::into) } - async fn destroy( - self: Box<Self>, - ) -> Result<(), crate::pool_backend::Error> { + async fn destroy(self: Box<Self>) -> Result<(), crate::pool_backend::Error> { (*self).destroy().await.map_err(Into::into) } @@ -232,9 +196,7 @@ impl ReplicaOps for LogicalVolume { self.share_proto() } - fn create_ptpl( - &self, - ) -> Result<Option<PtplProps>, crate::pool_backend::Error> { + fn create_ptpl(&self) -> Result<Option<PtplProps>, crate::pool_backend::Error> { self.ptpl() .create() .map_err(|source| crate::pool_backend::Error::Lvm { @@ -288,9 +250,7 @@ impl BdevStater for LogicalVolume { #[async_trait::async_trait(?Send)] impl SnapshotOps for LogicalVolume { - async fn destroy_snapshot( - self: Box<Self>, - ) -> Result<(), crate::pool_backend::Error> { + async fn destroy_snapshot(self: Box<Self>) -> Result<(), crate::pool_backend::Error> { Err(Error::SnapshotNotSup {}.into()) } @@ -368,18 +328,12 @@ impl IPoolProps for VolumeGroup { pub struct PoolLvmFactory {} #[async_trait::async_trait(?Send)] impl IPoolFactory for PoolLvmFactory { - async fn create( - &self, - args: PoolArgs, - ) -> Result<Box<dyn PoolOps>, crate::pool_backend::Error> { + async fn create(&self, args: PoolArgs) -> Result<Box<dyn PoolOps>, crate::pool_backend::Error> { let pool = VolumeGroup::create(args).await?; Ok(Box::new(pool)) } - async fn import( - &self, - args: PoolArgs, - ) -> Result<Box<dyn PoolOps>, crate::pool_backend::Error> { + async fn import(&self, args: PoolArgs) -> Result<Box<dyn PoolOps>, crate::pool_backend::Error> { let pool = VolumeGroup::import(args).await?; Ok(Box::new(pool)) } @@ -396,16 +350,13 @@ impl IPoolFactory for PoolLvmFactory { let query = match args { FindPoolArgs::Uuid(uuid) => CmnQueryArgs::ours().uuid(uuid), FindPoolArgs::UuidOrName(uuid) => CmnQueryArgs::ours().uuid(uuid), - FindPoolArgs::NameUuid { - name, - uuid, - } => CmnQueryArgs::ours().named(name).uuid_opt(uuid), + FindPoolArgs::NameUuid { name, uuid } => { + CmnQueryArgs::ours().named(name).uuid_opt(uuid) + } }; match VolumeGroup::lookup(query).await { Ok(vg) => Ok(Some(Box::new(vg))), - Err(Error::NotFound { - .. - }) => Ok(None), + Err(Error::NotFound { ..
}) => Ok(None), Err(error) => Err(error.into()), } } @@ -443,25 +394,19 @@ impl IPoolFactory for PoolLvmFactory { pub struct ReplLvmFactory {} #[async_trait::async_trait(?Send)] impl IReplicaFactory for ReplLvmFactory { - fn bdev_as_replica( - &self, - _bdev: crate::core::UntypedBdev, - ) -> Option> { + fn bdev_as_replica(&self, _bdev: crate::core::UntypedBdev) -> Option> { None } async fn find( &self, args: &FindReplicaArgs, ) -> Result>, crate::pool_backend::Error> { - let lookup = LogicalVolume::lookup( - &QueryArgs::new().with_lv(CmnQueryArgs::ours().uuid(&args.uuid)), - ) - .await; + let lookup = + LogicalVolume::lookup(&QueryArgs::new().with_lv(CmnQueryArgs::ours().uuid(&args.uuid))) + .await; match lookup { Ok(repl) => Ok(Some(Box::new(repl) as _)), - Err(Error::NotFound { - .. - }) => Ok(None), + Err(Error::NotFound { .. }) => Ok(None), Err(error) => Err(error.into()), } } diff --git a/io-engine/src/lvm/property.rs b/io-engine/src/lvm/property.rs index f0df3d6c9..f9a60f423 100644 --- a/io-engine/src/lvm/property.rs +++ b/io-engine/src/lvm/property.rs @@ -102,9 +102,7 @@ impl Property { match self { Property::Lvm => None, Property::LvName(name) => Some(name.to_owned()), - Property::LvShare(protocol) => { - Some(protocol.value_str().to_owned()) - } + Property::LvShare(protocol) => Some(protocol.value_str().to_owned()), Property::LvAllowedHosts(hosts) => Some(hosts.join(",").to_owned()), Property::LvEntityId(entity_id) => Some(entity_id.to_owned()), Property::Unknown(_, value) => Some(value.to_owned()), @@ -141,9 +139,7 @@ impl Property { match PropertyType::from_str(key).ok()? { PropertyType::Lvm => Some(Self::Lvm), PropertyType::LvName => Some(Self::LvName(value.to_owned())), - PropertyType::LvShare => { - Some(Self::LvShare(Protocol::from_value(value))) - } + PropertyType::LvShare => Some(Self::LvShare(Protocol::from_value(value))), PropertyType::LvAllowedHosts => Some(Self::LvAllowedHosts( value .split(',') @@ -151,9 +147,7 @@ impl Property { .map(|s| s.to_owned()) .collect::>(), )), - PropertyType::LvEntityId => { - Some(Self::LvEntityId(value.to_owned())) - } + PropertyType::LvEntityId => Some(Self::LvEntityId(value.to_owned())), _ => None, } } @@ -163,13 +157,10 @@ impl Property { /// If the pair is not valid then nothing is returned. pub(super) fn new(tag: &str) -> Self { if let [key, value] = tag.split('=').collect::>()[..] { - Self::new_known(key, value).unwrap_or(Property::Unknown( - key.to_string(), - value.to_string(), - )) + Self::new_known(key, value) + .unwrap_or(Property::Unknown(key.to_string(), value.to_string())) } else { - Self::new_known(tag, "") - .unwrap_or(Property::Unknown(tag.to_string(), "".to_string())) + Self::new_known(tag, "").unwrap_or(Property::Unknown(tag.to_string(), "".to_string())) } } } diff --git a/io-engine/src/lvm/vg_pool.rs b/io-engine/src/lvm/vg_pool.rs index e8cc6b248..56c3bc55d 100644 --- a/io-engine/src/lvm/vg_pool.rs +++ b/io-engine/src/lvm/vg_pool.rs @@ -105,9 +105,7 @@ impl VolumeGroup { } /// List all the volume groups using the provided list options. 
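// [Editor's note, not part of the patch] The listing below shells out to the
// LVM `vgs` binary and asks for a JSON report; conceptually something like
// (column list abbreviated and assumed, only --units=b/--nosuffix appear
// verbatim in the code):
//
//   vgs --units=b --nosuffix -o vg_name,vg_uuid,vg_size,vg_free,vg_tags \
//       --reportformat json --select <query>
//
// whose output is then decoded with the same serde helpers shown in cli.rs.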
- pub(crate) async fn list( - opts: &CmnQueryArgs, - ) -> Result, Error> { + pub(crate) async fn list(opts: &CmnQueryArgs) -> Result, Error> { let mut args = vec![ "--units=b", "--nosuffix", @@ -120,8 +118,7 @@ impl VolumeGroup { if !select.is_empty() { args.push(select_query.trim_end_matches(',')); } - let report: VolGroups = - LvmCmd::vg_list().args(args.as_slice()).report().await?; + let report: VolGroups = LvmCmd::vg_list().args(args.as_slice()).report().await?; let vgs = report .vg @@ -144,29 +141,22 @@ impl VolumeGroup { /// Import a volume group with the name provided or create one with the name /// and disks provided currently only import is supported. pub(crate) async fn create(args: PoolArgs) -> Result { - let vg = - match VolumeGroup::lookup(CmnQueryArgs::any().named(&args.name)) - .await - { - Ok(_) => Self::import_inner(args).await, - Err(Error::NotFound { - .. - }) => { - LvmCmd::pv_create().args(&args.disks).run().await?; - - LvmCmd::vg_create() - .arg(&args.name) - .tag(Property::Lvm) - .args(args.disks) - .run() - .await?; - let lookup = CmnQueryArgs::ours() - .named(&args.name) - .uuid_opt(&args.uuid); - VolumeGroup::lookup(lookup).await - } - Err(error) => Err(error), - }?; + let vg = match VolumeGroup::lookup(CmnQueryArgs::any().named(&args.name)).await { + Ok(_) => Self::import_inner(args).await, + Err(Error::NotFound { .. }) => { + LvmCmd::pv_create().args(&args.disks).run().await?; + + LvmCmd::vg_create() + .arg(&args.name) + .tag(Property::Lvm) + .args(args.disks) + .run() + .await?; + let lookup = CmnQueryArgs::ours().named(&args.name).uuid_opt(&args.uuid); + VolumeGroup::lookup(lookup).await + } + Err(error) => Err(error), + }?; info!("The lvm vg pool '{}' has been created", vg.name()); Ok(vg) @@ -250,7 +240,10 @@ impl VolumeGroup { info!("LVM pool '{}' has been destroyed successfully", self.name()); } else { - warn!("LVM pool '{}' is not destroyed as it contains foreign lvs: {foreign_lvs:?}", self.name()); + warn!( + "LVM pool '{}' is not destroyed as it contains foreign lvs: {foreign_lvs:?}", + self.name() + ); } self.ptpl().destroy().ok(); Ok(()) @@ -295,19 +288,15 @@ impl VolumeGroup { share: Protocol, ) -> Result<(), Error> { let vg_name = self.name(); - let ins_space = - format!("Volume group \"{vg_name}\" has insufficient free space"); + let ins_space = format!("Volume group \"{vg_name}\" has insufficient free space"); if thin { return Err(Error::ThinProv {}); } else if size > self.free { - return Err(Error::NoSpace { - error: ins_space, - }); + return Err(Error::NoSpace { error: ins_space }); } - let ins_space = - format!("Volume group \"{vg_name}\" has insufficient free space"); + let ins_space = format!("Volume group \"{vg_name}\" has insufficient free space"); let entity_id = entity_id.clone().unwrap_or_default(); match LvmCmd::lv_create() .arg(format!("-L{size}b")) @@ -321,11 +310,9 @@ impl VolumeGroup { .await { // not great, but not sure how else to map the error otherwise... - Err(Error::LvmBinErr { - error, .. - }) if error.starts_with(&ins_space) => Err(Error::NoSpace { - error, - }), + Err(Error::LvmBinErr { error, .. 
}) if error.starts_with(&ins_space) => { + Err(Error::NoSpace { error }) + } _else => _else, }?; diff --git a/io-engine/src/lvs/lvol_snapshot.rs b/io-engine/src/lvs/lvol_snapshot.rs index e46e9b18b..c391cea53 100644 --- a/io-engine/src/lvs/lvol_snapshot.rs +++ b/io-engine/src/lvs/lvol_snapshot.rs @@ -14,28 +14,15 @@ use strum::{EnumCount, IntoEnumIterator}; use events_api::event::EventAction; use spdk_rs::libspdk::{ - spdk_blob, - spdk_blob_reset_used_clusters_cache, - spdk_lvol, - spdk_xattr_descriptor, - vbdev_lvol_create_clone_ext, - vbdev_lvol_create_snapshot_ext, + spdk_blob, spdk_blob_reset_used_clusters_cache, spdk_lvol, spdk_xattr_descriptor, + vbdev_lvol_create_clone_ext, vbdev_lvol_create_snapshot_ext, }; use crate::{ core::{ logical_volume::LogicalVolume, - snapshot::{ - CloneParams, - ISnapshotDescriptor, - SnapshotDescriptor, - SnapshotInfo, - }, - Bdev, - CloneXattrs, - SnapshotParams, - SnapshotXattrs, - UntypedBdev, + snapshot::{CloneParams, ISnapshotDescriptor, SnapshotDescriptor, SnapshotInfo}, + Bdev, CloneXattrs, SnapshotParams, SnapshotXattrs, UntypedBdev, }, eventing::Event, ffihelper::{cb_arg, done_cb, IntoCString}, @@ -54,10 +41,7 @@ pub trait LvolSnapshotOps { type Lvol; /// Create Snapshot Common API. - async fn create_snapshot( - &self, - snap_param: SnapshotParams, - ) -> Result<Self::Lvol, Self::Error>; + async fn create_snapshot(&self, snap_param: SnapshotParams) -> Result<Self::Lvol, Self::Error>; /// Destroy snapshot. async fn destroy_snapshot(mut self) -> Result<(), Self::Error>; @@ -73,19 +57,12 @@ pub trait LvolSnapshotOps { fn list_snapshot_by_snapshot_uuid(&self) -> Vec<SnapshotDescriptor>; /// List All Snapshot. - fn list_all_snapshots( - parent_lvol: Option<&Lvol>, - ) -> Vec<SnapshotDescriptor>; + fn list_all_snapshots(parent_lvol: Option<&Lvol>) -> Vec<SnapshotDescriptor>; /// List All Lvol Snapshots. - fn list_all_lvol_snapshots( - parent_lvol: Option<&Lvol>, - ) -> Vec<LvolSnapshotDescriptor>; + fn list_all_lvol_snapshots(parent_lvol: Option<&Lvol>) -> Vec<LvolSnapshotDescriptor>; /// Create snapshot clone. - async fn create_clone( - &self, - clone_param: CloneParams, - ) -> Result<Self::Lvol, Self::Error>; + async fn create_clone(&self, clone_param: CloneParams) -> Result<Self::Lvol, Self::Error>; /// Get clone list based on snapshot_uuid. fn list_clones_by_snapshot_uuid(&self) -> Vec<Lvol>; @@ -152,20 +129,11 @@ pub trait LvolSnapshotOps { ) -> Result<Self::Lvol, Self::Error>; /// Common API to set SnapshotDescriptor for ListReplicaSnapshot. - fn snapshot_descriptor( - &self, - parent: Option<&Lvol>, - ) -> Option<SnapshotDescriptor>; + fn snapshot_descriptor(&self, parent: Option<&Lvol>) -> Option<SnapshotDescriptor>; /// Common API to set SnapshotInfo for ListReplicaSnapshot. - fn snapshot_descriptor_info( - &self, - parent: Option<&Lvol>, - ) -> Option<SnapshotInfo>; + fn snapshot_descriptor_info(&self, parent: Option<&Lvol>) -> Option<SnapshotInfo>; /// Common API to set LvolSnapshotDescriptor for ListReplicaSnapshot. - fn lvol_snapshot_descriptor( - &self, - parent: Option<&Lvol>, - ) -> Option<LvolSnapshotDescriptor>; + fn lvol_snapshot_descriptor(&self, parent: Option<&Lvol>) -> Option<LvolSnapshotDescriptor>; /// Return bool value to indicate, if the snapshot is marked as discarded. fn is_discarded_snapshot(&self) -> bool; @@ -179,10 +147,7 @@ pub trait LvolSnapshotOps { /// If self is clone or a snapshot whose parent is clone, then do ancestor /// calculation for all snapshot linked to clone. - fn calculate_clone_source_snap_usage( - &self, - total_ancestor_snap_size: u64, - ) -> Option<u64>; + fn calculate_clone_source_snap_usage(&self, total_ancestor_snap_size: u64) -> Option<u64>; /// Reset snapshot tree usage cache.
if the lvol is replica, then reset /// cache will be based on replica uuid, which is parent uuid for all @@ -258,12 +223,8 @@ impl AsyncParentIterator for LvolSnapshotIter { if self.inner_blob.is_null() { None } else { - let parent_blob = - unsafe { self.inner_lvol.bs_iter_parent(self.inner_blob) }?; - let uuid = Lvol::get_blob_xattr( - parent_blob, - SnapshotXattrs::SnapshotUuid.name(), - )?; + let parent_blob = unsafe { self.inner_lvol.bs_iter_parent(self.inner_blob) }?; + let uuid = Lvol::get_blob_xattr(parent_blob, SnapshotXattrs::SnapshotUuid.name())?; let snap_lvol = UntypedBdev::lookup_by_uuid_str(&uuid) .and_then(|bdev| Lvol::try_from(bdev).ok())?; self.inner_blob = parent_blob; @@ -325,20 +286,16 @@ impl LvolSnapshotOps for Lvol { }) } }, - SnapshotXattrs::SnapshotCreateTime => { - match params.create_time() { - Some(v) => v, - None => { - return Err(LvsError::SnapshotConfigFailed { - name: self.as_bdev().name().to_string(), - msg: "create_time not provided".to_string(), - }) - } + SnapshotXattrs::SnapshotCreateTime => match params.create_time() { + Some(v) => v, + None => { + return Err(LvsError::SnapshotConfigFailed { + name: self.as_bdev().name().to_string(), + msg: "create_time not provided".to_string(), + }) } - } - SnapshotXattrs::DiscardedSnapshot => { - params.discarded_snapshot().to_string() - } + }, + SnapshotXattrs::DiscardedSnapshot => params.discarded_snapshot().to_string(), }; let attr_name = attr.name().to_string().into_cstring(); let attr_val = av.into_cstring(); @@ -369,11 +326,7 @@ impl LvolSnapshotOps for Lvol { // stored inside CStrings. let mut cstrs: Vec = Vec::new(); - self.prepare_snapshot_xattrs( - &mut attr_descrs, - snap_param.clone(), - &mut cstrs, - )?; + self.prepare_snapshot_xattrs(&mut attr_descrs, snap_param.clone(), &mut cstrs)?; let c_snapshot_name = snap_param.name().unwrap().into_cstring(); @@ -438,17 +391,15 @@ impl LvolSnapshotOps for Lvol { }) } }, - CloneXattrs::CloneCreateTime => { - match params.clone_create_time() { - Some(v) => v, - None => { - return Err(LvsError::CloneConfigFailed { - name: self.as_bdev().name().to_string(), - msg: "create_time not provided".to_string(), - }) - } + CloneXattrs::CloneCreateTime => match params.clone_create_time() { + Some(v) => v, + None => { + return Err(LvsError::CloneConfigFailed { + name: self.as_bdev().name().to_string(), + msg: "create_time not provided".to_string(), + }) } - } + }, CloneXattrs::CloneUuid => match params.clone_uuid() { Some(v) => v, None => { @@ -486,14 +437,9 @@ impl LvolSnapshotOps for Lvol { // stored inside CStrings. let mut cstrs: Vec = Vec::new(); - self.prepare_clone_xattrs( - &mut attr_descrs, - clone_param.clone(), - &mut cstrs, - )?; + self.prepare_clone_xattrs(&mut attr_descrs, clone_param.clone(), &mut cstrs)?; - let c_clone_name = - clone_param.clone_name().unwrap_or_default().into_cstring(); + let c_clone_name = clone_param.clone_name().unwrap_or_default().into_cstring(); unsafe { vbdev_lvol_create_clone_ext( @@ -537,39 +483,29 @@ impl LvolSnapshotOps for Lvol { } /// Common API to set SnapshotDescriptor for ListReplicaSnapshot. - fn snapshot_descriptor( - &self, - parent: Option<&Lvol>, - ) -> Option { + fn snapshot_descriptor(&self, parent: Option<&Lvol>) -> Option { let info = self.snapshot_descriptor_info(parent)?; Some(SnapshotDescriptor::new(self.to_owned(), info)) } /// Common API to set SnapshotDescriptor for ListReplicaSnapshot. 
- fn lvol_snapshot_descriptor( - &self, - parent: Option<&Lvol>, - ) -> Option { + fn lvol_snapshot_descriptor(&self, parent: Option<&Lvol>) -> Option { let info = self.snapshot_descriptor_info(parent)?; Some(LvolSnapshotDescriptor::new(self.to_owned(), info)) } /// Common API to set SnapshotDescriptor for ListReplicaSnapshot. - fn snapshot_descriptor_info( - &self, - parent: Option<&Lvol>, - ) -> Option { + fn snapshot_descriptor_info(&self, parent: Option<&Lvol>) -> Option { let mut valid_snapshot = true; let mut snapshot_param: SnapshotParams = Default::default(); for attr in SnapshotXattrs::iter() { - let curr_attr_val = - match Self::get_blob_xattr(self.blob_checked(), attr.name()) { - Some(val) => val, - None => { - valid_snapshot = false; - continue; - } - }; + let curr_attr_val = match Self::get_blob_xattr(self.blob_checked(), attr.name()) { + Some(val) => val, + None => { + valid_snapshot = false; + continue; + } + }; match attr { SnapshotXattrs::ParentId => { if let Some(parent_lvol) = parent { @@ -593,9 +529,8 @@ impl LvolSnapshotOps for Lvol { snapshot_param.set_create_time(curr_attr_val); } SnapshotXattrs::DiscardedSnapshot => { - snapshot_param.set_discarded_snapshot( - curr_attr_val.parse().unwrap_or_default(), - ); + snapshot_param + .set_discarded_snapshot(curr_attr_val.parse().unwrap_or_default()); } } } @@ -605,10 +540,8 @@ impl LvolSnapshotOps for Lvol { let parent_uuid = if let Some(parent_lvol) = parent { parent_lvol.uuid() } else { - match Bdev::lookup_by_uuid_str( - snapshot_param.parent_id().unwrap_or_default().as_str(), - ) - .and_then(|b| Lvol::try_from(b).ok()) + match Bdev::lookup_by_uuid_str(snapshot_param.parent_id().unwrap_or_default().as_str()) + .and_then(|b| Lvol::try_from(b).ok()) { Some(parent) => parent.uuid(), None => String::default(), @@ -625,10 +558,7 @@ impl LvolSnapshotOps for Lvol { } /// Create Snapshot Common API for Local Device. - async fn create_snapshot( - &self, - snap_param: SnapshotParams, - ) -> Result { + async fn create_snapshot(&self, snap_param: SnapshotParams) -> Result { extern "C" fn snapshot_create_done_cb( arg: *mut c_void, lvol_ptr: *mut spdk_lvol, @@ -648,13 +578,8 @@ impl LvolSnapshotOps for Lvol { let (s, r) = oneshot::channel::(); - self.do_create_snapshot( - snap_param, - snapshot_create_done_cb, - cb_arg(s), - r, - ) - .await + self.do_create_snapshot(snap_param, snapshot_create_done_cb, cb_arg(s), r) + .await } /// Get a Snapshot Iterator. @@ -714,9 +639,7 @@ impl LvolSnapshotOps for Lvol { } /// List All Snapshot. - fn list_all_snapshots( - parent_lvol: Option<&Lvol>, - ) -> Vec { + fn list_all_snapshots(parent_lvol: Option<&Lvol>) -> Vec { Self::list_all_lvol_snapshots(parent_lvol) .into_iter() .map(Into::into) @@ -724,9 +647,7 @@ impl LvolSnapshotOps for Lvol { } /// List All Lvol Snapshots. - fn list_all_lvol_snapshots( - parent_lvol: Option<&Lvol>, - ) -> Vec { + fn list_all_lvol_snapshots(parent_lvol: Option<&Lvol>) -> Vec { let mut snapshot_list: Vec = Vec::new(); let bdev = match UntypedBdev::bdev_first() { @@ -747,9 +668,7 @@ impl LvolSnapshotOps for Lvol { continue; } match snapshot_lvol.lvol_snapshot_descriptor(parent_lvol) { - Some(snapshot_descriptor) => { - snapshot_list.push(snapshot_descriptor) - } + Some(snapshot_descriptor) => snapshot_list.push(snapshot_descriptor), None => continue, } } @@ -757,15 +676,8 @@ impl LvolSnapshotOps for Lvol { } /// Create snapshot clone. 
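// [Editor's sketch, not part of the patch] The completion pattern used by
// `create_clone` below (and by `create_snapshot` above): an extern "C"
// callback re-boxes a oneshot sender and fulfils it, while the async caller
// awaits the receiver. All names here are illustrative.
extern "C" fn done_cb(arg: *mut std::os::raw::c_void, errno: i32) {
    // Reclaim ownership of the sender that was leaked into the C callback arg.
    let sender = unsafe { Box::from_raw(arg as *mut futures::channel::oneshot::Sender<i32>) };
    sender.send(errno).ok();
}

async fn wait_done() -> i32 {
    let (s, r) = futures::channel::oneshot::channel::<i32>();
    let arg = Box::into_raw(Box::new(s)) as *mut std::os::raw::c_void;
    // The real code hands `done_cb` and `arg` to an SPDK call; here the
    // callback is invoked directly so the sketch completes on its own.
    done_cb(arg, 0);
    r.await.unwrap_or(-1)
}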
- async fn create_clone( - &self, - clone_param: CloneParams, - ) -> Result { - extern "C" fn clone_done_cb( - arg: *mut c_void, - lvol_ptr: *mut spdk_lvol, - errno: i32, - ) { + async fn create_clone(&self, clone_param: CloneParams) -> Result { + extern "C" fn clone_done_cb(arg: *mut c_void, lvol_ptr: *mut spdk_lvol, errno: i32) { let res = if errno == 0 { Ok(lvol_ptr) } else { @@ -795,9 +707,7 @@ impl LvolSnapshotOps for Lvol { .map(|b| Lvol::try_from(b).unwrap()) .filter_map(|b| { let snap_lvol = b.is_snapshot_clone(); - if snap_lvol.is_some() - && snap_lvol.unwrap().uuid() == self.uuid() - { + if snap_lvol.is_some() && snap_lvol.unwrap().uuid() == self.uuid() { Some(b) } else { None @@ -866,18 +776,12 @@ impl LvolSnapshotOps for Lvol { // if self is clone or a snapshot whose parent is clone, then do ancestor // calculation for all snapshot linked to clone. - fn calculate_clone_source_snap_usage( - &self, - total_ancestor_snap_size: u64, - ) -> Option { + fn calculate_clone_source_snap_usage(&self, total_ancestor_snap_size: u64) -> Option { // if self is snapshot created from clone. if self.is_snapshot() { match UntypedBdev::lookup_by_uuid_str( - &Lvol::get_blob_xattr( - self.blob_checked(), - SnapshotXattrs::ParentId.name(), - ) - .unwrap_or_default(), + &Lvol::get_blob_xattr(self.blob_checked(), SnapshotXattrs::ParentId.name()) + .unwrap_or_default(), ) { Some(bdev) => match Lvol::try_from(bdev) { Ok(l) => match l.is_snapshot_clone() { @@ -885,8 +789,7 @@ impl LvolSnapshotOps for Lvol { let usage = parent_snap_lvol.usage(); Some( total_ancestor_snap_size - - (usage.allocated_bytes_snapshots - + usage.allocated_bytes), + - (usage.allocated_bytes_snapshots + usage.allocated_bytes), ) } None => None, @@ -914,28 +817,18 @@ impl LvolSnapshotOps for Lvol { reset_snapshot_tree_usage_cache_with_parent_uuid(self); return; } - if let Some(snapshot_parent_uuid) = Lvol::get_blob_xattr( - self.blob_checked(), - SnapshotXattrs::ParentId.name(), - ) { - if let Some(bdev) = - UntypedBdev::lookup_by_uuid_str(snapshot_parent_uuid.as_str()) - { + if let Some(snapshot_parent_uuid) = + Lvol::get_blob_xattr(self.blob_checked(), SnapshotXattrs::ParentId.name()) + { + if let Some(bdev) = UntypedBdev::lookup_by_uuid_str(snapshot_parent_uuid.as_str()) { if let Ok(parent_lvol) = Lvol::try_from(bdev) { unsafe { - spdk_blob_reset_used_clusters_cache( - parent_lvol.blob_checked(), - ); + spdk_blob_reset_used_clusters_cache(parent_lvol.blob_checked()); } - reset_snapshot_tree_usage_cache_with_parent_uuid( - &parent_lvol, - ); + reset_snapshot_tree_usage_cache_with_parent_uuid(&parent_lvol); } } else { - reset_snapshot_tree_usage_cache_with_wildcard( - self, - snapshot_parent_uuid, - ); + reset_snapshot_tree_usage_cache_with_wildcard(self, snapshot_parent_uuid); } } } @@ -964,20 +857,14 @@ fn reset_snapshot_tree_usage_cache_with_parent_uuid(lvol: &Lvol) { /// bdev by matching parent uuid got from snapshot attribute. /// todo: need more optimization to adding new function in spdk to relate /// snapshot and clone blobs. 
-fn reset_snapshot_tree_usage_cache_with_wildcard( - lvol: &Lvol, - snapshot_parent_uuid: String, -) { +fn reset_snapshot_tree_usage_cache_with_wildcard(lvol: &Lvol, snapshot_parent_uuid: String) { let mut successor_clones: Vec = vec![]; let mut successor_snapshots = Lvol::list_all_lvol_snapshots(None) .iter() .map(|v| v.snapshot_lvol()) .filter_map(|l| { - let uuid = Lvol::get_blob_xattr( - lvol.blob_checked(), - SnapshotXattrs::ParentId.name(), - ); + let uuid = Lvol::get_blob_xattr(lvol.blob_checked(), SnapshotXattrs::ParentId.name()); match uuid { Some(uuid) if uuid == snapshot_parent_uuid => Some(l.clone()), _ => None, diff --git a/io-engine/src/lvs/lvs_error.rs b/io-engine/src/lvs/lvs_error.rs index a76689ac7..167fa0834 100644 --- a/io-engine/src/lvs/lvs_error.rs +++ b/io-engine/src/lvs/lvs_error.rs @@ -59,9 +59,7 @@ impl BsError { // Unknown errno may indicate that the source negative i32 value // was passed instead of taking the abs. warn!("Blob store: got unknown errno"); - BsError::Generic { - source: value, - } + BsError::Generic { source: value } } Errno::EINVAL => BsError::InvalidArgument {}, Errno::ENOENT => BsError::LvolNotFound {}, @@ -73,9 +71,7 @@ impl BsError { Errno::ENOSPC => BsError::NoSpace {}, Errno::EMFILE => BsError::OutOfMetadata {}, Errno::EOVERFLOW => BsError::CapacityOverflow {}, - _ => BsError::Generic { - source: value, - }, + _ => BsError::Generic { source: value }, } } @@ -94,9 +90,7 @@ impl BsError { impl ToErrno for BsError { fn to_errno(self) -> Errno { match self { - Self::Generic { - source, - } => source, + Self::Generic { source } => source, Self::InvalidArgument {} => Errno::EINVAL, Self::LvolNotFound {} => Errno::ENOENT, Self::VolAlreadyExists {} => Errno::EEXIST, @@ -155,12 +149,7 @@ pub enum LvsError { source: BsError, msg: String, }, - #[snafu(display( - "errno {}: Invalid cluster-size {}, for pool {}", - source, - msg, - name - ))] + #[snafu(display("errno {}: Invalid cluster-size {}, for pool {}", source, msg, name))] InvalidClusterSize { source: BsError, name: String, @@ -212,12 +201,7 @@ pub enum LvsError { source: CoreError, name: String, }, - #[snafu(display( - "failed to get property {} ({}) from {}", - prop, - source, - name - ))] + #[snafu(display("failed to get property {} ({}) from {}", prop, source, name))] GetProperty { source: BsError, prop: PropName, @@ -257,20 +241,12 @@ pub enum LvsError { FlushFailed { name: String, }, - #[snafu(display( - "Snapshot parameters for replica {} is not correct: {}", - name, - msg - ))] + #[snafu(display("Snapshot parameters for replica {} is not correct: {}", name, msg))] SnapshotConfigFailed { name: String, msg: String, }, - #[snafu(display( - "Clone parameters for replica {} are not correct: {}", - name, - msg - ))] + #[snafu(display("Clone parameters for replica {} are not correct: {}", name, msg))] CloneConfigFailed { name: String, msg: String, @@ -289,104 +265,41 @@ pub enum LvsError { impl ToErrno for LvsError { fn to_errno(self) -> Errno { match self { - Self::Import { - source, .. - } => source.to_errno(), - Self::PoolCreate { - source, .. - } => source.to_errno(), - Self::Export { - source, .. - } => source.to_errno(), - Self::Destroy { - .. - } => Errno::ENXIO, - Self::Grow { - .. - } => Errno::ENXIO, - Self::PoolNotFound { - source, .. - } => source.to_errno(), - Self::InvalidBdev { - .. - } => Errno::ENXIO, - Self::Invalid { - source, .. - } => source.to_errno(), - Self::InvalidClusterSize { - source, .. - } => source.to_errno(), - Self::InvalidMetadataParam { - .. 
- } => Errno::EINVAL, - Self::RepExists { - source, .. - } => source.to_errno(), - Self::RepCreate { - source, .. - } => source.to_errno(), - Self::RepDestroy { - source, .. - } => source.to_errno(), - Self::RepResize { - source, .. - } => source.to_errno(), - Self::NotALvol { - source, .. - } => source.to_errno(), - Self::LvolShare { - source, .. - } => source.to_errno(), - Self::UpdateShareProperties { - source, .. - } => source.to_errno(), - Self::LvolUnShare { - source, .. - } => source.to_errno(), - Self::GetProperty { - source, .. - } => source.to_errno(), - Self::SetProperty { - source, .. - } => source.to_errno(), - Self::SyncProperty { - source, .. - } => source.to_errno(), - Self::SnapshotCreate { - source, .. - } => source.to_errno(), - Self::FlushFailed { - .. - } => Errno::EIO, - Self::Property { - source, .. - } => source.to_errno(), - Self::SnapshotConfigFailed { - .. - } - | Self::ReplicaShareProtocol { - .. - } => Errno::EINVAL, - Self::SnapshotCloneCreate { - source, .. - } => source.to_errno(), - Self::CloneConfigFailed { - .. - } => Errno::EINVAL, - Self::WipeFailed { - .. - } => Errno::EINVAL, - Self::ResourceLockFailed { - .. - } => Errno::EBUSY, + Self::Import { source, .. } => source.to_errno(), + Self::PoolCreate { source, .. } => source.to_errno(), + Self::Export { source, .. } => source.to_errno(), + Self::Destroy { .. } => Errno::ENXIO, + Self::Grow { .. } => Errno::ENXIO, + Self::PoolNotFound { source, .. } => source.to_errno(), + Self::InvalidBdev { .. } => Errno::ENXIO, + Self::Invalid { source, .. } => source.to_errno(), + Self::InvalidClusterSize { source, .. } => source.to_errno(), + Self::InvalidMetadataParam { .. } => Errno::EINVAL, + Self::RepExists { source, .. } => source.to_errno(), + Self::RepCreate { source, .. } => source.to_errno(), + Self::RepDestroy { source, .. } => source.to_errno(), + Self::RepResize { source, .. } => source.to_errno(), + Self::NotALvol { source, .. } => source.to_errno(), + Self::LvolShare { source, .. } => source.to_errno(), + Self::UpdateShareProperties { source, .. } => source.to_errno(), + Self::LvolUnShare { source, .. } => source.to_errno(), + Self::GetProperty { source, .. } => source.to_errno(), + Self::SetProperty { source, .. } => source.to_errno(), + Self::SyncProperty { source, .. } => source.to_errno(), + Self::SnapshotCreate { source, .. } => source.to_errno(), + Self::FlushFailed { .. } => Errno::EIO, + Self::Property { source, .. } => source.to_errno(), + Self::SnapshotConfigFailed { .. } | Self::ReplicaShareProtocol { .. } => Errno::EINVAL, + Self::SnapshotCloneCreate { source, .. } => source.to_errno(), + Self::CloneConfigFailed { .. } => Errno::EINVAL, + Self::WipeFailed { .. } => Errno::EINVAL, + Self::ResourceLockFailed { .. 
} => Errno::EBUSY, } } } impl From for LvsError { fn from(source: crate::core::wiper::Error) -> Self { - Self::WipeFailed { - source, - } + Self::WipeFailed { source } } } diff --git a/io-engine/src/lvs/lvs_iter.rs b/io-engine/src/lvs/lvs_iter.rs index 33b4cded1..455c13773 100644 --- a/io-engine/src/lvs/lvs_iter.rs +++ b/io-engine/src/lvs/lvs_iter.rs @@ -1,8 +1,4 @@ -use spdk_rs::libspdk::{ - lvol_store_bdev, - vbdev_lvol_store_first, - vbdev_lvol_store_next, -}; +use spdk_rs::libspdk::{lvol_store_bdev, vbdev_lvol_store_first, vbdev_lvol_store_next}; use super::{Lvs, LvsBdev}; diff --git a/io-engine/src/lvs/lvs_lvol.rs b/io-engine/src/lvs/lvs_lvol.rs index 8fa080152..c18038c61 100644 --- a/io-engine/src/lvs/lvs_lvol.rs +++ b/io-engine/src/lvs/lvs_lvol.rs @@ -15,23 +15,11 @@ use std::{ }; use spdk_rs::libspdk::{ - spdk_blob, - spdk_blob_calc_used_clusters, - spdk_blob_get_num_clusters, - spdk_blob_get_num_clusters_ancestors, - spdk_blob_get_xattr_value, - spdk_blob_is_read_only, - spdk_blob_is_thin_provisioned, - spdk_blob_set_xattr, - spdk_blob_sync_md, - spdk_bs_get_cluster_size, - spdk_bs_get_parent_blob, - spdk_bs_iter_next, - spdk_lvol, - vbdev_lvol_destroy, - vbdev_lvol_get_from_bdev, - vbdev_lvol_resize, - LVS_CLEAR_WITH_UNMAP, + spdk_blob, spdk_blob_calc_used_clusters, spdk_blob_get_num_clusters, + spdk_blob_get_num_clusters_ancestors, spdk_blob_get_xattr_value, spdk_blob_is_read_only, + spdk_blob_is_thin_provisioned, spdk_blob_set_xattr, spdk_blob_sync_md, + spdk_bs_get_cluster_size, spdk_bs_get_parent_blob, spdk_bs_iter_next, spdk_lvol, + vbdev_lvol_destroy, vbdev_lvol_get_from_bdev, vbdev_lvol_resize, LVS_CLEAR_WITH_UNMAP, }; use super::{BsError, Lvs, LvsError}; @@ -40,26 +28,12 @@ use crate::{ bdev::PtplFileOps, core::{ logical_volume::{LogicalVolume, LvolSpaceUsage}, - Bdev, - CloneXattrs, - LvolSnapshotOps, - NvmfShareProps, - Protocol, - PtplProps, - Share, - SnapshotXattrs, - UntypedBdev, - UpdateProps, + Bdev, CloneXattrs, LvolSnapshotOps, NvmfShareProps, Protocol, PtplProps, Share, + SnapshotXattrs, UntypedBdev, UpdateProps, }, eventing::Event, ffihelper::{ - cb_arg, - done_cb, - errno_result_from_i32, - pair, - ErrnoResult, - FfiResult, - IntoCString, + cb_arg, done_cb, errno_result_from_i32, pair, ErrnoResult, FfiResult, IntoCString, }, pool_backend::PoolBackend, }; @@ -163,8 +137,7 @@ impl Debug for Lvol { self.pool_uuid(), self.name(), if self.is_thin() { "thin " } else { "" }, - Byte::from(self.size()) - .get_appropriate_unit(byte_unit::UnitType::Binary) + Byte::from(self.size()).get_appropriate_unit(byte_unit::UnitType::Binary) ) } } @@ -234,12 +207,13 @@ impl Share for Lvol { /// unshare the nvmf target async fn unshare(mut self: Pin<&mut Self>) -> Result<(), Self::Error> { - Pin::new(&mut self.as_bdev()).unshare().await.map_err(|e| { - LvsError::LvolUnShare { + Pin::new(&mut self.as_bdev()) + .unshare() + .await + .map_err(|e| LvsError::LvolUnShare { source: e, name: self.name(), - } - })?; + })?; self.as_mut().set(PropValue::Shared(false)).await?; @@ -325,8 +299,7 @@ impl Lvol { // write zero to the first 8MB which wipes the metadata and the // first 4MB of the data partition - let wipe_size = - std::cmp::min(self.as_bdev().size_in_bytes(), WIPE_SUPER_LEN); + let wipe_size = std::cmp::min(self.as_bdev().size_in_bytes(), WIPE_SUPER_LEN); hdl.write_zeroes_at(0, wipe_size).await.map_err(|e| { error!(?self, ?e); LvsError::RepDestroy { @@ -346,9 +319,7 @@ impl Lvol { errno: i32, ) { let sender = unsafe { - Box::from_raw( - sender_ptr as *mut oneshot::Sender>, - ) + 
Box::from_raw(sender_ptr as *mut oneshot::Sender<ErrnoResult<*mut spdk_lvol>>) }; sender .send(errno_result_from_i32(lvol_ptr, errno)) @@ -412,8 +383,7 @@ impl Lvol { } } - let sl = - std::slice::from_raw_parts(val as *const u8, size as usize); + let sl = std::slice::from_raw_parts(val as *const u8, size as usize); std::str::from_utf8(sl).map_or_else( |error| { warn!( @@ -473,18 +443,15 @@ impl Lvol { // Sync metadata if requested. let (snd, rcv) = oneshot::channel::<i32>(); - unsafe { - spdk_blob_sync_md( - self.blob_checked(), - Some(blob_attr_set_cb), - cb_arg(snd), - ) - }; + unsafe { spdk_blob_sync_md(self.blob_checked(), Some(blob_attr_set_cb), cb_arg(snd)) }; match rcv.await.expect("sync attribute callback disappeared") { 0 => Ok(()), errno => { - error!(lvol=self.name(), errno,"Failed to sync blob metadata, properties might be out of sync"); + error!( + lvol = self.name(), + errno, "Failed to sync blob metadata, properties might be out of sync" + ); Err(LvsError::SyncProperty { source: BsError::from_i32(errno), name: self.name(), @@ -563,24 +530,15 @@ pub trait LvsLvol: LogicalVolume + Share { /// Write the property prop on to the lvol but do not sync the metadata yet. /// Returns whether the property was modified or not. - async fn set_no_sync( - self: Pin<&mut Self>, - prop: PropValue, - ) -> Result<bool, LvsError>; + async fn set_no_sync(self: Pin<&mut Self>, prop: PropValue) -> Result<bool, LvsError>; /// Write the property prop on to the lvol which is stored on disk. /// If the property has been modified the metadata is synced. - async fn set( - mut self: Pin<&mut Self>, - prop: PropValue, - ) -> Result<(), LvsError>; + async fn set(mut self: Pin<&mut Self>, prop: PropValue) -> Result<(), LvsError>; /// Write the properties on to the lvol which is stored on disk. /// If any of the properties are modified the metadata is synced. - async fn set_props( - mut self: Pin<&mut Self>, - props: Vec<PropValue>, - ) -> Result<(), LvsError> { + async fn set_props(mut self: Pin<&mut Self>, props: Vec<PropValue>) -> Result<(), LvsError> { let mut sync = false; for property in props { if self.as_mut().set_no_sync(property).await? { @@ -600,28 +558,18 @@ pub trait LvsLvol: LogicalVolume + Share { async fn sync_metadata(self: Pin<&mut Self>) -> Result<(), LvsError>; /// Callback is executed when blobstore fetching is done using spdk api. - extern "C" fn blob_op_complete_cb( - arg: *mut c_void, - _blob: *mut spdk_blob, - errno: i32, - ); + extern "C" fn blob_op_complete_cb(arg: *mut c_void, _blob: *mut spdk_blob, errno: i32); /// Get the first spdk_blob from the Lvol Blobstor. fn bs_iter_first(&self) -> *mut spdk_blob; /// Get the next spdk_blob from the current blob. - async fn bs_iter_next( - &self, - curr_blob: *mut spdk_blob, - ) -> Option<*mut spdk_blob>; + async fn bs_iter_next(&self, curr_blob: *mut spdk_blob) -> Option<*mut spdk_blob>; /// Get the next spdk_blob from the parent blob. /// # Safety /// TODO - unsafe fn bs_iter_parent( - &self, - curr_blob: *mut spdk_blob, - ) -> Option<*mut spdk_blob>; + unsafe fn bs_iter_parent(&self, curr_blob: *mut spdk_blob) -> Option<*mut spdk_blob>; /// Get lvol inner ptr. fn as_inner_ptr(&self) -> *mut spdk_lvol; @@ -692,8 +640,7 @@ impl LogicalVolume for Lvol { let bs = self.lvs().blob_store(); let blob = self.blob_checked(); let cluster_size = unsafe { spdk_bs_get_cluster_size(bs) }; - let num_allocated_clusters = - unsafe { spdk_blob_calc_used_clusters(blob) }; + let num_allocated_clusters = unsafe { spdk_blob_calc_used_clusters(blob) }; cluster_size * num_allocated_clusters } /// Returns Lvol disk space usage.
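// [Editor's note, not part of the patch] The usage figures below are plain
// cluster arithmetic:
//
//   allocated_bytes           = cluster_size * num_allocated_clusters
//   allocated_bytes_snapshots = cluster_size * num_allocated_clusters_snapshots
//
// e.g. 10 allocated clusters at a 4 MiB cluster size come to
// 10 * 4_194_304 = 41_943_040 bytes.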
@@ -711,16 +658,12 @@ impl LogicalVolume for Lvol { match spdk_blob_get_num_clusters_ancestors(bs, blob, &mut c) { 0 => c, errno => { - error!( - ?self, - errno, "Failed to get snapshot space usage" - ); + error!(?self, errno, "Failed to get snapshot space usage"); 0 } } }; - let allocated_bytes_snapshots = - cluster_size * num_allocated_clusters_snapshots; + let allocated_bytes_snapshots = cluster_size * num_allocated_clusters_snapshots; LvolSpaceUsage { capacity_bytes: self.size(), allocated_bytes: cluster_size * num_allocated_clusters, @@ -738,9 +681,7 @@ impl LogicalVolume for Lvol { // for the clone C1. For S5 allocated_bytes_snapshot_from_clone // will consider ancestor value from C1. allocated_bytes_snapshot_from_clone: self - .calculate_clone_source_snap_usage( - allocated_bytes_snapshots, - ), + .calculate_clone_source_snap_usage(allocated_bytes_snapshots), } } } @@ -769,10 +710,7 @@ impl LogicalVolume for Lvol { } fn snapshot_uuid(&self) -> Option { - Lvol::get_blob_xattr( - self.blob_checked(), - CloneXattrs::SourceUuid.name(), - ) + Lvol::get_blob_xattr(self.blob_checked(), CloneXattrs::SourceUuid.name()) } fn share_protocol(&self) -> Protocol { @@ -804,18 +742,16 @@ impl LvsLvol for Lvol { /// Lvol is considered as clone if its sourceuuid attribute is a valid /// snapshot. if it is clone, return the snapshot lvol. fn is_snapshot_clone(&self) -> Option { - if let Some(source_uuid) = Lvol::get_blob_xattr( - self.blob_checked(), - CloneXattrs::SourceUuid.name(), - ) { - let snap_lvol = - match UntypedBdev::lookup_by_uuid_str(source_uuid.as_str()) { - Some(bdev) => match Lvol::try_from(bdev) { - Ok(l) => l, - _ => return None, - }, - None => return None, - }; + if let Some(source_uuid) = + Lvol::get_blob_xattr(self.blob_checked(), CloneXattrs::SourceUuid.name()) + { + let snap_lvol = match UntypedBdev::lookup_by_uuid_str(source_uuid.as_str()) { + Some(bdev) => match Lvol::try_from(bdev) { + Ok(l) => l, + _ => return None, + }, + None => return None, + }; return Some(snap_lvol); } None @@ -849,45 +785,35 @@ impl LvsLvol for Lvol { }; match prop { - PropName::Shared => { - match unsafe { CStr::from_ptr(value).to_str() } { - Ok("true") => Ok(PropValue::Shared(true)), - Ok("false") => Ok(PropValue::Shared(false)), - _ => einval(), - } - } - PropName::AllowedHosts => { - match unsafe { CStr::from_ptr(value).to_str() } { - Ok("") => Ok(PropValue::AllowedHosts(vec![])), - Ok(list) => Ok(PropValue::AllowedHosts( - list.split(',') - .map(|s| s.to_string()) - .collect::>(), - )), - _ => einval(), - } - } - PropName::EntityId => { - match unsafe { CStr::from_ptr(value).to_str() } { - Ok(id) => Ok(PropValue::EntityId(id.to_string())), - _ => einval(), - } - } + PropName::Shared => match unsafe { CStr::from_ptr(value).to_str() } { + Ok("true") => Ok(PropValue::Shared(true)), + Ok("false") => Ok(PropValue::Shared(false)), + _ => einval(), + }, + PropName::AllowedHosts => match unsafe { CStr::from_ptr(value).to_str() } { + Ok("") => Ok(PropValue::AllowedHosts(vec![])), + Ok(list) => Ok(PropValue::AllowedHosts( + list.split(',').map(|s| s.to_string()).collect::>(), + )), + _ => einval(), + }, + PropName::EntityId => match unsafe { CStr::from_ptr(value).to_str() } { + Ok(id) => Ok(PropValue::EntityId(id.to_string())), + _ => einval(), + }, } } /// Callback executed after synchronizing the lvols metadata. 
extern "C" fn blob_sync_cb(sender_ptr: *mut c_void, errno: i32) { - let sender = - unsafe { Box::from_raw(sender_ptr as *mut oneshot::Sender) }; + let sender = unsafe { Box::from_raw(sender_ptr as *mut oneshot::Sender) }; sender.send(errno).expect("blob cb receiver is gone"); } /// Destroy the lvol. async fn destroy(mut self) -> Result { let event = self.event(EventAction::Delete); extern "C" fn destroy_cb(sender: *mut c_void, errno: i32) { - let sender = - unsafe { Box::from_raw(sender as *mut oneshot::Sender) }; + let sender = unsafe { Box::from_raw(sender as *mut oneshot::Sender) }; sender.send(errno).unwrap(); } self.reset_snapshot_tree_usage_cache(!self.is_snapshot()); @@ -898,9 +824,7 @@ impl LvsLvol for Lvol { let ptpl = self.ptpl(); let (s, r) = pair::(); - unsafe { - vbdev_lvol_destroy(self.as_inner_ptr(), Some(destroy_cb), cb_arg(s)) - }; + unsafe { vbdev_lvol_destroy(self.as_inner_ptr(), Some(destroy_cb), cb_arg(s)) }; r.await .expect("lvol destroy callback is gone") @@ -923,10 +847,7 @@ impl LvsLvol for Lvol { Ok(name) } - async fn set_no_sync( - self: Pin<&mut Self>, - prop: PropValue, - ) -> Result { + async fn set_no_sync(self: Pin<&mut Self>, prop: PropValue) -> Result { let blob = self.blob_checked(); if self.is_snapshot() { @@ -983,10 +904,7 @@ impl LvsLvol for Lvol { Ok(true) } - async fn set( - mut self: Pin<&mut Self>, - prop: PropValue, - ) -> Result<(), LvsError> { + async fn set(mut self: Pin<&mut Self>, prop: PropValue) -> Result<(), LvsError> { if self.as_mut().set_no_sync(prop).await? { self.sync_metadata().await?; } @@ -1008,24 +926,18 @@ impl LvsLvol for Lvol { spdk_blob_sync_md(blob, Some(Self::blob_sync_cb), cb_arg(s)); }; - r.await.expect("sync callback is gone").to_result(|e| { - LvsError::SyncProperty { + r.await + .expect("sync callback is gone") + .to_result(|e| LvsError::SyncProperty { source: BsError::from_i32(e), name: self.name(), - } - })?; + })?; Ok(()) } /// Blobstore Common Callback function. - extern "C" fn blob_op_complete_cb( - arg: *mut c_void, - blob: *mut spdk_blob, - errno: i32, - ) { - let s = unsafe { - Box::from_raw(arg as *mut oneshot::Sender<(*mut spdk_blob, i32)>) - }; + extern "C" fn blob_op_complete_cb(arg: *mut c_void, blob: *mut spdk_blob, errno: i32) { + let s = unsafe { Box::from_raw(arg as *mut oneshot::Sender<(*mut spdk_blob, i32)>) }; if errno != 0 { error!("Blobstore Operation failed, errno {errno}"); } @@ -1038,10 +950,7 @@ impl LvsLvol for Lvol { } /// Get the next spdk_blob from the current blob. - async fn bs_iter_next( - &self, - curr_blob: *mut spdk_blob, - ) -> Option<*mut spdk_blob> { + async fn bs_iter_next(&self, curr_blob: *mut spdk_blob) -> Option<*mut spdk_blob> { let (s, r) = oneshot::channel::<(*mut spdk_blob, i32)>(); unsafe { spdk_bs_iter_next( @@ -1060,10 +969,7 @@ impl LvsLvol for Lvol { /// Get the parent spdk_blob from the current blob. 
/// # Safety /// TODO - unsafe fn bs_iter_parent( - &self, - curr_blob: *mut spdk_blob, - ) -> Option<*mut spdk_blob> { + unsafe fn bs_iter_parent(&self, curr_blob: *mut spdk_blob) -> Option<*mut spdk_blob> { let parent_blob = spdk_bs_get_parent_blob(curr_blob); if parent_blob.is_null() { None @@ -1144,12 +1050,9 @@ impl LvsLvol for Lvol { extern "C" fn lvol_resize_cb(cb_arg: *mut c_void, errno: i32) { let mut retcode = errno; let ctx = cb_arg as *mut ResizeCbCtx; - let (lvol, req_size) = - unsafe { (Lvol::from_inner_ptr((*ctx).lvol), (*ctx).req_size) }; + let (lvol, req_size) = unsafe { (Lvol::from_inner_ptr((*ctx).lvol), (*ctx).req_size) }; let sender = unsafe { - Box::from_raw( - (*ctx).sender as *mut oneshot::Sender>, - ) + Box::from_raw((*ctx).sender as *mut oneshot::Sender>) }; if retcode == 0 && (lvol.size() < req_size) { diff --git a/io-engine/src/lvs/lvs_store.rs b/io-engine/src/lvs/lvs_store.rs index 4934fd5df..95faf36e6 100644 --- a/io-engine/src/lvs/lvs_store.rs +++ b/io-engine/src/lvs/lvs_store.rs @@ -1,10 +1,4 @@ -use std::{ - convert::TryFrom, - fmt::Debug, - os::raw::c_void, - pin::Pin, - ptr::NonNull, -}; +use std::{convert::TryFrom, fmt::Debug, os::raw::c_void, pin::Pin, ptr::NonNull}; use byte_unit::Byte; use events_api::event::EventAction; @@ -13,55 +7,24 @@ use nix::errno::Errno; use pin_utils::core_reexport::fmt::Formatter; use spdk_rs::libspdk::{ - spdk_bdev_update_bs_blockcnt, - spdk_blob_store, - spdk_bs_free_cluster_count, - spdk_bs_get_cluster_size, - spdk_bs_get_md_len, - spdk_bs_get_page_size, - spdk_bs_get_used_md, - spdk_bs_total_data_cluster_count, - spdk_lvol, - spdk_lvol_opts, - spdk_lvol_opts_init, - spdk_lvol_store, - spdk_lvs_grow_live, - vbdev_get_lvol_store_by_name, - vbdev_get_lvol_store_by_uuid, - vbdev_get_lvs_bdev_by_lvs, - vbdev_lvol_create_with_opts, - vbdev_lvs_create, - vbdev_lvs_create_with_uuid, - vbdev_lvs_destruct, - vbdev_lvs_import, - vbdev_lvs_unload, - LVOL_CLEAR_WITH_NONE, - LVOL_CLEAR_WITH_UNMAP, - LVS_CLEAR_WITH_NONE, + spdk_bdev_update_bs_blockcnt, spdk_blob_store, spdk_bs_free_cluster_count, + spdk_bs_get_cluster_size, spdk_bs_get_md_len, spdk_bs_get_page_size, spdk_bs_get_used_md, + spdk_bs_total_data_cluster_count, spdk_lvol, spdk_lvol_opts, spdk_lvol_opts_init, + spdk_lvol_store, spdk_lvs_grow_live, vbdev_get_lvol_store_by_name, + vbdev_get_lvol_store_by_uuid, vbdev_get_lvs_bdev_by_lvs, vbdev_lvol_create_with_opts, + vbdev_lvs_create, vbdev_lvs_create_with_uuid, vbdev_lvs_destruct, vbdev_lvs_import, + vbdev_lvs_unload, LVOL_CLEAR_WITH_NONE, LVOL_CLEAR_WITH_UNMAP, LVS_CLEAR_WITH_NONE, }; use url::Url; -use super::{ - BsError, - ImportErrorReason, - Lvol, - LvsError, - LvsIter, - PropName, - PropValue, -}; +use super::{BsError, ImportErrorReason, Lvol, LvsError, LvsIter, PropName, PropValue}; use crate::{ bdev::{uri, PtplFileOps}, bdev_api::{bdev_destroy, BdevError}, core::{ - logical_volume::LogicalVolume, - snapshot::LvolSnapshotOps, - Bdev, - IoType, - NvmfShareProps, - Share, - UntypedBdev, + logical_volume::LogicalVolume, snapshot::LvolSnapshotOps, Bdev, IoType, NvmfShareProps, + Share, UntypedBdev, }, eventing::Event, ffihelper::{cb_arg, pair, AsStr, ErrnoResult, FfiResult, IntoCString}, @@ -86,10 +49,8 @@ impl Debug for Lvs { self.name(), self.base_bdev().name(), self.base_bdev().uuid(), - Byte::from(self.available()) - .get_appropriate_unit(byte_unit::UnitType::Binary), - Byte::from(self.capacity()) - .get_appropriate_unit(byte_unit::UnitType::Binary) + 
Byte::from(self.available()).get_appropriate_unit(byte_unit::UnitType::Binary), + Byte::from(self.capacity()).get_appropriate_unit(byte_unit::UnitType::Binary) ) } } @@ -127,14 +88,8 @@ impl Lvs { } /// generic lvol store callback - extern "C" fn lvs_cb( - sender_ptr: *mut c_void, - lvs: *mut spdk_lvol_store, - errno: i32, - ) { - let sender = unsafe { - Box::from_raw(sender_ptr as *mut oneshot::Sender>) - }; + extern "C" fn lvs_cb(sender_ptr: *mut c_void, lvs: *mut spdk_lvol_store, errno: i32) { + let sender = unsafe { Box::from_raw(sender_ptr as *mut oneshot::Sender>) }; if errno == 0 { sender @@ -149,8 +104,7 @@ impl Lvs { /// callback when operation has been performed on lvol extern "C" fn lvs_op_cb(sender: *mut c_void, errno: i32) { - let sender = - unsafe { Box::from_raw(sender as *mut oneshot::Sender) }; + let sender = unsafe { Box::from_raw(sender as *mut oneshot::Sender) }; sender.send(errno).unwrap(); } @@ -198,18 +152,13 @@ impl Lvs { /// returns the total capacity of the store pub fn capacity(&self) -> u64 { let blobs = self.blob_store(); - unsafe { - spdk_bs_get_cluster_size(blobs) - * spdk_bs_total_data_cluster_count(blobs) - } + unsafe { spdk_bs_get_cluster_size(blobs) * spdk_bs_total_data_cluster_count(blobs) } } /// returns the available capacity pub fn available(&self) -> u64 { let blobs = self.blob_store(); - unsafe { - spdk_bs_get_cluster_size(blobs) * spdk_bs_free_cluster_count(blobs) - } + unsafe { spdk_bs_get_cluster_size(blobs) * spdk_bs_free_cluster_count(blobs) } } /// returns the used capacity @@ -225,8 +174,7 @@ impl Lvs { /// returns the base bdev of this lvs pub fn base_bdev(&self) -> UntypedBdev { - let p = - unsafe { (*vbdev_get_lvs_bdev_by_lvs(self.as_inner_ptr())).bdev }; + let p = unsafe { (*vbdev_get_lvs_bdev_by_lvs(self.as_inner_ptr())).bdev }; Bdev::checked_from_ptr(p).unwrap() } @@ -270,11 +218,7 @@ impl Lvs { _ => { return Err(LvsError::Invalid { source: BsError::InvalidArgument {}, - msg: format!( - "invalid number {} of devices {:?}", - disks.len(), - disks, - ), + msg: format!("invalid number {} of devices {:?}", disks.len(), disks,), }) } }; @@ -287,13 +231,12 @@ impl Lvs { debug!("Trying to import lvs '{}' from '{}'...", name, bdev); - let mut bdev = - UntypedBdev::lookup_by_name(bdev).ok_or(LvsError::InvalidBdev { - source: BdevError::BdevNotFound { - name: bdev.to_string(), - }, - name: name.to_string(), - })?; + let mut bdev = UntypedBdev::lookup_by_name(bdev).ok_or(LvsError::InvalidBdev { + source: BdevError::BdevNotFound { + name: bdev.to_string(), + }, + name: name.to_string(), + })?; // examining a bdev that is in-use by an lvs, will hang to avoid this // we will determine the usage of the bdev prior to examining it. @@ -348,9 +291,7 @@ impl Lvs { Err(LvsError::Import { source: BsError::InvalidArgument {}, name: name.to_string(), - reason: ImportErrorReason::NameMismatch { - name: pool_name, - }, + reason: ImportErrorReason::NameMismatch { name: pool_name }, }) } else { lvs.share_all().await; @@ -383,18 +324,14 @@ impl Lvs { Err(LvsError::Import { source: BsError::InvalidArgument {}, name: args.name.clone(), - reason: ImportErrorReason::NameClash { - name: pool_name, - }, + reason: ImportErrorReason::NameClash { name: pool_name }, }) }; } let bdev = match parsed.create().await { Err(e) => match e { - BdevError::BdevExists { - .. - } => Ok(parsed.get_name()), + BdevError::BdevExists { .. } => Ok(parsed.get_name()), BdevError::CreateBdevInvalidParams { source: Errno::EEXIST, .. 
@@ -425,9 +362,7 @@ impl Lvs { Err(LvsError::Import { source: BsError::InvalidArgument {}, name: args.name, - reason: ImportErrorReason::UuidMismatch { - uuid: pool_uuid, - }, + reason: ImportErrorReason::UuidMismatch { uuid: pool_uuid }, }) } } else { @@ -454,9 +389,7 @@ impl Lvs { /// Creates a pool on base bdev. /// The caller must ensure the base bdev exists. /// This function is made public for tests purposes. - pub async fn create_from_args_inner( - args: PoolArgs, - ) -> Result { + pub async fn create_from_args_inner(args: PoolArgs) -> Result { assert_eq!(args.disks.len(), 1); let bdev = args.disks[0].clone(); @@ -481,9 +414,7 @@ impl Lvs { return Err(LvsError::InvalidClusterSize { source: BsError::InvalidArgument {}, name: args.name, - msg: format!( - "{cluster_size}, larger than max limit {MAX_CLUSTER_SIZE}" - ), + msg: format!("{cluster_size}, larger than max limit {MAX_CLUSTER_SIZE}"), }); } @@ -563,11 +494,10 @@ impl Lvs { args.name, disk ); - let bdev_ops = - uri::parse(&disk).map_err(|e| LvsError::InvalidBdev { - source: e, - name: args.name.clone(), - })?; + let bdev_ops = uri::parse(&disk).map_err(|e| LvsError::InvalidBdev { + source: e, + name: args.name.clone(), + })?; if let Some(pool) = Self::lookup(&args.name) { return if pool.base_bdev().name() == bdev_ops.get_name() { @@ -586,9 +516,7 @@ impl Lvs { // Create the underlying ndev. let bdev_name = match bdev_ops.create().await { Err(e) => match e { - BdevError::BdevExists { - .. - } => Ok(bdev_ops.get_name()), + BdevError::BdevExists { .. } => Ok(bdev_ops.get_name()), BdevError::CreateBdevInvalidParams { source: Errno::EEXIST, .. @@ -622,7 +550,9 @@ impl Lvs { // we failed to delete the base_bdev be loud about it // there is not much we can do about it here, likely // some desc is still holding on to it or something. 
- error!("failed to delete base_bdev {bdev_name} after failed pool creation"); + error!( + "failed to delete base_bdev {bdev_name} after failed pool creation" + ); }); Err(create) } @@ -650,13 +580,7 @@ impl Lvs { self.unshare_all().await; - unsafe { - vbdev_lvs_unload( - self.as_inner_ptr(), - Some(Self::lvs_op_cb), - cb_arg(s), - ) - }; + unsafe { vbdev_lvs_unload(self.as_inner_ptr(), Some(Self::lvs_op_cb), cb_arg(s)) }; r.await .expect("callback gone while exporting lvs") @@ -705,17 +629,9 @@ impl Lvs { let name = l.name().clone(); let props = NvmfShareProps::new() .with_allowed_hosts(allowed_hosts) - .with_ptpl( - l.ptpl().create().unwrap_or_default(), - ); - if let Err(e) = - Pin::new(&mut l).share_nvmf(Some(props)).await - { - error!( - "failed to share {} {}", - name, - e.to_string() - ); + .with_ptpl(l.ptpl().create().unwrap_or_default()); + if let Err(e) = Pin::new(&mut l).share_nvmf(Some(props)).await { + error!("failed to share {} {}", name, e.to_string()); } } PropValue::Shared(false) => { @@ -746,13 +662,7 @@ impl Lvs { let evt = self.event(EventAction::Delete); - unsafe { - vbdev_lvs_destruct( - self.as_inner_ptr(), - Some(Self::lvs_op_cb), - cb_arg(s), - ) - }; + unsafe { vbdev_lvs_destruct(self.as_inner_ptr(), Some(Self::lvs_op_cb), cb_arg(s)) }; r.await .expect("callback gone while destroying lvs") @@ -813,18 +723,13 @@ impl Lvs { } /// return an iterator for enumerating all snapshots that reside on the pool - pub fn snapshots( - &self, - ) -> Option> { + pub fn snapshots(&self) -> Option> { if let Some(bdev) = UntypedBdev::bdev_first() { let pool_name = format!("{}/", self.name()); Some( bdev.into_iter() .filter(move |b| { - b.driver() == "lvol" - && b.aliases() - .iter() - .any(|a| a.contains(&pool_name)) + b.driver() == "lvol" && b.aliases().iter().any(|a| a.contains(&pool_name)) }) .filter_map(|b| { Lvol::try_from(b).ok().and_then(|l| { @@ -849,10 +754,7 @@ impl Lvs { Some( bdev.into_iter() .filter(move |b| { - b.driver() == "lvol" - && b.aliases() - .iter() - .any(|a| a.contains(&pool_name)) + b.driver() == "lvol" && b.aliases().iter().any(|a| a.contains(&pool_name)) }) .map(|b| Lvol::try_from(b).unwrap()), ) @@ -882,20 +784,14 @@ impl Lvs { } /// create a new lvol on this pool - pub async fn create_lvol_with_opts( - &self, - opts: ReplicaArgs, - ) -> Result { - let clear_method = if self.base_bdev().io_type_supported(IoType::Unmap) - { + pub async fn create_lvol_with_opts(&self, opts: ReplicaArgs) -> Result { + let clear_method = if self.base_bdev().io_type_supported(IoType::Unmap) { LVOL_CLEAR_WITH_UNMAP } else { LVOL_CLEAR_WITH_NONE }; - if !opts.uuid.is_empty() - && UntypedBdev::lookup_by_uuid_str(&opts.uuid).is_some() - { + if !opts.uuid.is_empty() && UntypedBdev::lookup_by_uuid_str(&opts.uuid).is_some() { return Err(LvsError::RepExists { source: BsError::VolAlreadyExists {}, name: opts.uuid, @@ -969,9 +865,7 @@ impl Lvs { .map(Lvol::from_inner_ptr)?; if let Some(id) = opts.entity_id { - if let Err(error) = - Pin::new(&mut lvol).set(PropValue::EntityId(id)).await - { + if let Err(error) = Pin::new(&mut lvol).set(PropValue::EntityId(id)).await { let lvol_uuid = lvol.uuid(); if let Err(error) = lvol.destroy().await { warn!( @@ -1019,9 +913,7 @@ impl LvsPtpl { } impl From<&Lvs> for LvsPtpl { fn from(lvs: &Lvs) -> Self { - Self { - uuid: lvs.uuid(), - } + Self { uuid: lvs.uuid() } } } impl PtplFileOps for LvsPtpl { diff --git a/io-engine/src/lvs/mod.rs b/io-engine/src/lvs/mod.rs index c5a1657c5..d2f2dbbd2 100644 --- a/io-engine/src/lvs/mod.rs +++ 
b/io-engine/src/lvs/mod.rs @@ -1,35 +1,16 @@ use crate::{ bdev::PtplFileOps, core::{ - snapshot::SnapshotDescriptor, - CloneParams, - LogicalVolume, - Protocol, - PtplProps, - Share, - SnapshotParams, - UpdateProps, + snapshot::SnapshotDescriptor, CloneParams, LogicalVolume, Protocol, PtplProps, Share, + SnapshotParams, UpdateProps, }, pool_backend::{ - Error, - FindPoolArgs, - IPoolFactory, - IPoolProps, - ListPoolArgs, - PoolArgs, - PoolBackend, - PoolMetadataInfo, - PoolOps, - ReplicaArgs, + Error, FindPoolArgs, IPoolFactory, IPoolProps, ListPoolArgs, PoolArgs, PoolBackend, + PoolMetadataInfo, PoolOps, ReplicaArgs, }, replica_backend::{ - FindReplicaArgs, - IReplicaFactory, - ListCloneArgs, - ListReplicaArgs, - ListSnapshotArgs, - ReplicaOps, - SnapshotOps, + FindReplicaArgs, IReplicaFactory, ListCloneArgs, ListReplicaArgs, ListSnapshotArgs, + ReplicaOps, SnapshotOps, }, }; pub use lvol_snapshot::LvolSnapshotIter; @@ -60,16 +41,13 @@ impl ReplicaOps for Lvol { self.as_bdev().shared() } - fn create_ptpl( - &self, - ) -> Result, crate::pool_backend::Error> { - let ptpl = - self.ptpl().create().map_err(|source| LvsError::LvolShare { - source: crate::core::CoreError::Ptpl { - reason: source.to_string(), - }, - name: self.name(), - })?; + fn create_ptpl(&self) -> Result, crate::pool_backend::Error> { + let ptpl = self.ptpl().create().map_err(|source| LvsError::LvolShare { + source: crate::core::CoreError::Ptpl { + reason: source.to_string(), + }, + name: self.name(), + })?; Ok(ptpl) } @@ -93,24 +71,16 @@ impl ReplicaOps for Lvol { Ok(()) } - async fn resize( - &mut self, - size: u64, - ) -> Result<(), crate::pool_backend::Error> { + async fn resize(&mut self, size: u64) -> Result<(), crate::pool_backend::Error> { self.resize_replica(size).await.map_err(Into::into) } - async fn set_entity_id( - &mut self, - id: String, - ) -> Result<(), crate::pool_backend::Error> { + async fn set_entity_id(&mut self, id: String) -> Result<(), crate::pool_backend::Error> { Pin::new(self).set(PropValue::EntityId(id)).await?; Ok(()) } - async fn destroy( - self: Box, - ) -> Result<(), crate::pool_backend::Error> { + async fn destroy(self: Box) -> Result<(), crate::pool_backend::Error> { self.destroy_replica().await?; Ok(()) } @@ -149,10 +119,7 @@ impl SnapshotOps for Lvol { Ok(()) } - async fn create_clone( - &self, - params: CloneParams, - ) -> Result, Error> { + async fn create_clone(&self, params: CloneParams) -> Result, Error> { let clone = LvolSnapshotOps::create_clone(self, params).await?; Ok(Box::new(clone)) } @@ -175,9 +142,7 @@ impl PoolOps for Lvs { Ok(Box::new(lvol)) } - async fn destroy( - self: Box, - ) -> Result<(), crate::pool_backend::Error> { + async fn destroy(self: Box) -> Result<(), crate::pool_backend::Error> { (*self).destroy().await?; Ok(()) } @@ -263,18 +228,12 @@ pub struct PoolLvsFactory {} #[async_trait::async_trait(?Send)] impl IPoolFactory for PoolLvsFactory { - async fn create( - &self, - args: PoolArgs, - ) -> Result, crate::pool_backend::Error> { + async fn create(&self, args: PoolArgs) -> Result, crate::pool_backend::Error> { let lvs = Lvs::create_or_import(args).await?; Ok(Box::new(lvs)) } - async fn import( - &self, - args: PoolArgs, - ) -> Result, crate::pool_backend::Error> { + async fn import(&self, args: PoolArgs) -> Result, crate::pool_backend::Error> { let lvs = Lvs::import_from_args(args).await?; Ok(Box::new(lvs)) } @@ -285,13 +244,8 @@ impl IPoolFactory for PoolLvsFactory { ) -> Result>, crate::pool_backend::Error> { let lvs = match args { FindPoolArgs::Uuid(uuid) => 
Lvs::lookup_by_uuid(uuid), - FindPoolArgs::UuidOrName(id) => { - Lvs::lookup_by_uuid(id).or_else(|| Lvs::lookup(id)) - } - FindPoolArgs::NameUuid { - name, - uuid, - } => match uuid { + FindPoolArgs::UuidOrName(id) => Lvs::lookup_by_uuid(id).or_else(|| Lvs::lookup(id)), + FindPoolArgs::NameUuid { name, uuid } => match uuid { Some(uuid) => match Lvs::lookup_by_uuid(uuid) { Some(pool) if pool.name() == name => Some(pool), Some(_) => None, @@ -340,10 +294,7 @@ pub struct ReplLvsFactory {} #[async_trait::async_trait(?Send)] impl IReplicaFactory for ReplLvsFactory { - fn bdev_as_replica( - &self, - bdev: crate::core::UntypedBdev, - ) -> Option> { + fn bdev_as_replica(&self, bdev: crate::core::UntypedBdev) -> Option> { let lvol = Lvol::ok_from(bdev)?; if lvol.is_snapshot() { return None; @@ -377,13 +328,9 @@ impl IReplicaFactory for ReplLvsFactory { Ok(lvol.map(|l| Box::new(l) as _)) } - async fn list( - &self, - args: &ListReplicaArgs, - ) -> Result>, Error> { - let retain = |arg: Option<&String>, val: &String| -> bool { - arg.is_none() || arg == Some(val) - }; + async fn list(&self, args: &ListReplicaArgs) -> Result>, Error> { + let retain = + |arg: Option<&String>, val: &String| -> bool { arg.is_none() || arg == Some(val) }; let lvols = lvol_iter::LvolIter::new().filter(|lvol| { retain(args.pool_name.as_ref(), &lvol.pool_name()) @@ -400,9 +347,7 @@ impl IReplicaFactory for ReplLvsFactory { ) -> Result, crate::pool_backend::Error> { // if snapshot_uuid is input, get specific snapshot result Ok(if let Some(ref snapshot_uuid) = args.uuid { - let lvol = match crate::core::UntypedBdev::lookup_by_uuid_str( - snapshot_uuid, - ) { + let lvol = match crate::core::UntypedBdev::lookup_by_uuid_str(snapshot_uuid) { Some(bdev) => Lvol::try_from(bdev)?, None => { return Err(LvsError::Invalid { @@ -414,9 +359,7 @@ impl IReplicaFactory for ReplLvsFactory { }; lvol.list_snapshot_by_snapshot_uuid() } else if let Some(ref replica_uuid) = args.source_uuid { - let lvol = match crate::core::UntypedBdev::lookup_by_uuid_str( - replica_uuid, - ) { + let lvol = match crate::core::UntypedBdev::lookup_by_uuid_str(replica_uuid) { Some(bdev) => Lvol::try_from(bdev)?, None => { return Err(LvsError::Invalid { @@ -437,9 +380,7 @@ impl IReplicaFactory for ReplLvsFactory { args: &ListCloneArgs, ) -> Result>, crate::pool_backend::Error> { let clones = if let Some(snapshot_uuid) = &args.snapshot_uuid { - let snap_lvol = match crate::core::UntypedBdev::lookup_by_uuid_str( - snapshot_uuid, - ) { + let snap_lvol = match crate::core::UntypedBdev::lookup_by_uuid_str(snapshot_uuid) { Some(bdev) => Lvol::try_from(bdev), None => Err(LvsError::Invalid { source: BsError::LvolNotFound {}, diff --git a/io-engine/src/persistent_store.rs b/io-engine/src/persistent_store.rs index a86dfa550..7d56b52c6 100644 --- a/io-engine/src/persistent_store.rs +++ b/io-engine/src/persistent_store.rs @@ -9,15 +9,7 @@ use crate::{ core::Reactor, store::{ etcd::Etcd, - store_defs::{ - DeleteWait, - GetWait, - PutWait, - Store, - StoreError, - StoreKey, - StoreValue, - }, + store_defs::{DeleteWait, GetWait, PutWait, Store, StoreError, StoreKey, StoreValue}, }, }; use futures::channel::oneshot; @@ -166,12 +158,9 @@ impl PersistentStore { } /// Puts a key-value in the store. 
- pub async fn put( - key: &impl StoreKey, - value: &impl StoreValue, - ) -> Result<(), StoreError> { - let put_value = serde_json::to_value(value) - .expect("Failed to convert value to a serde_json value"); + pub async fn put(key: &impl StoreKey, value: &impl StoreValue) -> Result<(), StoreError> { + let put_value = + serde_json::to_value(value).expect("Failed to convert value to a serde_json value"); let key_string = key.to_string(); let value_clone = put_value.clone(); @@ -229,10 +218,7 @@ impl PersistentStore { info!("Deleting key {} from store.", key_string); match Self::backing_store().delete_kv(&key_string).await { Ok(_) => { - info!( - "Successfully deleted key {} from store.", - key_string - ); + info!("Successfully deleted key {} from store.", key_string); Ok(()) } Err(e) => Err(e), @@ -265,9 +251,7 @@ impl PersistentStore { // Execute the sending of the result on a "Mayastor thread". let rx = Reactor::spawn_at_primary(async move { if tx.send(result).is_err() { - tracing::error!( - "Failed to send completion for 'put' request." - ); + tracing::error!("Failed to send completion for 'put' request."); } }) .expect("Failed to send future to Mayastor thread"); @@ -313,8 +297,7 @@ impl PersistentStore { async fn reconnect() { warn!("Attempting to reconnect to persistent store...."); let persistent_store = Self::instance(); - let backing_store = - Self::connect_to_backing_store(&PersistentStore::endpoint()).await; + let backing_store = Self::connect_to_backing_store(&PersistentStore::endpoint()).await; persistent_store.lock().store = backing_store; } } diff --git a/io-engine/src/pool_backend.rs b/io-engine/src/pool_backend.rs index d6ea486bc..b08cd7d59 100644 --- a/io-engine/src/pool_backend.rs +++ b/io-engine/src/pool_backend.rs @@ -53,18 +53,14 @@ pub enum GenericError { impl From<GenericError> for tonic::Status { fn from(e: GenericError) -> Self { match e { - GenericError::NotFound { - message, - } => tonic::Status::not_found(message), + GenericError::NotFound { message } => tonic::Status::not_found(message), } } } impl ToErrno for GenericError { fn to_errno(self) -> Errno { match self { - GenericError::NotFound { - .. - } => Errno::ENODEV, + GenericError::NotFound { .. } => Errno::ENODEV, } } } @@ -82,52 +78,34 @@ pub enum Error { } impl From<crate::lvs::LvsError> for Error { fn from(source: crate::lvs::LvsError) -> Self { - Self::Lvs { - source, - } + Self::Lvs { source } } } impl From<crate::lvm::Error> for Error { fn from(source: crate::lvm::Error) -> Self { - Self::Lvm { - source, - } + Self::Lvm { source } } } impl From<GenericError> for Error { fn from(source: GenericError) -> Self { - Self::Gen { - source, - } + Self::Gen { source } } } impl From<Error> for tonic::Status { fn from(e: Error) -> Self { match e { - Error::Lvs { - source, - } => source.into(), - Error::Lvm { - source, - } => source.into(), - Error::Gen { - source, - } => source.into(), + Error::Lvs { source } => source.into(), + Error::Lvm { source } => source.into(), + Error::Gen { source } => source.into(), } } } impl ToErrno for Error { fn to_errno(self) -> Errno { match self { - Error::Lvs { - source, - } => source.to_errno(), - Error::Lvm { - source, - } => source.to_errno(), - Error::Gen { - source, - } => source.to_errno(), + Error::Lvs { source } => source.to_errno(), + Error::Lvm { source } => source.to_errno(), + Error::Gen { source } => source.to_errno(), } } } @@ -137,14 +115,9 @@ impl ToErrno for Error { /// much as possible, though we can allow for extra pool specific options /// to be passed as parameters.
#[async_trait::async_trait(?Send)] -pub trait PoolOps: - IPoolProps + BdevStater + std::fmt::Debug -{ +pub trait PoolOps: IPoolProps + BdevStater + std::fmt::Debug { /// Create a replica on this pool with the given arguments. - async fn create_repl( - &self, - args: ReplicaArgs, - ) -> Result, Error>; + async fn create_repl(&self, args: ReplicaArgs) -> Result, Error>; /// Destroy the pool itself along with all its replicas. async fn destroy(self: Box) -> Result<(), Error>; @@ -167,15 +140,9 @@ pub trait IPoolFactory { async fn import(&self, args: PoolArgs) -> Result, Error>; /// Find the pool which matches the given arguments. /// # Note: the disks are not currently matched. - async fn find( - &self, - args: &FindPoolArgs, - ) -> Result>, Error>; + async fn find(&self, args: &FindPoolArgs) -> Result>, Error>; /// List all pools from this `PoolBackend`. - async fn list( - &self, - args: &ListPoolArgs, - ) -> Result>, Error>; + async fn list(&self, args: &ListPoolArgs) -> Result>, Error>; /// The pool backend type. fn backend(&self) -> PoolBackend; } @@ -273,18 +240,12 @@ impl PoolFactory { /// Returns the factory for the given backend kind. pub fn new(backend: PoolBackend) -> Self { Self(match backend { - PoolBackend::Lvs => { - Box::::default() as _ - } - PoolBackend::Lvm => { - Box::::default() as _ - } + PoolBackend::Lvs => Box::::default() as _, + PoolBackend::Lvm => Box::::default() as _, }) } /// Probe backends for the given name and/or uuid and return the right one. - pub async fn find>( - args: I, - ) -> Result, Error> { + pub async fn find>(args: I) -> Result, Error> { let args = args.into(); let mut error = None; diff --git a/io-engine/src/rebuild/bdev_rebuild.rs b/io-engine/src/rebuild/bdev_rebuild.rs index 23bcbf2a4..a25768579 100644 --- a/io-engine/src/rebuild/bdev_rebuild.rs +++ b/io-engine/src/rebuild/bdev_rebuild.rs @@ -5,9 +5,7 @@ use super::{ rebuild_error::RebuildError, rebuild_job_backend::RebuildBackend, rebuild_task::{RebuildTasks, TaskResult}, - RebuildJob, - RebuildJobOptions, - SEGMENT_TASKS, + RebuildJob, RebuildJobOptions, SEGMENT_TASKS, }; use crate::{ @@ -64,14 +62,8 @@ impl BdevRebuildJobBuilder { } /// Builds a `BdevRebuildJob` which can be started and which will then /// rebuild from source to destination. 
- pub async fn build( - self, - src_uri: &str, - dst_uri: &str, - ) -> Result { - let descriptor = - RebuildDescriptor::new(src_uri, dst_uri, self.range, self.options) - .await?; + pub async fn build(self, src_uri: &str, dst_uri: &str) -> Result { + let descriptor = RebuildDescriptor::new(src_uri, dst_uri, self.range, self.options).await?; let task_pool = RebuildTasks::new(SEGMENT_TASKS, &descriptor)?; let notify_fn = self.notify_fn.unwrap_or(|_, _| {}); match self.rebuild_map { @@ -117,9 +109,7 @@ pub(super) struct BdevRebuildJobBackend> { } #[async_trait::async_trait(?Send)] -impl> RebuildBackend - for BdevRebuildJobBackend -{ +impl> RebuildBackend for BdevRebuildJobBackend { fn on_state_change(&mut self) { let desc = self.common_desc(); (self.notify_fn)(&desc.src_uri, &desc.dst_uri); @@ -145,11 +135,8 @@ impl> RebuildBackend self.copier .next() .map(|blk| { - self.task_pool.schedule_segment_rebuild( - id, - blk, - self.copier.copier(), - ); + self.task_pool + .schedule_segment_rebuild(id, blk, self.copier.copier()); self.task_pool.active += 1; true }) @@ -161,18 +148,14 @@ impl> RebuildBackend } } -impl> std::fmt::Debug - for BdevRebuildJobBackend -{ +impl> std::fmt::Debug for BdevRebuildJobBackend { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_struct("BdevRebuildJob") .field("next", &self.copier.peek_next()) .finish() } } -impl> std::fmt::Display - for BdevRebuildJobBackend -{ +impl> std::fmt::Display for BdevRebuildJobBackend { fn fmt(&self, _f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { Ok(()) } diff --git a/io-engine/src/rebuild/mod.rs b/io-engine/src/rebuild/mod.rs index 263df5f9a..2edf44726 100644 --- a/io-engine/src/rebuild/mod.rs +++ b/io-engine/src/rebuild/mod.rs @@ -18,11 +18,7 @@ use rebuild_descriptor::RebuildDescriptor; pub(crate) use rebuild_error::{RebuildError, SnapshotRebuildError}; use rebuild_job::RebuildOperation; pub use rebuild_job::{RebuildJob, RebuildJobOptions, RebuildVerifyMode}; -use rebuild_job_backend::{ - RebuildFBendChan, - RebuildJobBackendManager, - RebuildJobRequest, -}; +use rebuild_job_backend::{RebuildFBendChan, RebuildJobBackendManager, RebuildJobRequest}; pub use rebuild_map::RebuildMap; pub use rebuild_state::RebuildState; use rebuild_state::RebuildStates; @@ -35,8 +31,7 @@ pub use snapshot_rebuild::SnapshotRebuildJob; const SEGMENT_TASKS: usize = 16; /// Size of each segment used by the copy task -pub(crate) const SEGMENT_SIZE: u64 = - spdk_rs::libspdk::SPDK_BDEV_LARGE_BUF_MAX_SIZE as u64; +pub(crate) const SEGMENT_SIZE: u64 = spdk_rs::libspdk::SPDK_BDEV_LARGE_BUF_MAX_SIZE as u64; /// Checks whether a range is contained within another range trait WithinRange { diff --git a/io-engine/src/rebuild/nexus_rebuild.rs b/io-engine/src/rebuild/nexus_rebuild.rs index a86952ffa..da485e0b0 100644 --- a/io-engine/src/rebuild/nexus_rebuild.rs +++ b/io-engine/src/rebuild/nexus_rebuild.rs @@ -10,14 +10,8 @@ use crate::{ rebuild_error::{RangeLockFailed, RangeUnlockFailed}, rebuild_job_backend::RebuildJobManager, rebuild_task::{RebuildTask, RebuildTaskCopier}, - rebuilders::{ - FullRebuild, - PartialSeqCopier, - PartialSeqRebuild, - RangeRebuilder, - }, - RebuildMap, - RebuildState, + rebuilders::{FullRebuild, PartialSeqCopier, PartialSeqRebuild, RangeRebuilder}, + RebuildMap, RebuildState, }, }; @@ -27,8 +21,7 @@ use super::{ rebuild_job::RebuildJob, rebuild_job_backend::RebuildBackend, rebuild_task::{RebuildTasks, TaskResult}, - RebuildJobOptions, - SEGMENT_TASKS, + RebuildJobOptions, SEGMENT_TASKS, }; /// A Nexus rebuild 
job is responsible for managing a rebuild (copy) which reads @@ -89,15 +82,11 @@ impl NexusRebuildJob { options: RebuildJobOptions, notify_fn: fn(String, String) -> (), ) -> Result { - let descriptor = - RebuildDescriptor::new(src_uri, dst_uri, Some(range), options) - .await?; + let descriptor = RebuildDescriptor::new(src_uri, dst_uri, Some(range), options).await?; let tasks = RebuildTasks::new(SEGMENT_TASKS, &descriptor)?; - let backend = NexusRebuildJobBackendStarter::new( - nexus_name, tasks, notify_fn, descriptor, - ) - .await?; + let backend = + NexusRebuildJobBackendStarter::new(nexus_name, tasks, notify_fn, descriptor).await?; let manager = RebuildJobManager::new(); @@ -168,10 +157,7 @@ impl Deref for NexusRebuildDescriptor { /// as a means of locking the range which is being rebuilt ensuring /// there are no concurrent writes to the same range between the /// user IO (through the nexus) and the rebuild itself. -pub(super) struct NexusRebuildJobBackend< - T: RebuildTaskCopier, - R: RangeRebuilder, -> { +pub(super) struct NexusRebuildJobBackend> { /// A pool of tasks which perform the actual data rebuild. task_pool: RebuildTasks, /// The range rebuilder which walks and copies the segments. @@ -205,8 +191,8 @@ impl NexusRebuildJobBackendStarter { notify_fn: fn(String, String) -> (), descriptor: RebuildDescriptor, ) -> Result { - let nexus_descriptor = UntypedBdev::open_by_name(nexus_name, false) - .context(BdevNotFound { + let nexus_descriptor = + UntypedBdev::open_by_name(nexus_name, false).context(BdevNotFound { bdev: nexus_name.to_string(), })?; @@ -239,10 +225,7 @@ impl NexusRebuildJobBackendStarter { } fn into_full( self, - ) -> NexusRebuildJobBackend< - NexusRebuildDescriptor, - FullRebuild, - > { + ) -> NexusRebuildJobBackend> { NexusRebuildJobBackend { task_pool: self.task_pool, notify_fn: self.notify_fn, @@ -258,10 +241,7 @@ impl> RebuildBackend for NexusRebuildJobBackend { fn on_state_change(&mut self) { - (self.notify_fn)( - self.nexus_name.clone(), - self.common_desc().dst_uri.clone(), - ); + (self.notify_fn)(self.nexus_name.clone(), self.common_desc().dst_uri.clone()); } fn common_desc(&self) -> &RebuildDescriptor { @@ -283,11 +263,8 @@ impl> RebuildBackend self.copier .next() .map(|blk| { - self.task_pool.schedule_segment_rebuild( - id, - blk, - self.copier.copier(), - ); + self.task_pool + .schedule_segment_rebuild(id, blk, self.copier.copier()); self.task_pool.active += 1; true }) @@ -336,11 +313,7 @@ impl RebuildTaskCopier for NexusRebuildDescriptor { /// The use of RangeContext here is safe because it is stored on the stack /// for the duration of the calls to lock and unlock. #[inline] - async fn copy_segment( - &self, - blk: u64, - task: &mut RebuildTask, - ) -> Result { + async fn copy_segment(&self, blk: u64, task: &mut RebuildTask) -> Result { let len = self.get_segment_size_blks(blk); // The nexus children have metadata and data partitions, whereas the // nexus has a data partition only. Because we are locking the range on @@ -351,14 +324,11 @@ impl RebuildTaskCopier for NexusRebuildDescriptor { // Wait for LBA range to be locked. // This prevents other I/Os being issued to this LBA range whilst it is // being rebuilt. - let lock = - self.nexus - .lock_lba_range(r) - .await - .context(RangeLockFailed { - blk, - len, - })?; + let lock = self + .nexus + .lock_lba_range(r) + .await + .context(RangeLockFailed { blk, len })?; // Perform the copy. 
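// Editor's sketch, not part of the patch: the surrounding hunk reflows the
// lock -> copy -> unlock sequence that serialises rebuild I/O against user
// I/O on the same LBA range. Reduced to its skeleton, with a futures mutex
// standing in (hypothetically) for the nexus LBA range lock:
use futures::lock::Mutex;

async fn copy_locked<F>(range_lock: &Mutex<()>, do_copy: F) -> Result<bool, i32>
where
    F: std::future::Future<Output = Result<bool, i32>>,
{
    let guard = range_lock.lock().await; // user I/O to this range now waits
    let result = do_copy.await;          // perform the copy while held
    drop(guard);                         // release even when the copy failed
    result                               // only then surface the outcome
}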
let result = task.copy_one(blk, self).await; @@ -368,10 +338,7 @@ impl RebuildTaskCopier for NexusRebuildDescriptor { self.nexus .unlock_lba_range(lock) .await - .context(RangeUnlockFailed { - blk, - len, - })?; + .context(RangeUnlockFailed { blk, len })?; result } diff --git a/io-engine/src/rebuild/rebuild_descriptor.rs b/io-engine/src/rebuild/rebuild_descriptor.rs index 8ab49e9db..a44d375a5 100644 --- a/io-engine/src/rebuild/rebuild_descriptor.rs +++ b/io-engine/src/rebuild/rebuild_descriptor.rs @@ -1,28 +1,17 @@ use chrono::{DateTime, Utc}; use snafu::ResultExt; -use spdk_rs::{ - libspdk::SPDK_NVME_SC_COMPARE_FAILURE, - DmaBuf, - IoVec, - NvmeStatus, -}; +use spdk_rs::{libspdk::SPDK_NVME_SC_COMPARE_FAILURE, DmaBuf, IoVec, NvmeStatus}; use crate::{ bdev::device_open, bdev_api::bdev_get_name, core::{ - BlockDevice, - BlockDeviceDescriptor, - BlockDeviceHandle, - CoreError, - IoCompletionStatus, - ReadOptions, - SegmentMap, + BlockDevice, BlockDeviceDescriptor, BlockDeviceHandle, CoreError, IoCompletionStatus, + ReadOptions, SegmentMap, }, rebuild::{ rebuild_error::{BdevInvalidUri, NoCopyBuffer}, - WithinRange, - SEGMENT_SIZE, + WithinRange, SEGMENT_SIZE, }, }; @@ -100,16 +89,12 @@ impl RebuildDescriptor { let dst_size = dst_descriptor.get_device().size_in_bytes(); let dst_blk_size = dst_descriptor.get_device().block_len(); - 0 .. dst_size / dst_blk_size + 0..dst_size / dst_blk_size } Some(range) => range, }; - if !Self::validate( - src_handle.get_device(), - dst_handle.get_device(), - &range, - ) { + if !Self::validate(src_handle.get_device(), dst_handle.get_device(), &range) { return Err(RebuildError::InvalidSrcDstRange {}); } @@ -140,16 +125,13 @@ impl RebuildDescriptor { ) -> bool { // todo: make sure we don't overwrite the labels let data_partition_start = 0; - range.within(data_partition_start .. source.num_blocks()) - && range.within(data_partition_start .. destination.num_blocks()) + range.within(data_partition_start..source.num_blocks()) + && range.within(data_partition_start..destination.num_blocks()) && source.block_len() == destination.block_len() } /// Check if the rebuild range is compatible with the rebuild segment map. - pub(super) fn validate_map( - &self, - map: &SegmentMap, - ) -> Result<(), RebuildError> { + pub(super) fn validate_map(&self, map: &SegmentMap) -> Result<(), RebuildError> { if map.size_blks() > self.range.end { return Err(RebuildError::InvalidMapRange {}); } @@ -206,11 +188,7 @@ impl RebuildDescriptor { /// Returns `IoVec` for the givem `DmaBuf`, with length adjusted to the copy /// size for the given offset. Given `DmaBuf` must be large enough. #[inline(always)] - pub(super) fn adjusted_iov( - &self, - buffer: &DmaBuf, - offset_blk: u64, - ) -> IoVec { + pub(super) fn adjusted_iov(&self, buffer: &DmaBuf, offset_blk: u64) -> IoVec { let mut iov = buffer.to_io_vec(); let iov_len = self.get_segment_size_blks(offset_blk) * self.block_size; @@ -243,8 +221,7 @@ impl RebuildDescriptor { // Read from an unallocated block occured, no need to copy it. Err(CoreError::ReadFailed { - status: - IoCompletionStatus::NvmeError(NvmeStatus::UNWRITTEN_BLOCK), + status: IoCompletionStatus::NvmeError(NvmeStatus::UNWRITTEN_BLOCK), .. 
}) => Ok(false), @@ -263,11 +240,7 @@ impl RebuildDescriptor { iovs: &[IoVec], ) -> Result<(), RebuildError> { self.dst_io_handle() - .writev_blocks_async( - iovs, - offset_blk, - self.get_segment_size_blks(offset_blk), - ) + .writev_blocks_async(iovs, offset_blk, self.get_segment_size_blks(offset_blk)) .await .map_err(|err| RebuildError::WriteIoFailed { source: err, @@ -298,17 +271,13 @@ impl RebuildDescriptor { match self .dst_io_handle() - .comparev_blocks_async( - iovs, - offset_blk, - self.get_segment_size_blks(offset_blk), - ) + .comparev_blocks_async(iovs, offset_blk, self.get_segment_size_blks(offset_blk)) .await { Ok(_) => Ok(()), Err(CoreError::CompareFailed { status: - IoCompletionStatus::NvmeError(NvmeStatus::Media(SPDK_NVME_SC_COMPARE_FAILURE)), + IoCompletionStatus::NvmeError(NvmeStatus::Media(SPDK_NVME_SC_COMPARE_FAILURE)), .. }) => self.verify_failure(offset_blk), Err(err) => Err(RebuildError::VerifyIoFailed { diff --git a/io-engine/src/rebuild/rebuild_error.rs b/io-engine/src/rebuild/rebuild_error.rs index 8531ce2a7..e001b98d2 100644 --- a/io-engine/src/rebuild/rebuild_error.rs +++ b/io-engine/src/rebuild/rebuild_error.rs @@ -17,9 +17,7 @@ pub enum RebuildError { InvalidSrcDstRange {}, #[snafu(display("Map range is not compatible with rebuild range"))] InvalidMapRange {}, - #[snafu(display( - "The same device was specified for both source and destination: {bdev}" - ))] + #[snafu(display("The same device was specified for both source and destination: {bdev}"))] SameBdev { bdev: String }, #[snafu(display("Failed to get a handle for bdev {}", bdev))] NoBdevHandle { source: CoreError, bdev: String }, @@ -33,11 +31,7 @@ pub enum RebuildError { WriteIoFailed { source: CoreError, bdev: String }, #[snafu(display("Verify IO failed for bdev {}", bdev))] VerifyIoFailed { source: CoreError, bdev: String }, - #[snafu(display( - "Verify compare failed for bdev {}: {}", - bdev, - verify_message - ))] + #[snafu(display("Verify compare failed for bdev {}: {}", bdev, verify_message))] VerifyCompareFailed { bdev: String, verify_message: String, @@ -82,7 +76,10 @@ pub enum RebuildError { FrontendGone, #[snafu(display("The rebuild backend has been dropped"))] BackendGone, - #[snafu(display("The rebuild task pool channel is unexpectedly closed with {} active tasks", active))] + #[snafu(display( + "The rebuild task pool channel is unexpectedly closed with {} active tasks", + active + ))] RebuildTasksChannel { active: usize }, #[snafu(display("Snapshot Rebuild: {source}"))] SnapshotRebuild { source: SnapshotRebuildError }, @@ -105,8 +102,6 @@ pub enum SnapshotRebuildError { impl From for RebuildError { fn from(source: SnapshotRebuildError) -> Self { - Self::SnapshotRebuild { - source, - } + Self::SnapshotRebuild { source } } } diff --git a/io-engine/src/rebuild/rebuild_instances.rs b/io-engine/src/rebuild/rebuild_instances.rs index 89ef822c9..b51854414 100644 --- a/io-engine/src/rebuild/rebuild_instances.rs +++ b/io-engine/src/rebuild/rebuild_instances.rs @@ -2,8 +2,7 @@ macro_rules! gen_rebuild_instances { ($T:ty) => { /// List of rebuild jobs indexed by the destination's replica uri. - type RebuildJobInstances = - std::collections::HashMap>; + type RebuildJobInstances = std::collections::HashMap>; impl $T { /// Get the rebuild job instances container, we ensure that this can @@ -29,9 +28,7 @@ macro_rules! gen_rebuild_instances { } /// Lookup a rebuild job by its name then remove and drop it. 
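// Editor's sketch, not part of the patch: gen_rebuild_instances! expands to a
// name -> Arc<job> registry plus the store/lookup/remove helpers being
// reformatted here. The store contract, in plain non-macro form with
// illustrative types:
use std::{collections::HashMap, sync::Arc};

struct MiniJob {
    name: String,
}

fn store_job(
    jobs: &mut HashMap<String, Arc<MiniJob>>,
    job: MiniJob,
) -> Result<Arc<MiniJob>, String> {
    if jobs.contains_key(&job.name) {
        return Err(format!("rebuild job '{}' already exists", job.name));
    }
    let job = Arc::new(job);
    jobs.insert(job.name.clone(), job.clone());
    Ok(job)
}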
- pub fn remove( - name: &str, - ) -> Result, super::RebuildError> { + pub fn remove(name: &str) -> Result, super::RebuildError> { match Self::get_instances().remove(name) { Some(job) => Ok(job), None => Err(RebuildError::JobNotFound { @@ -50,18 +47,13 @@ macro_rules! gen_rebuild_instances { }) } else { let job = std::sync::Arc::new(self); - let _ = rebuild_list.insert( - job.name().to_owned(), - job.clone(), - ); + let _ = rebuild_list.insert(job.name().to_owned(), job.clone()); Ok(job) } } /// Lookup a rebuild job by its name and return it. - pub fn lookup( - name: &str, - ) -> Result, super::RebuildError> { + pub fn lookup(name: &str) -> Result, super::RebuildError> { if let Some(job) = Self::get_instances().get(name) { Ok(job.clone()) } else { diff --git a/io-engine/src/rebuild/rebuild_job.rs b/io-engine/src/rebuild/rebuild_job.rs index 2b594b1f6..290ea711c 100644 --- a/io-engine/src/rebuild/rebuild_job.rs +++ b/io-engine/src/rebuild/rebuild_job.rs @@ -4,13 +4,8 @@ use chrono::Utc; use futures::channel::oneshot; use super::{ - HistoryRecord, - RebuildError, - RebuildJobBackendManager, - RebuildJobRequest, - RebuildState, - RebuildStates, - RebuildStats, + HistoryRecord, RebuildError, RebuildJobBackendManager, RebuildJobRequest, RebuildState, + RebuildStates, RebuildStats, }; use crate::{ core::{Reactors, ReadOptions, VerboseError}, @@ -121,10 +116,7 @@ impl RebuildJob { /// Creates a new RebuildJob taking a specific backend implementation and /// running the generic backend manager. - pub(super) fn from_manager( - manager: &RebuildJobManager, - desc: &RebuildDescriptor, - ) -> Self { + pub(super) fn from_manager(manager: &RebuildJobManager, desc: &RebuildDescriptor) -> Self { Self { src_uri: desc.src_uri.to_string(), dst_uri: desc.dst_uri.to_string(), @@ -137,9 +129,7 @@ impl RebuildJob { /// Schedules the job to start in a future and returns a complete channel /// which can be waited on. - pub async fn start( - &self, - ) -> Result, RebuildError> { + pub async fn start(&self) -> Result, RebuildError> { self.exec_client_op(RebuildOperation::Start)?; self.add_completion_listener() } @@ -280,19 +270,12 @@ impl RebuildJob { } /// Internal operations can bypass previous pending operations. - fn exec_internal_op( - &self, - op: RebuildOperation, - ) -> Result<(), RebuildError> { + fn exec_internal_op(&self, op: RebuildOperation) -> Result<(), RebuildError> { self.exec_op(op, true) } /// Single state machine where all operations are handled. 
- fn exec_op( - &self, - op: RebuildOperation, - override_pending: bool, - ) -> Result<(), RebuildError> { + fn exec_op(&self, op: RebuildOperation, override_pending: bool) -> Result<(), RebuildError> { let wake_up = self.states.write().exec_op(op, override_pending)?; if wake_up { self.wake_up(); @@ -314,9 +297,7 @@ impl RebuildJob { }); } - fn add_completion_listener( - &self, - ) -> Result, RebuildError> { + fn add_completion_listener(&self) -> Result, RebuildError> { let (sender, receiver) = oneshot::channel(); let list = match self.complete_chan.upgrade() { None => Err(RebuildError::BackendGone), diff --git a/io-engine/src/rebuild/rebuild_job_backend.rs b/io-engine/src/rebuild/rebuild_job_backend.rs index d7c6f39f7..bd0d8f6e5 100644 --- a/io-engine/src/rebuild/rebuild_job_backend.rs +++ b/io-engine/src/rebuild/rebuild_job_backend.rs @@ -10,12 +10,7 @@ use crossbeam::channel::{unbounded, Receiver, Sender}; use futures::{channel::oneshot, FutureExt, StreamExt}; use super::{ - RebuildDescriptor, - RebuildError, - RebuildState, - RebuildStates, - RebuildStats, - RebuildTasks, + RebuildDescriptor, RebuildError, RebuildState, RebuildStates, RebuildStats, RebuildTasks, TaskResult, }; @@ -39,25 +34,18 @@ pub(super) struct RebuildFBendChan { impl RebuildFBendChan { fn new() -> Self { let (sender, receiver) = async_channel::unbounded(); - Self { - sender, - receiver, - } + Self { sender, receiver } } async fn recv(&mut self) -> Option { self.receiver.recv().await.ok() } /// Get a clone of the receive channel. - pub(super) fn recv_clone( - &self, - ) -> async_channel::Receiver { + pub(super) fn recv_clone(&self) -> async_channel::Receiver { self.receiver.clone() } /// Get a clone of the send channel. - pub(super) fn sender_clone( - &self, - ) -> async_channel::Sender { + pub(super) fn sender_clone(&self) -> async_channel::Sender { self.sender.clone() } } @@ -66,9 +54,7 @@ impl RebuildFBendChan { /// A rebuild backend must implement this trait allowing it to /// be used by the `RebuildJobManager`. #[async_trait::async_trait(?Send)] -pub(super) trait RebuildBackend: - std::fmt::Debug + std::fmt::Display -{ +pub(super) trait RebuildBackend: std::fmt::Debug + std::fmt::Display { /// Callback for rebuild state change notifications. fn on_state_change(&mut self); @@ -99,8 +85,7 @@ pub(super) struct RebuildJobManager { /// Current state of the rebuild job. pub(super) states: Arc>, /// Channel list which allows the await of the rebuild. - pub(super) complete_chan: - Arc>>>, + pub(super) complete_chan: Arc>>>, /// Channel to share information between frontend and backend. pub(super) info_chan: RebuildFBendChan, /// Job serial number. @@ -167,10 +152,7 @@ impl RebuildJobManager { serial, } } - pub fn into_backend( - self, - backend: impl RebuildBackend + 'static, - ) -> RebuildJobBackendManager { + pub fn into_backend(self, backend: impl RebuildBackend + 'static) -> RebuildJobBackendManager { RebuildJobBackendManager { manager: self, backend: Box::new(backend), @@ -284,10 +266,7 @@ impl RebuildJobBackendManager { } /// Internal operations can bypass previous pending operations. 
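// Editor's sketch, not part of the patch: client and internal operations both
// funnel through a single guarded transition, and only internal operations
// may override a pending one. A toy rendition of that rule, with illustrative
// names and a trivial state set:
#[derive(Clone, Copy, PartialEq, Debug)]
enum MiniState {
    Init,
    Running,
    Completed,
}

struct MiniStates {
    current: MiniState,
    pending: Option<MiniState>,
}

impl MiniStates {
    /// Returns whether the backend must be woken up to act on the change.
    fn exec_op(&mut self, next: MiniState, override_pending: bool) -> Result<bool, String> {
        if self.pending.is_some() && !override_pending {
            return Err("an operation is already pending".to_string());
        }
        self.pending = Some(next);
        Ok(self.current == MiniState::Running)
    }
}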
- fn exec_internal_op( - &self, - op: super::RebuildOperation, - ) -> Result { + fn exec_internal_op(&self, op: super::RebuildOperation) -> Result { self.states.write().exec_op(op, true) } @@ -305,8 +284,7 @@ impl RebuildJobBackendManager { ); let blocks_transferred = std::cmp::min( - self.task_pool().segments_transferred - * descriptor.segment_size_blks, + self.task_pool().segments_transferred * descriptor.segment_size_blks, blocks_total, ); @@ -354,9 +332,7 @@ impl RebuildJobBackendManager { "{self}: failed to wait for {active} rebuild tasks \ due to task channel failure" ); - self.fail_with(RebuildError::RebuildTasksChannel { - active, - }); + self.fail_with(RebuildError::RebuildTasksChannel { active }); } fn task_pool(&self) -> &RebuildTasks { self.backend.task_pool() @@ -372,7 +348,7 @@ impl RebuildJobBackendManager { self.task_pool().active ); - for n in 0 .. self.task_pool().total { + for n in 0..self.task_pool().total { if !self.start_task_by_id(n) { break; } @@ -468,10 +444,7 @@ impl RebuildJobBackendManager { /// Handles a request messages replying to it if necessary. /// Returns false if the message was empty (ie the frontend is gone) - async fn handle_message( - &mut self, - message: Option, - ) -> bool { + async fn handle_message(&mut self, message: Option) -> bool { match message { Some(RebuildJobRequest::WakeUp) => {} Some(RebuildJobRequest::GetStats(reply)) => { diff --git a/io-engine/src/rebuild/rebuild_state.rs b/io-engine/src/rebuild/rebuild_state.rs index 7dde6c9de..6006aa035 100644 --- a/io-engine/src/rebuild/rebuild_state.rs +++ b/io-engine/src/rebuild/rebuild_state.rs @@ -182,9 +182,7 @@ impl RebuildStates { } }, RebuildOperation::Complete => match self.current { - S::Init | S::Paused | S::Stopped | S::Failed | S::Completed => { - Err(e) - } + S::Init | S::Paused | S::Stopped | S::Failed | S::Completed => Err(e), S::Running => { self.set_pending(S::Completed, override_pending)?; Ok(false) diff --git a/io-engine/src/rebuild/rebuild_task.rs b/io-engine/src/rebuild/rebuild_task.rs index d68733b48..c922dfa1b 100644 --- a/io-engine/src/rebuild/rebuild_task.rs +++ b/io-engine/src/rebuild/rebuild_task.rs @@ -40,10 +40,7 @@ pub(super) struct RebuildTask { } impl RebuildTask { - pub(super) fn new( - buffer: DmaBuf, - sender: mpsc::Sender, - ) -> Self { + pub(super) fn new(buffer: DmaBuf, sender: mpsc::Sender) -> Self { Self { buffer, sender, @@ -114,14 +111,11 @@ impl RebuildTasks { /// Create a rebuild tasks pool for the given rebuild descriptor. /// Each task can be schedule to run concurrently, and each task /// gets its own `DmaBuf` from where it reads and writes from. - pub(super) fn new( - task_count: usize, - desc: &RebuildDescriptor, - ) -> Result { + pub(super) fn new(task_count: usize, desc: &RebuildDescriptor) -> Result { // only sending one message per channel at a time so we don't need // the extra buffer let channel = mpsc::channel(0); - let tasks = (0 .. task_count).map(|_| { + let tasks = (0..task_count).map(|_| { let buffer = desc.dma_malloc(SEGMENT_SIZE)?; let task = RebuildTask::new(buffer, channel.0.clone()); Ok(Arc::new(Mutex::new(task))) @@ -204,11 +198,7 @@ pub(super) trait RebuildTaskCopier { fn descriptor(&self) -> &RebuildDescriptor; /// Copies an entire segment at the given block address, from source to /// target using a `DmaBuf`. 
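// Editor's sketch, not part of the patch: RebuildTasks::new above hands every
// task its own DMA buffer plus a clone of one zero-capacity result channel,
// so each task has at most one result in flight. The shape of that
// construction, with Vec<u8> standing in (hypothetically) for DmaBuf:
use futures::channel::mpsc;
use std::sync::{Arc, Mutex};

struct MiniTask {
    buffer: Vec<u8>,
    sender: mpsc::Sender<u64>,
}

fn make_tasks(count: usize, buf_len: usize) -> (Vec<Arc<Mutex<MiniTask>>>, mpsc::Receiver<u64>) {
    let (tx, rx) = mpsc::channel(0); // no buffering beyond one slot per sender
    let tasks = (0..count)
        .map(|_| {
            Arc::new(Mutex::new(MiniTask {
                buffer: vec![0u8; buf_len],
                sender: tx.clone(),
            }))
        })
        .collect();
    (tasks, rx)
}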
- async fn copy_segment( - &self, - blk: u64, - task: &mut RebuildTask, - ) -> Result; + async fn copy_segment(&self, blk: u64, task: &mut RebuildTask) -> Result; } #[async_trait::async_trait(?Send)] @@ -218,11 +208,7 @@ impl RebuildTaskCopier for RebuildDescriptor { } /// Copies one segment worth of data from source into destination. - async fn copy_segment( - &self, - blk: u64, - task: &mut RebuildTask, - ) -> Result { + async fn copy_segment(&self, blk: u64, task: &mut RebuildTask) -> Result { task.copy_one(blk, self).await } } diff --git a/io-engine/src/rebuild/rebuilders.rs b/io-engine/src/rebuild/rebuilders.rs index 7d3f2eae1..2bbaf242f 100644 --- a/io-engine/src/rebuild/rebuilders.rs +++ b/io-engine/src/rebuild/rebuilders.rs @@ -3,8 +3,7 @@ use crate::{ rebuild::{ rebuild_descriptor::RebuildDescriptor, rebuild_task::{RebuildTask, RebuildTaskCopier}, - RebuildError, - RebuildMap, + RebuildError, RebuildMap, }, }; use bit_vec::BitVec; @@ -42,9 +41,7 @@ impl FullRebuild { let desc = copier.descriptor(); let range = desc.range.clone(); Self { - range: PeekableIterator::new( - range.step_by(desc.segment_size_blks as usize), - ), + range: PeekableIterator::new(range.step_by(desc.segment_size_blks as usize)), copier: Rc::new(copier), } } @@ -144,16 +141,12 @@ impl PartialSeqRebuild { let desc = copier.descriptor(); let range = desc.range.clone(); Self { - range: PeekableIterator::new( - range.step_by(desc.segment_size_blks as usize), - ), + range: PeekableIterator::new(range.step_by(desc.segment_size_blks as usize)), copier: Rc::new(PartialSeqCopier::new(map, copier)), } } } -impl RangeRebuilder> - for PartialSeqRebuild -{ +impl RangeRebuilder> for PartialSeqRebuild { fn next(&mut self) -> Option { self.range.next() } @@ -209,11 +202,7 @@ impl RebuildTaskCopier for PartialSeqCopier { } /// Copies one segment worth of data from source into destination. - async fn copy_segment( - &self, - blk: u64, - task: &mut RebuildTask, - ) -> Result { + async fn copy_segment(&self, blk: u64, task: &mut RebuildTask) -> Result { if self.is_blk_sync(blk) { return Ok(false); } diff --git a/io-engine/src/rebuild/snapshot_rebuild.rs b/io-engine/src/rebuild/snapshot_rebuild.rs index d095690c6..40125fd3b 100644 --- a/io-engine/src/rebuild/snapshot_rebuild.rs +++ b/io-engine/src/rebuild/snapshot_rebuild.rs @@ -8,9 +8,7 @@ use crate::{ core::{Bdev, LogicalVolume, Reactors, ReadOptions, SegmentMap}, gen_rebuild_instances, rebuild::{ - bdev_rebuild::BdevRebuildJobBuilder, - rebuild_error::SnapshotRebuildError, - BdevRebuildJob, + bdev_rebuild::BdevRebuildJobBuilder, rebuild_error::SnapshotRebuildError, BdevRebuildJob, }, }; @@ -170,8 +168,7 @@ impl SnapshotRebuildJobBuilder { let lvol = Bdev::lookup_by_uuid_str(uri) .ok_or(SnapshotRebuildError::LocalBdevNotFound {}) .and_then(|bdev| { - crate::lvs::Lvol::try_from(bdev) - .map_err(|_| SnapshotRebuildError::NotAReplica {}) + crate::lvs::Lvol::try_from(bdev).map_err(|_| SnapshotRebuildError::NotAReplica {}) })?; Ok(lvol) } @@ -247,8 +244,7 @@ impl SnapshotRebuildJob { /// Helps create a `Self` using a builder: `SnapshotRebuildJobBuilder`. pub fn builder() -> SnapshotRebuildJobBuilder { SnapshotRebuildJobBuilder::builder().with_option( - RebuildJobOptions::default() - .with_read_opts(ReadOptions::CurrentUnwrittenFail), + RebuildJobOptions::default().with_read_opts(ReadOptions::CurrentUnwrittenFail), ) } /// Get a list of all snapshot rebuild jobs. 
diff --git a/io-engine/src/replica_backend.rs b/io-engine/src/replica_backend.rs index 2c1076c72..c85d15f48 100644 --- a/io-engine/src/replica_backend.rs +++ b/io-engine/src/replica_backend.rs @@ -1,15 +1,7 @@ use super::pool_backend::{Error, GenericError, PoolBackend}; use crate::core::{ - snapshot::SnapshotDescriptor, - BdevStater, - BdevStats, - CloneParams, - LogicalVolume, - Protocol, - PtplProps, - SnapshotParams, - UntypedBdev, - UpdateProps, + snapshot::SnapshotDescriptor, BdevStater, BdevStats, CloneParams, LogicalVolume, Protocol, + PtplProps, SnapshotParams, UntypedBdev, UpdateProps, }; use std::{fmt::Debug, ops::Deref}; @@ -19,13 +11,9 @@ use std::{fmt::Debug, ops::Deref}; /// specific options to be passed as parameters. /// A `Replica` is also a `LogicalVolume` and also has `Share` traits. #[async_trait::async_trait(?Send)] -pub trait ReplicaOps: - LogicalVolume + BdevStater -{ +pub trait ReplicaOps: LogicalVolume + BdevStater { fn shared(&self) -> Option; - fn create_ptpl( - &self, - ) -> Result, crate::pool_backend::Error>; + fn create_ptpl(&self) -> Result, crate::pool_backend::Error>; /// Shares the replica via nvmf. async fn share_nvmf( @@ -41,22 +29,15 @@ pub trait ReplicaOps: ) -> Result<(), crate::pool_backend::Error>; /// Resize the replica to the given new size. - async fn resize( - &mut self, - size: u64, - ) -> Result<(), crate::pool_backend::Error>; + async fn resize(&mut self, size: u64) -> Result<(), crate::pool_backend::Error>; /// Set the replica's entity id. - async fn set_entity_id( - &mut self, - id: String, - ) -> Result<(), crate::pool_backend::Error>; + async fn set_entity_id(&mut self, id: String) -> Result<(), crate::pool_backend::Error>; /// Destroy the replica from its parent pool. /// # Warning /// Destroying implies unsharing, which might fail for some reason, example /// if the target is in a bad state, or if IOs are stuck. /// todo: return back `Self` in case of an error. - async fn destroy(self: Box) - -> Result<(), crate::pool_backend::Error>; + async fn destroy(self: Box) -> Result<(), crate::pool_backend::Error>; /// Snapshot Operations /// @@ -68,13 +49,7 @@ pub trait ReplicaOps: txn_id: &str, snap_uuid: &str, ) -> Option { - SnapshotParams::prepare( - snap_name, - entity_id, - txn_id, - snap_uuid, - self.uuid(), - ) + SnapshotParams::prepare(snap_name, entity_id, txn_id, snap_uuid, self.uuid()) } /// Create a snapshot using the given parameters and yields an object which /// implements `SnapshotOps`. In turn this can be used to create clones, @@ -92,9 +67,7 @@ pub trait ReplicaOps: #[async_trait::async_trait(?Send)] pub trait SnapshotOps: LogicalVolume + Debug { /// Destroys the snapshot itself. - async fn destroy_snapshot( - self: Box, - ) -> Result<(), crate::pool_backend::Error>; + async fn destroy_snapshot(self: Box) -> Result<(), crate::pool_backend::Error>; /// Prepares a clone config for creating a clone from a snapshot. fn prepare_clone_config( @@ -178,10 +151,7 @@ impl FindReplicaArgs { #[async_trait::async_trait(?Send)] pub trait IReplicaFactory { /// If the bdev is a `ReplicaOps`, move and retrieve it as a `ReplicaOps`. - fn bdev_as_replica( - &self, - bdev: crate::core::UntypedBdev, - ) -> Option>; + fn bdev_as_replica(&self, bdev: crate::core::UntypedBdev) -> Option>; /// Finds the replica specified by the arguments, returning None if it /// cannot be found. async fn find( @@ -222,10 +192,7 @@ pub struct ReplicaBdevStats { impl ReplicaBdevStats { /// Create a new `Self` from the given parts. 
pub fn new(stats: BdevStats, entity_id: Option<String>) -> Self { - Self { - stats, - entity_id, - } + Self { stats, entity_id } } } @@ -247,9 +214,7 @@ pub struct FindSnapshotArgs { impl FindSnapshotArgs { /// Create new `Self`. pub fn new(uuid: String) -> Self { - Self { - uuid, - } + Self { uuid } } } @@ -271,18 +236,12 @@ impl ReplicaFactory { /// Returns the factory for the given backend kind. pub fn new(backend: PoolBackend) -> Self { Self(match backend { - PoolBackend::Lvs => { - Box::<ReplLvsFactory>::default() as _ - } - PoolBackend::Lvm => { - Box::<ReplLvmFactory>::default() as _ - } + PoolBackend::Lvs => Box::<ReplLvsFactory>::default() as _, + PoolBackend::Lvm => Box::<ReplLvmFactory>::default() as _, }) } /// Get the given bdev as a `ReplicaOps`. - pub(crate) fn bdev_as_replica( - bdev: crate::core::UntypedBdev, - ) -> Option<Box<dyn ReplicaOps>> { + pub(crate) fn bdev_as_replica(bdev: crate::core::UntypedBdev) -> Option<Box<dyn ReplicaOps>> { for factory in Self::factories() { if let Some(replica) = factory.as_factory().bdev_as_replica(bdev) { return Some(replica); @@ -291,9 +250,7 @@ impl ReplicaFactory { None } /// Probe backends for the given name and/or uuid and return the right one. - pub async fn find( - args: &FindReplicaArgs, - ) -> Result<Box<dyn ReplicaOps>, Error> { + pub async fn find(args: &FindReplicaArgs) -> Result<Box<dyn ReplicaOps>, Error> { let mut error = None; for factory in Self::factories() { diff --git a/io-engine/src/sleep.rs b/io-engine/src/sleep.rs index 22e01ac93..0985b4247 100644 --- a/io-engine/src/sleep.rs +++ b/io-engine/src/sleep.rs @@ -13,9 +13,7 @@ pub fn mayastor_sleep(duration: Duration) -> oneshot::Receiver<()> { tokio::time::sleep(duration).await; let rx = Reactor::spawn_at_primary(async move { if tx.send(()).is_err() { - tracing::error!( - "Failed to send completion for Mayastor sleep." - ); + tracing::error!("Failed to send completion for Mayastor sleep."); } }) .unwrap(); diff --git a/io-engine/src/store/etcd.rs b/io-engine/src/store/etcd.rs index 1c33ab6a7..d470c2e03 100644 --- a/io-engine/src/store/etcd.rs +++ b/io-engine/src/store/etcd.rs @@ -1,18 +1,8 @@ //! Implementation of an etcd key-value store. use crate::store::store_defs::{ - Connect, - Delete, - DeserialiseValue, - Get, - Put, - SerialiseValue, - Store, - StoreError, - StoreError::MissingEntry, - StoreKey, - StoreValue, - ValueString, + Connect, Delete, DeserialiseValue, Get, Put, SerialiseValue, Store, StoreError, + StoreError::MissingEntry, StoreKey, StoreValue, ValueString, }; use async_trait::async_trait; use etcd_client::Client; @@ -60,19 +50,16 @@ impl Store for Etcd { } /// 'Get' the value for the given key from etcd. - async fn get_kv<K: StoreKey>( - &mut self, - key: &K, - ) -> Result<Value, StoreError> { + async fn get_kv<K: StoreKey>(&mut self, key: &K) -> Result<Value, StoreError> { let resp = self.0.get(key.to_string(), None).await.context(Get { key: key.to_string(), })?; match resp.kvs().first() { - Some(kv) => Ok(serde_json::from_slice(kv.value()).context( - DeserialiseValue { + Some(kv) => Ok( + serde_json::from_slice(kv.value()).context(DeserialiseValue { value: kv.value_str().context(ValueString {})?, - }, - )?), + })?, + ), None => Err(MissingEntry { key: key.to_string(), }), @@ -80,10 +67,7 @@ impl Store for Etcd { } /// 'Delete' the entry with the given key from etcd.
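// Editor's sketch, not part of the patch: the get_kv hunk above boils down to
// "fetch the first kv for the key, then serde-deserialise its raw bytes".
// A stripped-down version against the same etcd-client API, with the snafu
// error context replaced by Option for brevity:
async fn get_json(
    client: &mut etcd_client::Client,
    key: &str,
) -> Option<serde_json::Value> {
    let resp = client.get(key, None).await.ok()?; // point lookup, no options
    let kv = resp.kvs().first()?;                 // first (and only) match
    serde_json::from_slice(kv.value()).ok()       // raw bytes -> JSON value
}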
- async fn delete_kv( - &mut self, - key: &K, - ) -> Result<(), StoreError> { + async fn delete_kv(&mut self, key: &K) -> Result<(), StoreError> { self.0.delete(key.to_string(), None).await.context(Delete { key: key.to_string(), })?; diff --git a/io-engine/src/store/store_defs.rs b/io-engine/src/store/store_defs.rs index 2c3e50543..fc0bc8d68 100644 --- a/io-engine/src/store/store_defs.rs +++ b/io-engine/src/store/store_defs.rs @@ -26,9 +26,9 @@ pub enum StoreError { }, /// Failed to wait for 'put' operation. #[snafu(display( - "Failed to wait for 'put' operation to complete for key {} and value {:?}.", - key, - value, + "Failed to wait for 'put' operation to complete for key {} and value {:?}.", + key, + value, ))] PutWait { key: String, @@ -36,17 +36,10 @@ pub enum StoreError { source: futures::channel::oneshot::Canceled, }, /// Failed to 'get' an entry from the store. - #[snafu(display( - "Failed to 'get' entry with key {}. Error {}", - key, - source - ))] + #[snafu(display("Failed to 'get' entry with key {}. Error {}", key, source))] Get { key: String, source: Error }, /// Failed to wait for 'get' operation. - #[snafu(display( - "Failed to wait for 'get' operation to complete for key {}.", - key, - ))] + #[snafu(display("Failed to wait for 'get' operation to complete for key {}.", key,))] GetWait { key: String, source: futures::channel::oneshot::Canceled, @@ -55,27 +48,16 @@ pub enum StoreError { #[snafu(display("Entry with key {} not found.", key))] MissingEntry { key: String }, /// Failed to 'delete' an entry from the store. - #[snafu(display( - "Failed to 'delete' entry with key {}. Error {}", - key, - source - ))] + #[snafu(display("Failed to 'delete' entry with key {}. Error {}", key, source))] Delete { key: String, source: Error }, /// Failed to wait for 'delete' operation. - #[snafu(display( - "Failed to wait for 'delete' operation to complete for key {}.", - key, - ))] + #[snafu(display("Failed to wait for 'delete' operation to complete for key {}.", key,))] DeleteWait { key: String, source: futures::channel::oneshot::Canceled, }, /// Failed to 'watch' an entry in the store. - #[snafu(display( - "Failed to 'watch' entry with key {}. Error {}", - key, - source - ))] + #[snafu(display("Failed to 'watch' entry with key {}. Error {}", key, source))] Watch { key: String, source: Error }, /// Empty key. #[snafu(display("Failed to get key as string. Error {}", source))] @@ -84,11 +66,7 @@ pub enum StoreError { #[snafu(display("Failed to get value as string. Error {}", source))] ValueString { source: Error }, /// Failed to deserialise value. - #[snafu(display( - "Failed to deserialise value {}. Error {}", - value, - source - ))] + #[snafu(display("Failed to deserialise value {}. Error {}", value, source))] DeserialiseValue { value: String, source: SerdeError }, /// Failed to serialise value. #[snafu(display("Failed to serialise value. Error {}", source))] @@ -116,16 +94,10 @@ pub trait Store: Sync + Send + Clone { ) -> Result<(), StoreError>; /// Get an entry from the store. - async fn get_kv( - &mut self, - key: &K, - ) -> Result; + async fn get_kv(&mut self, key: &K) -> Result; /// Delete an entry from the store. - async fn delete_kv( - &mut self, - key: &K, - ) -> Result<(), StoreError>; + async fn delete_kv(&mut self, key: &K) -> Result<(), StoreError>; /// Identify whether or not the store is online. 
async fn online(&mut self) -> bool; diff --git a/io-engine/src/subsys/config/mod.rs b/io-engine/src/subsys/config/mod.rs index 90531b613..fcb9ad503 100644 --- a/io-engine/src/subsys/config/mod.rs +++ b/io-engine/src/subsys/config/mod.rs @@ -13,23 +13,14 @@ use once_cell::sync::OnceCell; use serde::{Deserialize, Serialize}; use snafu::Snafu; use spdk_rs::libspdk::{ - spdk_json_write_ctx, - spdk_json_write_val_raw, - spdk_subsystem, - spdk_subsystem_fini_next, + spdk_json_write_ctx, spdk_json_write_val_raw, spdk_subsystem, spdk_subsystem_fini_next, spdk_subsystem_init_next, }; use crate::{ jsonrpc::{jsonrpc_register, Code, RpcErrorCode}, subsys::config::opts::{ - BdevOpts, - GetOpts, - IoBufOpts, - NexusOpts, - NvmeBdevOpts, - NvmfTgtConfig, - PosixSocketOpts, + BdevOpts, GetOpts, IoBufOpts, NexusOpts, NvmeBdevOpts, NvmfTgtConfig, PosixSocketOpts, }, }; @@ -69,8 +60,11 @@ impl ConfigSubsystem { error!("error writing config file {} {}", target, e); } } else { - warn!("request to save config file but no source file was given, guess \ - you have to scribble it down yourself {}", '\u{1f609}'); + warn!( + "request to save config file but no source file was given, guess \ + you have to scribble it down yourself {}", + '\u{1f609}' + ); } Ok(()) }; @@ -93,11 +87,7 @@ impl ConfigSubsystem { }; unsafe { - spdk_json_write_val_raw( - w, - data.as_ptr() as *const _, - data.as_bytes().len() as u64, - ); + spdk_json_write_val_raw(w, data.as_ptr() as *const _, data.as_bytes().len() as u64); } } diff --git a/io-engine/src/subsys/config/opts.rs b/io-engine/src/subsys/config/opts.rs index aef697fc9..b6a84db65 100644 --- a/io-engine/src/subsys/config/opts.rs +++ b/io-engine/src/subsys/config/opts.rs @@ -8,20 +8,10 @@ use serde::{Deserialize, Serialize}; use spdk_rs::{ ffihelper::copy_str_with_null, libspdk::{ - bdev_nvme_get_opts, - bdev_nvme_set_opts, - spdk_bdev_get_opts, - spdk_bdev_nvme_opts, - spdk_bdev_opts, - spdk_bdev_set_opts, - spdk_iobuf_get_opts, - spdk_iobuf_opts, - spdk_iobuf_set_opts, - spdk_nvmf_target_opts, - spdk_nvmf_transport_opts, - spdk_sock_impl_get_opts, - spdk_sock_impl_opts, - spdk_sock_impl_set_opts, + bdev_nvme_get_opts, bdev_nvme_set_opts, spdk_bdev_get_opts, spdk_bdev_nvme_opts, + spdk_bdev_opts, spdk_bdev_set_opts, spdk_iobuf_get_opts, spdk_iobuf_opts, + spdk_iobuf_set_opts, spdk_nvmf_target_opts, spdk_nvmf_transport_opts, + spdk_sock_impl_get_opts, spdk_sock_impl_opts, spdk_sock_impl_set_opts, }, struct_size_init, }; @@ -96,15 +86,11 @@ pub enum NvmfTgtTransport { impl NvmfTransportOpts { /// Tweak a few opts more suited for rdma. 
fn for_rdma(mut self) -> Self { - self.in_capsule_data_size = try_from_env( - "NVMF_RDMA_IN_CAPSULE_DATA_SIZE", - self.in_capsule_data_size, - ); + self.in_capsule_data_size = + try_from_env("NVMF_RDMA_IN_CAPSULE_DATA_SIZE", self.in_capsule_data_size); self.io_unit_size = try_from_env("NVMF_RDMA_IO_UNIT_SIZE", 8192); // SPDK_NVMF_RDMA_MIN_IO_BUFFER_SIZE - self.data_wr_pool_size = - try_from_env("NVMF_RDMA_DATA_WR_POOL_SIZE", 4095); // SPDK_NVMF_RDMA_DEFAULT_DATA_WR_POOL_SIZE - self.num_shared_buf = - try_from_env("NVMF_RDMA_NUM_SHARED_BUF", self.num_shared_buf); + self.data_wr_pool_size = try_from_env("NVMF_RDMA_DATA_WR_POOL_SIZE", 4095); // SPDK_NVMF_RDMA_DEFAULT_DATA_WR_POOL_SIZE + self.num_shared_buf = try_from_env("NVMF_RDMA_NUM_SHARED_BUF", self.num_shared_buf); self } } @@ -271,8 +257,7 @@ where if in_units == 0 && !value.is_zero() { Err(format!("must be at least 1{}", unit.units())) } else { - T::try_from(unit.value(value)) - .map_err(|error| error.to_string()) + T::try_from(unit.value(value)).map_err(|error| error.to_string()) } } Err(error) => Err(error.to_string()), @@ -299,10 +284,7 @@ impl Default for NvmfTransportOpts { in_capsule_data_size: 4096, max_io_size: 131_072, io_unit_size: 131_072, - max_qpairs_per_ctrl: try_from_env( - "NVMF_TCP_MAX_QPAIRS_PER_CTRL", - 32, - ), + max_qpairs_per_ctrl: try_from_env("NVMF_TCP_MAX_QPAIRS_PER_CTRL", 32), num_shared_buf: try_from_env("NVMF_TCP_NUM_SHARED_BUF", 2047), buf_cache_size: try_from_env("NVMF_TCP_BUF_CACHE_SIZE", 64), dif_insert_or_strip: false, @@ -399,9 +381,7 @@ pub struct NvmeBdevOpts { impl GetOpts for NvmeBdevOpts { fn get(&self) -> Self { let opts: spdk_bdev_nvme_opts = unsafe { zeroed() }; - unsafe { - bdev_nvme_get_opts(&opts as *const _ as *mut spdk_bdev_nvme_opts) - }; + unsafe { bdev_nvme_get_opts(&opts as *const _ as *mut spdk_bdev_nvme_opts) }; opts.into() } @@ -421,21 +401,13 @@ impl Default for NvmeBdevOpts { fn default() -> Self { Self { action_on_timeout: 4, - timeout_us: time_try_from_env( - "NVME_TIMEOUT", - 5_000_000, - TimeUnit::MicroSeconds, - ), + timeout_us: time_try_from_env("NVME_TIMEOUT", 5_000_000, TimeUnit::MicroSeconds), timeout_admin_us: time_try_from_env( "NVME_TIMEOUT_ADMIN", 5_000_000, TimeUnit::MicroSeconds, ), - keep_alive_timeout_ms: time_try_from_env( - "NVME_KATO", - 10_000, - TimeUnit::MilliSeconds, - ), + keep_alive_timeout_ms: time_try_from_env("NVME_KATO", 10_000, TimeUnit::MilliSeconds), transport_retry_count: try_from_env("NVME_RETRY_COUNT", 0), arbitration_burst: 0, low_priority_weight: 0, @@ -631,14 +603,8 @@ impl Default for PosixSocketOpts { enable_zero_copy_send: true, enable_quickack: try_from_env("SOCK_ENABLE_QUICKACK", true), enable_placement_id: try_from_env("SOCK_ENABLE_PLACEMENT_ID", 0), - enable_zerocopy_send_server: try_from_env( - "SOCK_ZEROCOPY_SEND_SERVER", - true, - ), - enable_zerocopy_send_client: try_from_env( - "SOCK_ZEROCOPY_SEND_CLIENT", - false, - ), + enable_zerocopy_send_server: try_from_env("SOCK_ZEROCOPY_SEND_SERVER", true), + enable_zerocopy_send_client: try_from_env("SOCK_ZEROCOPY_SEND_CLIENT", false), zerocopy_threshold: 0, } } diff --git a/io-engine/src/subsys/config/pool.rs b/io-engine/src/subsys/config/pool.rs index 0269dfeeb..da919cc31 100644 --- a/io-engine/src/subsys/config/pool.rs +++ b/io-engine/src/subsys/config/pool.rs @@ -114,9 +114,7 @@ impl PoolConfig { /// Capture current pool configuration pub fn capture() -> PoolConfig { let pools = LvsBdev::iter().map(Pool::from).collect(); - PoolConfig { - pools: Some(pools), - } + PoolConfig { pools: 
Some(pools) } } /// Create pools specified in this configuration @@ -126,11 +124,7 @@ impl PoolConfig { for pool in pools.iter() { info!("creating pool {}", pool.name); if let Err(error) = create_pool(pool.into()).await { - error!( - "failed to create pool {}: {}", - pool.name, - error.verbose() - ); + error!("failed to create pool {}: {}", pool.name, error.verbose()); failures += 1; } } @@ -212,9 +206,7 @@ struct Replica { share: Option, } -async fn create_pool( - args: PoolArgs, -) -> Result { +async fn create_pool(args: PoolArgs) -> Result { if args.disks.is_empty() { return Err(Status::invalid_argument("Missing devices")); } diff --git a/io-engine/src/subsys/mod.rs b/io-engine/src/subsys/mod.rs index 381f1230f..2b949f76b 100644 --- a/io-engine/src/subsys/mod.rs +++ b/io-engine/src/subsys/mod.rs @@ -4,29 +4,16 @@ pub use config::{ opts::{NexusOpts, NvmeBdevOpts}, pool::PoolConfig, - Config, - ConfigSubsystem, + Config, ConfigSubsystem, }; pub use nvmf::{ - set_snapshot_time, - Error as NvmfError, - NvmeCpl, - NvmfReq, - NvmfSubsystem, - SubType, + set_snapshot_time, Error as NvmfError, NvmeCpl, NvmfReq, NvmfSubsystem, SubType, Target as NvmfTarget, }; -use spdk_rs::libspdk::{ - spdk_add_subsystem, - spdk_add_subsystem_depend, - spdk_subsystem_depend, -}; +use spdk_rs::libspdk::{spdk_add_subsystem, spdk_add_subsystem_depend, spdk_subsystem_depend}; use std::mem::zeroed; -pub use registration::{ - registration_grpc::Registration, - RegistrationSubsystem, -}; +pub use registration::{registration_grpc::Registration, RegistrationSubsystem}; use crate::subsys::nvmf::Nvmf; diff --git a/io-engine/src/subsys/nvmf/admin_cmd.rs b/io-engine/src/subsys/nvmf/admin_cmd.rs index dcda1af51..5b1e8dd6e 100644 --- a/io-engine/src/subsys/nvmf/admin_cmd.rs +++ b/io-engine/src/subsys/nvmf/admin_cmd.rs @@ -17,25 +17,12 @@ use crate::{ }; use spdk_rs::{ libspdk::{ - nvme_cmd_cdw10_get, - nvme_cmd_cdw11_get, - nvme_status_get, - spdk_bdev, - spdk_bdev_desc, - spdk_io_channel, - spdk_nvme_cmd, - spdk_nvme_cpl, - spdk_nvme_status, - spdk_nvmf_bdev_ctrlr_nvme_passthru_admin, - spdk_nvmf_request, - spdk_nvmf_request_complete, - spdk_nvmf_request_copy_to_buf, - spdk_nvmf_request_get_bdev, - spdk_nvmf_request_get_cmd, - spdk_nvmf_request_get_response, - spdk_nvmf_request_get_subsystem, - spdk_nvmf_set_custom_admin_cmd_hdlr, - spdk_nvmf_subsystem_get_max_nsid, + nvme_cmd_cdw10_get, nvme_cmd_cdw11_get, nvme_status_get, spdk_bdev, spdk_bdev_desc, + spdk_io_channel, spdk_nvme_cmd, spdk_nvme_cpl, spdk_nvme_status, + spdk_nvmf_bdev_ctrlr_nvme_passthru_admin, spdk_nvmf_request, spdk_nvmf_request_complete, + spdk_nvmf_request_copy_to_buf, spdk_nvmf_request_get_bdev, spdk_nvmf_request_get_cmd, + spdk_nvmf_request_get_response, spdk_nvmf_request_get_subsystem, + spdk_nvmf_set_custom_admin_cmd_hdlr, spdk_nvmf_subsystem_get_max_nsid, }, nvme_admin_opc, }; @@ -62,10 +49,7 @@ impl NvmfReq { /// Returns the NVMe completion pub fn response(&self) -> NvmeCpl { NvmeCpl( - NonNull::new(unsafe { - &mut *spdk_nvmf_request_get_response(self.0.as_ptr()) - }) - .unwrap(), + NonNull::new(unsafe { &mut *spdk_nvmf_request_get_response(self.0.as_ptr()) }).unwrap(), ) } @@ -120,19 +104,15 @@ pub fn set_snapshot_time(cmd: &mut spdk_nvme_cmd) -> u64 { } /// Decode snapshot information from incoming NVMe admin command data. 
-fn decode_snapshot_params( - req: *mut spdk_nvmf_request, -) -> Option { +fn decode_snapshot_params(req: *mut spdk_nvmf_request) -> Option { const ITEM_SZ: usize = std::mem::size_of::(); let mut val: Vec = Vec::with_capacity(ITEM_SZ * 2); let encoded_msg = unsafe { - let bytes_copied = spdk_nvmf_request_copy_to_buf( - req, - val.as_mut_ptr() as _, - val.capacity() as u64, - ) as usize; + let bytes_copied = + spdk_nvmf_request_copy_to_buf(req, val.as_mut_ptr() as _, val.capacity() as u64) + as usize; info!( "## length = {}, iov_cnt = {}, size = {}", @@ -188,9 +168,7 @@ extern "C" fn nvmf_create_snapshot_hdlr(req: *mut spdk_nvmf_request) -> i32 { let mut bdev: *mut spdk_bdev = std::ptr::null_mut(); let mut desc: *mut spdk_bdev_desc = std::ptr::null_mut(); let mut ch: *mut spdk_io_channel = std::ptr::null_mut(); - let rc = unsafe { - spdk_nvmf_request_get_bdev(1, req, &mut bdev, &mut desc, &mut ch) - }; + let rc = unsafe { spdk_nvmf_request_get_bdev(1, req, &mut bdev, &mut desc, &mut ch) }; if rc != 0 { /* No bdev found for this namespace. Continue. */ debug!("no bdev found"); @@ -201,9 +179,7 @@ extern "C" fn nvmf_create_snapshot_hdlr(req: *mut spdk_nvmf_request) -> i32 { if bd.driver() == nexus::NEXUS_MODULE_NAME { // Received command on a published Nexus set_snapshot_time(unsafe { &mut *spdk_nvmf_request_get_cmd(req) }); - unsafe { - spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, None) - } + unsafe { spdk_nvmf_bdev_ctrlr_nvme_passthru_admin(bdev, desc, ch, req, None) } } else { // Received command on a shared replica (lvol) let nvmf_req = NvmfReq(NonNull::new(req).unwrap()); @@ -215,11 +191,7 @@ extern "C" fn nvmf_create_snapshot_hdlr(req: *mut spdk_nvmf_request) -> i32 { } } -async fn create_remote_snapshot( - bdev: UntypedBdev, - params: SnapshotParams, - nvmf_req: NvmfReq, -) { +async fn create_remote_snapshot(bdev: UntypedBdev, params: SnapshotParams, nvmf_req: NvmfReq) { let Some(mut replica_ops) = ReplicaFactory::bdev_as_replica(bdev) else { debug!("unsupported bdev driver"); nvmf_req.complete_error(nix::errno::Errno::ENOTSUP as i32); diff --git a/io-engine/src/subsys/nvmf/mod.rs b/io-engine/src/subsys/nvmf/mod.rs index dc31fd816..55e95c0d8 100644 --- a/io-engine/src/subsys/nvmf/mod.rs +++ b/io-engine/src/subsys/nvmf/mod.rs @@ -15,11 +15,7 @@ use snafu::Snafu; pub use admin_cmd::{set_snapshot_time, NvmeCpl, NvmfReq}; use poll_groups::PollGroup; -use spdk_rs::libspdk::{ - spdk_subsystem, - spdk_subsystem_fini_next, - spdk_subsystem_init_next, -}; +use spdk_rs::libspdk::{spdk_subsystem, spdk_subsystem_fini_next, spdk_subsystem_init_next}; pub use subsystem::{NvmfSubsystem, SubType}; pub use target::Target; @@ -54,21 +50,13 @@ impl RpcErrorCode for Error { pub enum Error { #[snafu(display("Failed to create nvmf target {}", msg))] CreateTarget { msg: String }, - #[snafu(display( - "Failed to destroy nvmf target {}: {}", - endpoint, - source - ))] + #[snafu(display("Failed to destroy nvmf target {}: {}", endpoint, source))] DestroyTarget { source: Errno, endpoint: String }, #[snafu(display("Failed to create poll groups {}", msg))] PgError { msg: String }, #[snafu(display("Failed to create transport {}", msg))] Transport { source: Errno, msg: String }, - #[snafu(display( - "Failed to {} subsystem '{}': subsystem is busy", - op, - nqn - ))] + #[snafu(display("Failed to {} subsystem '{}': subsystem is busy", op, nqn))] SubsystemBusy { nqn: String, op: String }, #[snafu(display("Failed nvmf subsystem operation for {} {} error: {}", source.desc(), nqn, msg))] Subsystem { diff 
--git a/io-engine/src/subsys/nvmf/poll_groups.rs b/io-engine/src/subsys/nvmf/poll_groups.rs index c56638ac1..8cbf76d28 100644 --- a/io-engine/src/subsys/nvmf/poll_groups.rs +++ b/io-engine/src/subsys/nvmf/poll_groups.rs @@ -1,8 +1,4 @@ -use spdk_rs::libspdk::{ - spdk_nvmf_poll_group, - spdk_nvmf_poll_group_create, - spdk_nvmf_tgt, -}; +use spdk_rs::libspdk::{spdk_nvmf_poll_group, spdk_nvmf_poll_group_create, spdk_nvmf_tgt}; use crate::core::Mthread; diff --git a/io-engine/src/subsys/nvmf/subsystem.rs b/io-engine/src/subsys/nvmf/subsystem.rs index 22b7dc76e..0a2ae1642 100644 --- a/io-engine/src/subsys/nvmf/subsystem.rs +++ b/io-engine/src/subsys/nvmf/subsystem.rs @@ -11,54 +11,26 @@ use nix::errno::Errno; use spdk_rs::{ libspdk::{ - nvmf_subsystem_find_listener, - nvmf_subsystem_set_cntlid_range, - spdk_nvmf_ctrlr_set_cpl_error_cb, - spdk_nvmf_ns_get_bdev, - spdk_nvmf_ns_opts, - spdk_nvmf_request, - spdk_nvmf_subsystem, - spdk_nvmf_subsystem_add_host, - spdk_nvmf_subsystem_add_listener, - spdk_nvmf_subsystem_add_ns_ext, - spdk_nvmf_subsystem_create, - spdk_nvmf_subsystem_destroy, - spdk_nvmf_subsystem_disconnect_host, - spdk_nvmf_subsystem_event, - spdk_nvmf_subsystem_get_first, - spdk_nvmf_subsystem_get_first_host, - spdk_nvmf_subsystem_get_first_listener, - spdk_nvmf_subsystem_get_first_ns, - spdk_nvmf_subsystem_get_next, - spdk_nvmf_subsystem_get_next_host, - spdk_nvmf_subsystem_get_next_listener, - spdk_nvmf_subsystem_get_nqn, - spdk_nvmf_subsystem_listener_get_trid, - spdk_nvmf_subsystem_pause, - spdk_nvmf_subsystem_remove_host, - spdk_nvmf_subsystem_remove_ns, - spdk_nvmf_subsystem_resume, - spdk_nvmf_subsystem_set_allow_any_host, - spdk_nvmf_subsystem_set_ana_reporting, - spdk_nvmf_subsystem_set_ana_state, - spdk_nvmf_subsystem_set_event_cb, - spdk_nvmf_subsystem_set_mn, - spdk_nvmf_subsystem_set_sn, - spdk_nvmf_subsystem_start, - spdk_nvmf_subsystem_state_change_done, - spdk_nvmf_subsystem_stop, - spdk_nvmf_tgt, - spdk_nvmf_tgt_get_transport, - SPDK_NVME_SCT_GENERIC, - SPDK_NVME_SC_CAPACITY_EXCEEDED, - SPDK_NVME_SC_RESERVATION_CONFLICT, - SPDK_NVMF_SUBTYPE_DISCOVERY, - SPDK_NVMF_SUBTYPE_NVME, + nvmf_subsystem_find_listener, nvmf_subsystem_set_cntlid_range, + spdk_nvmf_ctrlr_set_cpl_error_cb, spdk_nvmf_ns_get_bdev, spdk_nvmf_ns_opts, + spdk_nvmf_request, spdk_nvmf_subsystem, spdk_nvmf_subsystem_add_host, + spdk_nvmf_subsystem_add_listener, spdk_nvmf_subsystem_add_ns_ext, + spdk_nvmf_subsystem_create, spdk_nvmf_subsystem_destroy, + spdk_nvmf_subsystem_disconnect_host, spdk_nvmf_subsystem_event, + spdk_nvmf_subsystem_get_first, spdk_nvmf_subsystem_get_first_host, + spdk_nvmf_subsystem_get_first_listener, spdk_nvmf_subsystem_get_first_ns, + spdk_nvmf_subsystem_get_next, spdk_nvmf_subsystem_get_next_host, + spdk_nvmf_subsystem_get_next_listener, spdk_nvmf_subsystem_get_nqn, + spdk_nvmf_subsystem_listener_get_trid, spdk_nvmf_subsystem_pause, + spdk_nvmf_subsystem_remove_host, spdk_nvmf_subsystem_remove_ns, spdk_nvmf_subsystem_resume, + spdk_nvmf_subsystem_set_allow_any_host, spdk_nvmf_subsystem_set_ana_reporting, + spdk_nvmf_subsystem_set_ana_state, spdk_nvmf_subsystem_set_event_cb, + spdk_nvmf_subsystem_set_mn, spdk_nvmf_subsystem_set_sn, spdk_nvmf_subsystem_start, + spdk_nvmf_subsystem_state_change_done, spdk_nvmf_subsystem_stop, spdk_nvmf_tgt, + spdk_nvmf_tgt_get_transport, SPDK_NVME_SCT_GENERIC, SPDK_NVME_SC_CAPACITY_EXCEEDED, + SPDK_NVME_SC_RESERVATION_CONFLICT, SPDK_NVMF_SUBTYPE_DISCOVERY, SPDK_NVMF_SUBTYPE_NVME, }, - struct_size_init, - NvmeStatus, - NvmfController, - 
NvmfSubsystemEvent, + struct_size_init, NvmeStatus, NvmfController, NvmfSubsystemEvent, }; use crate::{ @@ -73,8 +45,7 @@ use crate::{ make_subsystem_serial, nvmf::{ transport::{TransportId, RDMA_TRANSPORT}, - Error, - NVMF_TGT, + Error, NVMF_TGT, }, Config, }, @@ -119,9 +90,7 @@ impl IntoIterator for NvmfSubsystem { fn into_iter(self) -> Self::IntoIter { NVMF_TGT.with(|t| { - NvmfSubsystemIterator(unsafe { - spdk_nvmf_subsystem_get_first(t.borrow().tgt.as_ptr()) - }) + NvmfSubsystemIterator(unsafe { spdk_nvmf_subsystem_get_first(t.borrow().tgt.as_ptr()) }) }) } } @@ -354,11 +323,7 @@ impl NvmfSubsystem { nex.rm_initiator(&ctrlr.hostnqn()); unsafe { - spdk_nvmf_ctrlr_set_cpl_error_cb( - ctrlr.0.as_ptr(), - None, - std::ptr::null_mut(), - ); + spdk_nvmf_ctrlr_set_cpl_error_cb(ctrlr.0.as_ptr(), None, std::ptr::null_mut()); } } @@ -426,11 +391,7 @@ impl NvmfSubsystem { ); unsafe { - spdk_nvmf_ctrlr_set_cpl_error_cb( - ctrlr.0.as_ptr(), - None, - std::ptr::null_mut(), - ); + spdk_nvmf_ctrlr_set_cpl_error_cb(ctrlr.0.as_ptr(), None, std::ptr::null_mut()); } } @@ -450,14 +411,7 @@ impl NvmfSubsystem { let ss = NVMF_TGT .with(|t| { let tgt = t.borrow().tgt.as_ptr(); - unsafe { - spdk_nvmf_subsystem_create( - tgt, - nqn.as_ptr(), - SPDK_NVMF_SUBTYPE_NVME, - 1, - ) - } + unsafe { spdk_nvmf_subsystem_create(tgt, nqn.as_ptr(), SPDK_NVMF_SUBTYPE_NVME, 1) } }) .to_result(|_| Error::Subsystem { source: Errno::EEXIST, @@ -482,30 +436,29 @@ impl NvmfSubsystem { make_sn(uuid) }; - unsafe { spdk_nvmf_subsystem_set_sn(ss.as_ptr(), sn.as_ptr()) } - .to_result(|e| Error::Subsystem { + unsafe { spdk_nvmf_subsystem_set_sn(ss.as_ptr(), sn.as_ptr()) }.to_result(|e| { + Error::Subsystem { source: Errno::from_raw(e), nqn: uuid.into(), msg: "failed to set serial".into(), - })?; + } + })?; let mn = CString::new(NVME_CONTROLLER_MODEL_ID).unwrap(); - unsafe { spdk_nvmf_subsystem_set_mn(ss.as_ptr(), mn.as_ptr()) } - .to_result(|e| Error::Subsystem { + unsafe { spdk_nvmf_subsystem_set_mn(ss.as_ptr(), mn.as_ptr()) }.to_result(|e| { + Error::Subsystem { source: Errno::from_raw(e), nqn: uuid.into(), msg: "failed to set model number".into(), - })?; + } + })?; Ok(NvmfSubsystem(ss)) } /// unfortunately, we cannot always use the bdev UUID which is a shame and /// mostly due to testing. 
- pub fn new_with_uuid( - uuid: &str, - bdev: &UntypedBdev, - ) -> Result { + pub fn new_with_uuid(uuid: &str, bdev: &UntypedBdev) -> Result { let ss = NvmfSubsystem::new(uuid)?; ss.set_ana_reporting(false)?; ss.allow_any(false); @@ -538,9 +491,7 @@ impl NvmfSubsystem { ); let bdev_cname = CString::new(bdev.name()).unwrap(); - let ptpl = ptpl.map(|ptpl| { - CString::new(ptpl.to_string_lossy().to_string()).unwrap() - }); + let ptpl = ptpl.map(|ptpl| CString::new(ptpl.to_string_lossy().to_string()).unwrap()); let ptpl_ptr = match &ptpl { Some(ptpl) => ptpl.as_ptr(), None => ptr::null_mut(), @@ -622,17 +573,14 @@ impl NvmfSubsystem { pub fn allowed_hosts(&self) -> Vec { let mut hosts = Vec::with_capacity(4); - let mut host = - unsafe { spdk_nvmf_subsystem_get_first_host(self.0.as_ptr()) }; + let mut host = unsafe { spdk_nvmf_subsystem_get_first_host(self.0.as_ptr()) }; while !host.is_null() { let host_str = unsafe { (*host).nqn.as_str() }; hosts.push(host_str.to_string()); - host = unsafe { - spdk_nvmf_subsystem_get_next_host(self.0.as_ptr(), host) - }; + host = unsafe { spdk_nvmf_subsystem_get_next_host(self.0.as_ptr(), host) }; } hosts @@ -644,10 +592,7 @@ impl NvmfSubsystem { /// /// It does not disconnect non-registered hosts, eg: hosts which /// were connected before the allowed_hosts was configured. - pub async fn set_allowed_hosts>( - &self, - hosts: &[H], - ) -> Result<(), Error> { + pub async fn set_allowed_hosts>(&self, hosts: &[H]) -> Result<(), Error> { if hosts.is_empty() { return Ok(()); } @@ -655,8 +600,7 @@ impl NvmfSubsystem { let hosts = hosts.iter().map(AsRef::as_ref).collect::>(); self.allow_hosts(&hosts)?; - let mut host = - unsafe { spdk_nvmf_subsystem_get_first_host(self.0.as_ptr()) }; + let mut host = unsafe { spdk_nvmf_subsystem_get_first_host(self.0.as_ptr()) }; let mut hosts_to_disconnect = vec![]; { @@ -669,9 +613,7 @@ impl NvmfSubsystem { if !hosts.contains(&host_str) { hosts_to_disconnect.push(host_str.to_string()); } - host = unsafe { - spdk_nvmf_subsystem_get_next_host(self.0.as_ptr(), host) - }; + host = unsafe { spdk_nvmf_subsystem_get_next_host(self.0.as_ptr(), host) }; } } @@ -697,11 +639,7 @@ impl NvmfSubsystem { pub fn allow_host(&self, host: &str) -> Result<(), Error> { let host = Self::cstr(host)?; unsafe { - spdk_nvmf_subsystem_add_host( - self.0.as_ptr(), - host.as_ptr(), - std::ptr::null_mut(), - ) + spdk_nvmf_subsystem_add_host(self.0.as_ptr(), host.as_ptr(), std::ptr::null_mut()) } .to_result(|errno| Error::Subsystem { source: Errno::from_raw(errno), @@ -721,14 +659,13 @@ impl NvmfSubsystem { /// Disallow a host from connecting to the subsystem. 
pub fn disallow_host(&self, host: &str) -> Result<(), Error> { let host = Self::cstr(host)?; - unsafe { - spdk_nvmf_subsystem_remove_host(self.0.as_ptr(), host.as_ptr()) - } - .to_result(|errno| Error::Subsystem { - source: Errno::from_raw(errno), - nqn: self.get_nqn(), - msg: format!("failed to remove allowed host: {host:?}"), - })?; + unsafe { spdk_nvmf_subsystem_remove_host(self.0.as_ptr(), host.as_ptr()) }.to_result( + |errno| Error::Subsystem { + source: Errno::from_raw(errno), + nqn: self.get_nqn(), + msg: format!("failed to remove allowed host: {host:?}"), + }, + )?; Ok(()) } @@ -750,13 +687,13 @@ impl NvmfSubsystem { ); } - r.await.expect("done_cb callback gone").to_result(|error| { - Error::Subsystem { + r.await + .expect("done_cb callback gone") + .to_result(|error| Error::Subsystem { source: Errno::from_raw(error), msg: "Failed to disconnect host".to_string(), nqn: host.to_owned(), - } - }) + }) } /// enable Asymmetric Namespace Access (ANA) reporting @@ -771,43 +708,29 @@ impl NvmfSubsystem { return Ok(()); } } - unsafe { - spdk_nvmf_subsystem_set_ana_reporting(self.0.as_ptr(), enable) - } - .to_result(|e| Error::Subsystem { - source: Errno::from_raw(e), - nqn: self.get_nqn(), - msg: format!("failed to set ANA reporting, enable {enable}"), - })?; + unsafe { spdk_nvmf_subsystem_set_ana_reporting(self.0.as_ptr(), enable) }.to_result( + |e| Error::Subsystem { + source: Errno::from_raw(e), + nqn: self.get_nqn(), + msg: format!("failed to set ANA reporting, enable {enable}"), + }, + )?; Ok(()) } /// set controller ID range - pub fn set_cntlid_range( - &self, - cntlid_min: u16, - cntlid_max: u16, - ) -> Result<(), Error> { - unsafe { - nvmf_subsystem_set_cntlid_range( - self.0.as_ptr(), - cntlid_min, - cntlid_max, - ) - } - .to_result(|e| Error::Subsystem { - source: Errno::from_raw(e), - nqn: self.get_nqn(), - msg: format!("failed to set controller ID range [{cntlid_min}, {cntlid_max}]"), - })?; + pub fn set_cntlid_range(&self, cntlid_min: u16, cntlid_max: u16) -> Result<(), Error> { + unsafe { nvmf_subsystem_set_cntlid_range(self.0.as_ptr(), cntlid_min, cntlid_max) } + .to_result(|e| Error::Subsystem { + source: Errno::from_raw(e), + nqn: self.get_nqn(), + msg: format!("failed to set controller ID range [{cntlid_min}, {cntlid_max}]"), + })?; Ok(()) } // we currently allow all listeners to the subsystem - async fn add_listener( - &self, - transport: NvmfTgtTransport, - ) -> Result<(), Error> { + async fn add_listener(&self, transport: NvmfTgtTransport) -> Result<(), Error> { extern "C" fn listen_cb(arg: *mut c_void, status: i32) { let s = unsafe { Box::from_raw(arg as *mut oneshot::Sender) }; s.send(status).unwrap(); @@ -816,8 +739,7 @@ impl NvmfSubsystem { let cfg = Config::get(); // dont yet enable both ports, IOW just add one transportID now - let trid_replica = - TransportId::new(cfg.nexus_opts.nvmf_replica_port, transport); + let trid_replica = TransportId::new(cfg.nexus_opts.nvmf_replica_port, transport); let (s, r) = oneshot::channel::(); unsafe { @@ -829,23 +751,19 @@ impl NvmfSubsystem { ); } - r.await.expect("listener callback gone").to_result(|e| { - Error::Transport { + r.await + .expect("listener callback gone") + .to_result(|e| Error::Transport { source: Errno::from_raw(e), msg: "Failed to add listener".to_string(), - } - }) + }) } /// TODO async fn change_state( &self, op: &str, - f: impl Fn( - *mut spdk_nvmf_subsystem, - spdk_nvmf_subsystem_state_change_done, - *mut c_void, - ) -> i32, + f: impl Fn(*mut spdk_nvmf_subsystem, spdk_nvmf_subsystem_state_change_done, *mut 
c_void) -> i32, ) -> Result<(), Error> { extern "C" fn state_change_cb( _ss: *mut spdk_nvmf_subsystem, @@ -879,11 +797,9 @@ impl NvmfSubsystem { n ); - crate::sleep::mayastor_sleep(std::time::Duration::from_millis( - 100, - )) - .await - .unwrap(); + crate::sleep::mayastor_sleep(std::time::Duration::from_millis(100)) + .await + .unwrap(); }; match rc { @@ -921,17 +837,17 @@ impl NvmfSubsystem { // Only attempt rdma listener addition for this subsystem after making // sure the Mayastor nvmf tgt has rdma transport created. if need_rdma && self.nvmf_tgt_has_rdma_xprt() { - let _ = - self.add_listener(NvmfTgtTransport::Rdma) - .await - .map_err(|e| { - warn!( - "NvmfSubsystem RDMA listener add failed {}. \ + let _ = self + .add_listener(NvmfTgtTransport::Rdma) + .await + .map_err(|e| { + warn!( + "NvmfSubsystem RDMA listener add failed {}. \ Subsystem will be accessible over TCP only.\ {:?}", - e, self - ); - }); + e, self + ); + }); } if let Err(e) = self @@ -987,13 +903,10 @@ impl NvmfSubsystem { /// as today? pub async fn get_ana_state(&self) -> Result { let cfg = Config::get(); - let trid_replica = TransportId::new( - cfg.nexus_opts.nvmf_replica_port, - NvmfTgtTransport::Tcp, - ); - let listener = unsafe { - nvmf_subsystem_find_listener(self.0.as_ptr(), trid_replica.as_ptr()) - }; + let trid_replica = + TransportId::new(cfg.nexus_opts.nvmf_replica_port, NvmfTgtTransport::Tcp); + let listener = + unsafe { nvmf_subsystem_find_listener(self.0.as_ptr(), trid_replica.as_ptr()) }; if listener.is_null() { Err(Error::Listener { nqn: self.get_nqn(), @@ -1056,9 +969,8 @@ impl NvmfSubsystem { /// stop all subsystems pub async fn stop_all(tgt: *mut spdk_nvmf_tgt) { - let subsystem = unsafe { - NonNull::new(spdk_nvmf_subsystem_get_first(tgt)).map(NvmfSubsystem) - }; + let subsystem = + unsafe { NonNull::new(spdk_nvmf_subsystem_get_first(tgt)).map(NvmfSubsystem) }; if let Some(subsystem) = subsystem { for s in subsystem.into_iter() { @@ -1076,9 +988,7 @@ impl NvmfSubsystem { /// Get the first subsystem within the system pub fn first() -> Option { NVMF_TGT.with(|t| { - let ss = unsafe { - spdk_nvmf_subsystem_get_first(t.borrow().tgt.as_ptr()) - }; + let ss = unsafe { spdk_nvmf_subsystem_get_first(t.borrow().tgt.as_ptr()) }; if ss.is_null() { None @@ -1112,10 +1022,7 @@ impl NvmfSubsystem { fn nvmf_tgt_has_rdma_xprt(&self) -> bool { NVMF_TGT.with(|t| { let transport = unsafe { - spdk_nvmf_tgt_get_transport( - t.borrow().tgt.as_ptr(), - RDMA_TRANSPORT.as_ptr(), - ) + spdk_nvmf_tgt_get_transport(t.borrow().tgt.as_ptr(), RDMA_TRANSPORT.as_ptr()) }; !transport.is_null() }) @@ -1123,26 +1030,22 @@ impl NvmfSubsystem { fn listeners_to_vec(&self) -> Option> { unsafe { - let mut listener = - spdk_nvmf_subsystem_get_first_listener(self.0.as_ptr()); + let mut listener = spdk_nvmf_subsystem_get_first_listener(self.0.as_ptr()); if listener.is_null() { return None; } - let mut ids = vec![TransportId( - *spdk_nvmf_subsystem_listener_get_trid(listener), - )]; + let mut ids = vec![TransportId(*spdk_nvmf_subsystem_listener_get_trid( + listener, + ))]; loop { - listener = spdk_nvmf_subsystem_get_next_listener( - self.0.as_ptr(), - listener, - ); + listener = spdk_nvmf_subsystem_get_next_listener(self.0.as_ptr(), listener); if !listener.is_null() { - ids.push(TransportId( - *spdk_nvmf_subsystem_listener_get_trid(listener), - )); + ids.push(TransportId(*spdk_nvmf_subsystem_listener_get_trid( + listener, + ))); continue; } else { break; @@ -1201,9 +1104,7 @@ impl NqnTarget<'_> { for b in bdev.into_iter() { match b.driver() { 
NEXUS_MODULE_NAME if b.name() == name => { - return Self::Nexus(unsafe { - Nexus::unsafe_from_untyped_bdev(*b) - }); + return Self::Nexus(unsafe { Nexus::unsafe_from_untyped_bdev(*b) }); } "lvol" if b.name() == name => { return Lvol::try_from(b).map_or(Self::None, Self::Replica) diff --git a/io-engine/src/subsys/nvmf/target.rs b/io-engine/src/subsys/nvmf/target.rs index 9e38f4c7e..abcc7bdd6 100644 --- a/io-engine/src/subsys/nvmf/target.rs +++ b/io-engine/src/subsys/nvmf/target.rs @@ -8,22 +8,11 @@ use std::{ use nix::errno::Errno; use spdk_rs::libspdk::{ - spdk_env_get_core_count, - spdk_nvmf_listen_opts, - spdk_nvmf_listen_opts_init, - spdk_nvmf_poll_group_destroy, - spdk_nvmf_subsystem_create, - spdk_nvmf_subsystem_set_mn, - spdk_nvmf_target_opts, - spdk_nvmf_tgt, - spdk_nvmf_tgt_create, - spdk_nvmf_tgt_destroy, - spdk_nvmf_tgt_listen_ext, - spdk_nvmf_tgt_stop_listen, - spdk_subsystem_fini_next, - spdk_subsystem_init_next, - SPDK_NVMF_DISCOVERY_NQN, - SPDK_NVMF_SUBTYPE_DISCOVERY, + spdk_env_get_core_count, spdk_nvmf_listen_opts, spdk_nvmf_listen_opts_init, + spdk_nvmf_poll_group_destroy, spdk_nvmf_subsystem_create, spdk_nvmf_subsystem_set_mn, + spdk_nvmf_target_opts, spdk_nvmf_tgt, spdk_nvmf_tgt_create, spdk_nvmf_tgt_destroy, + spdk_nvmf_tgt_listen_ext, spdk_nvmf_tgt_stop_listen, spdk_subsystem_fini_next, + spdk_subsystem_init_next, SPDK_NVMF_DISCOVERY_NQN, SPDK_NVMF_SUBTYPE_DISCOVERY, }; use crate::{ @@ -36,8 +25,7 @@ use crate::{ poll_groups::PollGroup, subsystem::NvmfSubsystem, transport::{self, get_ipv4_address, TransportId}, - Error, - NVMF_PGS, + Error, NVMF_PGS, }, Config, }, @@ -114,11 +102,9 @@ impl Target { /// initialize the target and advance states fn init(&mut self) -> Result<()> { let cfg = Config::get(); - let tgt_ptr: Box = - cfg.nvmf_tgt_conf.clone().into(); + let tgt_ptr: Box = cfg.nvmf_tgt_conf.clone().into(); - let tgt = - unsafe { spdk_nvmf_tgt_create(&*tgt_ptr as *const _ as *mut _) }; + let tgt = unsafe { spdk_nvmf_tgt_create(&*tgt_ptr as *const _ as *mut _) }; if tgt.is_null() { return Err(Error::CreateTarget { msg: "tgt pointer is None".to_string(), @@ -195,10 +181,9 @@ impl Target { /// init the poll groups per core fn init_poll_groups(&self) { Reactors::iter().for_each(|r| { - if let Some(t) = Mthread::new( - format!("mayastor_nvmf_tcp_pg_core_{}", r.core()), - r.core(), - ) { + if let Some(t) = + Mthread::new(format!("mayastor_nvmf_tcp_pg_core_{}", r.core()), r.core()) + { r.send_future(Self::create_poll_group(self.tgt.as_ptr(), t)); } }); @@ -214,9 +199,7 @@ impl Target { let mut tgt = tgt.borrow_mut(); NVMF_PGS.with(|p| p.borrow_mut().push(pg)); tgt.poll_group_count += 1; - if tgt.poll_group_count - == unsafe { spdk_env_get_core_count() as u16 } - { + if tgt.poll_group_count == unsafe { spdk_env_get_core_count() as u16 } { Reactors::master().send_future(async { NVMF_TGT.with(|tgt| { tgt.borrow_mut().next_state(); @@ -232,10 +215,7 @@ impl Target { /// replica port i.e. NVMF_PORT_REPLICA. 
fn listen(&mut self) -> Result<()> { let cfg = Config::get(); - let trid_nexus = TransportId::new( - cfg.nexus_opts.nvmf_nexus_port, - NvmfTgtTransport::Tcp, - ); + let trid_nexus = TransportId::new(cfg.nexus_opts.nvmf_nexus_port, NvmfTgtTransport::Tcp); let mut opts = spdk_nvmf_listen_opts { opts_size: 0, transport_specific: null(), @@ -249,13 +229,8 @@ impl Target { std::mem::size_of::() as u64, ); } - let rc = unsafe { - spdk_nvmf_tgt_listen_ext( - self.tgt.as_ptr(), - trid_nexus.as_ptr(), - &mut opts, - ) - }; + let rc = + unsafe { spdk_nvmf_tgt_listen_ext(self.tgt.as_ptr(), trid_nexus.as_ptr(), &mut opts) }; if rc != 0 { return Err(Error::CreateTarget { @@ -263,16 +238,10 @@ impl Target { }); } - let trid_replica = TransportId::new( - cfg.nexus_opts.nvmf_replica_port, - NvmfTgtTransport::Tcp, - ); + let trid_replica = + TransportId::new(cfg.nexus_opts.nvmf_replica_port, NvmfTgtTransport::Tcp); let rc = unsafe { - spdk_nvmf_tgt_listen_ext( - self.tgt.as_ptr(), - trid_replica.as_ptr(), - &mut opts, - ) + spdk_nvmf_tgt_listen_ext(self.tgt.as_ptr(), trid_replica.as_ptr(), &mut opts) }; if rc != 0 { @@ -311,10 +280,7 @@ impl Target { /// replica port i.e. NVMF_PORT_REPLICA. fn listen_rdma(&mut self) -> Result<()> { let cfg = Config::get(); - let trid_nexus = TransportId::new( - cfg.nexus_opts.nvmf_nexus_port, - NvmfTgtTransport::Rdma, - ); + let trid_nexus = TransportId::new(cfg.nexus_opts.nvmf_nexus_port, NvmfTgtTransport::Rdma); let mut opts = spdk_nvmf_listen_opts { opts_size: 0, transport_specific: null(), @@ -328,13 +294,8 @@ impl Target { std::mem::size_of::() as u64, ); } - let rc = unsafe { - spdk_nvmf_tgt_listen_ext( - self.tgt.as_ptr(), - trid_nexus.as_ptr(), - &mut opts, - ) - }; + let rc = + unsafe { spdk_nvmf_tgt_listen_ext(self.tgt.as_ptr(), trid_nexus.as_ptr(), &mut opts) }; if rc != 0 { return Err(Error::CreateTarget { @@ -342,16 +303,10 @@ impl Target { }); } - let trid_replica = TransportId::new( - cfg.nexus_opts.nvmf_replica_port, - NvmfTgtTransport::Rdma, - ); + let trid_replica = + TransportId::new(cfg.nexus_opts.nvmf_replica_port, NvmfTgtTransport::Rdma); let rc = unsafe { - spdk_nvmf_tgt_listen_ext( - self.tgt.as_ptr(), - trid_replica.as_ptr(), - &mut opts, - ) + spdk_nvmf_tgt_listen_ext(self.tgt.as_ptr(), trid_replica.as_ptr(), &mut opts) }; if rc != 0 { @@ -382,15 +337,13 @@ impl Target { }; let mn = CString::new(NVME_CONTROLLER_MODEL_ID).unwrap(); - unsafe { - spdk_nvmf_subsystem_set_mn(discovery.0.as_ptr(), mn.as_ptr()) - } - .to_result(|e| Error::Subsystem { - source: Errno::from_raw(e), - nqn: "discovery".into(), - msg: "failed to set serial".into(), - }) - .unwrap(); + unsafe { spdk_nvmf_subsystem_set_mn(discovery.0.as_ptr(), mn.as_ptr()) } + .to_result(|e| Error::Subsystem { + source: Errno::from_raw(e), + nqn: "discovery".into(), + msg: "failed to set serial".into(), + }) + .unwrap(); discovery.allow_any(true); discovery @@ -436,10 +389,8 @@ impl Target { t.borrow().iter().for_each(|pg| { trace!("destroying pg: {:?}", pg); unsafe { - pg.thread.send_msg_unsafe( - pg_destroy, - Box::into_raw(Box::new(pg.clone())) as *mut _, - ); + pg.thread + .send_msg_unsafe(pg_destroy, Box::into_raw(Box::new(pg.clone())) as *mut _); } }); }) @@ -481,14 +432,10 @@ impl Target { ); } else { let cfg = Config::get(); - let trid_nexus = TransportId::new( - cfg.nexus_opts.nvmf_nexus_port, - NvmfTgtTransport::Tcp, - ); - let trid_replica = TransportId::new( - cfg.nexus_opts.nvmf_replica_port, - NvmfTgtTransport::Tcp, - ); + let trid_nexus = + 
TransportId::new(cfg.nexus_opts.nvmf_nexus_port, NvmfTgtTransport::Tcp); + let trid_replica = + TransportId::new(cfg.nexus_opts.nvmf_replica_port, NvmfTgtTransport::Tcp); let mut trid_vec = vec![trid_nexus, trid_replica]; // todo: handle by fetching current listeners dynamically here. // Since this is shutdown path we're good this way for @@ -505,19 +452,11 @@ impl Target { } for trid in trid_vec { - unsafe { - spdk_nvmf_tgt_stop_listen(self.tgt.as_ptr(), trid.as_ptr()) - }; + unsafe { spdk_nvmf_tgt_stop_listen(self.tgt.as_ptr(), trid.as_ptr()) }; } } - unsafe { - spdk_nvmf_tgt_destroy( - self.tgt.as_ptr(), - Some(destroy_cb), - std::ptr::null_mut(), - ) - } + unsafe { spdk_nvmf_tgt_destroy(self.tgt.as_ptr(), Some(destroy_cb), std::ptr::null_mut()) } } /// start the shutdown of the target and subsystems diff --git a/io-engine/src/subsys/nvmf/transport.rs b/io-engine/src/subsys/nvmf/transport.rs index 6e1267e74..3e7a1657e 100644 --- a/io-engine/src/subsys/nvmf/transport.rs +++ b/io-engine/src/subsys/nvmf/transport.rs @@ -11,12 +11,8 @@ use once_cell::sync::Lazy; use spdk_rs::{ ffihelper::{copy_cstr_with_null, copy_str_with_null}, libspdk::{ - spdk_nvme_transport_id, - spdk_nvmf_tgt_add_transport, - spdk_nvmf_transport_create, - SPDK_NVME_TRANSPORT_RDMA, - SPDK_NVME_TRANSPORT_TCP, - SPDK_NVMF_ADRFAM_IPV4, + spdk_nvme_transport_id, spdk_nvmf_tgt_add_transport, spdk_nvmf_transport_create, + SPDK_NVME_TRANSPORT_RDMA, SPDK_NVME_TRANSPORT_TCP, SPDK_NVMF_ADRFAM_IPV4, SPDK_NVMF_TRSVCID_MAX_LEN, }, }; @@ -31,18 +27,14 @@ use crate::{ }, }; -static TCP_TRANSPORT: Lazy = - Lazy::new(|| CString::new("TCP").unwrap()); +static TCP_TRANSPORT: Lazy = Lazy::new(|| CString::new("TCP").unwrap()); -pub static RDMA_TRANSPORT: Lazy = - Lazy::new(|| CString::new("RDMA").unwrap()); +pub static RDMA_TRANSPORT: Lazy = Lazy::new(|| CString::new("RDMA").unwrap()); pub async fn create_and_add_transports(add_rdma: bool) -> Result<(), Error> { let cfg = Config::get(); let mut opts = cfg.nvmf_tgt_conf.opts_tcp.into(); - let transport = unsafe { - spdk_nvmf_transport_create(TCP_TRANSPORT.as_ptr(), &mut opts) - }; + let transport = unsafe { spdk_nvmf_transport_create(TCP_TRANSPORT.as_ptr(), &mut opts) }; transport.to_result(|_| Error::Transport { source: Errno::UnknownErrno, @@ -67,9 +59,7 @@ pub async fn create_and_add_transports(add_rdma: bool) -> Result<(), Error> { if add_rdma { info!("Adding RDMA transport for Mayastor Nvmf target"); let mut opts = cfg.nvmf_tgt_conf.opts_rdma.into(); - let transport = unsafe { - spdk_nvmf_transport_create(RDMA_TRANSPORT.as_ptr(), &mut opts) - }; + let transport = unsafe { spdk_nvmf_transport_create(RDMA_TRANSPORT.as_ptr(), &mut opts) }; let ret = transport.to_result(|_| Error::Transport { source: Errno::UnknownErrno, @@ -125,9 +115,7 @@ impl TransportId { let address = get_ipv4_address().unwrap(); let (xprt_type, xprt_cstr) = match transport { NvmfTgtTransport::Tcp => (SPDK_NVME_TRANSPORT_TCP, &TCP_TRANSPORT), - NvmfTgtTransport::Rdma => { - (SPDK_NVME_TRANSPORT_RDMA, &RDMA_TRANSPORT) - } + NvmfTgtTransport::Rdma => (SPDK_NVME_TRANSPORT_RDMA, &RDMA_TRANSPORT), }; let mut trid = spdk_nvme_transport_id { @@ -185,8 +173,6 @@ impl Debug for TransportId { pub(crate) fn get_ipv4_address() -> Result { match MayastorEnvironment::get_nvmf_tgt_ip() { Ok(val) => Ok(val), - Err(msg) => Err(Error::CreateTarget { - msg, - }), + Err(msg) => Err(Error::CreateTarget { msg }), } } diff --git a/io-engine/src/subsys/registration/mod.rs b/io-engine/src/subsys/registration/mod.rs index 4b82917f0..73b1dfa81 
100644 --- a/io-engine/src/subsys/registration/mod.rs +++ b/io-engine/src/subsys/registration/mod.rs @@ -8,10 +8,7 @@ use crate::core::MayastorEnvironment; use http::Uri; use registration_grpc::Registration; use spdk_rs::libspdk::{ - spdk_add_subsystem, - spdk_subsystem, - spdk_subsystem_fini_next, - spdk_subsystem_init_next, + spdk_add_subsystem, spdk_subsystem, spdk_subsystem_fini_next, spdk_subsystem_init_next, }; use std::{convert::TryFrom, mem::zeroed}; @@ -71,8 +68,7 @@ impl RegistrationSubsystem { fn new() -> Self { info!("creating Mayastor registration subsystem..."); let ss = spdk_subsystem { - name: b"mayastor_grpc_registration\x00" as *const u8 - as *const libc::c_char, + name: b"mayastor_grpc_registration\x00" as *const u8 as *const libc::c_char, init: Some(Self::init), fini: Some(Self::fini), write_config_json: None, diff --git a/io-engine/src/subsys/registration/registration_grpc.rs b/io-engine/src/subsys/registration/registration_grpc.rs index 404d8ad30..57cbd7c8f 100644 --- a/io-engine/src/subsys/registration/registration_grpc.rs +++ b/io-engine/src/subsys/registration/registration_grpc.rs @@ -4,10 +4,7 @@ use crate::core::{MayastorBugFixes, MayastorFeatures}; use futures::{select, FutureExt, StreamExt}; use http::Uri; use io_engine_api::v1::registration::{ - registration_client, - ApiVersion as ApiVersionGrpc, - DeregisterRequest, - RegisterRequest, + registration_client, ApiVersion as ApiVersionGrpc, DeregisterRequest, RegisterRequest, }; use once_cell::sync::OnceCell; use std::{env, str::FromStr, time::Duration}; @@ -110,15 +107,11 @@ impl Registration { node: node.to_owned(), node_nqn: node_nqn.to_owned(), grpc_endpoint: grpc_endpoint.to_owned(), - hb_interval_sec: match env::var("MAYASTOR_HB_INTERVAL_SEC") - .map(|v| v.parse::()) - { + hb_interval_sec: match env::var("MAYASTOR_HB_INTERVAL_SEC").map(|v| v.parse::()) { Ok(Ok(num)) => Duration::from_secs(num), _ => HB_INTERVAL_SEC, }, - hb_timeout_sec: match env::var("MAYASTOR_HB_TIMEOUT_SEC") - .map(|v| v.parse::()) - { + hb_timeout_sec: match env::var("MAYASTOR_HB_TIMEOUT_SEC").map(|v| v.parse::()) { Ok(Ok(num)) => Duration::from_secs(num), _ => HB_TIMEOUT_SEC, }, @@ -156,8 +149,7 @@ impl Registration { /// Register a new node over rpc pub async fn register(&mut self) -> Result<(), tonic::Status> { let api_versions = self.config.api_versions.iter(); - let api_versions = - api_versions.map(|v| ApiVersionGrpc::from(*v) as i32); + let api_versions = api_versions.map(|v| ApiVersionGrpc::from(*v) as i32); let register = RegisterRequest { id: self.config.node.to_string(), grpc_endpoint: self.config.grpc_endpoint.clone(), diff --git a/io-engine/tests/block_device_nvmf.rs b/io-engine/tests/block_device_nvmf.rs index acebcac84..850e59959 100755 --- a/io-engine/tests/block_device_nvmf.rs +++ b/io-engine/tests/block_device_nvmf.rs @@ -8,27 +8,18 @@ use common::compose::{ mayastor::{BdevShareRequest, BdevUri, JsonRpcRequest, Null}, GrpcConnect, }, - Builder, - ComposeTest, - MayastorTest, + Builder, ComposeTest, MayastorTest, }; use io_engine::{ bdev::{device_create, device_destroy, device_lookup, device_open}, constants::NVME_NQN_PREFIX, - core::{ - BlockDevice, - BlockDeviceHandle, - DeviceEventType, - IoCompletionStatus, - MayastorCliArgs, - }, + core::{BlockDevice, BlockDeviceHandle, DeviceEventType, IoCompletionStatus, MayastorCliArgs}, subsys::{Config, NvmeBdevOpts}, }; use std::{ - slice, - str, + slice, str, sync::{ atomic::{AtomicBool, AtomicPtr, AtomicU64, Ordering}, Arc, @@ -263,12 +254,7 @@ fn 
clear_callback_invocation_flag() { fn flag_callback_invocation() { assert_eq!( - INVOCATION_FLAG.compare_exchange( - false, - true, - Ordering::Acquire, - Ordering::Relaxed, - ), + INVOCATION_FLAG.compare_exchange(false, true, Ordering::Acquire, Ordering::Relaxed,), Ok(false), "Callback is called more than once" ); @@ -276,12 +262,7 @@ fn flag_callback_invocation() { fn check_callback_invocation() { assert_eq!( - INVOCATION_FLAG.compare_exchange( - true, - false, - Ordering::Acquire, - Ordering::Relaxed, - ), + INVOCATION_FLAG.compare_exchange(true, false, Ordering::Acquire, Ordering::Relaxed,), Ok(true), "Callback has not been called" ); @@ -456,11 +437,7 @@ async fn nvmf_io_stats() { ); let mut ctx = IoCtx { - dma_buf: vec![create_io_buffer( - alignment, - 6 * BUF_SIZE, - IO_PATTERN, - )], + dma_buf: vec![create_io_buffer(alignment, 6 * BUF_SIZE, IO_PATTERN)], handle, }; @@ -564,8 +541,7 @@ async fn nvmf_device_read_write_at() { let handle = descr.into_handle().unwrap(); let device = handle.get_device(); - let guard_buf = - create_io_buffer(device.alignment(), BUF_SIZE, GUARD_PATTERN); + let guard_buf = create_io_buffer(device.alignment(), BUF_SIZE, GUARD_PATTERN); // First, write 2 guard buffers before and after target I/O location. let mut r = handle.write_at(OP_OFFSET, &guard_buf).await.unwrap(); @@ -577,8 +553,7 @@ async fn nvmf_device_read_write_at() { assert_eq!(r, BUF_SIZE, "The amount of data written mismatches"); // Write data buffer between guard buffers. - let data_buf = - create_io_buffer(device.alignment(), BUF_SIZE, IO_PATTERN); + let data_buf = create_io_buffer(device.alignment(), BUF_SIZE, IO_PATTERN); r = handle .write_at(OP_OFFSET + BUF_SIZE, &data_buf) .await @@ -644,11 +619,7 @@ async fn nvmf_device_readv_test() { // Make sure callback is invoked only once. flag_callback_invocation(); - assert_eq!( - status, - IoCompletionStatus::Success, - "readv_blocks() failed" - ); + assert_eq!(status, IoCompletionStatus::Success, "readv_blocks() failed"); // Make sure we have the correct device. assert_eq!( @@ -659,10 +630,7 @@ async fn nvmf_device_readv_test() { // Make sure we were passed the same pattern string as requested. let s = unsafe { - let slice = slice::from_raw_parts( - ctx as *const u8, - MAYASTOR_CTRLR_TITLE.len(), - ); + let slice = slice::from_raw_parts(ctx as *const u8, MAYASTOR_CTRLR_TITLE.len()); str::from_utf8(slice).unwrap() }; @@ -687,11 +655,7 @@ async fn nvmf_device_readv_test() { // Create a buffer with the guard pattern. let mut io_ctx = IoCtx { - dma_buf: vec![create_io_buffer( - alignment, - BUF_SIZE, - GUARD_PATTERN, - )], + dma_buf: vec![create_io_buffer(alignment, BUF_SIZE, GUARD_PATTERN)], handle, }; @@ -781,10 +745,7 @@ async fn nvmf_device_writev_test() { // Make sure we were passed the same pattern string as requested. let s = unsafe { - let slice = slice::from_raw_parts( - ctx as *const u8, - MAYASTOR_CTRLR_TITLE.len(), - ); + let slice = slice::from_raw_parts(ctx as *const u8, MAYASTOR_CTRLR_TITLE.len()); str::from_utf8(slice).unwrap() }; @@ -813,8 +774,7 @@ async fn nvmf_device_writev_test() { // Store device name for further checking from I/O callback. DEVICE_NAME.set(name.clone()).unwrap(); - let guard_buf = - create_io_buffer(alignment, BUF_SIZE, GUARD_PATTERN); + let guard_buf = create_io_buffer(alignment, BUF_SIZE, GUARD_PATTERN); // First, write 2 guard buffers before and after target I/O // location. 
@@ -827,9 +787,7 @@ async fn nvmf_device_writev_test() { assert_eq!(r, BUF_SIZE, "The amount of data written mismatches"); let ctx = IoCtx { - dma_buf: vec![create_io_buffer( - alignment, BUF_SIZE, IO_PATTERN, - )], + dma_buf: vec![create_io_buffer(alignment, BUF_SIZE, IO_PATTERN)], handle, }; @@ -932,11 +890,7 @@ async fn nvmf_device_readv_iovs_test() { // Make sure callback is invoked only once. flag_callback_invocation(); - assert_eq!( - status, - IoCompletionStatus::Success, - "readv_blocks() failed" - ); + assert_eq!(status, IoCompletionStatus::Success, "readv_blocks() failed"); // Make sure we have the correct device. assert_eq!( @@ -947,10 +901,7 @@ async fn nvmf_device_readv_iovs_test() { // Make sure we were passed the same pattern string as requested. let s = unsafe { - let slice = slice::from_raw_parts( - ctx as *const u8, - MAYASTOR_CTRLR_TITLE.len(), - ); + let slice = slice::from_raw_parts(ctx as *const u8, MAYASTOR_CTRLR_TITLE.len()); str::from_utf8(slice).unwrap() }; @@ -1093,10 +1044,7 @@ async fn nvmf_device_writev_iovs_test() { // Make sure we were passed the same pattern string as requested. let s = unsafe { - let slice = slice::from_raw_parts( - ctx as *const u8, - MAYASTOR_CTRLR_TITLE.len(), - ); + let slice = slice::from_raw_parts(ctx as *const u8, MAYASTOR_CTRLR_TITLE.len()); str::from_utf8(slice).unwrap() }; @@ -1129,15 +1077,11 @@ async fn nvmf_device_writev_iovs_test() { buffers.push(buf); } - let io_ctx = IoCtx { - buffers, - handle, - }; + let io_ctx = IoCtx { buffers, handle }; // First, write 2 guard buffers before and after target I/O // location. - let guard_buf = - create_io_buffer(alignment, GUARD_SIZE, GUARD_PATTERN); + let guard_buf = create_io_buffer(alignment, GUARD_SIZE, GUARD_PATTERN); let mut r = io_ctx .handle .write_at(OP_OFFSET - GUARD_SIZE, &guard_buf) @@ -1236,9 +1180,10 @@ async fn nvmf_device_admin_ctrl() { let descr = device_open(&name, false).unwrap(); let handle = descr.into_handle().unwrap(); - handle.nvme_admin_custom(0xCF).await.expect_err( - "successfully executed invalid NVMe admin command (0xCF)", - ); + handle + .nvme_admin_custom(0xCF) + .await + .expect_err("successfully executed invalid NVMe admin command (0xCF)"); }) .await; @@ -1284,10 +1229,7 @@ async fn nvmf_device_reset() { // Make sure we were passed the same pattern string as requested. let s = unsafe { - let slice = slice::from_raw_parts( - ctx as *const u8, - MAYASTOR_CTRLR_TITLE.len(), - ); + let slice = slice::from_raw_parts(ctx as *const u8, MAYASTOR_CTRLR_TITLE.len()); str::from_utf8(slice).unwrap() }; @@ -1315,9 +1257,7 @@ async fn nvmf_device_reset() { ) .unwrap(); - AtomicPtr::new(Box::into_raw(Box::new(DeviceIoCtx { - handle, - }))) + AtomicPtr::new(Box::into_raw(Box::new(DeviceIoCtx { handle }))) }) .await; @@ -1331,9 +1271,7 @@ async fn nvmf_device_reset() { ms.spawn(async move { let io_ctx = unsafe { Box::from_raw(op_ctx.into_inner()) }; - println!( - "Identifying controller using a newly recreated I/O channels." - ); + println!("Identifying controller using a newly recreated I/O channels."); io_ctx.handle.nvme_identify_ctrlr().await.unwrap(); println!("Controller successfully identified"); }) @@ -1384,10 +1322,7 @@ async fn wipe_device_blocks(is_unmap: bool) { // Make sure we were passed the same pattern string as requested. 
let s = unsafe { - let slice = slice::from_raw_parts( - ctx as *const u8, - MAYASTOR_CTRLR_TITLE.len(), - ); + let slice = slice::from_raw_parts(ctx as *const u8, MAYASTOR_CTRLR_TITLE.len()); str::from_utf8(slice).unwrap() }; @@ -1409,8 +1344,7 @@ async fn wipe_device_blocks(is_unmap: bool) { (device.block_len(), device.alignment()) }; - let guard_buf = - create_io_buffer(alignment, BUF_SIZE, GUARD_PATTERN); + let guard_buf = create_io_buffer(alignment, BUF_SIZE, GUARD_PATTERN); // Store device name for further checking from I/O callback. // Note that wipe_device_blocks() is called twice by different @@ -1467,9 +1401,7 @@ async fn wipe_device_blocks(is_unmap: bool) { .unwrap(); } - AtomicPtr::new(Box::into_raw(Box::new(DeviceIoCtx { - handle, - }))) + AtomicPtr::new(Box::into_raw(Box::new(DeviceIoCtx { handle }))) }) .await; @@ -1572,10 +1504,7 @@ async fn nvmf_reset_abort_io() { // Make sure we were passed the same pattern string as requested. let s = unsafe { - let slice = slice::from_raw_parts( - ctx as *const u8, - MAYASTOR_CTRLR_TITLE.len(), - ); + let slice = slice::from_raw_parts(ctx as *const u8, MAYASTOR_CTRLR_TITLE.len()); str::from_utf8(slice).unwrap() }; @@ -1604,10 +1533,7 @@ async fn nvmf_reset_abort_io() { // Make sure we were passed the same pattern string as requested. let s = unsafe { - let slice = slice::from_raw_parts( - ctx as *const u8, - MAYASTOR_CTRLR_TITLE.len(), - ); + let slice = slice::from_raw_parts(ctx as *const u8, MAYASTOR_CTRLR_TITLE.len()); str::from_utf8(slice).unwrap() }; @@ -1634,10 +1560,7 @@ async fn nvmf_reset_abort_io() { // Make sure we were passed the same pattern string as requested. let s = unsafe { - let slice = slice::from_raw_parts( - ctx as *const u8, - MAYASTOR_CTRLR_TITLE.len(), - ); + let slice = slice::from_raw_parts(ctx as *const u8, MAYASTOR_CTRLR_TITLE.len()); str::from_utf8(slice).unwrap() }; assert_eq!(s, MAYASTOR_CTRLR_TITLE); @@ -1661,18 +1584,14 @@ async fn nvmf_reset_abort_io() { DEVICE_NAME.set(name.clone()).unwrap(); let mut io_ctx = IoCtx { - dma_buf: vec![create_io_buffer( - alignment, - BUF_SIZE, - GUARD_PATTERN, - )], + dma_buf: vec![create_io_buffer(alignment, BUF_SIZE, GUARD_PATTERN)], handle, }; // Initiate a 3 read and 3 write operations into the buffer. // We use the same IOVs as we don't care about the I/O result and // care only about failures which we're gonna trigger. - for _ in 0 .. NUM_IOS { + for _ in 0..NUM_IOS { io_ctx .handle .readv_blocks( @@ -1791,10 +1710,7 @@ async fn nvmf_device_io_handle_cleanup() { "Device still resolvable by name after removal" ); - AtomicPtr::new(Box::into_raw(Box::new(DeviceIoCtx { - handle, - alignment, - }))) + AtomicPtr::new(Box::into_raw(Box::new(DeviceIoCtx { handle, alignment }))) }) .await; @@ -1807,9 +1723,7 @@ async fn nvmf_device_io_handle_cleanup() { // are supposed to be invalidated after device removal. ms.spawn(async move { let io_ctx = unsafe { Box::from_raw(op_ctx.into_inner()) }; - println!( - "Identifying controller using a newly recreated I/O channels." - ); + println!("Identifying controller using a newly recreated I/O channels."); // Make sure the same NVMe admin command now fail. 
io_ctx .handle @@ -1879,9 +1793,7 @@ async fn nvmf_device_hot_remove() { .jsonrpc .json_rpc_call(JsonRpcRequest { method: "nvmf_subsystem_remove_ns".to_string(), - params: format!( - "{{\"nqn\": \"{NVME_NQN_PREFIX}:disk0\", \"nsid\": 1}}" - ), + params: format!("{{\"nqn\": \"{NVME_NQN_PREFIX}:disk0\", \"nsid\": 1}}"), }) .await .unwrap(); @@ -1903,5 +1815,5 @@ async fn nvmf_device_hot_remove() { .await .expect_err("Device has been successfully created for controller without namespaces"); }) - .await; + .await; } diff --git a/io-engine/tests/child_size.rs b/io-engine/tests/child_size.rs index 633b137bc..10dfd2131 100644 --- a/io-engine/tests/child_size.rs +++ b/io-engine/tests/child_size.rs @@ -11,13 +11,11 @@ use io_engine::{ pub mod common; async fn create_nexus(nexus_size: u64, child_sizes: Vec) -> bool { - let children: Vec = (0 .. child_sizes.len()) + let children: Vec = (0..child_sizes.len()) .map(|i| format!("malloc:///m{}?size_mb={}", i, child_sizes[i])) .collect(); - if let Err(error) = - nexus_create("core_nexus", nexus_size * 1024 * 1024, None, &children) - .await + if let Err(error) = nexus_create("core_nexus", nexus_size * 1024 * 1024, None, &children).await { error!("nexus_create() failed: {}", error); return false; @@ -41,20 +39,16 @@ async fn child_size_ok() { let bdev = UntypedBdev::lookup_by_name("core_nexus").unwrap(); assert_eq!(bdev.name(), "core_nexus"); - let bdev = UntypedBdev::lookup_by_name("m0") - .expect("child bdev m0 not found"); + let bdev = UntypedBdev::lookup_by_name("m0").expect("child bdev m0 not found"); assert_eq!(bdev.name(), "m0"); - let bdev = UntypedBdev::lookup_by_name("m1") - .expect("child bdev m1 not found"); + let bdev = UntypedBdev::lookup_by_name("m1").expect("child bdev m1 not found"); assert_eq!(bdev.name(), "m1"); - let bdev = UntypedBdev::lookup_by_name("m2") - .expect("child bdev m2 not found"); + let bdev = UntypedBdev::lookup_by_name("m2").expect("child bdev m2 not found"); assert_eq!(bdev.name(), "m2"); - let nexus = - nexus_lookup_mut("core_nexus").expect("nexus not found"); + let nexus = nexus_lookup_mut("core_nexus").expect("nexus not found"); nexus.destroy().await.unwrap(); assert!(nexus_lookup_mut("core_nexus").is_none()); diff --git a/io-engine/tests/core.rs b/io-engine/tests/core.rs index 0e702dc4c..702e36cd1 100644 --- a/io-engine/tests/core.rs +++ b/io-engine/tests/core.rs @@ -90,8 +90,7 @@ async fn core_2() { .spawn(async { create_nexus().await; - let n = - nexus_lookup_mut("core_nexus").expect("failed to lookup nexus"); + let n = nexus_lookup_mut("core_nexus").expect("failed to lookup nexus"); let d1 = UntypedBdev::open_by_name("core_nexus", true) .expect("failed to open first desc to nexus"); @@ -157,13 +156,8 @@ async fn core_4() { mayastor() .spawn(async move { - let create = nexus_create( - nexus_name, - nexus_size, - None, - &[BDEVNAME1.to_string()], - ) - .await; + let create = + nexus_create(nexus_name, nexus_size, None, &[BDEVNAME1.to_string()]).await; if nexus_ok { create.unwrap_or_else(|_| { panic!( @@ -179,10 +173,7 @@ async fn core_4() { .add_child(BDEVNAME2, true) .await .unwrap_or_else(|_| { - panic!( - "Case {} - Child should have been added", - test_case_index - ) + panic!("Case {} - Child should have been added", test_case_index) }); } else { nexus @@ -224,14 +215,9 @@ async fn core_5() { mayastor() .spawn(async move { - nexus_create( - nexus_name, - nexus_size, - None, - &[BDEVNAME1.to_string()], - ) - .await - .unwrap(); + nexus_create(nexus_name, nexus_size, None, &[BDEVNAME1.to_string()]) + .await + 
.unwrap(); let mut nexus = nexus_lookup_mut(nexus_name).unwrap(); // need to refactor this test to use nvmf instead of nbd // once the libnvme-rs refactoring is done diff --git a/io-engine/tests/ftl_mount_fs.rs b/io-engine/tests/ftl_mount_fs.rs index 35371d3cb..3ec61d863 100644 --- a/io-engine/tests/ftl_mount_fs.rs +++ b/io-engine/tests/ftl_mount_fs.rs @@ -81,7 +81,7 @@ async fn ftl_mount_fs_multiple() { //prepare_storage!(); let (target, nvmf_dev) = create_connected_nvmf_nexus(ms).await; - for _i in 0 .. 10 { + for _i in 0..10 { common::mount_umount(&nvmf_dev).unwrap(); } @@ -103,7 +103,8 @@ pub fn csal_fio_run_verify(device: &str) -> Result<String, Error> { "#, &vec![device.into()], &run_script::ScriptOptions::new(), - ).unwrap(); + ) + .unwrap(); if exit == 0 { Ok(stdout) } else { @@ -130,9 +131,7 @@ async fn ftl_mount_fs_fio() { } async fn create_nexus() { - let bdev_uri: String = format!( - "{FTL_URI_PREFIX}{FTL_BDEV}?bbdev={BASE_DEV}&cbdev={CACHE_DEV}" - ); + let bdev_uri: String = format!("{FTL_URI_PREFIX}{FTL_BDEV}?bbdev={BASE_DEV}&cbdev={CACHE_DEV}"); let ch = vec![bdev_uri]; nexus_create("nexus", 8 * 1024 * 1024 * 1024, None, &ch) .await diff --git a/io-engine/tests/lock.rs b/io-engine/tests/lock.rs index b07d1ddb0..1c67d037f 100755 --- a/io-engine/tests/lock.rs +++ b/io-engine/tests/lock.rs @@ -17,8 +17,7 @@ const TEST_SUBSYSTEM: &str = "items"; const TEST_RESOURCE: &str = "item1"; fn get_lock_manager() -> &'static ResourceLockManager { - let cfg = - ResourceLockManagerConfig::default().with_subsystem(TEST_SUBSYSTEM, 8); + let cfg = ResourceLockManagerConfig::default().with_subsystem(TEST_SUBSYSTEM, 8); ResourceLockManager::initialize(cfg); ResourceLockManager::get_instance() } diff --git a/io-engine/tests/lock_lba_range.rs b/io-engine/tests/lock_lba_range.rs index 16e96fe7c..c74da7ebd 100644 --- a/io-engine/tests/lock_lba_range.rs +++ b/io-engine/tests/lock_lba_range.rs @@ -6,13 +6,7 @@ use crossbeam::channel::{unbounded, Receiver}; use io_engine::{ bdev::nexus::{nexus_create, nexus_lookup_mut}, - core::{ - MayastorCliArgs, - MayastorEnvironment, - Reactor, - Reactors, - UntypedBdev, - }, + core::{MayastorCliArgs, MayastorEnvironment, Reactor, Reactors, UntypedBdev}, }; use spdk_rs::{BdevDescError, DmaBuf, LbaRange, LbaRangeLock}; @@ -25,7 +19,7 @@ const NUM_NEXUS_CHILDREN: u64 = 2; fn test_ini() { test_init!(); - for i in 0 .. NUM_NEXUS_CHILDREN { + for i in 0..NUM_NEXUS_CHILDREN { common::delete_file(&[get_disk(i)]); common::truncate_file_bytes(&get_disk(i), NEXUS_SIZE); } @@ -36,7 +30,7 @@ } fn test_fini() { - for i in 0 .. NUM_NEXUS_CHILDREN { + for i in 0..NUM_NEXUS_CHILDREN { common::delete_file(&[get_disk(i)]); } @@ -56,7 +50,7 @@ fn get_dev(number: u64) -> String { async fn create_nexus() { let mut ch = Vec::new(); - for i in 0 .. 
NUM_NEXUS_CHILDREN { + for i in 0..NUM_NEXUS_CHILDREN { ch.push(get_dev(i)); } diff --git a/io-engine/tests/lvs_grow.rs b/io-engine/tests/lvs_grow.rs index 2115aa258..62bf8341d 100644 --- a/io-engine/tests/lvs_grow.rs +++ b/io-engine/tests/lvs_grow.rs @@ -4,11 +4,7 @@ use async_trait::async_trait; use once_cell::sync::OnceCell; use std::future::Future; -use spdk_rs::{ - ffihelper::IntoCString, - libspdk::resize_malloc_disk, - UntypedBdev, -}; +use spdk_rs::{ffihelper::IntoCString, libspdk::resize_malloc_disk, UntypedBdev}; use io_engine::{ core::MayastorCliArgs, @@ -20,9 +16,7 @@ use io_engine_tests::{ bdev::{create_bdev, find_bdev_by_name}, compose::{ rpc::v1::{pool::Pool, GrpcConnect, SharedRpcHandle}, - Binary, - Builder, - ComposeTest, + Binary, Builder, ComposeTest, }, pool::PoolBuilder, MayastorTest, @@ -44,7 +38,7 @@ fn ms() -> &'static MayastorTest<'static> { /// Tests if 'a' is approximately equal to 'b' up to the given tolerance (in /// percents). fn approx_eq(a: f64, b: f64, t: f64) -> bool { - assert!(a > 0.0 && b > 0.0 && (0.0 .. 100.0).contains(&t)); + assert!(a > 0.0 && b > 0.0 && (0.0..100.0).contains(&t)); let d = 100.0 * (a - b).abs() / f64::max(a, b); d <= t } @@ -192,10 +186,7 @@ async fn lvs_grow_ms_malloc() { unsafe { // Resize the malloc bdev. let name = BDEV_NAME.to_owned(); - resize_malloc_disk( - name.into_cstring().as_ptr(), - SIZE_AFTER_MB, - ); + resize_malloc_disk(name.into_cstring().as_ptr(), SIZE_AFTER_MB); }; }) .await; @@ -203,8 +194,7 @@ async fn lvs_grow_ms_malloc() { } } - test_grow(async { Box::new(GrowTestMsMalloc {}) as Box<dyn GrowTest> }) - .await; + test_grow(async { Box::new(GrowTestMsMalloc {}) as Box<dyn GrowTest> }).await; } /// Pool grow test based on gRPC API and malloc bdev. @@ -246,11 +236,7 @@ async fn lvs_grow_api_malloc() { .with_uuid(POOL_UUID) .with_bdev(BDEV_URI); - Self { - test, - ms, - pool, - } + Self { test, ms, pool } } } @@ -271,22 +257,17 @@ async fn lvs_grow_api_malloc() { } async fn device_size(&mut self) -> u64 { - let bdev = - find_bdev_by_name(self.ms.clone(), BDEV_NAME).await.unwrap(); + let bdev = find_bdev_by_name(self.ms.clone(), BDEV_NAME).await.unwrap(); bdev.num_blocks * bdev.blk_size as u64 } async fn grow_device(&mut self) -> u64 { - let bdev = - create_bdev(self.ms.clone(), BDEV_URI_RESIZE).await.unwrap(); + let bdev = create_bdev(self.ms.clone(), BDEV_URI_RESIZE).await.unwrap(); bdev.num_blocks * bdev.blk_size as u64 } } - test_grow(async { - Box::new(GrowTestApiMalloc::new().await) as Box<dyn GrowTest> - }) - .await; + test_grow(async { Box::new(GrowTestApiMalloc::new().await) as Box<dyn GrowTest> }).await; } /// Pool grow test based on gRPC API and file-based AIO device. 
@@ -295,8 +276,7 @@ async fn lvs_grow_api_aio() { const DISK_NAME: &str = "/tmp/disk1.img"; const BDEV_NAME: &str = "/host/tmp/disk1.img"; const BDEV_URI: &str = "aio:///host/tmp/disk1.img?blk_size=512"; - const BDEV_URI_RESCAN: &str = - "aio:///host/tmp/disk1.img?blk_size=512&rescan"; + const BDEV_URI_RESCAN: &str = "aio:///host/tmp/disk1.img?blk_size=512&rescan"; const POOL_NAME: &str = "pool0"; const POOL_UUID: &str = "40baf8b5-6256-4f29-b073-61ebf67d9b91"; @@ -335,11 +315,7 @@ async fn lvs_grow_api_aio() { .with_uuid(POOL_UUID) .with_bdev(BDEV_URI); - Self { - test, - ms, - pool, - } + Self { test, ms, pool } } } @@ -360,8 +336,7 @@ async fn lvs_grow_api_aio() { } async fn device_size(&mut self) -> u64 { - let bdev = - find_bdev_by_name(self.ms.clone(), BDEV_NAME).await.unwrap(); + let bdev = find_bdev_by_name(self.ms.clone(), BDEV_NAME).await.unwrap(); bdev.num_blocks * bdev.blk_size as u64 } @@ -370,14 +345,10 @@ async fn lvs_grow_api_aio() { common::truncate_file(DISK_NAME, 128 * 1024); // Rescan AIO bdev (re-read its size from the backing media). - let bdev = - create_bdev(self.ms.clone(), BDEV_URI_RESCAN).await.unwrap(); + let bdev = create_bdev(self.ms.clone(), BDEV_URI_RESCAN).await.unwrap(); bdev.num_blocks * bdev.blk_size as u64 } } - test_grow(async { - Box::new(GrowTestApiAio::new().await) as Box<dyn GrowTest> - }) - .await; + test_grow(async { Box::new(GrowTestApiAio::new().await) as Box<dyn GrowTest> }).await; } diff --git a/io-engine/tests/lvs_import.rs b/io-engine/tests/lvs_import.rs index 09ca2cd16..0a2fee748 100644 --- a/io-engine/tests/lvs_import.rs +++ b/io-engine/tests/lvs_import.rs @@ -62,7 +62,7 @@ async fn lvs_import_many_volume() { let lvs = Lvs::create_or_import(lvs_args.clone()).await.unwrap(); // Create replicas. - for i in 0 .. REPL_CNT { + for i in 0..REPL_CNT { let repl_name = format!("r_{i}"); let repl_uuid = format!("45c23e54-dc86-45f6-b55b-e44d05f1{i:04}"); @@ -80,12 +80,11 @@ async fn lvs_import_many_volume() { created.insert(repl_name.clone()); // Create snapshots for each replicas. - for j in 0 .. SNAP_CNT { + for j in 0..SNAP_CNT { let snap_name = format!("r_{i}_snap_{j}"); let eid = format!("e_{i}_{j}"); let txn_id = format!("t_{i}_{j}"); - let snap_uuid = - format!("55c23e54-dc89-45f6-b55b-e44d{i:04}{j:04}"); + let snap_uuid = format!("55c23e54-dc89-45f6-b55b-e44d{i:04}{j:04}"); let snap_config = lvol .prepare_snap_config(&snap_name, &eid, &txn_id, &snap_uuid) diff --git a/io-engine/tests/lvs_limits.rs b/io-engine/tests/lvs_limits.rs index 454ac3ff5..2b769e307 100644 --- a/io-engine/tests/lvs_limits.rs +++ b/io-engine/tests/lvs_limits.rs @@ -57,7 +57,7 @@ async fn lvs_metadata_limit() { let lvs = Lvs::create_or_import(lvs_args.clone()).await.unwrap(); // Create replicas. - for i in 0 .. REPL_CNT { + for i in 0..REPL_CNT { let repl_name = format!("r_{i}"); let repl_uuid = format!("45c23e54-dc86-45f6-b55b-e44d05f1{i:04}"); @@ -74,13 +74,8 @@ async fn lvs_metadata_limit() { Ok(lvol) => lvol, Err(err) => { match err { - LvsError::RepCreate { - source, .. - } => { - assert!(matches!( - source, - BsError::OutOfMetadata {} - )); + LvsError::RepCreate { source, .. } => { + assert!(matches!(source, BsError::OutOfMetadata {})); break; } _ => { @@ -91,12 +86,11 @@ async fn lvs_metadata_limit() { }; // Create snapshots for each replicas. - for j in 0 .. 
SNAP_CNT { + for j in 0..SNAP_CNT { let snap_name = format!("r_{i}_snap_{j}"); let eid = format!("e_{i}_{j}"); let txn_id = format!("t_{i}_{j}"); - let snap_uuid = - format!("55c23e54-dc89-45f6-b55b-e44d{i:04}{j:04}"); + let snap_uuid = format!("55c23e54-dc89-45f6-b55b-e44d{i:04}{j:04}"); let snap_config = lvol .prepare_snap_config(&snap_name, &eid, &txn_id, &snap_uuid) @@ -104,13 +98,8 @@ async fn lvs_metadata_limit() { if let Err(err) = lvol.create_snapshot(snap_config).await { match err { - LvsError::SnapshotCreate { - source, .. - } => { - assert!(matches!( - source, - BsError::OutOfMetadata {} - )); + LvsError::SnapshotCreate { source, .. } => { + assert!(matches!(source, BsError::OutOfMetadata {})); break; } _ => { diff --git a/io-engine/tests/lvs_pool.rs b/io-engine/tests/lvs_pool.rs index 4241c32df..b8feccc05 100644 --- a/io-engine/tests/lvs_pool.rs +++ b/io-engine/tests/lvs_pool.rs @@ -1,13 +1,7 @@ use common::MayastorTest; use io_engine::{ bdev_api::bdev_create, - core::{ - logical_volume::LogicalVolume, - MayastorCliArgs, - Protocol, - Share, - UntypedBdev, - }, + core::{logical_volume::LogicalVolume, MayastorCliArgs, Protocol, Share, UntypedBdev}, lvs::{Lvs, LvsLvol, PropName, PropValue}, pool_backend::{PoolArgs, PoolBackend}, subsys::NvmfSubsystem, @@ -31,11 +25,7 @@ async fn lvs_pool_test() { .output() .expect("failed to execute mkdir"); - common::delete_file(&[ - DISKNAME1.into(), - DISKNAME2.into(), - DISKNAME3.into(), - ]); + common::delete_file(&[DISKNAME1.into(), DISKNAME2.into(), DISKNAME3.into()]); common::truncate_file(DISKNAME1, 128 * 1024); common::truncate_file(DISKNAME2, 128 * 1024); common::truncate_file(DISKNAME3, 128 * 1024); @@ -79,10 +69,8 @@ async fn lvs_pool_test() { // create directly here to ensure that if we // have an idempotent snafu, we dont crash and // burn - ms.spawn(async { - assert!(Lvs::create_from_args_inner(pool_args).await.is_err()) - }) - .await; + ms.spawn(async { assert!(Lvs::create_from_args_inner(pool_args).await.is_err()) }) + .await; // should fail to import the pool that is already imported // similar to above, we use the import directly @@ -164,16 +152,10 @@ async fn lvs_pool_test() { // create 10 lvol on this pool ms.spawn(async { let pool = Lvs::lookup("tpool").unwrap(); - for i in 0 .. 10 { - pool.create_lvol( - &format!("vol-{i}"), - 8 * 1024 * 1024, - None, - true, - None, - ) - .await - .unwrap(); + for i in 0..10 { + pool.create_lvol(&format!("vol-{i}"), 8 * 1024 * 1024, None, true, None) + .await + .unwrap(); } assert_eq!(pool.lvols().unwrap().count(), 10); @@ -193,7 +175,7 @@ async fn lvs_pool_test() { .await .unwrap(); - for i in 0 .. 5 { + for i in 0..5 { pool2 .create_lvol( &format!("pool2-vol-{i}"), @@ -311,16 +293,10 @@ async fn lvs_pool_test() { ms.spawn(async { let pool = Lvs::lookup("tpool").unwrap(); - for i in 0 .. 10 { - pool.create_lvol( - &format!("vol-{i}"), - 8 * 1024 * 1024, - None, - true, - None, - ) - .await - .unwrap(); + for i in 0..10 { + pool.create_lvol(&format!("vol-{i}"), 8 * 1024 * 1024, None, true, None) + .await + .unwrap(); } for mut l in pool.lvols().unwrap() { diff --git a/io-engine/tests/malloc_bdev.rs b/io-engine/tests/malloc_bdev.rs index d6e6ea3fd..34934c29a 100644 --- a/io-engine/tests/malloc_bdev.rs +++ b/io-engine/tests/malloc_bdev.rs @@ -52,7 +52,7 @@ async fn malloc_bdev() { let s0 = b0.as_slice(); let s1 = b1.as_slice(); - for i in 0 .. 
s0.len() { + for i in 0..s0.len() { assert_eq!(s0[i], 3); assert_eq!(s0[i], s1[i]) } diff --git a/io-engine/tests/memory_pool.rs b/io-engine/tests/memory_pool.rs index 262715108..1f27d69b1 100755 --- a/io-engine/tests/memory_pool.rs +++ b/io-engine/tests/memory_pool.rs @@ -37,15 +37,11 @@ async fn test_get() { // Allocate all available items from the pool, make sure all addresses // are unique. - for i in 0 .. POOL_SIZE { + for i in 0..POOL_SIZE { let id: u64 = i; let pos: u32 = 3 * i as u32; - let o = pool.get(TestCtx { - id, - pos, - ctx: c_c, - }); + let o = pool.get(TestCtx { id, pos, ctx: c_c }); assert!(o.is_some(), "Failed to get element from memory pool"); let p = o.unwrap(); @@ -90,7 +86,7 @@ async fn test_get() { // Now try to allocate elements - we must see the same addresses as the // ones we just freed. - for _ in 0 .. TEST_BULK_SIZE { + for _ in 0..TEST_BULK_SIZE { let o = pool.get(TestCtx { id: 1, pos: 1984, diff --git a/io-engine/tests/mount_fs.rs b/io-engine/tests/mount_fs.rs index 014f5b0da..0d3a18b71 100644 --- a/io-engine/tests/mount_fs.rs +++ b/io-engine/tests/mount_fs.rs @@ -133,7 +133,7 @@ async fn mount_fs_multiple() { prepare_storage!(); let (target, nvmf_dev) = create_connected_nvmf_nexus(ms).await; - for _i in 0 .. 10 { + for _i in 0..10 { common::mount_umount(&nvmf_dev).unwrap(); } diff --git a/io-engine/tests/nexus_add_remove.rs b/io-engine/tests/nexus_add_remove.rs index 33f5525f8..b9270704d 100644 --- a/io-engine/tests/nexus_add_remove.rs +++ b/io-engine/tests/nexus_add_remove.rs @@ -3,9 +3,7 @@ use common::compose::{ mayastor::{BdevShareRequest, BdevUri}, GrpcConnect, }, - Builder, - ComposeTest, - MayastorTest, + Builder, ComposeTest, MayastorTest, }; use io_engine::{ bdev::nexus::{nexus_create, nexus_lookup_mut}, diff --git a/io-engine/tests/nexus_child_location.rs b/io-engine/tests/nexus_child_location.rs index b05f26ee0..e8486cc1e 100644 --- a/io-engine/tests/nexus_child_location.rs +++ b/io-engine/tests/nexus_child_location.rs @@ -77,8 +77,7 @@ async fn child_location() { .await .unwrap(); - let nexus = - nexus_lookup_mut(NEXUS_NAME).expect("Failed to find nexus"); + let nexus = nexus_lookup_mut(NEXUS_NAME).expect("Failed to find nexus"); let children = &nexus.children(); assert_eq!(children.len(), 2); assert!(children[0].is_local().unwrap()); diff --git a/io-engine/tests/nexus_child_online.rs b/io-engine/tests/nexus_child_online.rs index 1e4e02b57..7016261ac 100644 --- a/io-engine/tests/nexus_child_online.rs +++ b/io-engine/tests/nexus_child_online.rs @@ -6,9 +6,7 @@ use common::{ nexus::{ChildState, ChildStateReason}, GrpcConnect, }, - Binary, - Builder, - ComposeTest, + Binary, Builder, ComposeTest, }, file_io::DataSize, nexus::{test_write_to_nexus, NexusBuilder}, @@ -127,14 +125,9 @@ async fn nexus_child_online() { nex_0, } = create_test_storage(&test).await; - test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 1, - DataSize::from_kb(1), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 1, DataSize::from_kb(1)) + .await + .unwrap(); nex_0 .offline_child_replica_wait(&repl_0, Duration::from_secs(1)) diff --git a/io-engine/tests/nexus_child_retire.rs b/io-engine/tests/nexus_child_retire.rs index 458d94aae..3811ec68c 100644 --- a/io-engine/tests/nexus_child_retire.rs +++ b/io-engine/tests/nexus_child_retire.rs @@ -13,9 +13,7 @@ use common::{ v1, v1::{GrpcConnect, SharedRpcHandle}, }, - Binary, - Builder, - ComposeTest, + Binary, Builder, ComposeTest, }, file_io::DataSize, fio::{FioBuilder, FioJobBuilder}, @@ 
-30,28 +28,15 @@ pub use spdk_rs::{libspdk::SPDK_NVME_SC_INTERNAL_DEVICE_ERROR, NvmeStatus}; use io_engine::{ bdev::{ - nexus::{ - nexus_create, - nexus_lookup_mut, - ChildState, - FaultReason, - NexusStatus, - }, + nexus::{nexus_create, nexus_lookup_mut, ChildState, FaultReason, NexusStatus}, NexusInfo, }, core::{ fault_injection::{ - add_fault_injection, - FaultDomain, - FaultIoOperation, - FaultIoStage, - FaultMethod, + add_fault_injection, FaultDomain, FaultIoOperation, FaultIoStage, FaultMethod, InjectionBuilder, }, - CoreError, - IoCompletionStatus, - MayastorCliArgs, - Protocol, + CoreError, IoCompletionStatus, MayastorCliArgs, Protocol, }, lvs::Lvs, persistent_store::PersistentStoreBuilder, @@ -375,7 +360,7 @@ async fn nexus_child_retire_persist_unresponsive_with_bdev_io() { .with_io_operation(FaultIoOperation::Write) .with_io_stage(FaultIoStage::Completion) .with_method(FaultMethod::DATA_TRANSFER_ERROR) - .with_block_range(0 .. 1) + .with_block_range(0..1) .build() .unwrap(), ) @@ -456,7 +441,7 @@ async fn nexus_child_retire_persist_failure_with_bdev_io() { .with_io_operation(FaultIoOperation::Write) .with_io_stage(FaultIoStage::Completion) .with_method(FaultMethod::DATA_TRANSFER_ERROR) - .with_block_range(0 .. 1) + .with_block_range(0..1) .build() .unwrap(), ) @@ -574,13 +559,7 @@ async fn init_ms_etcd_test() -> ComposeTest { // Pool #1 and replica #1. pool_0 - .create_lvol( - REPL_NAME_0, - POOL_SIZE, - Some(REPL_UUID_0), - false, - None, - ) + .create_lvol(REPL_NAME_0, POOL_SIZE, Some(REPL_UUID_0), false, None) .await .unwrap(); @@ -593,13 +572,7 @@ async fn init_ms_etcd_test() -> ComposeTest { .unwrap(); pool_1 - .create_lvol( - REPL_NAME_1, - POOL_SIZE, - Some(REPL_UUID_1), - false, - None, - ) + .create_lvol(REPL_NAME_1, POOL_SIZE, Some(REPL_UUID_1), false, None) .await .unwrap(); diff --git a/io-engine/tests/nexus_children_add_remove.rs b/io-engine/tests/nexus_children_add_remove.rs index 78387cd5f..d6d461522 100644 --- a/io-engine/tests/nexus_children_add_remove.rs +++ b/io-engine/tests/nexus_children_add_remove.rs @@ -75,8 +75,7 @@ async fn remove_children_from_nexus() { // lookup the nexus and share it over nvmf ms.spawn(async { - let nexus = - nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); + let nexus = nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); nexus.share_nvmf(None).await }) .await @@ -84,16 +83,14 @@ async fn remove_children_from_nexus() { // lookup the nexus, and remove a child ms.spawn(async { - let nexus = - nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); + let nexus = nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); nexus.remove_child(&format!("uring:///{DISKNAME1}")).await }) .await .expect("failed to remove child from nexus"); ms.spawn(async { - let nexus = - nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); + let nexus = nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); nexus.remove_child(&format!("uring:///{DISKNAME2}")).await }) .await @@ -101,8 +98,7 @@ async fn remove_children_from_nexus() { // add new child but don't rebuild, so it's not healthy! 
ms.spawn(async { - let nexus = - nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); + let nexus = nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); nexus .add_child(&format!("uring:///{DISKNAME1}"), true) .await @@ -111,8 +107,7 @@ async fn remove_children_from_nexus() { .expect("should be able to add a child back"); ms.spawn(async { - let nexus = - nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); + let nexus = nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); nexus.remove_child(&format!("uring:///{DISKNAME2}")).await }) .await @@ -120,8 +115,7 @@ async fn remove_children_from_nexus() { // destroy it ms.spawn(async { - let nexus = - nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); + let nexus = nexus_lookup_mut("remove_from_nexus").expect("nexus is not found!"); nexus.destroy().await.unwrap(); }) .await; @@ -154,8 +148,7 @@ async fn nexus_add_child() { .await; ms.spawn(async { - let nexus = - nexus_lookup_mut("nexus_add_child").expect("nexus is not found!"); + let nexus = nexus_lookup_mut("nexus_add_child").expect("nexus is not found!"); nexus .share_nvmf(None) .await @@ -164,8 +157,7 @@ async fn nexus_add_child() { .await; ms.spawn(async { - let nexus = - nexus_lookup_mut("nexus_add_child").expect("nexus is not found!"); + let nexus = nexus_lookup_mut("nexus_add_child").expect("nexus is not found!"); nexus .add_child(&format!("uring:///{DISKNAME3}"), false) .await @@ -174,17 +166,12 @@ async fn nexus_add_child() { .unwrap(); ms.spawn(async { - let nexus = - nexus_lookup_mut("nexus_add_child").expect("nexus is not found!"); + let nexus = nexus_lookup_mut("nexus_add_child").expect("nexus is not found!"); nexus.destroy().await.unwrap(); }) .await; - common::delete_file(&[ - DISKNAME1.into(), - DISKNAME2.into(), - DISKNAME3.into(), - ]); + common::delete_file(&[DISKNAME1.into(), DISKNAME2.into(), DISKNAME3.into()]); } /// Remove a child while I/O is running. @@ -206,11 +193,7 @@ async fn nexus_remove_child_with_io() { ) .add_container_bin( "ms_nex", - Binary::from_dbg("io-engine").with_args(vec![ - "-l", - "3,4", - "-Fnodate,compact,color", - ]), + Binary::from_dbg("io-engine").with_args(vec!["-l", "3,4", "-Fnodate,compact,color"]), ) .with_clean(true) .build() @@ -327,11 +310,7 @@ async fn nexus_channel_get_handles() { ) .add_container_bin( "qms_nex", - Binary::from_dbg("io-engine").with_args(vec![ - "-l", - "5,6", - "-Fnodate,compact,color", - ]), + Binary::from_dbg("io-engine").with_args(vec!["-l", "5,6", "-Fnodate,compact,color"]), ) .with_clean(true) .build() @@ -405,7 +384,7 @@ async fn nexus_channel_get_handles() { nex_0.create().await.unwrap(); nex_0.publish().await.unwrap(); - for _ in 0 .. 
20 { + for _ in 0..20 { let j_repl0 = tokio::spawn({ let nex_0 = nex_0.clone(); let repl_0 = repl_0.clone(); diff --git a/io-engine/tests/nexus_crd.rs b/io-engine/tests/nexus_crd.rs index b037ef468..a31ad1119 100644 --- a/io-engine/tests/nexus_crd.rs +++ b/io-engine/tests/nexus_crd.rs @@ -12,11 +12,9 @@ use common::{ compose::{ rpc::v1::{ nexus::{NexusNvmePreemption, NvmeReservation}, - GrpcConnect, - SharedRpcHandle, + GrpcConnect, SharedRpcHandle, }, - Binary, - Builder, + Binary, Builder, }, file_io::DataSize, fio::{spawn_fio_task, FioBuilder, FioJobBuilder, FioJobResult}, @@ -28,11 +26,7 @@ use common::{ test::{add_fault_injection, remove_fault_injection}, }; -use io_engine::core::fault_injection::{ - FaultDomain, - FaultIoOperation, - InjectionBuilder, -}; +use io_engine::core::fault_injection::{FaultDomain, FaultIoOperation, InjectionBuilder}; const POOL_SIZE: u64 = 500; const REPL_SIZE: u64 = 450; @@ -71,12 +65,7 @@ async fn test_nexus_fail(crdt: &str) -> std::io::Result<()> { ) .add_container_bin( "ms_nex", - Binary::from_dbg("io-engine").with_args(vec![ - "-l", - "1,2,3,4", - "--tgt-crdt", - crdt, - ]), + Binary::from_dbg("io-engine").with_args(vec!["-l", "1,2,3,4", "--tgt-crdt", crdt]), ) .with_clean(true) .build() @@ -171,12 +160,7 @@ struct NexusManageTask { } /// Runs multiple FIO I/O jobs. -async fn run_io_task( - s: Sender<()>, - nvmf: &NvmfLocation, - cnt: u32, - rt: u32, -) -> std::io::Result<()> { +async fn run_io_task(s: Sender<()>, nvmf: &NvmfLocation, cnt: u32, rt: u32) -> std::io::Result<()> { let _cg = NmveConnectGuard::connect_addr(&nvmf.addr, &nvmf.nqn); let path = find_mayastor_nvme_device_path(&nvmf.serial) .unwrap() @@ -184,7 +168,7 @@ async fn run_io_task( .unwrap() .to_string(); - let jobs = (0 .. cnt).map(|_| { + let jobs = (0..cnt).map(|_| { FioJobBuilder::new() .with_direct(true) .with_ioengine("libaio") diff --git a/io-engine/tests/nexus_create_destroy.rs b/io-engine/tests/nexus_create_destroy.rs index 836fbeb6b..dac6d3969 100644 --- a/io-engine/tests/nexus_create_destroy.rs +++ b/io-engine/tests/nexus_create_destroy.rs @@ -2,8 +2,7 @@ pub mod common; use common::compose::{ rpc::v0::{ mayastor::{CreateNexusRequest, DestroyNexusRequest, Nexus}, - GrpcConnect, - RpcHandle, + GrpcConnect, RpcHandle, }, Builder, }; @@ -28,7 +27,7 @@ async fn nexus_create_destroy() { let mut hdl = grpc.grpc_handle("ms1").await.unwrap(); - for i in 0 .. NEXUS_COUNT { + for i in 0..NEXUS_COUNT { let nexus = hdl .mayastor .create_nexus(CreateNexusRequest { @@ -90,7 +89,7 @@ async fn nexus_create_multiple_then_destroy() { async fn create_nexuses(handle: &mut RpcHandle, count: usize) -> Vec<Nexus> { let mut nexuses = vec![]; - for i in 0 .. 
count { + for i in 0..count { let nexus = handle .mayastor .create_nexus(CreateNexusRequest { diff --git a/io-engine/tests/nexus_fault_injection.rs b/io-engine/tests/nexus_fault_injection.rs index 575a7c302..85dc2174a 100644 --- a/io-engine/tests/nexus_fault_injection.rs +++ b/io-engine/tests/nexus_fault_injection.rs @@ -6,12 +6,7 @@ use std::time::Duration; use io_engine::core::{ fault_injection::{ - FaultDomain, - FaultIoOperation, - FaultIoStage, - FaultMethod, - Injection, - InjectionBuilder, + FaultDomain, FaultIoOperation, FaultIoStage, FaultMethod, Injection, InjectionBuilder, }, IoCompletionStatus, }; @@ -22,9 +17,7 @@ use common::{ nexus::{ChildState, ChildStateReason}, GrpcConnect, }, - Binary, - Builder, - ComposeTest, + Binary, Builder, ComposeTest, }, file_io::DataSize, fio::{spawn_fio_task, FioBuilder, FioJobBuilder}, @@ -67,11 +60,7 @@ async fn create_compose_test() -> ComposeTest { ) .add_container_bin( "ms_nex", - Binary::from_dbg("io-engine").with_args(vec![ - "-l", - "3", - "-Fcolor,compact", - ]), + Binary::from_dbg("io-engine").with_args(vec!["-l", "3", "-Fcolor,compact"]), ) .with_clean(true) .build() @@ -164,14 +153,9 @@ async fn test_injection_uri(inj_part: &str) { assert_eq!(&lst[0].device_name, dev_name); // Write less than pool size. - test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 30, - DataSize::from_mb(1), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 30, DataSize::from_mb(1)) + .await + .unwrap(); // let children = nex_0.get_nexus().await.unwrap().children; @@ -218,8 +202,7 @@ async fn nexus_fault_injection_time_based() { // Create an injection that will start in 1 sec after first I/O // to the device, and end after 5s. - let inj_part = - "domain=child&op=write&stage=compl&begin_at=1000&end_at=5000"; + let inj_part = "domain=child&op=write&stage=compl&begin_at=1000&end_at=5000"; let inj_uri = format!("inject://{dev_name}?{inj_part}"); add_fault_injection(nex_0.rpc(), &inj_uri).await.unwrap(); @@ -229,14 +212,9 @@ async fn nexus_fault_injection_time_based() { assert_eq!(&lst[0].device_name, dev_name); // Write some data. Injection is not yet active. - test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 1, - DataSize::from_kb(1), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 1, DataSize::from_kb(1)) + .await + .unwrap(); let children = nex_0.get_nexus().await.unwrap().children; assert_eq!(children[0].state, ChildState::Online as i32); @@ -244,14 +222,9 @@ async fn nexus_fault_injection_time_based() { tokio::time::sleep(Duration::from_millis(1000)).await; // Write again. Now the child must fail. - test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 1, - DataSize::from_kb(1), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 1, DataSize::from_kb(1)) + .await + .unwrap(); let children = nex_0.get_nexus().await.unwrap().children; assert_eq!(children[0].state, ChildState::Faulted as i32); @@ -266,14 +239,9 @@ async fn nexus_fault_injection_time_based() { .unwrap(); // Write again. Now since the injection time ended, it must not fail. 
- test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 1, - DataSize::from_kb(1), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 1, DataSize::from_kb(1)) + .await + .unwrap(); let children = nex_0.get_nexus().await.unwrap().children; assert_eq!(children[0].state, ChildState::Online as i32); } @@ -313,14 +281,9 @@ async fn nexus_fault_injection_range_based() { assert_eq!(&lst[0].device_name, dev_name); // Write two blocks from 0 offset. It must not fail. - test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 1, - DataSize::from_kb(1), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 1, DataSize::from_kb(1)) + .await + .unwrap(); let children = nex_0.get_nexus().await.unwrap().children; assert_eq!(children[0].state, ChildState::Online as i32); @@ -383,8 +346,8 @@ async fn injection_uri_creation() { ))) .with_io_operation(FaultIoOperation::Read) .with_io_stage(FaultIoStage::Completion) - .with_block_range(123 .. 456) - .with_time_range(Duration::from_secs(5) .. Duration::from_secs(10)) + .with_block_range(123..456) + .with_time_range(Duration::from_secs(5)..Duration::from_secs(10)) .with_retries(789) .build() .unwrap(); @@ -420,11 +383,7 @@ async fn replica_bdev_io_injection() { .unwrap() .add_container_bin( "ms_0", - Binary::from_dbg("io-engine").with_args(vec![ - "-l", - "1", - "-Fcompact,color,nodate", - ]), + Binary::from_dbg("io-engine").with_args(vec!["-l", "1", "-Fcompact,color,nodate"]), ) .with_clean(true) .build() diff --git a/io-engine/tests/nexus_fio.rs b/io-engine/tests/nexus_fio.rs index c4272ba6c..0bf0a018b 100644 --- a/io-engine/tests/nexus_fio.rs +++ b/io-engine/tests/nexus_fio.rs @@ -42,12 +42,7 @@ async fn nexus_fio_single_remote() { ) .add_container_bin( "ms_nex", - Binary::from_dbg("io-engine").with_args(vec![ - "-l", - "3,4", - "-F", - "compact,color", - ]), + Binary::from_dbg("io-engine").with_args(vec!["-l", "3,4", "-F", "compact,color"]), ) .with_clean(true) .build() @@ -153,12 +148,7 @@ async fn nexus_fio_mixed() { ) .add_container_bin( "ms_nex", - Binary::from_dbg("io-engine").with_args(vec![ - "-l", - "3,4", - "-F", - "compact,color", - ]), + Binary::from_dbg("io-engine").with_args(vec!["-l", "3,4", "-F", "compact,color"]), ) .with_clean(true) .build() diff --git a/io-engine/tests/nexus_io.rs b/io-engine/tests/nexus_io.rs index 6c9e2a3d7..ac44f37cb 100644 --- a/io-engine/tests/nexus_io.rs +++ b/io-engine/tests/nexus_io.rs @@ -2,13 +2,8 @@ use common::bdev_io; use io_engine::{ bdev::nexus::{ - nexus_create, - nexus_create_v2, - nexus_lookup, - nexus_lookup_mut, - NexusNvmeParams, - NexusPauseState, - NvmeAnaState, + nexus_create, nexus_create_v2, nexus_lookup, nexus_lookup_mut, NexusNvmeParams, + NexusPauseState, NvmeAnaState, }, constants::NVME_NQN_PREFIX, core::{MayastorCliArgs, Protocol}, @@ -26,36 +21,19 @@ use common::{ compose::{ rpc::v0::{ mayastor::{ - CreateNexusRequest, - CreateNexusV2Request, - CreatePoolRequest, - CreateReplicaRequest, - DestroyNexusRequest, - Null, - PublishNexusRequest, + CreateNexusRequest, CreateNexusV2Request, CreatePoolRequest, CreateReplicaRequest, + DestroyNexusRequest, Null, PublishNexusRequest, }, GrpcConnect, }, - Binary, - Builder, - ComposeTest, - }, - nvme::{ - get_nvme_resv_report, - list_mayastor_nvme_devices, - nvme_connect, - nvme_disconnect_nqn, + Binary, Builder, ComposeTest, }, + nvme::{get_nvme_resv_report, list_mayastor_nvme_devices, nvme_connect, nvme_disconnect_nqn}, MayastorTest, }; use io_engine::{ bdev::nexus::{ - ChildState, - 
Error, - FaultReason, - NexusNvmePreemption, - NexusStatus, - NvmeReservation, + ChildState, Error, FaultReason, NexusNvmePreemption, NexusStatus, NvmeReservation, }, core::Mthread, grpc::v1::nexus::nexus_destroy, @@ -204,7 +182,7 @@ async fn nexus_io_multipath() { // The first attempt will fail with "Duplicate cntlid x with y" error from // kernel - for i in 0 .. 2 { + for i in 0..2 { let status_c0 = nvme_connect(&ip0.to_string(), &nqn, "tcp", false); if i == 0 && status_c0.success() { break; @@ -448,8 +426,7 @@ async fn nexus_io_resv_acquire() { max_cntl_id: 0xffef, resv_key: resv_key2, preempt_key: 0, - children: [format!("nvmf://{ip0}:8420/{HOSTNQN}:{REPL_UUID}")] - .to_vec(), + children: [format!("nvmf://{ip0}:8420/{HOSTNQN}:{REPL_UUID}")].to_vec(), nexus_info_key: "".to_string(), resv_type: None, preempt_policy: 0, @@ -642,8 +619,7 @@ async fn nexus_io_resv_preempt() { max_cntl_id: 0xffef, resv_key: resv_key2, preempt_key: 0, - children: [format!("nvmf://{ip0}:8420/{HOSTNQN}:{REPL_UUID}")] - .to_vec(), + children: [format!("nvmf://{ip0}:8420/{HOSTNQN}:{REPL_UUID}")].to_vec(), nexus_info_key: "".to_string(), resv_type: Some(NvmeReservation::ExclusiveAccess as i32), preempt_policy: NexusNvmePreemption::Holder as i32, @@ -842,12 +818,7 @@ async fn nexus_io_resv_preempt_tabled() { .await .unwrap(); - async fn test_fn( - hdls: &mut [RpcHandle], - resv: NvmeReservation, - resv_key: u64, - local: bool, - ) { + async fn test_fn(hdls: &mut [RpcHandle], resv: NvmeReservation, resv_key: u64, local: bool) { let mayastor = get_ms(); let ip0 = hdls[0].endpoint.ip(); println!("Using resv {} and key {}", resv as u8, resv_key); @@ -884,10 +855,7 @@ async fn nexus_io_resv_preempt_tabled() { max_cntl_id: 0xffef, resv_key, preempt_key: 0, - children: [format!( - "nvmf://{ip0}:8420/{HOSTNQN}:{REPL_UUID}" - )] - .to_vec(), + children: [format!("nvmf://{ip0}:8420/{HOSTNQN}:{REPL_UUID}")].to_vec(), nexus_info_key: "".to_string(), resv_type: Some(resv as i32), preempt_policy: NexusNvmePreemption::Holder as i32, @@ -912,8 +880,7 @@ async fn nexus_io_resv_preempt_tabled() { let shared = matches!( resv, - NvmeReservation::ExclusiveAccessAllRegs - | NvmeReservation::WriteExclusiveAllRegs + NvmeReservation::ExclusiveAccessAllRegs | NvmeReservation::WriteExclusiveAllRegs ); if shared { // we don't currently distinguish between @@ -924,12 +891,9 @@ async fn nexus_io_resv_preempt_tabled() { let mut reserved = false; let registrants = v["regctl"].as_u64().unwrap() as usize; - for i in 0 .. 
registrants { + for i in 0..registrants { let entry = &v["regctlext"][i]; - assert_eq!( - entry["cntlid"], 0xffff, - "should have dynamic controller ID" - ); + assert_eq!(entry["cntlid"], 0xffff, "should have dynamic controller ID"); if entry["rcsts"] == 1 && !shared { reserved = true; @@ -1156,10 +1120,7 @@ async fn nexus_io_freeze() { .share(Protocol::Nvmf, None) .await .unwrap(); - assert_eq!( - nexus_pause_state(&name), - Some(NexusPauseState::Unpaused) - ); + assert_eq!(nexus_pause_state(&name), Some(NexusPauseState::Unpaused)); }) .await; @@ -1285,14 +1246,9 @@ async fn nexus_io_freeze() { let (s, r) = unbounded::<()>(); tokio::spawn(async move { let device = get_mayastor_nvme_device(); - test_write_to_file( - device, - DataSize::default(), - 32, - DataSize::from_mb(1), - ) - .await - .ok(); + test_write_to_file(device, DataSize::default(), 32, DataSize::from_mb(1)) + .await + .ok(); s.send(()) }); mayastor @@ -1310,14 +1266,11 @@ async fn nexus_io_freeze() { let name = nexus_name.clone(); mayastor .spawn(async move { - let enospc = nexus_lookup(&name) - .map(|n| n.children().iter().all(|c| c.state().is_enospc())); + let enospc = + nexus_lookup(&name).map(|n| n.children().iter().all(|c| c.state().is_enospc())); assert_eq!(enospc, Some(true)); // We're not Paused, because the nexus is faulted due to ENOSPC! - assert_eq!( - nexus_pause_state(&name), - Some(NexusPauseState::Unpaused) - ); + assert_eq!(nexus_pause_state(&name), Some(NexusPauseState::Unpaused)); nexus_lookup_mut(&name).unwrap().destroy().await.unwrap(); }) .await; @@ -1378,9 +1331,7 @@ async fn wait_nexus_faulted( while start.elapsed() <= timeout { let name = name.to_string(); let faulted = mayastor - .spawn(async move { - nexus_lookup(&name).unwrap().status() == NexusStatus::Faulted - }) + .spawn(async move { nexus_lookup(&name).unwrap().status() == NexusStatus::Faulted }) .await; if faulted { return Ok(()); diff --git a/io-engine/tests/nexus_rebuild.rs b/io-engine/tests/nexus_rebuild.rs index 2b69c43e5..aeb775f95 100644 --- a/io-engine/tests/nexus_rebuild.rs +++ b/io-engine/tests/nexus_rebuild.rs @@ -5,12 +5,7 @@ use once_cell::sync::{Lazy, OnceCell}; use tracing::error; use io_engine::{ - bdev::{ - device_create, - device_destroy, - device_open, - nexus::nexus_lookup_mut, - }, + bdev::{device_create, device_destroy, device_open, nexus::nexus_lookup_mut}, core::{MayastorCliArgs, Mthread, Protocol}, rebuild::{BdevRebuildJob, NexusRebuildJob, RebuildState}, }; @@ -47,14 +42,14 @@ fn test_ini(name: &'static str) { *NEXUS_NAME.lock().unwrap() = name; get_err_bdev().clear(); - for i in 0 .. MAX_CHILDREN { + for i in 0..MAX_CHILDREN { common::delete_file(&[get_disk(i)]); common::truncate_file_bytes(&get_disk(i), NEXUS_SIZE + META_SIZE); } } fn test_fini() { - for i in 0 .. MAX_CHILDREN { + for i in 0..MAX_CHILDREN { common::delete_file(&[get_disk(i)]); } } @@ -82,7 +77,7 @@ fn get_dev(number: u64) -> String { async fn nexus_create(size: u64, children: u64, fill_random: bool) { let mut ch = Vec::new(); - for i in 0 .. 
children { + for i in 0..children { ch.push(get_dev(i)); } @@ -94,9 +89,7 @@ async fn nexus_create(size: u64, children: u64, fill_random: bool) { let device = nexus_share().await; let nexus_device = device.clone(); let (s, r) = unbounded::<i32>(); - Mthread::spawn_unaffinitized(move || { - s.send(common::dd_urandom_blkdev(&nexus_device)) - }); + Mthread::spawn_unaffinitized(move || s.send(common::dd_urandom_blkdev(&nexus_device))); let dd_result: i32; reactor_poll!(r, dd_result); assert_eq!(dd_result, 0, "Failed to fill nexus with random data"); @@ -111,9 +104,7 @@ async fn nexus_create(size: u64, children: u64, fill_random: bool) { async fn nexus_share() -> String { let nexus = nexus_lookup_mut(nexus_name()).unwrap(); - let device = common::device_path_from_uri( - &nexus.share(Protocol::Off, None).await.unwrap(), - ); + let device = common::device_path_from_uri(&nexus.share(Protocol::Off, None).await.unwrap()); reactor_poll!(200); device } @@ -221,9 +212,8 @@ async fn rebuild_replica() { .await .unwrap(); - for child in 0 .. NUM_CHILDREN { - NexusRebuildJob::lookup(&get_dev(child)) - .expect_err("Should not exist"); + for child in 0..NUM_CHILDREN { + NexusRebuildJob::lookup(&get_dev(child)).expect_err("Should not exist"); NexusRebuildJob::lookup_src(&get_dev(child)) .iter() @@ -238,16 +228,15 @@ async fn rebuild_replica() { let _ = nexus.start_rebuild(&get_dev(NUM_CHILDREN)).await; - for child in 0 .. NUM_CHILDREN { - NexusRebuildJob::lookup(&get_dev(child)) - .expect_err("rebuild job not created yet"); + for child in 0..NUM_CHILDREN { + NexusRebuildJob::lookup(&get_dev(child)).expect_err("rebuild job not created yet"); } let src = NexusRebuildJob::lookup(&get_dev(NUM_CHILDREN)) .expect("now the job should exist") .src_uri() .to_string(); - for child in 0 .. NUM_CHILDREN { + for child in 0..NUM_CHILDREN { if get_dev(child) != src { NexusRebuildJob::lookup_src(&get_dev(child)) .iter() @@ -370,8 +359,7 @@ async fn rebuild_bdev_partial() { let size = 100 * 1024 * 1024; let seg_size = Self::seg_size(); let blk_size = Self::blk_size(); - let rebuild_map = - SegmentMap::new(size / blk_size, blk_size, seg_size); + let rebuild_map = SegmentMap::new(size / blk_size, blk_size, seg_size); Self(rebuild_map) } fn blk_size() -> u64 { @@ -416,11 +404,7 @@ async fn rebuild_bdev_partial() { .unwrap(); let chan = job.start().await.unwrap(); let state = chan.await.unwrap(); - assert_eq!( - state, - RebuildState::Completed, - "Rebuild should succeed" - ); + assert_eq!(state, RebuildState::Completed, "Rebuild should succeed"); let stats = job.stats().await; assert_eq!( stats.blocks_transferred, dirty_blks, diff --git a/io-engine/tests/nexus_rebuild_parallel.rs b/io-engine/tests/nexus_rebuild_parallel.rs index 450d3562f..a6404b20f 100644 --- a/io-engine/tests/nexus_rebuild_parallel.rs +++ b/io-engine/tests/nexus_rebuild_parallel.rs @@ -5,8 +5,7 @@ use std::time::{Duration, Instant}; use common::{ compose::{ rpc::v1::{nexus::ChildState, GrpcConnect, Status}, - Binary, - Builder, + Binary, Builder, }, nexus::NexusBuilder, pool::PoolBuilder, @@ -33,7 +32,7 @@ async fn nexus_rebuild_parallel() { const DISK_NAME: &str = "disk"; // Create pool data file. - for r in 0 .. R { + for r in 0..R { let name = format!("/tmp/{DISK_NAME}_{r}"); common::delete_file(&[name.clone()]); common::truncate_file_bytes(&name, DISK_SIZE); @@ -91,10 +90,10 @@ async fn nexus_rebuild_parallel() { let mut vols = Vec::new(); - for i in 0 .. N { + for i in 0..N { // Create R replicas on the pools. let mut replicas = Vec::new(); - for r in 0 .. 
R { + for r in 0..R { let mut repl = ReplicaBuilder::new(ms[r].clone()) .with_pool(&pools[r]) .with_name(&format!("v{i}r{r}")) @@ -116,10 +115,7 @@ async fn nexus_rebuild_parallel() { nex.create().await.unwrap(); - vols.push(Volume { - replicas, - nex, - }); + vols.push(Volume { replicas, nex }); } // Adding replicas / starting rebuilds. @@ -132,17 +128,14 @@ async fn nexus_rebuild_parallel() { .expect("All volumes must go online"); // Delete test files. - for r in 0 .. R { + for r in 0..R { let name = format!("/tmp/{DISK_NAME}_{r}"); common::delete_file(&[name.clone()]); } } /// Monitors and prints volume states. -async fn monitor_volumes( - vols: &Vec<Volume>, - timeout: Duration, -) -> Result<(), Status> { +async fn monitor_volumes(vols: &Vec<Volume>, timeout: Duration) -> Result<(), Status> { println!("\nMonitoring {n} volumes", n = vols.len()); let start = Instant::now(); @@ -169,10 +162,7 @@ async fn monitor_volumes( vols_degraded = true; } ChildState::Degraded => { - s = format!( - "{s} REBUILD {p:02} | ", - p = c.rebuild_progress - ); + s = format!("{s} REBUILD {p:02} | ", p = c.rebuild_progress); vols_degraded = true; } } diff --git a/io-engine/tests/nexus_rebuild_partial.rs b/io-engine/tests/nexus_rebuild_partial.rs index 4a1715c0c..5cfc9b441 100644 --- a/io-engine/tests/nexus_rebuild_partial.rs +++ b/io-engine/tests/nexus_rebuild_partial.rs @@ -6,9 +6,7 @@ use common::{ nexus::{ChildState, ChildStateReason}, GrpcConnect, }, - Binary, - Builder, - ComposeTest, + Binary, Builder, ComposeTest, }, file_io::DataSize, nexus::{test_write_to_nexus, NexusBuilder}, @@ -24,10 +22,7 @@ use io_engine_tests::{ #[cfg(feature = "fault-injection")] use io_engine::core::fault_injection::{ - FaultDomain, - FaultIoOperation, - FaultIoStage, - InjectionBuilder, + FaultDomain, FaultIoOperation, FaultIoStage, InjectionBuilder, }; #[cfg(feature = "fault-injection")] @@ -72,11 +67,7 @@ async fn create_compose_test() -> ComposeTest { .unwrap() .add_container_bin( "ms_nex", - Binary::from_dbg("io-engine").with_args(vec![ - "-l", - "1,2,3,4", - "-Fcompact,color", - ]), + Binary::from_dbg("io-engine").with_args(vec!["-l", "1,2,3,4", "-Fcompact,color"]), // Binary::from_dbg("io-engine").with_args(vec!["-l", "1,2,3,4"]), ) .add_container_bin( @@ -201,7 +192,7 @@ async fn nexus_partial_rebuild_io_fault() { .with_domain(FaultDomain::NexusChild) .with_io_operation(FaultIoOperation::Write) .with_io_stage(FaultIoStage::Completion) - .with_block_range(7 * SEG_BLK .. u64::MAX) + .with_block_range(7 * SEG_BLK..u64::MAX) .build_uri() .unwrap(); add_fault_injection(nex_0.rpc(), &inj_uri).await.unwrap(); @@ -314,14 +305,9 @@ async fn nexus_partial_rebuild_offline_online() { assert_eq!(children.len(), 2); // Write 10 x 16 KiB buffers. - test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 10, - DataSize::from_kb(16), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 10, DataSize::from_kb(16)) + .await + .unwrap(); // Offline the replica. 
nex_0 diff --git a/io-engine/tests/nexus_rebuild_partial_loop.rs b/io-engine/tests/nexus_rebuild_partial_loop.rs index 75bd0bfa5..3b4f411d8 100644 --- a/io-engine/tests/nexus_rebuild_partial_loop.rs +++ b/io-engine/tests/nexus_rebuild_partial_loop.rs @@ -10,9 +10,7 @@ use std::{ use common::{ compose::{ rpc::v1::{nexus::ChildState, GrpcConnect, SharedRpcHandle, Status}, - Binary, - Builder, - ComposeTest, + Binary, Builder, ComposeTest, }, file_io::DataSize, fio::{spawn_fio_task, FioBuilder, FioJobBuilder}, @@ -225,10 +223,8 @@ async fn nexus_rebuild_partial_loop() { println!("[B]: node restart iter #{itr} ..."); - for (idx, node) in nodes[0 ..= 1].iter_mut().enumerate() { - println!( - "[B]: sleeping before restarting node {idx} ..." - ); + for (idx, node) in nodes[0..=1].iter_mut().enumerate() { + println!("[B]: sleeping before restarting node {idx} ..."); tokio::time::sleep(Duration::from_secs(2)).await; println!("[B]: restarting node {idx} ..."); @@ -236,9 +232,7 @@ async fn nexus_rebuild_partial_loop() { nex.online_child_replica(&node.repl).await.unwrap(); - monitor_nexus(&nex, Duration::from_secs(120)) - .await - .unwrap(); + monitor_nexus(&nex, Duration::from_secs(120)).await.unwrap(); println!("[B]: restarting node {idx} done"); } @@ -247,9 +241,7 @@ async fn nexus_rebuild_partial_loop() { itr += 1; } - println!( - "[B]: starting node restart loop finished after {itr} iteration(s)" - ); + println!("[B]: starting node restart loop finished after {itr} iteration(s)"); } }); tokio::pin!(j1); @@ -264,10 +256,7 @@ async fn nexus_rebuild_partial_loop() { /// Periodically polls the nexus, prints its child status, and checks if the /// children are online. -async fn monitor_nexus( - nex: &NexusBuilder, - timeout: Duration, -) -> Result<(), Status> { +async fn monitor_nexus(nex: &NexusBuilder, timeout: Duration) -> Result<(), Status> { let start = Instant::now(); loop { @@ -277,11 +266,7 @@ async fn monitor_nexus( .iter() .map(|c| { let s = if c.rebuild_progress >= 0 { - format!( - "{s:?} [{p}]", - s = c.state(), - p = c.rebuild_progress, - ) + format!("{s:?} [{p}]", s = c.state(), p = c.rebuild_progress,) } else { format!("{s:?}", s = c.state()) }; diff --git a/io-engine/tests/nexus_rebuild_source.rs b/io-engine/tests/nexus_rebuild_source.rs index 3f2073d89..5544954bd 100644 --- a/io-engine/tests/nexus_rebuild_source.rs +++ b/io-engine/tests/nexus_rebuild_source.rs @@ -4,11 +4,9 @@ use common::{ compose::{ rpc::v1::{ nexus::{ChildState, ChildStateReason}, - GrpcConnect, - SharedRpcHandle, + GrpcConnect, SharedRpcHandle, }, - Binary, - Builder, + Binary, Builder, }, nexus::NexusBuilder, pool::PoolBuilder, @@ -45,7 +43,7 @@ impl TestNode { } async fn clear(&mut self) { - for i in 0 .. self.replicas.len() { + for i in 0..self.replicas.len() { self.replicas[i].destroy().await.unwrap(); } self.replicas.clear(); @@ -62,7 +60,7 @@ async fn test_src_selection( let to = std::time::Duration::from_secs(1); let mut replicas = Vec::new(); - for i in 0 .. 
child_cfg.len() { + for i in 0..child_cfg.len() { replicas.push(nodes[child_cfg[i]].next_replica().await); } @@ -75,9 +73,7 @@ async fn test_src_selection( nex.create().await.unwrap(); println!("---------"); - println!( - "> {child_cfg:?}: expect to rebuild #{dst} from #{expected_src_idx}" - ); + println!("> {child_cfg:?}: expect to rebuild #{dst} from #{expected_src_idx}"); let children = nex.get_nexus().await.unwrap().children; for (idx, child) in children.iter().enumerate() { @@ -172,7 +168,7 @@ async fn nexus_rebuild_prefer_local_replica() { let mut nodes = Vec::new(); - for idx in 0 .. 3 { + for idx in 0..3 { let ms = conn.grpc_handle_shared(&format!("ms_{idx}")).await.unwrap(); let mut pool = PoolBuilder::new(ms.clone()) diff --git a/io-engine/tests/nexus_rebuild_verify.rs b/io-engine/tests/nexus_rebuild_verify.rs index 83ae8d3ac..f7f6976df 100644 --- a/io-engine/tests/nexus_rebuild_verify.rs +++ b/io-engine/tests/nexus_rebuild_verify.rs @@ -6,11 +6,9 @@ use common::{ compose::{ rpc::v1::{ nexus::{ChildState, ChildStateReason, RebuildJobState}, - GrpcConnect, - SharedRpcHandle, + GrpcConnect, SharedRpcHandle, }, - Binary, - Builder, + Binary, Builder, }, nexus::NexusBuilder, pool::PoolBuilder, @@ -19,11 +17,7 @@ use common::{ }; use io_engine::core::fault_injection::{ - FaultDomain, - FaultIoOperation, - FaultIoStage, - FaultMethod, - InjectionBuilder, + FaultDomain, FaultIoOperation, FaultIoStage, FaultMethod, InjectionBuilder, }; use std::time::Duration; diff --git a/io-engine/tests/nexus_replica_resize.rs b/io-engine/tests/nexus_replica_resize.rs index 8972beecd..3e89bdade 100644 --- a/io-engine/tests/nexus_replica_resize.rs +++ b/io-engine/tests/nexus_replica_resize.rs @@ -3,13 +3,10 @@ pub mod common; use common::{ compose::{ rpc::v1::{ - nexus::NexusState, - snapshot::NexusCreateSnapshotReplicaDescriptor, - GrpcConnect, + nexus::NexusState, snapshot::NexusCreateSnapshotReplicaDescriptor, GrpcConnect, SharedRpcHandle, }, - Binary, - Builder, + Binary, Builder, }, fio::{Fio, FioBuilder, FioJobBuilder}, nexus::NexusBuilder, @@ -93,9 +90,7 @@ impl ResizeTestTrait for ResizeTest { ResizeTest::WithoutReplicaResize => { do_resize_without_replica_resize(nexus, replicas).await } - ResizeTest::AfterReplicaResize => { - do_resize_after_replica_resize(nexus, replicas).await - } + ResizeTest::AfterReplicaResize => do_resize_after_replica_resize(nexus, replicas).await, ResizeTest::WithRebuildingReplica => { do_resize_with_rebuilding_replica(nexus, replicas).await } @@ -127,10 +122,7 @@ async fn do_resize_without_replica_resize( .expect_err("Resize of nexus without resizing ALL replicas must fail"); } -async fn do_resize_after_replica_resize( - nexus: &NexusBuilder, - replicas: Vec<&mut ReplicaBuilder>, -) { +async fn do_resize_after_replica_resize(nexus: &NexusBuilder, replicas: Vec<&mut ReplicaBuilder>) { for replica in replicas { let ret = replica.resize(EXPANDED_SIZE).await.unwrap(); assert!(ret.size >= EXPANDED_SIZE); diff --git a/io-engine/tests/nexus_restart.rs b/io-engine/tests/nexus_restart.rs index 2e2a080dd..45ab66b08 100644 --- a/io-engine/tests/nexus_restart.rs +++ b/io-engine/tests/nexus_restart.rs @@ -3,9 +3,7 @@ pub mod common; use io_engine_tests::{ compose::{ rpc::v1::{GrpcConnect, SharedRpcHandle}, - Binary, - Builder, - ComposeTest, + Binary, Builder, ComposeTest, }, file_io::DataSize, fio::{spawn_fio_task, FioBuilder, FioJobBuilder}, @@ -183,10 +181,7 @@ impl TestCluster { repl.create().await.unwrap(); repl.share().await.unwrap(); - nodes.push(StorageNode { - pool, - 
repl, - }); + nodes.push(StorageNode { pool, repl }); } Arc::new(Mutex::new(Self { diff --git a/io-engine/tests/nexus_share.rs b/io-engine/tests/nexus_share.rs index ab35bc756..3abbdd0aa 100644 --- a/io-engine/tests/nexus_share.rs +++ b/io-engine/tests/nexus_share.rs @@ -1,13 +1,6 @@ use io_engine::{ bdev::nexus::{nexus_create, nexus_lookup_mut}, - core::{ - mayastor_env_stop, - MayastorCliArgs, - Protocol, - Reactor, - Share, - UntypedBdev, - }, + core::{mayastor_env_stop, MayastorCliArgs, Protocol, Reactor, Share, UntypedBdev}, }; use std::pin::Pin; diff --git a/io-engine/tests/nexus_thin.rs b/io-engine/tests/nexus_thin.rs index e8962c621..a8f5e4617 100644 --- a/io-engine/tests/nexus_thin.rs +++ b/io-engine/tests/nexus_thin.rs @@ -3,9 +3,7 @@ pub mod common; use common::{ compose::{ rpc::v1::{GrpcConnect, SharedRpcHandle}, - Binary, - Builder, - ComposeTest, + Binary, Builder, ComposeTest, }, nexus::NexusBuilder, pool::PoolBuilder, diff --git a/io-engine/tests/nexus_thin_no_space.rs b/io-engine/tests/nexus_thin_no_space.rs index 7a9ef5c99..92b64303a 100644 --- a/io-engine/tests/nexus_thin_no_space.rs +++ b/io-engine/tests/nexus_thin_no_space.rs @@ -6,8 +6,7 @@ use common::{ nexus::{ChildState, ChildStateReason}, GrpcConnect, }, - Binary, - Builder, + Binary, Builder, }, file_io::DataSize, nexus::{find_nexus_by_uuid, test_write_to_nexus, NexusBuilder}, @@ -63,23 +62,12 @@ async fn nexus_thin_nospc_local_single() { nex_0.publish().await.unwrap(); // Write less than pool size. - test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 30, - DataSize::from_mb(1), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 30, DataSize::from_mb(1)) + .await + .unwrap(); // Write more than pool size. Must result in ENOSPC. - let res = test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 80, - DataSize::from_mb(1), - ) - .await; + let res = test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 80, DataSize::from_mb(1)).await; assert_eq!(res.unwrap_err().raw_os_error().unwrap(), libc::ENOSPC); } @@ -136,23 +124,12 @@ async fn nexus_thin_nospc_remote_single() { nex_0.publish().await.unwrap(); // Write less than pool size. - test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 30, - DataSize::from_mb(1), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 30, DataSize::from_mb(1)) + .await + .unwrap(); // Write more than pool size. Must result in ENOSPC. 
- let res = test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 80, - DataSize::from_mb(1), - ) - .await; + let res = test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 80, DataSize::from_mb(1)).await; assert_eq!(res.unwrap_err().raw_os_error().unwrap(), libc::ENOSPC); } diff --git a/io-engine/tests/nexus_thin_rebuild.rs b/io-engine/tests/nexus_thin_rebuild.rs index 5f4b1b60d..7b2c3cb80 100644 --- a/io-engine/tests/nexus_thin_rebuild.rs +++ b/io-engine/tests/nexus_thin_rebuild.rs @@ -3,8 +3,7 @@ pub mod common; use common::{ compose::{ rpc::v1::{GrpcConnect, SharedRpcHandle}, - Binary, - Builder, + Binary, Builder, }, file_io::DataSize, nexus::{test_write_to_nexus, NexusBuilder}, @@ -95,14 +94,9 @@ async fn test_thin_rebuild(cfg: StorConfig) { nex_0.create().await.unwrap(); nex_0.publish().await.unwrap(); - test_write_to_nexus( - &nex_0, - DataSize::from_bytes(0), - 14, - DataSize::from_mb(1), - ) - .await - .unwrap(); + test_write_to_nexus(&nex_0, DataSize::from_bytes(0), 14, DataSize::from_mb(1)) + .await + .unwrap(); nex_0.add_replica(&repl_2, false).await.unwrap(); diff --git a/io-engine/tests/nexus_with_local.rs b/io-engine/tests/nexus_with_local.rs index c35f79a29..ab91b0c8d 100644 --- a/io-engine/tests/nexus_with_local.rs +++ b/io-engine/tests/nexus_with_local.rs @@ -5,18 +5,12 @@ use io_engine::constants::NVME_NQN_PREFIX; use common::compose::{ rpc::v1::{ bdev::{DestroyBdevRequest, ListBdevOptions}, - nexus::{ - AddChildNexusRequest, - CreateNexusRequest, - RemoveChildNexusRequest, - }, + nexus::{AddChildNexusRequest, CreateNexusRequest, RemoveChildNexusRequest}, pool::CreatePoolRequest, replica::CreateReplicaRequest, - GrpcConnect, - RpcHandle, + GrpcConnect, RpcHandle, }, - Binary, - Builder, + Binary, Builder, }; fn nexus_uuid() -> String { @@ -73,9 +67,7 @@ async fn create_replicas(h: &mut RpcHandle) { async fn check_aliases(h: &mut RpcHandle, present: bool) { let bdevs = h .bdev - .list(ListBdevOptions { - name: None, - }) + .list(ListBdevOptions { name: None }) .await .unwrap() .into_inner(); diff --git a/io-engine/tests/nvme_device_timeout.rs b/io-engine/tests/nvme_device_timeout.rs index 4a34137de..6afa9b545 100755 --- a/io-engine/tests/nvme_device_timeout.rs +++ b/io-engine/tests/nvme_device_timeout.rs @@ -9,18 +9,13 @@ use common::compose::{ mayastor::{BdevShareRequest, BdevUri, Null}, GrpcConnect, }, - Builder, - MayastorTest, + Builder, MayastorTest, }; use io_engine::{ bdev::{device_create, device_destroy, device_open}, constants::NVME_NQN_PREFIX, core::{ - BlockDevice, - BlockDeviceHandle, - DeviceTimeoutAction, - IoCompletionStatus, - MayastorCliArgs, + BlockDevice, BlockDeviceHandle, DeviceTimeoutAction, IoCompletionStatus, MayastorCliArgs, ReadOptions, }, subsys::{Config, NvmeBdevOpts}, @@ -31,8 +26,7 @@ pub mod common; const TEST_CTX_STRING: &str = "test context"; -static MAYASTOR: Lazy<MayastorTest> = - Lazy::new(|| MayastorTest::new(MayastorCliArgs::default())); +static MAYASTOR: Lazy<MayastorTest> = Lazy::new(|| MayastorTest::new(MayastorCliArgs::default())); static CALLBACK_FLAG: AtomicCell<bool> = AtomicCell::new(false); @@ -135,7 +129,7 @@ async fn test_io_timeout(action_on_timeout: DeviceTimeoutAction) { .await; test.pause("ms1").await.unwrap(); - for i in 1 .. 6 { + for i in 1..6 { tokio::time::sleep(std::time::Duration::from_secs(1)).await; println!("waiting for the container to be fully suspended... {i}/5"); } @@ -162,8 +156,7 @@ async fn test_io_timeout(action_on_timeout: DeviceTimeoutAction) { // Make sure we were passed the same pattern string as requested. 
let s = unsafe { - let slice = - slice::from_raw_parts(ctx as *const u8, TEST_CTX_STRING.len()); + let slice = slice::from_raw_parts(ctx as *const u8, TEST_CTX_STRING.len()); str::from_utf8(slice).unwrap() }; @@ -208,7 +201,7 @@ async fn test_io_timeout(action_on_timeout: DeviceTimeoutAction) { let mut io_timedout = false; // Wait up to 120 seconds till I/O times out. - for i in 1 .. 25 { + for i in 1..25 { println!("waiting for I/O to be timed out... {i}/24"); tokio::time::sleep(std::time::Duration::from_secs(5)).await; // Break the loop if the callback has been called in response to I/O @@ -324,7 +317,7 @@ async fn io_timeout_ignore() { .await; test.pause("ms1").await.unwrap(); - for i in 1 .. 6 { + for i in 1..6 { tokio::time::sleep(std::time::Duration::from_secs(1)).await; println!("waiting for the container to be fully suspended... {i}/5"); } @@ -351,8 +344,7 @@ async fn io_timeout_ignore() { // Make sure we were passed the same pattern string as requested. let s = unsafe { - let slice = - slice::from_raw_parts(ctx as *const u8, TEST_CTX_STRING.len()); + let slice = slice::from_raw_parts(ctx as *const u8, TEST_CTX_STRING.len()); str::from_utf8(slice).unwrap() }; @@ -399,7 +391,7 @@ async fn io_timeout_ignore() { // Wait 5 times longer than timeout interval. Make sure I/O operation not // interrupted. - for i in 1 .. 6 { + for i in 1..6 { println!("waiting for I/O timeout to happen... {i}/5"); tokio::time::sleep(std::time::Duration::from_secs(5)).await; assert!(!CALLBACK_FLAG.load(), "I/O operation interrupted"); diff --git a/io-engine/tests/nvmf.rs b/io-engine/tests/nvmf.rs index cb8de4635..68e3f327a 100644 --- a/io-engine/tests/nvmf.rs +++ b/io-engine/tests/nvmf.rs @@ -1,13 +1,7 @@ use io_engine::{ bdev_api::bdev_create, constants::NVME_NQN_PREFIX, - core::{ - mayastor_env_stop, - MayastorCliArgs, - MayastorEnvironment, - Reactor, - UntypedBdev, - }, + core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactor, UntypedBdev}, subsys::{NvmfSubsystem, SubType}, }; @@ -18,26 +12,17 @@ use common::{ compose::{ rpc::{ v0::{ - mayastor::{ - BdevShareRequest, - BdevUri, - CreateReply, - ShareProtocolNexus, - }, + mayastor::{BdevShareRequest, BdevUri, CreateReply, ShareProtocolNexus}, GrpcConnect, }, v1::{ nexus::{CreateNexusRequest, PublishNexusRequest}, pool::CreatePoolRequest, replica::CreateReplicaRequest, - GrpcConnect as v1GrpcConnect, - RpcHandle, + GrpcConnect as v1GrpcConnect, RpcHandle, }, }, - Binary, - Builder, - ComposeTest, - NetworkMode, + Binary, Builder, ComposeTest, NetworkMode, }, nvme::{nvme_connect, nvme_disconnect_nqn}, }; @@ -113,10 +98,7 @@ fn nvmf_target() { // we should have at least 2 subsystems Reactor::block_on(async { - assert_eq!( - NvmfSubsystem::first().unwrap().into_iter().count(), - 2 - ); + assert_eq!(NvmfSubsystem::first().unwrap().into_iter().count(), 2); }); // verify the bdev is claimed by our target -- make sure we skip @@ -161,10 +143,7 @@ async fn nvmf_set_target_interface() { .name("cargo-test") .network(network) .unwrap() - .add_container_bin( - "ms1", - Binary::from_dbg("io-engine").with_args(args), - ) + .add_container_bin("ms1", Binary::from_dbg("io-engine").with_args(args)) .with_clean(true) .build() .await @@ -179,12 +158,7 @@ async fn nvmf_set_target_interface() { let tgt_ip = match tgt_ip { Some(s) => s.to_string(), None => { - let cnt = test - .list_cluster_containers() - .await - .unwrap() - .pop() - .unwrap(); + let cnt = test.list_cluster_containers().await.unwrap().pop().unwrap(); let networks = 
             let networks = cnt.network_settings.unwrap().networks.unwrap();
             let ip_addr = networks
                 .get("cargo-test")
                 .unwrap()
                 .ip_address
                 .clone()
                 .unwrap();
@@ -215,8 +189,7 @@ async fn nvmf_set_target_interface() {
         .into_inner()
         .uri;
-    let re = Regex::new(r"^nvmf(\+rdma\+tcp|\+tcp)://([0-9.]+):[0-9]+/.*$")
-        .unwrap();
+    let re = Regex::new(r"^nvmf(\+rdma\+tcp|\+tcp)://([0-9.]+):[0-9]+/.*$").unwrap();
     let cap = re.captures(&bdev_uri).unwrap();
     let shared_ip = cap.get(2).unwrap().as_str();
@@ -267,13 +240,7 @@ async fn test_rdma_target() {
         .add_container_bin(
             "ms_0",
             Binary::from_dbg("io-engine")
-                .with_args(vec![
-                    "-l",
-                    "1,2",
-                    "--enable-rdma",
-                    "-T",
-                    iface.as_str(),
-                ])
+                .with_args(vec!["-l", "1,2", "--enable-rdma", "-T", iface.as_str()])
                 .with_privileged(Some(true)),
         )
         .with_clean(true)
diff --git a/io-engine/tests/nvmf_connect.rs b/io-engine/tests/nvmf_connect.rs
index 2e7993a1e..bd2851729 100644
--- a/io-engine/tests/nvmf_connect.rs
+++ b/io-engine/tests/nvmf_connect.rs
@@ -27,7 +27,7 @@ async fn nvmf_connect_async() {
     let uri = init_nvmf_share().await;
-    for _ in 0 .. 20 {
+    for _ in 0..20 {
         let name = spawn_device_create(&uri).await;
         let f0 = spawn_get_io_handle_nonblock(&name);
@@ -53,7 +53,7 @@ async fn nvmf_connect_async_drop() {
     let uri = init_nvmf_share().await;
-    for _ in 0 .. 20 {
+    for _ in 0..20 {
         let name = spawn_device_create(&uri).await;
         let f0 = spawn_get_io_handle_nonblock(&name);
diff --git a/io-engine/tests/persistence.rs b/io-engine/tests/persistence.rs
index abf9d6486..c8b0c7e57 100644
--- a/io-engine/tests/persistence.rs
+++ b/io-engine/tests/persistence.rs
@@ -2,29 +2,13 @@ use crate::common::fio_run_verify;
 use common::compose::{
     rpc::v0::{
         mayastor::{
-            AddChildNexusRequest,
-            BdevShareRequest,
-            BdevUri,
-            Child,
-            ChildState,
-            CreateNexusRequest,
-            CreateReply,
-            DestroyNexusRequest,
-            Nexus,
-            NexusState,
-            Null,
-            PublishNexusRequest,
-            RebuildStateRequest,
-            RemoveChildNexusRequest,
-            ShareProtocolNexus,
+            AddChildNexusRequest, BdevShareRequest, BdevUri, Child, ChildState, CreateNexusRequest,
+            CreateReply, DestroyNexusRequest, Nexus, NexusState, Null, PublishNexusRequest,
+            RebuildStateRequest, RemoveChildNexusRequest, ShareProtocolNexus,
         },
-        GrpcConnect,
-        RpcHandle,
+        GrpcConnect, RpcHandle,
     },
-    Binary,
-    Builder,
-    ComposeTest,
-    ContainerSpec,
+    Binary, Builder, ComposeTest, ContainerSpec,
 };
 use etcd_client::Client;
@@ -187,9 +171,7 @@ async fn persist_io_failure() {
         .with_rand_hostnqn(true);
     target.connect().unwrap();
     let devices = target.block_devices(2).unwrap();
-    let fio_hdl = tokio::spawn(async move {
-        fio_run_verify(&devices[0].to_string()).unwrap()
-    });
+    let fio_hdl = tokio::spawn(async move { fio_run_verify(&devices[0].to_string()).unwrap() });
     fio_hdl.await.unwrap();
@@ -403,12 +385,7 @@ async fn create_nexus(hdl: &mut RpcHandle, uuid: &str, children: Vec<String>) {
         .expect("Failed to create nexus.");
 }
-async fn add_child_nexus(
-    hdl: &mut RpcHandle,
-    uuid: &str,
-    child: &str,
-    norebuild: bool,
-) {
+async fn add_child_nexus(hdl: &mut RpcHandle, uuid: &str, child: &str, norebuild: bool) {
     hdl.mayastor
         .add_child_nexus(AddChildNexusRequest {
             uuid: uuid.to_string(),
@@ -501,11 +478,7 @@ async fn get_nexus_state(hdl: &mut RpcHandle, uuid: &str) -> Option<NexusState> {
 }
 /// Returns a child with the given URI.
-async fn get_child(
-    hdl: &mut RpcHandle,
-    nexus_uuid: &str,
-    child_uri: &str,
-) -> Child {
+async fn get_child(hdl: &mut RpcHandle, nexus_uuid: &str, child_uri: &str) -> Child {
     let n = get_nexus(hdl, nexus_uuid)
         .await
         .expect("Failed to get nexus");
diff --git a/io-engine/tests/poller.rs b/io-engine/tests/poller.rs
index e78c2af8d..5e7aa8772 100644
--- a/io-engine/tests/poller.rs
+++ b/io-engine/tests/poller.rs
@@ -2,12 +2,7 @@ use crossbeam::atomic::AtomicCell;
 use once_cell::sync::Lazy;
 use parking_lot::Mutex;
-use io_engine::core::{
-    mayastor_env_stop,
-    MayastorCliArgs,
-    MayastorEnvironment,
-    Reactors,
-};
+use io_engine::core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactors};
 use spdk_rs::{Cores, PollerBuilder};
@@ -93,9 +88,7 @@ fn poller() {
     let poller = PollerBuilder::new()
         .with_core(1)
-        .with_data(Data {
-            cnt: Mutex::new(0),
-        })
+        .with_data(Data { cnt: Mutex::new(0) })
         .with_poll_fn(|d| {
             *d.cnt.lock() += 1;
             assert_eq!(Cores::current(), 1);
diff --git a/io-engine/tests/reactor.rs b/io-engine/tests/reactor.rs
index ffb71f718..9684ec14e 100644
--- a/io-engine/tests/reactor.rs
+++ b/io-engine/tests/reactor.rs
@@ -1,11 +1,5 @@
 use io_engine::core::{
-    mayastor_env_stop,
-    Cores,
-    MayastorCliArgs,
-    MayastorEnvironment,
-    Mthread,
-    ReactorState,
-    Reactors,
+    mayastor_env_stop, Cores, MayastorCliArgs, MayastorEnvironment, Mthread, ReactorState, Reactors,
 };
 use std::sync::atomic::{AtomicUsize, Ordering};
diff --git a/io-engine/tests/reactor_block_on.rs b/io-engine/tests/reactor_block_on.rs
index 90907b1a5..2141af5ca 100644
--- a/io-engine/tests/reactor_block_on.rs
+++ b/io-engine/tests/reactor_block_on.rs
@@ -1,13 +1,7 @@
 use crossbeam::atomic::AtomicCell;
 use once_cell::sync::Lazy;
-use io_engine::core::{
-    mayastor_env_stop,
-    MayastorCliArgs,
-    MayastorEnvironment,
-    Reactor,
-    Reactors,
-};
+use io_engine::core::{mayastor_env_stop, MayastorCliArgs, MayastorEnvironment, Reactor, Reactors};
 pub mod common;
diff --git a/io-engine/tests/replica_crd.rs b/io-engine/tests/replica_crd.rs
index c0d8af0e6..517d93745 100644
--- a/io-engine/tests/replica_crd.rs
+++ b/io-engine/tests/replica_crd.rs
@@ -14,11 +14,7 @@ use common::{
 };
 use io_engine::core::fault_injection::{
-    FaultDomain,
-    FaultIoOperation,
-    FaultIoStage,
-    FaultMethod,
-    InjectionBuilder,
+    FaultDomain, FaultIoOperation, FaultIoStage, FaultMethod, InjectionBuilder,
 };
 // Test that the third CRD value is used for a replica target.
diff --git a/io-engine/tests/replica_snapshot.rs b/io-engine/tests/replica_snapshot.rs
index be1c95bb3..82a4a3b6a 100644
--- a/io-engine/tests/replica_snapshot.rs
+++ b/io-engine/tests/replica_snapshot.rs
@@ -15,10 +15,7 @@ use common::{
     compose::{
         rpc::v0::{
             mayastor::{
-                CreatePoolRequest,
-                CreateReplicaRequest,
-                ShareProtocolReplica,
-                ShareReplicaRequest,
+                CreatePoolRequest, CreateReplicaRequest, ShareProtocolReplica, ShareReplicaRequest,
             },
             GrpcConnect,
         },
@@ -112,9 +109,9 @@ async fn replica_snapshot() {
     // Issue an unimplemented vendor command
     // This checks that the target is correctly rejecting such commands
     // In practice the nexus will not send such commands
-    custom_nvme_admin(0xc1).await.expect_err(
-        "unexpectedly succeeded invalid nvme admin command",
-    );
+    custom_nvme_admin(0xc1)
+        .await
+        .expect_err("unexpectedly succeeded invalid nvme admin command");
     bdev_io::read_some(NXNAME, 0, 2, 0xff).await.unwrap();
     let ts = create_snapshot().await.unwrap();
     // Check that IO to the replica still works after creating a
diff --git a/io-engine/tests/replica_thin.rs b/io-engine/tests/replica_thin.rs
index 626a41b8b..97e60cd03 100644
--- a/io-engine/tests/replica_thin.rs
+++ b/io-engine/tests/replica_thin.rs
@@ -84,9 +84,7 @@ async fn replica_thin_used_space() {
     let u_0_after = repl_0.get_replica().await.unwrap().usage.unwrap();
     // We've copied some data, so number of used clusters must increase.
-    assert!(
-        u_0_before.num_allocated_clusters < u_0_after.num_allocated_clusters
-    );
+    assert!(u_0_before.num_allocated_clusters < u_0_after.num_allocated_clusters);
     assert!(p_before.used < p_after.used);
     // The replica isn't full, so number of used clusters must be less than
diff --git a/io-engine/tests/replica_thin_no_space.rs b/io-engine/tests/replica_thin_no_space.rs
index c9d98c478..95770c820 100644
--- a/io-engine/tests/replica_thin_no_space.rs
+++ b/io-engine/tests/replica_thin_no_space.rs
@@ -12,11 +12,7 @@ use common::{
     test::add_fault_injection,
 };
-use io_engine::core::fault_injection::{
-    FaultDomain,
-    FaultIoStage,
-    InjectionBuilder,
-};
+use io_engine::core::fault_injection::{FaultDomain, FaultIoStage, InjectionBuilder};
 use spdk_rs::NvmeStatus;
@@ -32,11 +28,7 @@ async fn replica_thin_nospc() {
         .unwrap()
         .add_container_bin(
             "ms_0",
-            Binary::from_dbg("io-engine").with_args(vec![
-                "-l",
-                "1",
-                "-Fcompact,color,nodate",
-            ]),
+            Binary::from_dbg("io-engine").with_args(vec!["-l", "1", "-Fcompact,color,nodate"]),
         )
         .with_clean(true)
         .build()
@@ -107,11 +99,7 @@ async fn replica_nospc_inject() {
         .unwrap()
         .add_container_bin(
             "ms_0",
-            Binary::from_dbg("io-engine").with_args(vec![
-                "-l",
-                "1",
-                "-Fcompact,color,nodate",
-            ]),
+            Binary::from_dbg("io-engine").with_args(vec!["-l", "1", "-Fcompact,color,nodate"]),
         )
         .with_clean(true)
         .build()
diff --git a/io-engine/tests/replica_timeout.rs b/io-engine/tests/replica_timeout.rs
index 0c073ff12..732151e77 100644
--- a/io-engine/tests/replica_timeout.rs
+++ b/io-engine/tests/replica_timeout.rs
@@ -92,8 +92,7 @@ async fn replica_stop_cont() {
             .await
             .expect("should publish nexus over nvmf");
             assert!(
-                UntypedBdev::lookup_by_name(&bdev_get_name(&c).unwrap())
-                    .is_some(),
+                UntypedBdev::lookup_by_name(&bdev_get_name(&c).unwrap()).is_some(),
                 "child bdev must exist"
             );
         })
@@ -101,7 +100,7 @@ async fn replica_stop_cont() {
     test.pause("ms1").await.unwrap();
     let mut ticker = tokio::time::interval(Duration::from_secs(1));
-    for i in 1 .. 6 {
+    for i in 1..6 {
         ticker.tick().await;
         println!("waiting for the container to be fully suspended... {i}/5");
     }
@@ -118,7 +117,7 @@ async fn replica_stop_cont() {
     // KATO is 5s, wait at least that long
     let n = 10;
-    for i in 1 ..= n {
+    for i in 1..=n {
         ticker.tick().await;
         println!("unfreeze delay... {i}/{n}");
     }
@@ -141,8 +140,7 @@ async fn replica_stop_cont() {
     mayastor
         .spawn(async move {
             assert!(
-                UntypedBdev::lookup_by_name(&bdev_get_name(&c).unwrap())
-                    .is_none(),
+                UntypedBdev::lookup_by_name(&bdev_get_name(&c).unwrap()).is_none(),
                 "child bdev must be destroyed"
            );
             let nx = nexus_lookup_mut(NXNAME).unwrap();
diff --git a/io-engine/tests/replica_uri.rs b/io-engine/tests/replica_uri.rs
index c4925db63..c94a76f5f 100644
--- a/io-engine/tests/replica_uri.rs
+++ b/io-engine/tests/replica_uri.rs
@@ -1,17 +1,10 @@
 use common::compose::{
     rpc::v0::{
         mayastor::{
-            Bdev,
-            CreateNexusRequest,
-            CreatePoolRequest,
-            CreateReplicaRequest,
-            Null,
-            Replica,
-            ShareProtocolReplica,
-            ShareReplicaRequest,
+            Bdev, CreateNexusRequest, CreatePoolRequest, CreateReplicaRequest, Null, Replica,
+            ShareProtocolReplica, ShareReplicaRequest,
         },
-        GrpcConnect,
-        RpcHandle,
+        GrpcConnect, RpcHandle,
     },
     Builder,
 };
@@ -59,10 +52,7 @@ async fn replica_uri() {
     hdl.mayastor
         .create_pool(CreatePoolRequest {
             name: pool_name(i),
-            disks: vec![format!(
-                "malloc:///disk0?size_mb={}",
-                DISKSIZE_KB / 1024
-            )],
+            disks: vec![format!("malloc:///disk0?size_mb={}", DISKSIZE_KB / 1024)],
         })
         .await
         .unwrap();
diff --git a/io-engine/tests/reset.rs b/io-engine/tests/reset.rs
index 9f42be5a4..6f73f7e6f 100644
--- a/io-engine/tests/reset.rs
+++ b/io-engine/tests/reset.rs
@@ -61,8 +61,7 @@ async fn nexus_reset_mirror() {
             .await
             .unwrap();
-            let bdev =
-                UntypedBdevHandle::open("reset_test", true, true).unwrap();
+            let bdev = UntypedBdevHandle::open("reset_test", true, true).unwrap();
             bdev.reset().await.unwrap();
         })
         .await
diff --git a/io-engine/tests/resource_stats.rs b/io-engine/tests/resource_stats.rs
index dd85ae3d4..2c6f52625 100644
--- a/io-engine/tests/resource_stats.rs
+++ b/io-engine/tests/resource_stats.rs
@@ -2,8 +2,7 @@ pub mod common;
 use common::{
     compose::{
         rpc::v1::{stats::*, GrpcConnect},
-        Binary,
-        Builder,
+        Binary, Builder,
     },
     fio::{FioBuilder, FioJobBuilder},
     nexus::{test_fio_to_nexus, NexusBuilder},
@@ -118,9 +117,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_pool_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_pool_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -128,9 +125,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_pool_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_pool_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -138,9 +133,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_nexus_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_nexus_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -148,9 +141,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_replica_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_replica_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -158,9 +149,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_replica_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_replica_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -168,9 +157,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_replica_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_replica_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -225,9 +212,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_pool_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_pool_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -235,9 +220,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_replica_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_replica_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -245,9 +228,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_replica_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_replica_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -255,9 +236,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_replica_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_replica_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -265,9 +244,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_pool_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_pool_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -275,9 +252,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_pool_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_pool_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -285,9 +260,7 @@ async fn test_resource_stats() {
         .lock()
         .await
         .stats
-        .get_nexus_io_stats(ListStatsOption {
-            name: None,
-        })
+        .get_nexus_io_stats(ListStatsOption { name: None })
         .await
         .unwrap();
@@ -318,9 +291,8 @@ async fn test_resource_stats() {
     assert_ne!(repl_nex_stat.num_write_ops, 0);
     // Validate num_read_ops of nexus and replica.
-    let replica_num_read_ops = repl_0_stat.num_read_ops
-        + repl_1_stat.num_read_ops
-        + repl_nex_stat.num_read_ops;
+    let replica_num_read_ops =
+        repl_0_stat.num_read_ops + repl_1_stat.num_read_ops + repl_nex_stat.num_read_ops;
     assert_eq!(nexus_stat.num_read_ops, replica_num_read_ops);
     // Validate num_read_ops of pool and replica.
diff --git a/io-engine/tests/snapshot_lvol.rs b/io-engine/tests/snapshot_lvol.rs
index eca69a9e8..c8856798f 100755
--- a/io-engine/tests/snapshot_lvol.rs
+++ b/io-engine/tests/snapshot_lvol.rs
@@ -7,9 +7,7 @@ use common::{
     snapshot::ReplicaSnapshotBuilder,
 };
 use io_engine_tests::{
-    file_io::DataSize,
-    nvmf::test_write_to_nvmf,
-    replica::validate_replicas,
+    file_io::DataSize, nvmf::test_write_to_nvmf, replica::validate_replicas,
     snapshot::SnapshotCloneBuilder,
 };
@@ -20,12 +18,7 @@ use common::{bdev_io, compose::MayastorTest};
 use io_engine::{
     bdev::{device_create, device_open},
     core::{
-        CloneParams,
-        CloneXattrs,
-        LogicalVolume,
-        MayastorCliArgs,
-        SnapshotParams,
-        SnapshotXattrs,
+        CloneParams, CloneXattrs, LogicalVolume, MayastorCliArgs, SnapshotParams, SnapshotXattrs,
         UntypedBdev,
     },
     lvs::{Lvol, Lvs, LvsLvol},
@@ -58,11 +51,7 @@
 }
 /// Must be called only in Mayastor context !s
-async fn create_test_pool(
-    pool_name: &str,
-    disk: String,
-    cluster_size: Option<u32>,
-) -> Lvs {
+async fn create_test_pool(pool_name: &str, disk: String, cluster_size: Option<u32>) -> Lvs {
     Lvs::create_or_import(PoolArgs {
         name: pool_name.to_string(),
         disks: vec![disk],
@@ -131,9 +120,8 @@ async fn check_clone(clone_lvol: Lvol, params: CloneParams) {
         (CloneXattrs::CloneUuid, params.clone_uuid().unwrap()),
     ];
     for (attr_name, attr_value) in attrs {
-        let v =
-            Lvol::get_blob_xattr(clone_lvol.blob_checked(), attr_name.name())
-                .expect("Failed to get clone attribute");
+        let v = Lvol::get_blob_xattr(clone_lvol.blob_checked(), attr_name.name())
+            .expect("Failed to get clone attribute");
         assert_eq!(v, attr_value, "clone attr doesn't match");
     }
 }
@@ -243,10 +231,7 @@ async fn test_lvol_alloc_after_snapshot(index: u32, thin: bool) {
     .await;
 }
-fn check_snapshot_descriptor(
-    params: &SnapshotParams,
-    descr: &LvolSnapshotDescriptor,
-) {
+fn check_snapshot_descriptor(params: &SnapshotParams, descr: &LvolSnapshotDescriptor) {
     let snap_params = descr.snapshot_params();
     assert_eq!(
@@ -303,12 +288,7 @@ async fn test_lvol_bdev_snapshot() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool1",
-            "malloc:///disk0?size_mb=64".to_string(),
-            None,
-        )
-        .await;
+        let pool = create_test_pool("pool1", "malloc:///disk0?size_mb=64".to_string(), None).await;
         let lvol = pool
             .create_lvol(
                 "lvol1",
@@ -361,12 +341,7 @@ async fn test_lvol_handle_snapshot() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool2",
-            "malloc:///disk1?size_mb=64".to_string(),
-            None,
-        )
-        .await;
+        let pool = create_test_pool("pool2", "malloc:///disk1?size_mb=64".to_string(), None).await;
         pool.create_lvol(
             "lvol2",
@@ -379,8 +354,7 @@
         .expect("Failed to create test lvol");
         // Create a snapshot using device handle directly.
-        let descr =
-            device_open("lvol2", false).expect("Failed to open volume device");
+        let descr = device_open("lvol2", false).expect("Failed to open volume device");
         let handle = descr
             .into_handle()
             .expect("Failed to get I/O handle for volume device");
@@ -419,12 +393,7 @@ async fn test_lvol_list_snapshot() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool3",
-            "malloc:///disk3?size_mb=64".to_string(),
-            None,
-        )
-        .await;
+        let pool = create_test_pool("pool3", "malloc:///disk3?size_mb=64".to_string(), None).await;
         let lvol = pool
             .create_lvol(
                 "lvol3",
@@ -508,12 +477,7 @@ async fn test_list_all_lvol_snapshots() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool4",
-            "malloc:///disk4?size_mb=128".to_string(),
-            None,
-        )
-        .await;
+        let pool = create_test_pool("pool4", "malloc:///disk4?size_mb=128".to_string(), None).await;
         let lvol = pool
             .create_lvol(
                 "lvol4",
@@ -641,12 +605,7 @@ async fn test_list_pool_snapshots() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool6",
-            "malloc:///disk6?size_mb=32".to_string(),
-            None,
-        )
-        .await;
+        let pool = create_test_pool("pool6", "malloc:///disk6?size_mb=32".to_string(), None).await;
         let lvol = pool
             .create_lvol(
@@ -731,12 +690,7 @@ async fn test_list_all_lvol_snapshots_with_replica_destroy() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool7",
-            "malloc:///disk7?size_mb=128".to_string(),
-            None,
-        )
-        .await;
+        let pool = create_test_pool("pool7", "malloc:///disk7?size_mb=128".to_string(), None).await;
         let lvol = pool
             .create_lvol(
                 "lvol7",
@@ -1012,12 +966,7 @@ async fn test_snapshot_clone() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool9",
-            "malloc:///disk5?size_mb=128".to_string(),
-            None,
-        )
-        .await;
+        let pool = create_test_pool("pool9", "malloc:///disk5?size_mb=128".to_string(), None).await;
         let lvol = pool
             .create_lvol(
                 "lvol9",
@@ -1127,12 +1076,8 @@ async fn test_snapshot_volume_provisioning_mode() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool10",
-            "malloc:///disk10?size_mb=64".to_string(),
-            None,
-        )
-        .await;
+        let pool =
+            create_test_pool("pool10", "malloc:///disk10?size_mb=64".to_string(), None).await;
         let lvol = pool
             .create_lvol(
@@ -1157,14 +1102,20 @@
         );
         // Volume must be reported as thick-provisioned before taking a snapshot.
-        assert!(!lvol.is_thin(), "Volume is reported as thin-provisioned before taking a snapshot");
+        assert!(
+            !lvol.is_thin(),
+            "Volume is reported as thin-provisioned before taking a snapshot"
+        );
         lvol.create_snapshot(snapshot_params.clone())
             .await
             .expect("Failed to create the first snapshot for test volume");
         // Volume must be reported as thin provisioned after taking a snapshot.
-        assert!(lvol.is_thin(), "Volume is not reported as thin-provisioned after taking a snapshot");
+        assert!(
+            lvol.is_thin(),
+            "Volume is not reported as thin-provisioned after taking a snapshot"
+        );
     })
     .await;
 }
@@ -1189,8 +1140,7 @@ async fn test_snapshot_attr() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let mut pool =
-            create_test_pool("pool20", POOL_DEVICE_NAME.into(), None).await;
+        let mut pool = create_test_pool("pool20", POOL_DEVICE_NAME.into(), None).await;
         let lvol = pool
             .create_lvol(
                 "lvol20",
@@ -1242,18 +1192,13 @@ async fn test_snapshot_attr() {
         let snap_attr_value = String::from("top_secret");
         snapshot_lvol
-            .set_blob_attr(
-                snap_attr_name.clone(),
-                snap_attr_value.clone(),
-                true,
-            )
+            .set_blob_attr(snap_attr_name.clone(), snap_attr_value.clone(), true)
             .await
             .expect("Failed to set snapshot attribute");
         // Check attribute.
-        let v =
-            Lvol::get_blob_xattr(snapshot_lvol.blob_checked(), &snap_attr_name)
-                .expect("Failed to get snapshot attribute");
+        let v = Lvol::get_blob_xattr(snapshot_lvol.blob_checked(), &snap_attr_name)
+            .expect("Failed to get snapshot attribute");
         assert_eq!(v, snap_attr_value, "Snapshot attribute doesn't match");
         // Export pool, then reimport it again and check the attribute again.
@@ -1293,11 +1238,8 @@ async fn test_snapshot_attr() {
             .unwrap();
         // Get attribute from imported snapshot and check.
-        let v = Lvol::get_blob_xattr(
-            imported_snapshot_lvol.blob_checked(),
-            &snap_attr_name,
-        )
-        .expect("Failed to get snapshot attribute");
+        let v = Lvol::get_blob_xattr(imported_snapshot_lvol.blob_checked(), &snap_attr_name)
+            .expect("Failed to get snapshot attribute");
         assert_eq!(v, snap_attr_value, "Snapshot attribute doesn't match");
         clean_snapshots(snapshot_list).await;
         pool.destroy().await.expect("Failed to destroy test pool");
@@ -1310,19 +1252,15 @@ async fn test_delete_snapshot_with_valid_clone() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool13",
-            "malloc:///disk13?size_mb=128".to_string(),
-            None,
-        )
-        .await;
+        let pool =
+            create_test_pool("pool13", "malloc:///disk13?size_mb=128".to_string(), None).await;
         let lvol = pool
             .create_lvol(
                 "lvol13",
                 32 * 1024 * 1024,
                 Some(&Uuid::new_v4().to_string()),
                 false,
-                None
+                None,
             )
             .await
             .expect("Failed to create test lvol");
@@ -1352,7 +1290,8 @@ async fn test_delete_snapshot_with_valid_clone() {
         assert_eq!(1, snapshot_list.len(), "Snapshot Count not matched!!");
         let snapshot_lvol = UntypedBdev::lookup_by_uuid_str(
-            snapshot_list.first()
+            snapshot_list
+                .first()
                 .unwrap()
                 .snapshot_params()
                 .snapshot_uuid()
@@ -1395,7 +1334,8 @@ async fn test_delete_snapshot_with_valid_clone() {
         snapshot_lvol.destroy_snapshot().await.ok();
         let snapshot_list = Lvol::list_all_lvol_snapshots(None);
         let snapshot_lvol = UntypedBdev::lookup_by_uuid_str(
-            snapshot_list.first()
+            snapshot_list
+                .first()
                 .unwrap()
                 .snapshot_params()
                 .snapshot_uuid()
@@ -1408,14 +1348,20 @@
             snapshot_lvol.is_discarded_snapshot(),
             "Snapshot discardedSnapshotFlag not set properly"
         );
-        clone1.destroy_replica().await.expect("Clone1 Destroy Failed");
+        clone1
+            .destroy_replica()
+            .await
+            .expect("Clone1 Destroy Failed");
         let snapshot_list = Lvol::list_all_lvol_snapshots(None);
         assert_eq!(
             1,
             snapshot_list.len(),
             "Snapshot should not be deleted as part single clone deletion"
         );
-        clone2.destroy_replica().await.expect("Clone2 Destroy Failed");
+        clone2
+            .destroy_replica()
+            .await
+            .expect("Clone2 Destroy Failed");
         let snapshot_list = Lvol::list_all_lvol_snapshots(None);
         assert_eq!(
@@ -1433,12 +1379,8 @@ async fn test_delete_snapshot_with_valid_clone_fail_1() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool14",
-            "malloc:///disk14?size_mb=128".to_string(),
-            None,
-        )
-        .await;
+        let pool =
+            create_test_pool("pool14", "malloc:///disk14?size_mb=128".to_string(), None).await;
         let lvol = pool
             .create_lvol(
                 "lvol14",
@@ -1475,7 +1417,8 @@ async fn test_delete_snapshot_with_valid_clone_fail_1() {
         assert_eq!(1, snapshot_list.len(), "Snapshot Count not matched!!");
         let snapshot_lvol = UntypedBdev::lookup_by_uuid_str(
-            snapshot_list.first()
+            snapshot_list
+                .first()
                 .unwrap()
                 .snapshot_params()
                 .snapshot_uuid()
@@ -1503,7 +1446,8 @@ async fn test_delete_snapshot_with_valid_clone_fail_1() {
         snapshot_lvol.destroy_snapshot().await.ok();
         let snapshot_list = Lvol::list_all_lvol_snapshots(None);
         let snapshot_lvol = UntypedBdev::lookup_by_uuid_str(
-            snapshot_list.first()
+            snapshot_list
+                .first()
                 .unwrap()
                 .snapshot_params()
                 .snapshot_uuid()
@@ -1617,12 +1561,8 @@ async fn test_snapshot_parent_usage_post_snapshot_destroy() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool16",
-            "malloc:///disk16?size_mb=128".to_string(),
-            None,
-        )
-        .await;
+        let pool =
+            create_test_pool("pool16", "malloc:///disk16?size_mb=128".to_string(), None).await;
         let lvol = pool
             .create_lvol(
                 LVOL_NAME,
@@ -1703,12 +1643,8 @@ async fn test_clone_snapshot_usage_post_clone_destroy() {
     ms.spawn(async move {
         // Create a pool and lvol.
-        let pool = create_test_pool(
-            "pool17",
-            "malloc:///disk17?size_mb=128".to_string(),
-            None,
-        )
-        .await;
+        let pool =
+            create_test_pool("pool17", "malloc:///disk17?size_mb=128".to_string(), None).await;
         let lvol = pool
             .create_lvol(
                 LVOL_NAME,
@@ -1795,21 +1731,15 @@ async fn test_clone_snapshot_usage_post_clone_destroy() {
         snapshot_params.set_name(String::from("lvol17_clone_1_snap2"));
         snapshot_params.set_snapshot_uuid(Uuid::new_v4().to_string());
         snapshot_params.set_txn_id(Uuid::new_v4().to_string());
-        bdev_io::write_some(
-            "lvol17_snap1_clone_1",
-            3 * cluster_size,
-            16,
-            0xbbu8,
-        )
-        .await
-        .expect("Failed to write data to volume");
+        bdev_io::write_some("lvol17_snap1_clone_1", 3 * cluster_size, 16, 0xbbu8)
+            .await
+            .expect("Failed to write data to volume");
         clone1
             .create_snapshot(snapshot_params.clone())
             .await
             .expect("Failed to create the first snapshot for test volume");
         let snapshots = clone1.list_snapshot_by_source_uuid();
-        let mut clone_snapshot =
-            snapshots.iter().map(|v| v.snapshot()).collect::<Vec<_>>();
+        let mut clone_snapshot = snapshots.iter().map(|v| v.snapshot()).collect::<Vec<_>>();
         lvol.destroy()
             .await
             .expect("Original replica destroy failed");
diff --git a/io-engine/tests/snapshot_nexus.rs b/io-engine/tests/snapshot_nexus.rs
index fc5a3471a..7b3feff57 100755
--- a/io-engine/tests/snapshot_nexus.rs
+++ b/io-engine/tests/snapshot_nexus.rs
@@ -11,17 +11,10 @@ use common::{
             bdev::ListBdevOptions,
             pool::CreatePoolRequest,
             replica::{CreateReplicaRequest, ListReplicaOptions},
-            snapshot::{
-                ListSnapshotsRequest,
-                NexusCreateSnapshotReplicaDescriptor,
-                SnapshotInfo,
-            },
+            snapshot::{ListSnapshotsRequest, NexusCreateSnapshotReplicaDescriptor, SnapshotInfo},
             GrpcConnect,
         },
-        Binary,
-        Builder,
-        ComposeTest,
-        MayastorTest,
+        Binary, Builder, ComposeTest, MayastorTest,
     },
     nexus::NexusBuilder,
     nvme::{list_mayastor_nvme_devices, nvme_connect, nvme_disconnect_all},
@@ -31,14 +24,8 @@ use common::{
 use io_engine::{
     bdev::{
-        device_create,
-        device_destroy,
-        device_open,
-        nexus::{
-            nexus_create,
-            nexus_lookup_mut,
-            NexusReplicaSnapshotDescriptor,
-        },
+        device_create, device_destroy, device_open,
+        nexus::{nexus_create, nexus_lookup_mut, NexusReplicaSnapshotDescriptor},
         Nexus,
     },
     constants::NVME_NQN_PREFIX,
@@ -51,11 +38,8 @@ use nix::errno::Errno;
 use io_engine_api::v1::{
     replica::list_replica_options,
     snapshot::{
-        destroy_snapshot_request::Pool,
-        list_snapshots_request,
-        CreateReplicaSnapshotRequest,
-        CreateSnapshotCloneRequest,
-        DestroySnapshotRequest,
+        destroy_snapshot_request::Pool, list_snapshots_request, CreateReplicaSnapshotRequest,
+        CreateSnapshotCloneRequest, DestroySnapshotRequest,
     },
 };
 use std::{pin::Pin, str};
@@ -178,11 +162,7 @@ async fn launch_instance(create_replicas: bool) -> (ComposeTest, Vec<String>) {
     let mut bdev_urls = Vec::new();
     for n in [replica1_name(), replica2_name()] {
-        let bdev_url = format!(
-            "nvmf://{}:8420/{NVME_NQN_PREFIX}:{}",
-            ms1.endpoint.ip(),
-            n,
-        );
+        let bdev_url = format!("nvmf://{}:8420/{NVME_NQN_PREFIX}:{}", ms1.endpoint.ip(), n,);
         bdev_urls.push(bdev_url);
     }
@@ -296,8 +276,7 @@ async fn test_replica_handle_snapshot() {
     ms.spawn(async move {
         let device_name = create_device(&urls[0]).await;
-        let descr = device_open(&device_name, false)
-            .expect("Can't open remote lvol device");
+        let descr = device_open(&device_name, false).expect("Can't open remote lvol device");
         let handle = descr.into_handle().unwrap();
         handle
@@ -338,9 +317,7 @@ async fn test_list_no_snapshots() {
     // Make sure no devices exist.
     let bdevs = ms1
         .bdev
-        .list(ListBdevOptions {
-            name: None,
-        })
+        .list(ListBdevOptions { name: None })
         .await
         .expect("Failed to list existing devices")
         .into_inner()
@@ -364,10 +341,7 @@ async fn test_list_no_snapshots() {
     assert_eq!(snapshots.len(), 0, "Some snapshots present");
 }
-fn check_nexus_snapshot_status(
-    res: &NexusSnapshotStatus,
-    status: &Vec<(String, u32)>,
-) {
+fn check_nexus_snapshot_status(res: &NexusSnapshotStatus, status: &Vec<(String, u32)>) {
     assert_eq!(
         res.replicas_skipped.len(),
         0,
@@ -390,10 +364,7 @@ fn check_nexus_snapshot_status(
     assert!(
         res.replicas_done.iter().any(|r| {
             if r.replica_uuid.eq(uuid) {
-                assert_eq!(
-                    r.status, *e,
-                    "Replica snapshot status doesn't match"
-                );
+                assert_eq!(r.status, *e, "Replica snapshot status doesn't match");
                 true
             } else {
                 false
@@ -686,14 +657,9 @@ async fn test_snapshot_ancestor_usage() {
     tokio::spawn(async move {
         let device = get_mayastor_nvme_device();
-        test_write_to_file(
-            device,
-            DataSize::default(),
-            1,
-            DataSize::from_mb(1),
-        )
-        .await
-        .expect("Failed to write to nexus");
+        test_write_to_file(device, DataSize::default(), 1, DataSize::from_mb(1))
+            .await
+            .expect("Failed to write to nexus");
         s.send(()).expect("Failed to notify the waiter");
     });
@@ -728,15 +694,13 @@ async fn test_snapshot_ancestor_usage() {
     );
     assert_eq!(
-        usage.num_allocated_clusters_snapshots,
-        usage2.num_allocated_clusters_snapshots,
+        usage.num_allocated_clusters_snapshots, usage2.num_allocated_clusters_snapshots,
         "Amount of clusters allocated by snapshots has changed"
     );
     // Create a second snapshot after data has been written to nexus.
     ms.spawn(async move {
-        let nexus =
-            nexus_lookup_mut(&nexus_name()).expect("Can't find the nexus");
+        let nexus = nexus_lookup_mut(&nexus_name()).expect("Can't find the nexus");
         let snapshot_params = SnapshotParams::new(
             Some("e62".to_string()),
@@ -841,8 +805,7 @@ async fn test_snapshot_ancestor_usage() {
     // Create the third snapshot and make sure it correctly references space of
     // 2 pre-existing snapshots.
     ms.spawn(async move {
-        let nexus =
-            nexus_lookup_mut(&nexus_name()).expect("Can't find the nexus");
+        let nexus = nexus_lookup_mut(&nexus_name()).expect("Can't find the nexus");
         let snapshot_params = SnapshotParams::new(
             Some("e63".to_string()),
diff --git a/io-engine/tests/snapshot_rebuild.rs b/io-engine/tests/snapshot_rebuild.rs
index 8182640da..d19ac3380 100644
--- a/io-engine/tests/snapshot_rebuild.rs
+++ b/io-engine/tests/snapshot_rebuild.rs
@@ -97,10 +97,9 @@ async fn malloc_to_replica() {
         let src_uri = format!("malloc:///d?size_mb={SIZE_MB}");
         let pool = PoolBuilderLocal::malloc("md", POOL_SZ_MB).await.unwrap();
-        let replica =
-            create_replica(&pool, "3be1219f-682b-4672-b88b-8b9d07e8104a")
-                .await
-                .unwrap();
+        let replica = create_replica(&pool, "3be1219f-682b-4672-b88b-8b9d07e8104a")
+            .await
+            .unwrap();
         let job = SnapshotRebuildJob::builder()
             .with_replica_uuid(&replica.uuid())
@@ -133,19 +132,15 @@ async fn replica_to_rebuild_full() {
     ms.spawn(async move {
         let pool = PoolBuilderLocal::malloc("md", POOL_SZ_MB).await.unwrap();
-        let replica_src =
-            create_replica(&pool, "2be1219f-682b-4672-b88b-8b9d07e8104a")
-                .await
-                .unwrap();
-        let replica_dst =
-            create_replica(&pool, "3be1219f-682b-4672-b88b-8b9d07e8104a")
-                .await
-                .unwrap();
+        let replica_src = create_replica(&pool, "2be1219f-682b-4672-b88b-8b9d07e8104a")
+            .await
+            .unwrap();
+        let replica_dst = create_replica(&pool, "3be1219f-682b-4672-b88b-8b9d07e8104a")
+            .await
+            .unwrap();
         let job = SnapshotRebuildJob::builder()
-            .with_option(
-                RebuildJobOptions::default().with_read_opts(ReadOptions::None),
-            )
+            .with_option(RebuildJobOptions::default().with_read_opts(ReadOptions::None))
             .with_replica_uuid(&replica_dst.uuid())
             .with_snapshot_uri(replica_src.bdev_share_uri().unwrap())
             .build()
@@ -177,14 +172,12 @@ async fn replica_to_rebuild_partial() {
     ms.spawn(async move {
         let pool = PoolBuilderLocal::malloc("md", POOL_SZ_MB).await.unwrap();
-        let replica_src =
-            create_replica(&pool, "2be1219f-682b-4672-b88b-8b9d07e8104a")
-                .await
-                .unwrap();
-        let replica_dst =
-            create_replica(&pool, "3be1219f-682b-4672-b88b-8b9d07e8104a")
-                .await
-                .unwrap();
+        let replica_src = create_replica(&pool, "2be1219f-682b-4672-b88b-8b9d07e8104a")
+            .await
+            .unwrap();
+        let replica_dst = create_replica(&pool, "3be1219f-682b-4672-b88b-8b9d07e8104a")
+            .await
+            .unwrap();
         let job = SnapshotRebuildJob::builder()
             .with_replica_uuid(&replica_dst.uuid())
diff --git a/io-engine/tests/wipe.rs b/io-engine/tests/wipe.rs
index dad1fd5c4..0c4873b9f 100644
--- a/io-engine/tests/wipe.rs
+++ b/io-engine/tests/wipe.rs
@@ -2,9 +2,7 @@ use futures::StreamExt;
 use io_engine_tests::{
     compose::{
         rpc::v1::{GrpcConnect, RpcHandle},
-        Binary,
-        Builder,
-        ComposeTest,
+        Binary, Builder, ComposeTest,
     },
     dd_urandom_blkdev,
     nvme::{list_mayastor_nvme_devices, NmveConnectGuard},
@@ -93,9 +91,7 @@ async fn replica_wipe() {
             wipe_method: WipeMethod::None,
             chunk_size: 500 * 1024 * 1024 + 512,
             expected_chunk_size: 500 * 1024 * 1024 + 512,
-            expected_last_chunk_size: 24 * 1024 * 1024
-                - 2 * 512
-                - gpt_backup_size,
+            expected_last_chunk_size: 24 * 1024 * 1024 - 2 * 512 - gpt_backup_size,
             expected_notifications: 4,
             expected_successes: 4,
         },
@@ -132,8 +128,7 @@ async fn replica_wipe() {
         .await
         .unwrap();
-    let _nvme_guard =
-        NmveConnectGuard::connect_addr(&nvmf_location.addr, &nvmf_location.nqn);
+    let _nvme_guard = NmveConnectGuard::connect_addr(&nvmf_location.addr, &nvmf_location.nqn);
     let device = nvme_device();
     // already zeroed up to 8MiB after creation!
@@ -173,8 +168,7 @@ async fn wipe_replica(
     wipe_method: WipeMethod,
     chunk_size: u64,
 ) {
-    let response =
-        issue_wipe_replica(ms, replica, wipe_method, chunk_size).await;
+    let response = issue_wipe_replica(ms, replica, wipe_method, chunk_size).await;
     let stream = response.into_inner();
     let responses = collect_stream(stream).await;
     let last = responses.last();
@@ -183,14 +177,8 @@ async fn wipe_replica(
     assert_eq!(last.unwrap().as_ref().unwrap().remaining_bytes, 0);
 }
-async fn validate_wipe_replica(
-    ms: &mut RpcHandle,
-    replica: &Replica,
-    wipe: TestWipeReplica,
-) {
-    let response =
-        issue_wipe_replica(ms, replica, wipe.wipe_method, wipe.chunk_size)
-            .await;
+async fn validate_wipe_replica(ms: &mut RpcHandle, replica: &Replica, wipe: TestWipeReplica) {
+    let response = issue_wipe_replica(ms, replica, wipe.wipe_method, wipe.chunk_size).await;
     let stream = response.into_inner();
     let responses = collect_stream(stream).await;
     let oks = responses
@@ -254,8 +242,7 @@ async fn validate_wipe_replica(
         assert_eq!(ok.remaining_bytes, 0, "{wipe:#?}");
         assert_eq!(
             ok.wiped_bytes,
-            (expected_chunks - 1) * wipe.expected_chunk_size
-                + wipe.expected_last_chunk_size,
+            (expected_chunks - 1) * wipe.expected_chunk_size + wipe.expected_last_chunk_size,
             "{wipe:#?}"
         );
     }
diff --git a/jsonrpc/src/error.rs b/jsonrpc/src/error.rs
index f253220de..1ef54e1b5 100644
--- a/jsonrpc/src/error.rs
+++ b/jsonrpc/src/error.rs
@@ -40,10 +40,7 @@ impl From<RpcCode> for Code {
 impl From<Error> for tonic::Status {
     fn from(error: Error) -> Status {
         match error {
-            Error::RpcError {
-                code,
-                msg,
-            } => Status::new(code.into(), msg),
+            Error::RpcError { code, msg } => Status::new(code.into(), msg),
             _ => Status::new(Code::Internal, error.to_string()),
         }
     }
@@ -54,18 +51,12 @@ impl fmt::Display for Error {
         match self {
             Error::InvalidVersion => write!(f, "Invalid json-rpc version"),
             Error::InvalidReplyId => write!(f, "Invalid ID of json-rpc reply"),
-            Error::ConnectError {
-                sock,
-                err,
-            } => {
+            Error::ConnectError { sock, err } => {
                 write!(f, "Error connecting to {sock}: {err}")
             }
             Error::IoError(err) => write!(f, "IO error: {err}"),
             Error::ParseError(err) => write!(f, "Invalid json reply: {err}"),
-            Error::RpcError {
-                code,
-                msg,
-            } => {
+            Error::RpcError { code, msg } => {
                 write!(f, "Json-rpc error {code:?}: {msg}")
             }
             Error::GenericError(msg) => write!(f, "{msg}"),
diff --git a/jsonrpc/src/lib.rs b/jsonrpc/src/lib.rs
index 3eddfd665..5b6fcdb10 100644
--- a/jsonrpc/src/lib.rs
+++ b/jsonrpc/src/lib.rs
@@ -59,11 +59,7 @@ pub struct RpcError {
 }
 /// Make json-rpc request, parse reply, and return user data to caller.
-pub async fn call<A, R>(
-    sock_path: &str,
-    method: &str,
-    args: Option<A>,
-) -> Result<R, Error>
+pub async fn call<A, R>(sock_path: &str, method: &str, args: Option<A>) -> Result<R, Error>
 where
     A: serde::ser::Serialize,
     R: 'static + serde::de::DeserializeOwned + Send,
@@ -131,10 +127,8 @@ where
                 });
             }
-            serde_json::from_value(
-                reply.result.unwrap_or(serde_json::value::Value::Null),
-            )
-            .map_err(Error::ParseError)
+            serde_json::from_value(reply.result.unwrap_or(serde_json::value::Value::Null))
+                .map_err(Error::ParseError)
         }
         Err(error) => Err(Error::ParseError(error)),
     }
diff --git a/jsonrpc/src/test.rs b/jsonrpc/src/test.rs
index 9d6497314..d4bc41150 100644
--- a/jsonrpc/src/test.rs
+++ b/jsonrpc/src/test.rs
@@ -95,8 +95,7 @@ async fn normal_request_reply() {
         assert_eq!(req.id.as_i64().unwrap(), 0);
         assert_eq!(req.jsonrpc.unwrap(), "2.0");
-        let params: Args =
-            serde_json::from_value(req.params.unwrap()).unwrap();
+        let params: Args = serde_json::from_value(req.params.unwrap()).unwrap();
         let resp = Response {
             error: None,
@@ -152,8 +151,7 @@ fn connect_error() {
     // create tokio futures runtime
     let rt = Runtime::new().unwrap();
     // try to connect to server which does not exist
-    let call_res: Result<(), Error> =
-        rt.block_on(call("/crazy/path/look", "method", Some(())));
+    let call_res: Result<(), Error> = rt.block_on(call("/crazy/path/look", "method", Some(())));
     match call_res {
         Ok(_) => panic!("Expected error and got ok"),
         Err(Error::IoError(err)) => match err.kind() {
@@ -306,10 +304,7 @@ async fn rpc_error() {
         },
         |res: Result<(), Error>| match res {
             Ok(_) => panic!("Expected error and got ok"),
-            Err(Error::RpcError {
-                code,
-                msg,
-            }) => {
+            Err(Error::RpcError { code, msg }) => {
                 assert_eq!(code, RpcCode::NotFound);
                 assert_eq!(&msg, "Not found");
             }
diff --git a/libnvme-rs/src/error.rs b/libnvme-rs/src/error.rs
index d47e36553..6f6502a06 100644
--- a/libnvme-rs/src/error.rs
+++ b/libnvme-rs/src/error.rs
@@ -20,8 +20,6 @@ pub enum NvmeError {
 impl From<std::io::Error> for NvmeError {
     fn from(source: std::io::Error) -> NvmeError {
-        NvmeError::IoError {
-            source,
-        }
+        NvmeError::IoError { source }
     }
 }
diff --git a/libnvme-rs/src/nvme_tree.rs b/libnvme-rs/src/nvme_tree.rs
index 45111e716..7826a1408 100644
--- a/libnvme-rs/src/nvme_tree.rs
+++ b/libnvme-rs/src/nvme_tree.rs
@@ -5,9 +5,7 @@ pub(crate) struct NvmeRoot {
 impl NvmeRoot {
     pub(crate) fn new(root: *mut crate::bindings::nvme_root) -> Self {
-        NvmeRoot {
-            root,
-        }
+        NvmeRoot { root }
     }
     pub(crate) fn as_mut_ptr(&self) -> *mut crate::bindings::nvme_root {
         self.root
diff --git a/libnvme-rs/src/nvme_uri.rs b/libnvme-rs/src/nvme_uri.rs
index 7301371c8..e257487b8 100644
--- a/libnvme-rs/src/nvme_uri.rs
+++ b/libnvme-rs/src/nvme_uri.rs
@@ -8,12 +8,8 @@ use crate::{
     error::NvmeError,
     nvme_device::NvmeDevice,
     nvme_tree::{
-        NvmeCtrlrIterator,
-        NvmeHostIterator,
-        NvmeNamespaceInCtrlrIterator,
-        NvmeNamespaceIterator,
-        NvmeRoot,
-        NvmeSubsystemIterator,
+        NvmeCtrlrIterator, NvmeHostIterator, NvmeNamespaceInCtrlrIterator, NvmeNamespaceIterator,
+        NvmeRoot, NvmeSubsystemIterator,
     },
 };
@@ -24,9 +20,7 @@ pub struct NvmeStringWrapper {
 impl NvmeStringWrapper {
     pub fn new(s: *mut i8) -> Self {
-        NvmeStringWrapper {
-            s,
-        }
+        NvmeStringWrapper { s }
     }
     pub fn as_ptr(&self) -> *const i8 {
         self.s
@@ -79,9 +73,7 @@ impl TryFrom<&str> for NvmeTarget {
     type Error = NvmeError;
     fn try_from(value: &str) -> Result<Self, Self::Error> {
-        let url = Url::parse(value).map_err(|source| NvmeError::UrlError {
-            source,
-        })?;
+        let url = Url::parse(value).map_err(|source| NvmeError::UrlError { source })?;
         let trtype = match url.scheme() {
             "nvmf" | "nvmf+tcp" => Ok(NvmeTransportType::Tcp),
@@ -135,29 +127,17 @@ impl NvmeTarget {
     /// Returns Ok on successful connect
     pub fn connect(&self) -> Result<(), NvmeError> {
         let r = NvmeRoot::new(unsafe { crate::nvme_scan(std::ptr::null()) });
-        let hostid =
-            NvmeStringWrapper::new(unsafe { crate::nvmf_hostid_from_file() });
+        let hostid = NvmeStringWrapper::new(unsafe { crate::nvmf_hostid_from_file() });
         let hostnqn = match self.hostnqn_autogen {
-            true => NvmeStringWrapper::new(unsafe {
-                crate::nvmf_hostnqn_generate()
-            }),
-            false => NvmeStringWrapper::new(unsafe {
-                crate::nvmf_hostnqn_from_file()
-            }),
+            true => NvmeStringWrapper::new(unsafe { crate::nvmf_hostnqn_generate() }),
+            false => NvmeStringWrapper::new(unsafe { crate::nvmf_hostnqn_from_file() }),
         };
-        let h = unsafe {
-            crate::nvme_lookup_host(
-                r.as_mut_ptr(),
-                hostnqn.as_ptr(),
-                hostid.as_ptr(),
-            )
-        };
+        let h =
+            unsafe { crate::nvme_lookup_host(r.as_mut_ptr(), hostnqn.as_ptr(), hostid.as_ptr()) };
         if h.is_null() {
-            return Err(NvmeError::LookupHostError {
-                rc: -libc::ENOMEM,
-            });
+            return Err(NvmeError::LookupHostError { rc: -libc::ENOMEM });
         }
         let subsysnqn = std::ffi::CString::new(self.subsysnqn.clone()).unwrap();
@@ -178,9 +158,7 @@ impl NvmeTarget {
             )
         };
         if nvme_ctrl.is_null() {
-            return Err(NvmeError::CreateCtrlrError {
-                rc: -libc::ENOMEM,
-            });
+            return Err(NvmeError::CreateCtrlrError { rc: -libc::ENOMEM });
         }
         let cfg = crate::nvme_fabrics_config {
             host_traddr,
@@ -202,9 +180,7 @@ impl NvmeTarget {
         };
         let ret = unsafe { crate::nvmf_add_ctrl(h, nvme_ctrl, &cfg) };
         if ret != 0 {
-            return Err(NvmeError::AddCtrlrError {
-                rc: ret,
-            });
+            return Err(NvmeError::AddCtrlrError { rc: ret });
         }
         Ok(())
     }
@@ -212,23 +188,16 @@ impl NvmeTarget {
     /// List block devices for this target
     ///
     /// `retries`: number of times to retry until at least one device is found
-    pub fn block_devices(
-        &self,
-        mut retries: usize,
-    ) -> Result<Vec<String>, NvmeError> {
+    pub fn block_devices(&self, mut retries: usize) -> Result<Vec<String>, NvmeError> {
         let mut devices = Vec::<String>::new();
         loop {
-            let r =
-                NvmeRoot::new(unsafe { crate::nvme_scan(std::ptr::null()) });
+            let r = NvmeRoot::new(unsafe { crate::nvme_scan(std::ptr::null()) });
             let hostiter = NvmeHostIterator::new(&r);
             for host in hostiter {
                 let subsysiter = NvmeSubsystemIterator::new(host);
                 for subsys in subsysiter {
-                    let cstr = unsafe {
-                        std::ffi::CStr::from_ptr(crate::nvme_subsystem_get_nqn(
-                            subsys,
-                        ))
-                    };
+                    let cstr =
+                        unsafe { std::ffi::CStr::from_ptr(crate::nvme_subsystem_get_nqn(subsys)) };
                    if cstr.to_str().unwrap() != self.subsysnqn {
                         continue;
                     }
@@ -236,13 +205,9 @@ impl NvmeTarget {
                     for ns in nsiter {
                         devices.push(format!(
                             "/dev/{}",
-                            unsafe {
-                                std::ffi::CStr::from_ptr(
-                                    crate::nvme_ns_get_name(ns),
-                                )
-                            }
-                            .to_str()
-                            .unwrap()
+                            unsafe { std::ffi::CStr::from_ptr(crate::nvme_ns_get_name(ns),) }
+                                .to_str()
+                                .unwrap()
                         ));
                     }
                 }
@@ -268,11 +233,8 @@ impl NvmeTarget {
         for host in hostiter {
             let subsysiter = NvmeSubsystemIterator::new(host);
             for subsys in subsysiter {
-                let cstr = unsafe {
-                    std::ffi::CStr::from_ptr(crate::nvme_subsystem_get_nqn(
-                        subsys,
-                    ))
-                };
+                let cstr =
+                    unsafe { std::ffi::CStr::from_ptr(crate::nvme_subsystem_get_nqn(subsys)) };
                 if cstr.to_str().unwrap() != self.subsysnqn {
                     continue;
                 }
@@ -282,9 +244,7 @@ impl NvmeTarget {
                 if ret == 0 {
                     i += 1;
                 } else {
-                    return Err(NvmeError::FileIoError {
-                        rc: ret,
-                    });
+                    return Err(NvmeError::FileIoError { rc: ret });
                 }
             }
         }
@@ -299,30 +259,22 @@ impl NvmeTarget {
             NvmeDevice {
                 namespace: unsafe { crate::nvme_ns_get_nsid(n) },
-                device: unsafe {
-                    std::ffi::CStr::from_ptr(crate::nvme_ns_get_name(n))
-                }
-                .to_str()
-                .unwrap()
-                .to_string(),
-                firmware: unsafe {
-                    std::ffi::CStr::from_ptr(crate::nvme_ns_get_firmware(n))
-                }
-                .to_str()
-                .unwrap()
-                .to_string(),
-                model: unsafe {
-                    std::ffi::CStr::from_ptr(crate::nvme_ns_get_model(n))
-                }
-                .to_str()
-                .unwrap()
-                .to_string(),
-                serial: unsafe {
-                    std::ffi::CStr::from_ptr(crate::nvme_ns_get_serial(n))
-                }
-                .to_str()
-                .unwrap()
-                .to_string(),
+                device: unsafe { std::ffi::CStr::from_ptr(crate::nvme_ns_get_name(n)) }
+                    .to_str()
+                    .unwrap()
+                    .to_string(),
+                firmware: unsafe { std::ffi::CStr::from_ptr(crate::nvme_ns_get_firmware(n)) }
+                    .to_str()
+                    .unwrap()
+                    .to_string(),
+                model: unsafe { std::ffi::CStr::from_ptr(crate::nvme_ns_get_model(n)) }
+                    .to_str()
+                    .unwrap()
+                    .to_string(),
+                serial: unsafe { std::ffi::CStr::from_ptr(crate::nvme_ns_get_serial(n)) }
+                    .to_str()
+                    .unwrap()
+                    .to_string(),
                 utilisation: nuse,
                 max_lba: unsafe { crate::nvme_ns_get_lba_count(n) },
                 capacity: nsze,
@@ -397,18 +349,14 @@ impl NvmeTarget {
 #[test]
 fn nvme_parse_uri() {
-    let target =
-        NvmeTarget::try_from("nvmf://1.2.3.4:1234/testnqn.what-ever.foo")
-            .unwrap();
+    let target = NvmeTarget::try_from("nvmf://1.2.3.4:1234/testnqn.what-ever.foo").unwrap();
     assert_eq!(target.trsvcid, 1234);
     assert_eq!(target.traddr, "1.2.3.4");
     assert_eq!(target.trtype, NvmeTransportType::Tcp);
     assert_eq!(target.subsysnqn, "testnqn.what-ever.foo");
-    let target =
-        NvmeTarget::try_from("nvmf+tcp://1.2.3.4:1234/testnqn.what-ever.foo")
-            .unwrap();
+    let target = NvmeTarget::try_from("nvmf+tcp://1.2.3.4:1234/testnqn.what-ever.foo").unwrap();
     assert_eq!(target.trsvcid, 1234);
     assert_eq!(target.traddr, "1.2.3.4");
diff --git a/scripts/rust-style.sh b/scripts/rust-style.sh
index 00620ad10..9fcf68d35 100755
--- a/scripts/rust-style.sh
+++ b/scripts/rust-style.sh
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
-FMT_OPTS=${FMT_OPTS:-""}
+FMT_OPTS=${FMT_OPTS:-"--config imports_granularity=Crate"}
 source ${BASH_SOURCE%/*}/../spdk-rs/scripts/rust-linter-env.sh
 $CARGO fmt --all -- $FMT_OPTS
diff --git a/spdk-rs b/spdk-rs
index 52f4995c4..93d0d206e 160000
--- a/spdk-rs
+++ b/spdk-rs
@@ -1 +1 @@
-Subproject commit 52f4995c45caf9bf0fbfb417003b9ce0abae5823
+Subproject commit 93d0d206e48961f02b54bf32d87b13726027974c
diff --git a/utils/dependencies b/utils/dependencies
index f160a708e..0722218cb 160000
--- a/utils/dependencies
+++ b/utils/dependencies
@@ -1 +1 @@
-Subproject commit f160a708e71c89f20b9620fca5822dafd392ba61
+Subproject commit 0722218cb0536bb4c90c4b680a12ead4c6d8ce7a
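-- 
A note on the formatting option enabled in scripts/rust-style.sh above: rustfmt's
`imports_granularity=Crate` merges all `use` items that pull from the same crate into a
single nested item, which is what produces the reflowed import blocks throughout this
patch. A minimal sketch of the effect (hypothetical imports, for illustration only;
`imports_granularity` is a nightly-gated rustfmt option, so stable toolchains may warn
and ignore it when passed via `--config`):

    // Before: one `use` item per path.
    use io_engine::core::MayastorCliArgs;
    use io_engine::core::MayastorEnvironment;
    use io_engine::bdev::device_create;

    // After `cargo fmt --all -- --config imports_granularity=Crate`:
    use io_engine::{bdev::device_create, core::{MayastorCliArgs, MayastorEnvironment}};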