diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 12e76f99c..07a18aba2 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -40,7 +40,9 @@ jobs: with: save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - name: Fmt - run: make format-check + run: make format + - name: Diff check + run: git diff --exit-code clippy: name: clippy diff --git a/Makefile b/Makefile index 7a968862c..c8d18f381 100644 --- a/Makefile +++ b/Makefile @@ -24,15 +24,11 @@ fix: ## Runs Fix with configs .PHONY: format -format: ## Runs Format using nightly toolchain +fmt format: ## Runs Format using nightly toolchain + ./scripts/reflow.sh cargo +nightly fmt --all -.PHONY: format-check -format-check: ## Runs Format using nightly toolchain but only in check mode - cargo +nightly fmt --all --check - - .PHONY: machete machete: ## Runs machete to find unused dependencies cargo machete diff --git a/bin/network-monitor/src/commands/start.rs b/bin/network-monitor/src/commands/start.rs index 3f1cbca6b..fc86dc7f2 100644 --- a/bin/network-monitor/src/commands/start.rs +++ b/bin/network-monitor/src/commands/start.rs @@ -13,8 +13,8 @@ use crate::monitor::tasks::Tasks; /// Start the network monitoring service. /// -/// This function initializes all monitoring tasks including RPC status checking, -/// remote prover testing, faucet testing, and the web frontend. +/// This function initializes all monitoring tasks including RPC status checking, remote prover +/// testing, faucet testing, and the web frontend. 
#[instrument(target = COMPONENT, name = "start-monitor", skip_all, fields(port = %config.port))] pub async fn start_monitor(config: MonitorConfig) -> Result<()> { // Load configuration from command-line arguments and environment variables diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs index a67952ccc..8a3b02f56 100644 --- a/bin/network-monitor/src/counter.rs +++ b/bin/network-monitor/src/counter.rs @@ -1,7 +1,7 @@ //! Counter increment task functionality. //! -//! This module contains the implementation for periodically incrementing the counter -//! of the network account deployed at startup by creating and submitting network notes. +//! This module contains the implementation for periodically incrementing the counter of the network +//! account deployed at startup by creating and submitting network notes. use std::path::Path; use std::sync::Arc; @@ -315,8 +315,8 @@ fn send_status(tx: &watch::Sender, status: ServiceStatus) -> Resu /// Run the counter tracking task. /// -/// This function periodically fetches the current counter value from the network -/// and updates the tracking details. +/// This function periodically fetches the current counter value from the network and updates the +/// tracking details. /// /// # Arguments /// diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index 58278d92e..0e50f5952 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -40,9 +40,8 @@ use crate::deploy::wallet::{create_wallet_account, save_wallet_account}; pub mod counter; pub mod wallet; -/// Create an RPC client configured with the correct genesis metadata in the -/// `Accept` header so that write RPCs such as `SubmitProvenTransaction` are -/// accepted by the node. +/// Create an RPC client configured with the correct genesis metadata in the `Accept` header so that +/// write RPCs such as `SubmitProvenTransaction` are accepted by the node. 
pub async fn create_genesis_aware_rpc_client( rpc_url: &Url, timeout: Duration, @@ -98,9 +97,8 @@ pub async fn create_genesis_aware_rpc_client( /// Ensure accounts exist, creating them if they don't. /// -/// This function checks if the wallet and counter account files exist. -/// If they don't exist, it creates new accounts and saves them to the specified files. -/// If they do exist, it does nothing. +/// This function checks if the wallet and counter account files exist. If they don't exist, it +/// creates new accounts and saves them to the specified files. If they do exist, it does nothing. /// /// # Arguments /// @@ -141,8 +139,7 @@ pub async fn ensure_accounts_exist( /// Deploy counter account to the network. /// -/// This function creates a counter program account, -/// then saves it to the specified file. +/// This function creates a counter program account, then saves it to the specified file. #[instrument(target = COMPONENT, name = "deploy-counter-account", skip_all, ret(level = "debug"))] pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> Result<()> { // Deploy counter account to the network using a genesis-aware RPC client. diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index 5cc0944b2..176d8cc4b 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -1,7 +1,7 @@ //! Faucet testing functionality. //! -//! This module contains the logic for periodically testing faucet functionality -//! by requesting proof-of-work challenges, solving them, and submitting token requests. +//! This module contains the logic for periodically testing faucet functionality by requesting +//! proof-of-work challenges, solving them, and submitting token requests. 
use std::time::Duration; diff --git a/bin/network-monitor/src/main.rs b/bin/network-monitor/src/main.rs index 2a288f530..60727895f 100644 --- a/bin/network-monitor/src/main.rs +++ b/bin/network-monitor/src/main.rs @@ -1,7 +1,7 @@ //! Miden Network Monitor //! -//! A monitor application for Miden network infrastructure that provides real-time status -//! monitoring and account deployment capabilities. +//! A monitor application for Miden network infrastructure that provides real-time status monitoring +//! and account deployment capabilities. use anyhow::Result; use clap::Parser; @@ -28,8 +28,8 @@ pub const COMPONENT: &str = "miden-network-monitor"; /// Network Monitor main function. /// -/// This function parses command-line arguments and delegates to the appropriate -/// command handler. The monitor supports two main commands: +/// This function parses command-line arguments and delegates to the appropriate command handler. +/// The monitor supports two main commands: /// - `start`: Runs the network monitoring service with web dashboard /// - `deploy-account`: Creates and deploys Miden accounts to the network #[tokio::main] diff --git a/bin/network-monitor/src/monitor/tasks.rs b/bin/network-monitor/src/monitor/tasks.rs index 233994afb..aa17176db 100644 --- a/bin/network-monitor/src/monitor/tasks.rs +++ b/bin/network-monitor/src/monitor/tasks.rs @@ -355,8 +355,8 @@ impl Tasks { /// Gets the current Unix timestamp in seconds. /// -/// This function is infallible - if the system time is somehow before Unix epoch -/// (extremely unlikely), it returns 0. +/// This function is infallible - if the system time is somehow before Unix epoch (extremely +/// unlikely), it returns 0. 
pub fn current_unix_timestamp_secs() -> u64 { SystemTime::now() .duration_since(UNIX_EPOCH) diff --git a/bin/network-monitor/src/remote_prover.rs b/bin/network-monitor/src/remote_prover.rs index a9cee796e..4bdb26722 100644 --- a/bin/network-monitor/src/remote_prover.rs +++ b/bin/network-monitor/src/remote_prover.rs @@ -73,8 +73,8 @@ pub struct ProverTestDetails { /// Runs a task that continuously tests remote prover functionality and updates a watch channel. /// -/// This function spawns a task that periodically sends mock request payloads to a remote prover -/// and measures the success/failure rate and performance metrics for proof generation. +/// This function spawns a task that periodically sends mock request payloads to a remote prover and +/// measures the success/failure rate and performance metrics for proof generation. /// /// # Arguments /// @@ -213,8 +213,8 @@ async fn test_remote_prover( /// Converts a `tonic::Status` error to a JSON string with structured error information. /// -/// This function extracts the code, message, details, and metadata from a `tonic::Status` -/// error and serializes them into a JSON string for structured error reporting. +/// This function extracts the code, message, details, and metadata from a `tonic::Status` error and +/// serializes them into a JSON string for structured error reporting. /// /// # Arguments /// @@ -254,9 +254,9 @@ fn tonic_status_to_json(status: &tonic::Status) -> String { /// Generates a mock transaction for testing remote prover functionality. /// -/// This function creates a mock transaction using `MockChainBuilder` similar to what's done -/// in the remote prover tests. The transaction is generated once and can be reused for -/// multiple proof test calls. +/// This function creates a mock transaction using `MockChainBuilder` similar to what's done in the +/// remote prover tests. The transaction is generated once and can be reused for multiple proof test +/// calls. 
pub async fn generate_mock_transaction() -> anyhow::Result { let mut mock_chain_builder = MockChainBuilder::new(); diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index f00ada304..e23c63824 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -1,7 +1,7 @@ //! Network monitor status checker. //! -//! This module contains the logic for checking the status of network services. -//! Individual status checker tasks send updates via watch channels to the web server. +//! This module contains the logic for checking the status of network services. Individual status +//! checker tasks send updates via watch channels to the web server. use std::time::Duration; @@ -132,8 +132,8 @@ pub struct StoreStatusDetails { /// Details of a block producer service. /// -/// This struct contains the details of a block producer service, which is a union of the details -/// of the block producer service. +/// This struct contains the details of a block producer service, which is a union of the details of +/// the block producer service. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BlockProducerStatusDetails { pub version: String, @@ -155,8 +155,8 @@ pub struct MempoolStatusDetails { /// Details of a remote prover service. /// -/// This struct contains the details of a remote prover service, which is a union of the details -/// of the remote prover service. +/// This struct contains the details of a remote prover service, which is a union of the details of +/// the remote prover service. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct RemoteProverStatusDetails { pub url: String, @@ -267,8 +267,8 @@ impl From for RpcStatusDetails { /// Runs a task that continuously checks RPC status and updates a watch channel. /// -/// This function spawns a task that periodically checks the RPC service status -/// and sends updates through a watch channel. 
+/// This function spawns a task that periodically checks the RPC service status and sends updates +/// through a watch channel. /// /// # Arguments /// @@ -357,8 +357,8 @@ pub(crate) async fn check_rpc_status( /// Runs a task that continuously checks remote prover status and updates a watch channel. /// -/// This function spawns a task that periodically checks a remote prover service status -/// and sends updates through a watch channel. +/// This function spawns a task that periodically checks a remote prover service status and sends +/// updates through a watch channel. /// /// # Arguments /// diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs index b03c06aea..cbcf0f67f 100644 --- a/bin/remote-prover/src/api/prover.rs +++ b/bin/remote-prover/src/api/prover.rs @@ -50,8 +50,8 @@ impl std::str::FromStr for ProofType { /// The prover for the remote prover. /// -/// This enum is used to store the prover for the remote prover. -/// Only one prover is enabled at a time. +/// This enum is used to store the prover for the remote prover. Only one prover is enabled at a +/// time. enum Prover { Transaction(Mutex), Batch(Mutex), diff --git a/bin/remote-prover/src/proxy/mod.rs b/bin/remote-prover/src/proxy/mod.rs index 81290d73a..5da908abd 100644 --- a/bin/remote-prover/src/proxy/mod.rs +++ b/bin/remote-prover/src/proxy/mod.rs @@ -243,9 +243,9 @@ static RATE_LIMITER: LazyLock = LazyLock::new(|| Rate::new(Duration::from_ // REQUEST QUEUE // ================================================================================================ -/// Request queue holds the list of requests that are waiting to be processed by the workers and -/// the time they were enqueued. -/// It is used to keep track of the order of the requests to then assign them to the workers. +/// Request queue holds the list of requests that are waiting to be processed by the workers and the +/// time they were enqueued. 
It is used to keep track of the order of the requests to then assign +/// them to the workers. pub struct RequestQueue { queue: RwLock>, } @@ -299,8 +299,8 @@ static QUEUE: LazyLock = LazyLock::new(RequestQueue::new); /// Pingora `RequestHeader` injector for OpenTelemetry trace context propagation. /// -/// This allows the proxy to inject trace context into headers that will be forwarded -/// to worker nodes, enabling proper parent-child trace relationships. +/// This allows the proxy to inject trace context into headers that will be forwarded to worker +/// nodes, enabling proper parent-child trace relationships. struct PingoraHeaderInjector<'a>(&'a mut pingora::http::RequestHeader); impl opentelemetry::propagation::Injector for PingoraHeaderInjector<'_> { @@ -321,8 +321,8 @@ impl opentelemetry::propagation::Injector for PingoraHeaderInjector<'_> { /// Custom context for the request/response lifecycle /// /// We use this context to keep track of the number of tries for a request, the unique ID for the -/// request, the worker that will process the request, a span that will be used for traces along -/// the transaction execution, and a timer to track how long the request took. +/// request, the worker that will process the request, a span that will be used for traces along the +/// transaction execution, and a timer to track how long the request took. #[derive(Debug)] pub struct RequestContext { /// Number of tries for the request @@ -362,9 +362,9 @@ impl RequestContext { /// Wrapper around the load balancer that implements the [`ProxyHttp`] trait /// -/// This wrapper is used to implement the [`ProxyHttp`] trait for [`Arc`]. -/// This is necessary because we want to share the load balancer between the proxy server and the -/// health check background service. +/// This wrapper is used to implement the [`ProxyHttp`] trait for [`Arc`]. 
This is +/// necessary because we want to share the load balancer between the proxy server and the health +/// check background service. #[derive(Debug)] pub struct LoadBalancer(pub Arc); diff --git a/bin/remote-prover/src/proxy/update_workers.rs b/bin/remote-prover/src/proxy/update_workers.rs index 320ac5a67..650e8b18a 100644 --- a/bin/remote-prover/src/proxy/update_workers.rs +++ b/bin/remote-prover/src/proxy/update_workers.rs @@ -22,9 +22,9 @@ pub(crate) struct LoadBalancerUpdateService { server_opts: HttpServerOptions, } -/// Manually implement Debug for `LoadBalancerUpdateService`. -/// [`HttpServerOptions`] does not implement Debug, so we cannot derive Debug for -/// [`LoadBalancerUpdateService`], which is needed for the tracing instrumentation. +/// Manually implement Debug for `LoadBalancerUpdateService`. [`HttpServerOptions`] does not +/// implement Debug, so we cannot derive Debug for [`LoadBalancerUpdateService`], which is needed +/// for the tracing instrumentation. impl fmt::Debug for LoadBalancerUpdateService { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("LBUpdaterService") diff --git a/bin/remote-prover/src/proxy/worker.rs b/bin/remote-prover/src/proxy/worker.rs index aa418e8cb..942c6b493 100644 --- a/bin/remote-prover/src/proxy/worker.rs +++ b/bin/remote-prover/src/proxy/worker.rs @@ -41,15 +41,13 @@ static WORKER_VERSION_REQUIREMENT: LazyLock = LazyLock::new(|| { /// A worker used for processing of requests. /// -/// The worker is used to process requests. -/// It has a backend, a status client, a health status, and a version. -/// The backend is used to send requests to the worker. -/// The status client is used to check the status of the worker. -/// The health status is used to determine if the worker is healthy or unhealthy. -/// The version is used to check if the worker is compatible with the proxy. -/// The `is_available` is used to determine if the worker is available to process requests. 
-/// The `connection_timeout` is used to set the timeout for the connection to the worker. -/// The `total_timeout` is used to set the timeout for the total request. +/// The worker is used to process requests. It has a backend, a status client, a health status, and +/// a version. The backend is used to send requests to the worker. The status client is used to +/// check the status of the worker. The health status is used to determine if the worker is healthy +/// or unhealthy. The version is used to check if the worker is compatible with the proxy. The +/// `is_available` is used to determine if the worker is available to process requests. The +/// `connection_timeout` is used to set the timeout for the connection to the worker. The +/// `total_timeout` is used to set the timeout for the total request. #[derive(Debug, Clone)] pub struct Worker { backend: Backend, @@ -63,9 +61,9 @@ pub struct Worker { /// The health status of a worker. /// -/// A worker can be either healthy or unhealthy. -/// If the worker is unhealthy, it will have a number of failed attempts. -/// The number of failed attempts is incremented each time the worker is unhealthy. +/// A worker can be either healthy or unhealthy. If the worker is unhealthy, it will have a number +/// of failed attempts. The number of failed attempts is incremented each time the worker is +/// unhealthy. #[derive(Debug, Clone, PartialEq, Serialize)] pub enum WorkerHealthStatus { /// The worker is healthy. diff --git a/bin/remote-prover/src/utils.rs b/bin/remote-prover/src/utils.rs index 121491136..ef1931bee 100644 --- a/bin/remote-prover/src/utils.rs +++ b/bin/remote-prover/src/utils.rs @@ -55,8 +55,8 @@ fn build_grpc_trailers( /// Write a protobuf message as a gRPC response to a Pingora session /// -/// This helper function takes a protobuf message and writes it to a Pingora session -/// in the proper gRPC format, handling message encoding, headers, and trailers. 
+/// This helper function takes a protobuf message and writes it to a Pingora session in the proper +/// gRPC format, handling message encoding, headers, and trailers. pub async fn write_grpc_response_to_session( session: &mut Session, message: T, @@ -99,8 +99,8 @@ where /// Write a gRPC error response to a Pingora session /// -/// This helper function creates a proper gRPC error response with the specified -/// status code and error message. +/// This helper function creates a proper gRPC error response with the specified status code and +/// error message. pub async fn write_grpc_error_to_session( session: &mut Session, grpc_status: Code, diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index 225001a3b..eee101ecc 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -118,8 +118,8 @@ pub async fn seed_store( /// Generates batches of transactions to be inserted into the store. /// -/// The first transaction in each batch sends assets from the faucet to 255 accounts. -/// The rest of the transactions consume the notes created by the faucet in the previous block. +/// The first transaction in each batch sends assets from the faucet to 255 accounts. The rest of +/// the transactions consume the notes created by the faucet in the previous block. async fn generate_blocks( num_accounts: usize, public_accounts_percentage: u8, @@ -430,8 +430,8 @@ fn create_consume_note_tx( .unwrap() } -/// Creates a transaction from the faucet that creates the given output notes. -/// Updates the faucet account to increase the issuance slot and it's nonce. +/// Creates a transaction from the faucet that creates the given output notes. Updates the faucet +/// account to increase the issuance slot and its nonce. 
fn create_emit_note_tx( block_ref: &BlockHeader, faucet: &mut Account, diff --git a/bin/stress-test/src/store/metrics.rs b/bin/stress-test/src/store/metrics.rs index 95f8ce0ff..3479e63ab 100644 --- a/bin/stress-test/src/store/metrics.rs +++ b/bin/stress-test/src/store/metrics.rs @@ -1,7 +1,7 @@ use std::time::Duration; -/// Prints a summary of the benchmark results, including the average and various percentile -/// request latencies to help diagnose performance outliers. +/// Prints a summary of the benchmark results, including the average and various percentile request +/// latencies to help diagnose performance outliers. pub fn print_summary(timers_accumulator: &[Duration]) { let avg_time = timers_accumulator.iter().sum::() / timers_accumulator.len() as u32; println!("Average request latency: {avg_time:?}"); diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index a0adb87ab..c8a827508 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -149,9 +149,9 @@ pub async fn bench_sync_notes(data_directory: PathBuf, iterations: usize, concur print_summary(&timers_accumulator); } -/// Sends a single `sync_notes` request to the store and returns the elapsed time. -/// The note tags are generated from the account ids, so the request will contain a note tag for -/// each account id, with a block number of 0. +/// Sends a single `sync_notes` request to the store and returns the elapsed time. The note tags are +/// generated from the account ids, so the request will contain a note tag for each account id, with +/// a block number of 0. pub async fn sync_notes( api_client: &mut RpcClient>, account_ids: Vec, @@ -522,9 +522,8 @@ pub async fn load_state(data_directory: &Path) { /// Waits for the store to be ready and accepting requests. /// -/// Periodically checks the store’s status endpoint until it reports `"connected"`. 
-/// Returns an error if the status does not become `"connected"` after -/// [`STORE_STATUS_RETRIES`] attempts. +/// Periodically checks the store’s status endpoint until it reports `"connected"`. Returns an error +/// if the status does not become `"connected"` after [`STORE_STATUS_RETRIES`] attempts. async fn wait_for_store( store_client: &RpcClient>, ) -> Result<(), String> { diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index 6e991dea4..c7d609526 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -28,9 +28,9 @@ use crate::{COMPONENT, TelemetryInjectorExt}; /// Builds [`ProvenBatch`] from sets of transactions. /// -/// Transaction sets are pulled from the mempool at a configurable interval, and passed to -/// a pool of provers for proof generation. Proving is currently unimplemented and is instead -/// simulated via the given proof time and failure rate. +/// Transaction sets are pulled from the mempool at a configurable interval, and passed to a pool of +/// provers for proof generation. Proving is currently unimplemented and is instead simulated via +/// the given proof time and failure rate. pub struct BatchBuilder { /// Represents all batch building workers. /// diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index ca742904c..b2d555bcc 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -463,8 +463,8 @@ impl BlockProver { /// Validates that the proven block's transaction headers are consistent with the transactions /// passed in the proposed block. /// -/// This expects that transactions from the proposed block and proven block are in the same -/// order, as define by [`OrderedTransactionHeaders`]. 
+/// This expects that transactions from the proposed block and proven block are in the same order, +/// as defined by [`OrderedTransactionHeaders`]. fn validate_tx_headers( proven_block: &ProvenBlock, proposed_txs: &OrderedTransactionHeaders, diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 7332d9c08..b8239a8d8 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -3,8 +3,8 @@ //! //! It performs these tasks by maintaining a dependency graph between all inflight transactions, //! batches and blocks. A parent-child dependency edge between two nodes exists whenever the child -//! consumes a piece of state that the parent node created. To be more specific, node `A` is a -//! child of node `B`: +//! consumes a piece of state that the parent node created. To be more specific, node `A` is a child +//! of node `B`: //! //! - if `B` created an output note which is the input note of `A`, or //! - if `B` updated an account to state `x'`, and `A` is updating this account from `x' -> x''`. @@ -19,8 +19,8 @@ //! must build on the state of the previous block. This in turn implies that a child node can never //! be committed in a block before all of its parents. //! -//! The mempool also enforces that the graph contains no cycles i.e. that the dependency graph -//! is always a directed acyclic graph (DAG). While technically not illegal from a protocol +//! The mempool also enforces that the graph contains no cycles i.e. that the dependency graph is +//! always a directed acyclic graph (DAG). While technically not illegal from a protocol //! perspective, allowing cycles between nodes would require that all nodes within the cycle be //! committed within the same block. //! 
diff --git a/crates/block-producer/src/mempool/nodes.rs b/crates/block-producer/src/mempool/nodes.rs index 8b9b4ca47..e7baa9072 100644 --- a/crates/block-producer/src/mempool/nodes.rs +++ b/crates/block-producer/src/mempool/nodes.rs @@ -295,11 +295,11 @@ impl Node for BlockNode { /// Contains the current nodes of the state DAG. /// -/// Nodes are purposefully not stored as a single collection since we often want to iterate -/// through specific node types e.g. all available transactions. +/// Nodes are purposefully not stored as a single collection since we often want to iterate through +/// specific node types e.g. all available transactions. /// -/// This data _must_ be kept in sync with the [`InflightState's`] [`NodeIds`] since these are -/// used as the edges of the graph. +/// This data _must_ be kept in sync with the [`InflightState's`] [`NodeIds`] since these are used +/// as the edges of the graph. #[derive(Clone, Debug, PartialEq, Default)] pub(super) struct Nodes { // Nodes in the DAG diff --git a/crates/block-producer/src/mempool/tests/add_transaction.rs b/crates/block-producer/src/mempool/tests/add_transaction.rs index d4ea2d458..cee9a24bd 100644 --- a/crates/block-producer/src/mempool/tests/add_transaction.rs +++ b/crates/block-producer/src/mempool/tests/add_transaction.rs @@ -53,8 +53,8 @@ fn valid_with_state_from_multiple_parents() { } } -/// Ensures that transactions that expire before or within the expiration slack of the chain tip -/// are rejected. +/// Ensures that transactions that expire before or within the expiration slack of the chain tip are +/// rejected. mod tx_expiration { use super::*; diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 0d028f5a4..3df3701ca 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -41,10 +41,10 @@ use crate::{CACHED_MEMPOOL_STATS_UPDATE_INTERVAL, COMPONENT, SERVER_NUM_BATCH_BU /// The block producer server. 
/// -/// Specifies how to connect to the store, batch prover, and block prover components. -/// The connection to the store is established at startup and retried with exponential backoff -/// until the store becomes available. Once the connection is established, the block producer -/// will start serving requests. +/// Specifies how to connect to the store, batch prover, and block prover components. The connection +/// to the store is established at startup and retried with exponential backoff until the store +/// becomes available. Once the connection is established, the block producer will start serving +/// requests. pub struct BlockProducer { /// The address of the block producer component. pub block_producer_address: SocketAddr, diff --git a/crates/block-producer/src/test_utils/mod.rs b/crates/block-producer/src/test_utils/mod.rs index 0695ceadf..d818c5e85 100644 --- a/crates/block-producer/src/test_utils/mod.rs +++ b/crates/block-producer/src/test_utils/mod.rs @@ -18,8 +18,7 @@ pub mod note; /// Generates random values for tests. /// -/// It prints its seed on construction which allows us to reproduce -/// test failures. +/// It prints its seed on construction which allows us to reproduce test failures. pub struct Random(RpoRandomCoin); impl Random { diff --git a/crates/ntx-builder/src/builder/mod.rs b/crates/ntx-builder/src/builder/mod.rs index 706e804c9..7c9a47a36 100644 --- a/crates/ntx-builder/src/builder/mod.rs +++ b/crates/ntx-builder/src/builder/mod.rs @@ -22,9 +22,9 @@ use crate::transaction::{NtxContext, NtxError}; /// Network transaction builder component. /// /// The network transaction builder is in in charge of building transactions that consume notes -/// against network accounts. These notes are identified and communicated by the block producer. -/// The service maintains a list of unconsumed notes and periodically executes and proves -/// transactions that consume them (reaching out to the store to retrieve state as necessary). 
+/// against network accounts. These notes are identified and communicated by the block producer. The +/// service maintains a list of unconsumed notes and periodically executes and proves transactions +/// that consume them (reaching out to the store to retrieve state as necessary). pub struct NetworkTransactionBuilder { /// Address of the store gRPC server. store_url: Url, diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index c3efd1351..092ed8cbf 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -18,6 +18,6 @@ const MAX_NOTES_PER_TX: NonZeroUsize = NonZeroUsize::new(50).unwrap(); /// Maximum number of network transactions which should be in progress concurrently. /// -/// This only counts transactions which are being computed locally and does not include -/// uncommitted transactions in the mempool. +/// This only counts transactions which are being computed locally and does not include uncommitted +/// transactions in the mempool. const MAX_IN_PROGRESS_TXS: usize = 4; diff --git a/crates/ntx-builder/src/state/account.rs b/crates/ntx-builder/src/state/account.rs index 7ca410fb8..14e7c370a 100644 --- a/crates/ntx-builder/src/state/account.rs +++ b/crates/ntx-builder/src/state/account.rs @@ -12,9 +12,9 @@ use miden_objects::note::{Note, Nullifier}; /// An unconsumed network note that may have failed to execute. /// -/// The block number at which the network note was attempted are approximate and may not -/// reflect the exact block number for which the execution attempt failed. The actual block -/// will likely be soon after the number that is recorded here. +/// The block number at which the network note was attempted is approximate and may not reflect the +/// exact block number for which the execution attempt failed. The actual block will likely be soon +/// after the number that is recorded here. 
#[derive(Debug, Clone)] pub struct InflightNetworkNote { note: SingleTargetNetworkNote, @@ -290,8 +290,8 @@ impl NetworkAccountEffect { /// Checks if the backoff block period has passed. /// -/// The number of blocks passed since the last attempt must be greater than or equal to -/// e^(0.25 * `attempt_count`) rounded to the nearest integer. +/// The number of blocks passed since the last attempt must be greater than or equal to e^(0.25 * +/// `attempt_count`) rounded to the nearest integer. /// /// This evaluates to the following: /// - After 1 attempt, the backoff period is 1 block. diff --git a/crates/ntx-builder/src/state/tests.rs b/crates/ntx-builder/src/state/tests.rs index d41a322b2..2787a6e85 100644 --- a/crates/ntx-builder/src/state/tests.rs +++ b/crates/ntx-builder/src/state/tests.rs @@ -38,8 +38,8 @@ fn create_mock_state() -> State { /// Regression test for issue #1312 /// -/// This test verifies that the `NtxBuilder`'s state handling correctly processes transactions -/// that contain nullifiers without corresponding network notes. This scenario can occur when: +/// This test verifies that the `NtxBuilder`'s state handling correctly processes transactions that +/// contain nullifiers without corresponding network notes. This scenario can occur when: /// - A transaction consumes a non-network note (e.g., a private note) /// - The nullifier is included in the transaction but is not tracked by the `NtxBuilder` /// diff --git a/crates/proto/build.rs b/crates/proto/build.rs index 6d71e8400..023239c17 100644 --- a/crates/proto/build.rs +++ b/crates/proto/build.rs @@ -54,8 +54,8 @@ fn main() -> miette::Result<()> { Ok(()) } -/// Generates protobuf bindings from the given file descriptor set and stores them in the -/// given destination directory. +/// Generates protobuf bindings from the given file descriptor set and stores them in the given +/// destination directory. 
fn generate_bindings(file_descriptors: FileDescriptorSet, dst_dir: &Path) -> miette::Result<()> { let mut prost_config = tonic_prost_build::Config::new(); prost_config.skip_debug(["AccountId", "Digest"]); diff --git a/crates/proto/src/clients/mod.rs b/crates/proto/src/clients/mod.rs index 3388d7875..0608980fc 100644 --- a/crates/proto/src/clients/mod.rs +++ b/crates/proto/src/clients/mod.rs @@ -318,8 +318,8 @@ impl GrpcClient for ValidatorClient { // STRICT TYPE-SAFE BUILDER (NO DEFAULTS) // ================================================================================================ -/// A type-safe builder that forces the caller to make an explicit decision for each -/// configuration item (TLS, timeout, metadata version, metadata genesis) before connecting. +/// A type-safe builder that forces the caller to make an explicit decision for each configuration +/// item (TLS, timeout, metadata version, metadata genesis) before connecting. /// /// This builder replaces the previous defaulted builder. Callers must explicitly choose TLS, /// timeout, and metadata options before connecting. diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 8d690803c..7adc96638 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -842,8 +842,8 @@ impl From for proto::primitives::Asset { pub type AccountPrefix = u32; -/// Newtype wrapper for network account prefix. -/// Provides type safety for accounts that are meant for network execution. +/// Newtype wrapper for network account prefix. Provides type safety for accounts that are meant for +/// network execution. 
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] pub struct NetworkAccountPrefix(u32); diff --git a/crates/remote-prover-client/src/remote_prover/batch_prover.rs b/crates/remote-prover-client/src/remote_prover/batch_prover.rs index ed9c7a382..8b302abf8 100644 --- a/crates/remote-prover-client/src/remote_prover/batch_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/batch_prover.rs @@ -15,8 +15,8 @@ use crate::remote_prover::generated as proto; // REMOTE BATCH PROVER // ================================================================================================ -/// A [`RemoteBatchProver`] is a batch prover that sends a proposed batch data to a remote -/// gRPC server and receives a proven batch. +/// A [`RemoteBatchProver`] is a batch prover that sends a proposed batch data to a remote gRPC +/// server and receives a proven batch. /// /// When compiled for the `wasm32-unknown-unknown` target, it uses the `tonic_web_wasm_client` /// transport. Otherwise, it uses the built-in `tonic::transport` for native platforms. diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index d74e9f158..942dc1457 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -18,8 +18,8 @@ use crate::remote_prover::generated as proto; // REMOTE BLOCK PROVER // ================================================================================================ -/// A [`RemoteBlockProver`] is a block prover that sends a proposed block data to a remote -/// gRPC server and receives a proven block. +/// A [`RemoteBlockProver`] is a block prover that sends a proposed block data to a remote gRPC +/// server and receives a proven block. /// /// When compiled for the `wasm32-unknown-unknown` target, it uses the `tonic_web_wasm_client` /// transport. 
Otherwise, it uses the built-in `tonic::transport` for native platforms. diff --git a/crates/remote-prover-client/src/remote_prover/tx_prover.rs b/crates/remote-prover-client/src/remote_prover/tx_prover.rs index b94c9a9ea..c101f9463 100644 --- a/crates/remote-prover-client/src/remote_prover/tx_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/tx_prover.rs @@ -16,8 +16,8 @@ use crate::remote_prover::generated as proto; // REMOTE TRANSACTION PROVER // ================================================================================================ -/// A [`RemoteTransactionProver`] is a transaction prover that sends witness data to a remote -/// gRPC server and receives a proven transaction. +/// A [`RemoteTransactionProver`] is a transaction prover that sends witness data to a remote gRPC +/// server and receives a proven transaction. /// /// When compiled for the `wasm32-unknown-unknown` target, it uses the `tonic_web_wasm_client` /// transport. Otherwise, it uses the built-in `tonic::transport` for native platforms. diff --git a/crates/rpc/src/server/accept.rs b/crates/rpc/src/server/accept.rs index 4e0e1d06d..368b081c7 100644 --- a/crates/rpc/src/server/accept.rs +++ b/crates/rpc/src/server/accept.rs @@ -19,12 +19,12 @@ pub enum GenesisNegotiation { /// Performs content negotiation by rejecting requests which don't match our RPC version or network. /// Clients can specify these as parameters in our `application/vnd.miden` accept media range. /// -/// The client can specify RPC versions it supports using the [`VersionReq`] format. The network -/// is specified as the genesis block's commitment. If the server cannot satisfy either of these +/// The client can specify RPC versions it supports using the [`VersionReq`] format. The network is +/// specified as the genesis block's commitment. If the server cannot satisfy either of these /// constraints then the request is rejected. /// -/// Note that both values are optional, as is the header itself. 
If unset, the server considers -/// any value acceptable. +/// Note that both values are optional, as is the header itself. If unset, the server considers any +/// value acceptable. /// /// As part of the accept header's standard, all media ranges are examined in quality weighting /// order until a matching content type is found. This means that the client can set multiple diff --git a/crates/rpc/src/server/health.rs b/crates/rpc/src/server/health.rs index c240edcb1..66abd4fe4 100644 --- a/crates/rpc/src/server/health.rs +++ b/crates/rpc/src/server/health.rs @@ -6,10 +6,9 @@ use tower::{Layer, Service}; /// Simple health check layer that intercepts requests to root path. /// -/// The root path is used by load-balancers and options requests to check the health -/// of the server. Since our gRPC server doesn't serve anything on the root -/// these get logged as errors. This layer instead intercepts these requests -/// and returns `Ok(200)`, preventing the errors. +/// The root path is used by load-balancers and options requests to check the health of the server. +/// Since our gRPC server doesn't serve anything on the root these get logged as errors. This layer +/// instead intercepts these requests and returns `Ok(200)`, preventing the errors. #[derive(Clone)] pub struct HealthCheckLayer; diff --git a/crates/rpc/src/server/mod.rs b/crates/rpc/src/server/mod.rs index 71ef163c2..5f3b6f9bb 100644 --- a/crates/rpc/src/server/mod.rs +++ b/crates/rpc/src/server/mod.rs @@ -25,9 +25,9 @@ mod validator; /// The RPC server component. /// -/// On startup, binds to the provided listener and starts serving the RPC API. -/// It connects lazily to the store and block producer components as needed. -/// Requests will fail if the components are not available. +/// On startup, binds to the provided listener and starts serving the RPC API. It connects lazily to +/// the store and block producer components as needed. Requests will fail if the components are not +/// available. 
pub struct Rpc { pub listener: TcpListener, pub store_url: Url, diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 4b4f1dd0b..09ef4e76c 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -370,8 +370,8 @@ async fn send_request( rpc_client.get_block_header_by_number(request).await } -/// Binds a socket on an available port, runs the RPC server on it, and -/// returns a client to talk to the server, along with the socket address. +/// Binds a socket on an available port, runs the RPC server on it, and returns a client to talk to +/// the server, along with the socket address. async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) { let store_addr = { let store_listener = diff --git a/crates/store/benches/account_tree_historical.rs b/crates/store/benches/account_tree_historical.rs index e03893363..fddbe1d66 100644 --- a/crates/store/benches/account_tree_historical.rs +++ b/crates/store/benches/account_tree_historical.rs @@ -100,8 +100,8 @@ fn setup_account_tree_with_history( // VANILLA ACCOUNTTREE BENCHMARKS // ================================================================================================ -/// Benchmarks vanilla `AccountTree` open (query) operations. -/// This provides a baseline for comparison with historical access operations. +/// Benchmarks vanilla `AccountTree` open (query) operations. This provides a baseline for +/// comparison with historical access operations. fn bench_vanilla_access(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_vanilla_access"); @@ -121,8 +121,8 @@ fn bench_vanilla_access(c: &mut Criterion) { group.finish(); } -/// Benchmarks vanilla `AccountTree` insertion (mutation) performance. -/// This provides a baseline for comparison with history-tracking insertion. +/// Benchmarks vanilla `AccountTree` insertion (mutation) performance. This provides a baseline for +/// comparison with history-tracking insertion. 
fn bench_vanilla_insertion(c: &mut Criterion) { let mut group = c.benchmark_group("account_tree_insertion"); diff --git a/crates/store/build.rs b/crates/store/build.rs index d08f3fd0e..eff59d3ce 100644 --- a/crates/store/build.rs +++ b/crates/store/build.rs @@ -1,5 +1,4 @@ -// This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in -// `store/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . +// This build.rs is required to trigger the `diesel_migrations::embed_migrations!` proc-macro in `store/src/db/migrations.rs` to include the latest version of the migrations into the binary, see . fn main() { println!("cargo:rerun-if-changed=./src/db/migrations"); // If we do one re-write, the default rules are disabled, diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index 71131a615..b704930c1 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -103,9 +103,9 @@ impl HistoricalOverlay { /// Wraps `AccountTree` with historical query support via reversion overlays. /// -/// This structure maintains a sliding window of historical account states by storing -/// reversion data (mutations that undo changes). Historical witnesses are reconstructed -/// by starting from the latest state and applying reversion overlays backwards in time. +/// This structure maintains a sliding window of historical account states by storing reversion data +/// (mutations that undo changes). Historical witnesses are reconstructed by starting from the +/// latest state and applying reversion overlays backwards in time. #[derive(Debug)] pub struct AccountTreeWithHistory { /// The current block number (latest state). 
diff --git a/crates/store/src/db/migrations.rs b/crates/store/src/db/migrations.rs index ad78548c6..754268112 100644 --- a/crates/store/src/db/migrations.rs +++ b/crates/store/src/db/migrations.rs @@ -4,8 +4,7 @@ use tracing::instrument; use crate::COMPONENT; -// The rebuild is automatically triggered by `build.rs` as described in -// . +// The rebuild is automatically triggered by `build.rs` as described in . pub const MIGRATIONS: EmbeddedMigrations = embed_migrations!("src/db/migrations"); // TODO we have not tested this in practice! diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index ffc7b80f6..0a0913eb5 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -1,10 +1,10 @@ //! Central place to define conversion from and to database primitive types //! -//! Eventually, all of them should have types and we can implement a trait for them -//! rather than function pairs. +//! Eventually, all of them should have types and we can implement a trait for them rather than +//! function pairs. //! -//! Notice: All of them are infallible. The invariant is a sane content of the database -//! and humans ensure the sanity of casts. +//! Notice: All of them are infallible. The invariant is a sane content of the database and humans +//! ensure the sanity of casts. //! //! Notice: Keep in mind if you _need_ to expand the datatype, only if you require sorting this is //! mandatory! diff --git a/crates/store/src/db/models/mod.rs b/crates/store/src/db/models/mod.rs index 09d4bf92f..ce8094fe2 100644 --- a/crates/store/src/db/models/mod.rs +++ b/crates/store/src/db/models/mod.rs @@ -1,17 +1,14 @@ //! Defines models for usage with the diesel API //! -//! Note: `select` can either be used as -//! `SelectDsl::select(schema::foo::table, (schema::foo::some_cool_id, ))` -//! or -//! `SelectDsl::select(schema::foo::table, FooRawRow::as_selectable())`. +//! 
Note: `select` can either be used as `SelectDsl::select(schema::foo::table,
+//! (schema::foo::some_cool_id, ))` or `SelectDsl::select(schema::foo::table,
+//! FooRawRow::as_selectable())`.
 //!
-//! The former can be used to avoid declaring extra types, while the latter
-//! is better if a full row is in need of loading and avoids duplicate
-//! specification.
+//! The former can be used to avoid declaring extra types, while the latter is better if a full row
+//! is in need of loading and avoids duplicate specification.
 //!
-//! Note: The fully qualified syntax yields for _much_ better errors.
-//! The first step in debugging should always be using the fully qualified
-//! calling syntext when dealing with diesel.
+//! Note: The fully qualified syntax yields for _much_ better errors. The first step in debugging
+//! should always be using the fully qualified calling syntax when dealing with diesel.
 
 use std::num::NonZeroUsize;
 
diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs
index 52be3ee84..11e55fff5 100644
--- a/crates/store/src/db/models/queries/accounts.rs
+++ b/crates/store/src/db/models/queries/accounts.rs
@@ -437,8 +437,8 @@ impl StorageMapValue {
 ///
 /// # Returns
 ///
-/// A vector of tuples containing `(slot, key, value, is_latest)` for the given account.
-/// Each row contains one of:
+/// A vector of tuples containing `(slot, key, value, is_latest)` for the given account. Each row
+/// contains one of:
 ///
 /// - the historical value for a slot and key specifically on block `block_to`
 /// - the latest updated value for the slot and key combination, alongside the block number in which
@@ -650,8 +650,8 @@ impl TryInto for AccountSummaryRaw {
 
 /// Insert an account vault asset row into the DB using the given [`SqliteConnection`].
 ///
-/// This function will set `is_latest=true` for the new row and update any existing
-/// row with the same `(account_id, vault_key)` tuple to `is_latest=false`.
+/// This function will set `is_latest=true` for the new row and update any existing row with the +/// same `(account_id, vault_key)` tuple to `is_latest=false`. /// /// # Returns /// @@ -690,8 +690,8 @@ pub(crate) fn insert_account_vault_asset( /// Insert an account storage map value into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest=true` for the new row and update any existing -/// row with the same `(account_id, slot, key)` tuple to `is_latest=false`. +/// This function will set `is_latest=true` for the new row and update any existing row with the +/// same `(account_id, slot, key)` tuple to `is_latest=false`. /// /// # Returns /// diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index 42ec3b0e5..46ca73f5a 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -24,8 +24,8 @@ use crate::db::schema; /// /// # Returns /// -/// When `block_num` is [None], the latest block header is returned. Otherwise, the block with -/// the given block height is returned. +/// When `block_num` is [None], the latest block header is returned. Otherwise, the block with the +/// given block height is returned. /// /// ```sql /// -- with argument diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs index a5c2ffc2d..c05b73cf1 100644 --- a/crates/store/src/db/models/queries/notes.rs +++ b/crates/store/src/db/models/queries/notes.rs @@ -76,14 +76,14 @@ use crate::errors::NoteSyncError; /// /// # Returns /// -/// All matching notes from the first block within the range containing a matching note. -/// A note is considered a match if it has any of the given tags, or if its sender is one of the -/// given account IDs. If no matching notes are found at all, then an empty vector is returned. 
+/// All matching notes from the first block within the range containing a matching note. A note is +/// considered a match if it has any of the given tags, or if its sender is one of the given account +/// IDs. If no matching notes are found at all, then an empty vector is returned. /// /// # Note /// -/// This method returns notes from a single block. To fetch all notes up to the chain tip, -/// multiple requests are necessary. +/// This method returns notes from a single block. To fetch all notes up to the chain tip, multiple +/// requests are necessary. /// /// # Raw SQL /// @@ -390,8 +390,8 @@ pub(crate) fn select_note_script_by_root( /// /// # Returns /// -/// A set of unconsumed network notes with maximum length of `size` and the page to get -/// the next set. +/// A set of unconsumed network notes with maximum length of `size` and the page to get the next +/// set. /// /// Attention: uses the _implicit_ column `rowid`, which requires to use a few raw SQL nugget /// statements @@ -502,8 +502,8 @@ pub(crate) fn unconsumed_network_notes( /// /// # Returns /// -/// A set of unconsumed network notes with maximum length of `size` and the page to get -/// the next set. +/// A set of unconsumed network notes with maximum length of `size` and the page to get the next +/// set. /// /// # Raw SQL /// @@ -680,10 +680,8 @@ pub struct NoteDetailsRawRow { pub serial_num: Option>, } -// Note: One cannot use `#[diesel(embed)]` to structure -// this, it will yield a significant amount of errors -// when used with join and debugging is painful to put it -// mildly. +// Note: One cannot use `#[diesel(embed)]` to structure this, it will yield a significant amount of +// errors when used with join and debugging is painful to put it mildly. 
#[derive(Debug, Clone, PartialEq, Queryable)] pub struct NoteRecordWithScriptRawJoined { pub committed_at: i64, diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs index 95e77ab46..f2ac2cd19 100644 --- a/crates/store/src/db/models/queries/nullifiers.rs +++ b/crates/store/src/db/models/queries/nullifiers.rs @@ -34,9 +34,9 @@ use crate::db::{NullifierInfo, schema}; /// * `nullifier_prefixes`: List of nullifier prefixes to filter by /// - Limit: 0 <= count <= 1000 /// -/// Each value of the `nullifier_prefixes` is only the `prefix_len` most significant bits -/// of the nullifier of interest to the client. This hides the details of the specific -/// nullifier being requested. Currently the only supported prefix length is 16 bits. +/// Each value of the `nullifier_prefixes` is only the `prefix_len` most significant bits of the +/// nullifier of interest to the client. This hides the details of the specific nullifier being +/// requested. Currently the only supported prefix length is 16 bits. /// /// # Returns /// diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs index 7b5caf5e7..35883d032 100644 --- a/crates/store/src/db/models/queries/transactions.rs +++ b/crates/store/src/db/models/queries/transactions.rs @@ -236,10 +236,10 @@ impl TransactionSummaryRowInsert { /// - `transaction_records`: Vector of transaction records, limited by payload size /// /// # Note -/// This function returns complete transaction record information including state commitments -/// and note IDs, allowing for direct conversion to proto `TransactionRecord` without loading -/// full block data. We use a chunked loading strategy to prevent memory exhaustion attacks and -/// ensure predictable resource usage. 
+/// This function returns complete transaction record information including state commitments and +/// note IDs, allowing for direct conversion to proto `TransactionRecord` without loading full block +/// data. We use a chunked loading strategy to prevent memory exhaustion attacks and ensure +/// predictable resource usage. /// /// # Raw SQL /// ```sql diff --git a/crates/store/src/db/models/utils.rs b/crates/store/src/db/models/utils.rs index 5124beabc..2ae4662ad 100644 --- a/crates/store/src/db/models/utils.rs +++ b/crates/store/src/db/models/utils.rs @@ -4,8 +4,8 @@ use miden_objects::note::Nullifier; use crate::errors::DatabaseError; -/// Utility to convert an iterable container of containing `R`-typed values -/// to a `Vec` and bail at the first failing conversion +/// Utility to convert an iterable container of containing `R`-typed values to a `Vec` and bail +/// at the first failing conversion pub(crate) fn vec_raw_try_into>( raw: impl IntoIterator, ) -> std::result::Result, >::Error> { diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index f269aee3b..cc6d28adc 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -103,8 +103,8 @@ diesel::table! { diesel::joinable!(accounts -> account_codes (code_commitment)); diesel::joinable!(accounts -> block_headers (block_num)); // Note: Cannot use diesel::joinable! 
with accounts table due to composite primary key
-// diesel::joinable!(notes -> accounts (sender));
-// diesel::joinable!(transactions -> accounts (account_id));
+// diesel::joinable!(notes -> accounts (sender));
+// diesel::joinable!(transactions -> accounts (account_id));
 diesel::joinable!(notes -> block_headers (committed_at));
 diesel::joinable!(notes -> note_scripts (script_root));
 diesel::joinable!(nullifiers -> block_headers (block_num));
diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs
index 193d2f105..dc4c4ff25 100644
--- a/crates/store/src/genesis/config/mod.rs
+++ b/crates/store/src/genesis/config/mod.rs
@@ -387,8 +387,8 @@ struct AssetEntry {
 // STORAGE MODE
 // ================================================================================================
 
-/// See the [full description](https://0xmiden.github.io/miden-base/account.html?highlight=Accoun#account-storage-mode)
-/// for details
+/// See the [full description](https://0xmiden.github.io/miden-base/account.html?highlight=Accoun#account-storage-mode) for details
 #[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize, Default)]
 pub enum StorageMode {
     /// Monitor for `Notes` related to the account, in addition to being `Public`.
@@ -455,8 +454,8 @@ impl AccountSecrets {
 // HELPERS
 // ================================================================================================
 
-/// Process wallet assets and return them as a fungible asset delta.
-/// Track the negative adjustments for the respective faucets.
+/// Process wallet assets and return them as a fungible asset delta. Track the negative adjustments
+/// for the respective faucets.
fn prepare_fungible_asset_update( assets: impl IntoIterator, faucets: &IndexMap, diff --git a/crates/utils/src/cors.rs b/crates/utils/src/cors.rs index 0bc413cdd..a33141a94 100644 --- a/crates/utils/src/cors.rs +++ b/crates/utils/src/cors.rs @@ -17,9 +17,7 @@ const DEFAULT_ALLOW_HEADERS: [HeaderName; 4] = [ /// Enables CORS support. This is required for gRPC-web support. /// -/// The following implementation is based on the one in tonic-web that was deprecated -/// in favor of letting the user configure the CORS layer. Reference: -/// +/// The following implementation is based on the one in tonic-web that was deprecated in favor of letting the user configure the CORS layer. Reference: /// /// # Configuration /// diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index d02499841..c612f0256 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -4,9 +4,8 @@ //! 1. the external facing RPC //! 2. limiting SQL statements not exceeding parameter limits //! -//! The 1st is good to terminate invalid requests as early as possible, -//! where the second is both a fallback and a safeguard not benching -//! pointless parameter combinations. +//! The 1st is good to terminate invalid requests as early as possible, where the second is both a +//! fallback and a safeguard not benching pointless parameter combinations. #[allow(missing_docs)] #[derive(Debug, thiserror::Error)] @@ -17,8 +16,8 @@ pub struct QueryLimitError { limit: usize, } -/// Checks limits against the desired query parameters, per query parameter and -/// bails if they exceed a defined value. +/// Checks limits against the desired query parameters, per query parameter and bails if they exceed +/// a defined value. pub trait QueryParamLimiter { /// Name of the parameter to mention in the error. 
const PARAM_NAME: &'static str; @@ -71,8 +70,7 @@ impl QueryParamLimiter for QueryParamNoteTagLimit { const LIMIT: usize = 1000; } -/// Used for the following RPC endpoints -/// `select_notes_by_id` +/// Used for the following RPC endpoints `select_notes_by_id` pub struct QueryParamNoteIdLimit; impl QueryParamLimiter for QueryParamNoteIdLimit { const PARAM_NAME: &str = "note_id"; diff --git a/crates/utils/src/logging.rs b/crates/utils/src/logging.rs index b1425d0f2..89bd5a60a 100644 --- a/crates/utils/src/logging.rs +++ b/crates/utils/src/logging.rs @@ -41,8 +41,7 @@ impl Drop for OtelGuard { /// Trace filtering defaults to `INFO` and can be configured using the conventional `RUST_LOG` /// environment variable. /// -/// The open-telemetry configuration is controlled via environment variables as defined in the -/// [specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#opentelemetry-protocol-exporter) +/// The open-telemetry configuration is controlled via environment variables as defined in the [specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md#opentelemetry-protocol-exporter) /// /// Registers a panic hook so that panic errors are reported to the open-telemetry exporter. /// @@ -94,9 +93,9 @@ fn init_tracer_provider() -> anyhow::Result { /// Allows trace content to be inspected via the returned receiver. /// /// All tests that use this function must be annotated with `#[serial(open_telemetry_tracing)]`. -/// This forces serialization of all such tests. Otherwise, the tested spans could -/// be interleaved during runtime. Also, the global exporter could be re-initialized in -/// the middle of a concurrently running test. +/// This forces serialization of all such tests. Otherwise, the tested spans could be interleaved +/// during runtime. 
Also, the global exporter could be re-initialized in the middle of a +/// concurrently running test. #[cfg(feature = "testing")] pub fn setup_test_tracing() -> anyhow::Result<( tokio::sync::mpsc::UnboundedReceiver, diff --git a/crates/utils/src/lru_cache.rs b/crates/utils/src/lru_cache.rs index de325bf10..05578d536 100644 --- a/crates/utils/src/lru_cache.rs +++ b/crates/utils/src/lru_cache.rs @@ -5,8 +5,8 @@ use std::sync::Arc; use lru::LruCache as InnerCache; use tokio::sync::Mutex; -/// A newtype wrapper around an LRU cache. Ensures that the cache lock is not held across -/// await points. +/// A newtype wrapper around an LRU cache. Ensures that the cache lock is not held across await +/// points. #[derive(Clone)] pub struct LruCache(Arc>>); diff --git a/crates/utils/src/tracing/grpc.rs b/crates/utils/src/tracing/grpc.rs index f5d0951bf..5bc19067d 100644 --- a/crates/utils/src/tracing/grpc.rs +++ b/crates/utils/src/tracing/grpc.rs @@ -2,13 +2,13 @@ use tracing::field; use crate::tracing::OpenTelemetrySpanExt; -/// Returns a [`trace_fn`](tonic::transport::server::Server) implementation for gRPC requests -/// which adds open-telemetry information to the span. +/// Returns a [`trace_fn`](tonic::transport::server::Server) implementation for gRPC requests which +/// adds open-telemetry information to the span. /// -/// Creates an `info` span following the open-telemetry standard: `{service}/{method}`. -/// The span name is dynamically set using the HTTP path via the `otel.name` field. -/// Additionally also pulls in remote tracing context which allows the server trace to be connected -/// to the client's origin trace. +/// Creates an `info` span following the open-telemetry standard: `{service}/{method}`. The span +/// name is dynamically set using the HTTP path via the `otel.name` field. Additionally also pulls +/// in remote tracing context which allows the server trace to be connected to the client's origin +/// trace. 
pub fn grpc_trace_fn(request: &http::Request) -> tracing::Span { // A gRPC request's path ends with `..//`. let mut path_segments = request.uri().path().rsplit('/'); diff --git a/crates/utils/src/version/mod.rs b/crates/utils/src/version/mod.rs index 7d378558c..524817131 100644 --- a/crates/utils/src/version/mod.rs +++ b/crates/utils/src/version/mod.rs @@ -1,16 +1,16 @@ #[cfg(feature = "vergen")] pub use vergen::vergen; -/// Contains build metadata which can be formatted into a pretty --version -/// output using its Display implementation. +/// Contains build metadata which can be formatted into a pretty --version output using its Display +/// implementation. /// -/// The build metadata can be embedded at compile time using the `vergen` function -/// available from the `vergen` feature. See that functions description for a list -/// of the environment variables emitted which map nicely to [`LongVersion`]. +/// The build metadata can be embedded at compile time using the `vergen` function available from +/// the `vergen` feature. See that functions description for a list of the environment variables +/// emitted which map nicely to [`LongVersion`]. /// -/// Unfortunately these values must be transferred manually by the end user since the -/// env variables are only available once the caller's build script has run - which is -/// after this crate is compiled. +/// Unfortunately these values must be transferred manually by the end user since the env variables +/// are only available once the caller's build script has run - which is after this crate is +/// compiled. pub struct LongVersion { pub version: &'static str, pub sha: &'static str, diff --git a/scripts/reflow.awk b/scripts/reflow.awk new file mode 100644 index 000000000..d9a7bf488 --- /dev/null +++ b/scripts/reflow.awk @@ -0,0 +1,92 @@ +# Merge Rust comment lines while preserving: +# - original spacing after //, ///, //! 
+# - empty lines
+# - === headings
+# - bullets / numbered lists
+# - fenced code blocks
+# - Markdown headings
+
+function ltrim(s) {
+    while (substr(s,1,1)==" " || substr(s,1,1)=="\t") s=substr(s,2)
+    return s
+}
+
+function is_numbered_list(s,   i, c, found_digit) {
+    found_digit=0
+    for(i=1;i<=length(s);i++){
+        c=substr(s,i,1)
+        if(c>="0" && c<="9"){found_digit=1; continue}
+        if(found_digit && (c=="."||c==")")) return 1
+        return 0
+    }
+    return 0
+}
+
+{
+    line=$0
+
+    # Detect comment type; plain // comments are never merged (they may hold commented-out code)
+    if(substr(line,1,3)=="///") type="doc"
+    else if(substr(line,1,3)=="//!") type="innerdoc"
+    else if(substr(line,1,2)=="//") type="none"
+    else type="none"
+
+    if(type!="none"){
+        if(type=="doc") prefix="///"
+        else if(type=="innerdoc") prefix="//!"
+        else prefix="//"
+
+        raw=substr(line,length(prefix)+1)
+        # Capture the original spaces after prefix
+        match_space=""
+        i=1
+        while(i<=length(raw) && (substr(raw,i,1)==" " || substr(raw,i,1)=="\t")){ match_space = match_space substr(raw,i,1); i++ }
+        text=substr(raw,i)
+
+        trimmed=ltrim(raw)
+
+        # ---------- Fenced code block ----------
+        if(substr(trimmed,1,3)=="```"){
+            if(in_comment){ print out_prefix merged; in_comment=0; out_space="" }
+            if(fenced==0) fenced=1; else fenced=0
+            print line
+            next
+        }
+        if(fenced==1){ print line; next }
+
+        # ---------- Empty line ----------
+        empty=1
+        for(j=1;j<=length(trimmed);j++){c=substr(trimmed,j,1); if(c!=" " && c!="\t"){empty=0; break}}
+        if(empty){ if(in_comment){print out_prefix merged; in_comment=0; out_space=""} print line; next }
+
+        # ---------- === heading ----------
+        if(substr(trimmed,1,3)=="==="){ if(in_comment){print out_prefix merged; in_comment=0; out_space=""} print line; next }
+
+        # ---------- Markdown heading ----------
+        if(substr(trimmed,1,1)=="#"){ if(in_comment){print out_prefix merged; in_comment=0; out_space=""} print line; next }
+
+        # ---------- Bullet or numbered list ----------
+        first=substr(trimmed,1,1)
+        if(first=="*" || first=="-" || is_numbered_list(trimmed)){ if(in_comment){print out_prefix merged; in_comment=0; out_space=""} print line; next }
+
+        # ---------- Mergeable line ----------
+        if(in_comment && type==last_type){
+            merged=merged " " text
+        } else {
+            if(in_comment) print out_prefix merged
+            in_comment=1
+            last_type=type
+            merged=text
+            out_prefix = prefix match_space
+        }
+
+        next
+    }
+
+    # Non-comment line
+    if(in_comment){ print out_prefix merged; in_comment=0; out_space="" }
+    print line
+}
+
+END { if(in_comment) print out_prefix merged }
+
diff --git a/scripts/reflow.sh b/scripts/reflow.sh
new file mode 100755
index 000000000..c10f9c2c6
--- /dev/null
+++ b/scripts/reflow.sh
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+find . \( -name target -prune -o -name generated -prune \) -o -type f -name "*.rs" -print0 |
+  while read -r -d $'\0' x; do
+    awk -i inplace -f scripts/reflow.awk "$x"
+  done