From 5e840139b3510ba58569f2b4138ccc816d02b60a Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 25 Mar 2026 16:46:07 +0200 Subject: [PATCH 1/9] User batch gRPC now includes txs for validation --- Cargo.lock | 1 + crates/block-producer/src/server/mod.rs | 15 +- crates/rpc/Cargo.toml | 1 + crates/rpc/src/server/api.rs | 89 ++- proto/proto/internal/block_producer.proto | 113 ++-- proto/proto/rpc.proto | 740 +++++++++++----------- proto/proto/types/transaction.proto | 77 +-- 7 files changed, 530 insertions(+), 506 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 16ea17404b..e600d10ed1 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -3128,6 +3128,7 @@ dependencies = [ "miden-protocol", "miden-standards", "miden-tx", + "miden-tx-batch-prover", "reqwest", "rstest", "semver 1.0.27", diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index aad5335c5d..c73729e39d 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -14,7 +14,6 @@ use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::formatting::{format_input_notes, format_output_notes}; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_protocol::batch::ProvenBatch; use miden_protocol::block::BlockNumber; use miden_protocol::transaction::ProvenTransaction; use miden_protocol::utils::serde::Deserializable; @@ -361,12 +360,12 @@ impl BlockProducerRpcServer { skip_all, err )] - async fn submit_proven_batch( + async fn submit_batch( &self, - request: proto::transaction::ProvenTransactionBatch, + _request: proto::transaction::TransactionBatch, ) -> Result { - let _batch = ProvenBatch::read_from_bytes(&request.encoded) - .map_err(SubmitProvenBatchError::Deserialization)?; + // let _batch = ProvenBatch::read_from_bytes(&request.encoded) + // 
.map_err(SubmitProvenBatchError::Deserialization)?; todo!(); } @@ -387,11 +386,11 @@ impl api_server::Api for BlockProducerRpcServer { .map_err(Into::into) } - async fn submit_proven_batch( + async fn submit_batch( &self, - request: tonic::Request, + request: tonic::Request, ) -> Result, Status> { - self.submit_proven_batch(request.into_inner()) + self.submit_batch(request.into_inner()) .await .map(tonic::Response::new) // This Status::from mapping takes care of hiding internal errors. diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 537173e67d..4892f315ff 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -24,6 +24,7 @@ miden-node-proto-build = { workspace = true } miden-node-utils = { workspace = true } miden-protocol = { default-features = true, workspace = true } miden-tx = { default-features = true, workspace = true } +miden-tx-batch-prover = { workspace = true } semver = { version = "1.0" } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } diff --git a/crates/rpc/src/server/api.rs b/crates/rpc/src/server/api.rs index a0ec88859a..2e3bd3bdae 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -17,13 +17,14 @@ use miden_node_utils::limiter::{ QueryParamNullifierLimit, QueryParamStorageMapKeyTotalLimit, }; -use miden_protocol::batch::ProvenBatch; +use miden_protocol::batch::ProposedBatch; use miden_protocol::block::{BlockHeader, BlockNumber}; use miden_protocol::note::{Note, NoteRecipient, NoteScript}; use miden_protocol::transaction::{OutputNote, ProvenTransaction, ProvenTransactionBuilder}; use miden_protocol::utils::serde::{Deserializable, Serializable}; use miden_protocol::{MIN_PROOF_SECURITY_LEVEL, Word}; use miden_tx::TransactionVerifier; +use miden_tx_batch_prover::LocalBatchProver; use tonic::{IntoRequest, Request, Response, Status}; use tracing::{debug, info}; use url::Url; @@ -387,47 +388,81 @@ impl api_server::Api for RpcService { /// 
Deserializes the batch, strips MAST decorators from full output note scripts, rebuilds /// the batch, then forwards it to the block producer. - async fn submit_proven_batch( + async fn submit_batch( &self, - request: tonic::Request, + request: tonic::Request, ) -> Result, Status> { let Some(block_producer) = &self.block_producer else { return Err(Status::unavailable("Batch submission not available in read-only mode")); }; - let mut request = request.into_inner(); - - let batch = ProvenBatch::read_from_bytes(&request.encoded) - .map_err(|err| Status::invalid_argument(err.as_report_context("invalid batch")))?; - - // Build a new batch with output notes' decorators removed - let stripped_outputs: Vec = - strip_output_note_decorators(batch.output_notes().iter()).collect(); + let request = request.into_inner(); - let rebuilt_batch = ProvenBatch::new( - batch.id(), - batch.reference_block_commitment(), - batch.reference_block_num(), - batch.account_updates().clone(), - batch.input_notes().clone(), - stripped_outputs, - batch.batch_expiration_block_num(), - batch.transactions().clone(), - ) - .map_err(|e| Status::invalid_argument(e.to_string()))?; + let batch = ProposedBatch::read_from_bytes(&request.proposed_batch).map_err(|err| { + Status::invalid_argument(err.as_report_context("invalid proposed_batch")) + })?; - request.encoded = rebuilt_batch.to_bytes(); + // Perform this check here since its cheap. If this passes we can safely zip inputs and + // transactions. 
+        if request.transaction_inputs.len() != batch.transactions().len() {
+            return Err(Status::invalid_argument(format!(
+                "Number of inputs {} does not match number of transactions {} in batch",
+                request.transaction_inputs.len(),
+                batch.transactions().len()
+            )));
+        }
 
-        // Only allow deployment transactions for new network accounts
-        for tx in batch.transactions().as_slice() {
-            if tx.account_id().is_network() && !tx.initial_state_commitment().is_empty() {
+        // Only allow deployment transactions for new network accounts.
+        for tx in batch.transactions() {
+            if tx.account_id().is_network()
+                && !tx.account_update().initial_state_commitment().is_empty()
+            {
                 return Err(Status::invalid_argument(
                     "Network transactions may not be submitted by users yet",
                 ));
             }
         }
 
-        block_producer.clone().submit_proven_batch(request).await
+        // Verify batch transaction proofs.
+        let proof = LocalBatchProver::new(MIN_PROOF_SECURITY_LEVEL).prove(batch.clone()).map_err(
+            |err| Status::invalid_argument(err.as_report_context("proposed batch proof failed")),
+        )?;
+        // Verify the reference header matches the canonical chain.
+        let reference_header = self
+            .get_block_header_by_number(Request::new(proto::rpc::BlockHeaderByNumberRequest {
+                block_num: proof.reference_block_num().as_u32().into(),
+                include_mmr_proof: false.into(),
+            }))
+            .await?
+            .into_inner()
+            .block_header
+            .expect("store should always send block header");
+        let reference_commitment: Word = reference_header
+            .chain_commitment
+            .expect("store should always fill block header")
+            .try_into()
+            .expect("store Word should be okay");
+        if reference_commitment != proof.reference_block_commitment() {
+            return Err(Status::invalid_argument(format!(
+                "batch reference block {} commitment {} does not match canonical chain's commitment of {}",
+                proof.reference_block_num(),
+                proof.reference_block_commitment(),
+                reference_commitment
+            )));
+        }
+
+        // Submit each transaction to the validator.
+ // + // SAFETY: We checked earlier that the two iterators are the same length. + for (tx, inputs) in batch.transactions().iter().zip(&request.transaction_inputs) { + let request = proto::transaction::ProvenTransaction { + transaction: tx.to_bytes(), + transaction_inputs: inputs.clone().into(), + }; + self.validator.clone().submit_proven_transaction(request).await?; + } + + block_producer.clone().submit_batch(request).await } // -- Status & utility endpoints ---------------------------------------------------------- diff --git a/proto/proto/internal/block_producer.proto b/proto/proto/internal/block_producer.proto index e81f5c2452..cfe2b47732 100644 --- a/proto/proto/internal/block_producer.proto +++ b/proto/proto/internal/block_producer.proto @@ -2,45 +2,38 @@ syntax = "proto3"; package block_producer; +import "google/protobuf/empty.proto"; import "rpc.proto"; -import "types/note.proto"; import "types/blockchain.proto"; +import "types/note.proto"; import "types/primitives.proto"; import "types/transaction.proto"; -import "google/protobuf/empty.proto"; // BLOCK PRODUCER SERVICE // ================================================================================================ service Api { - // Returns the status info. - rpc Status(google.protobuf.Empty) returns (rpc.BlockProducerStatus) {} + // Returns the status info. + rpc Status(google.protobuf.Empty) returns (rpc.BlockProducerStatus) {} - // Submits proven transaction to the Miden network. Returns the node's current block height. - rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} + // Submits proven transaction to the Miden network. Returns the node's current block height. + rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} - // Submits a proven batch to the Miden network. - // - // The batch may include transactions which were are: - // - // - already in the mempool i.e. 
previously successfully submitted
-  //   - will be submitted to the mempool in the future
-  //   - won't be submitted to the mempool at all
-  //
-  // All transactions in the batch but not in the mempool must build on the current mempool
-  // state following normal transaction submission rules.
-  //
-  // Returns the node's current block height.
-  rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {}
+  // Submits a batch of transactions to the Miden network.
+  //
+  // All transactions in this batch will be considered atomic, and be committed together or not at all.
+  //
+  // Returns the node's current block height.
+  rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {}
 
-  // Subscribe to mempool events.
-  //
-  // The event stream will contain all events after the current chain tip. This includes all
-  // currently inflight events that have not yet been committed to the chain.
-  //
-  // Currently only a single active subscription is supported. Subscription requests will cancel
-  // the active subscription, if any.
-  rpc MempoolSubscription(google.protobuf.Empty) returns (stream MempoolEvent) {}
+  // Subscribe to mempool events.
+  //
+  // The event stream will contain all events after the current chain tip. This includes all
+  // currently inflight events that have not yet been committed to the chain.
+  //
+  // Currently only a single active subscription is supported. Subscription requests will cancel
+  // the active subscription, if any.
+  rpc MempoolSubscription(google.protobuf.Empty) returns (stream MempoolEvent) {}
 }
 
 // MEMPOOL SUBSCRIPTION
 // ================================================================================================
 
 message MempoolSubscriptionRequest {}
 
 // Event from the mempool.
 message MempoolEvent {
-  // A block was committed.
+ // + // This event is sent when a block is committed to the chain. + message BlockCommitted { + blockchain.BlockHeader block_header = 1; + repeated transaction.TransactionId transactions = 2; + } - // A transaction was added to the mempool. + // A transaction was added to the mempool. + // + // This event is sent when a transaction is added to the mempool. + message TransactionAdded { + // The ID of the transaction. + transaction.TransactionId id = 1; + // Nullifiers consumed by the transaction. + repeated primitives.Digest nullifiers = 2; + // Network notes created by the transaction. + repeated note.NetworkNote network_notes = 3; + // Changes to a network account, if any. This includes creation of new network accounts. // - // This event is sent when a transaction is added to the mempool. - message TransactionAdded { - // The ID of the transaction. - transaction.TransactionId id = 1; - // Nullifiers consumed by the transaction. - repeated primitives.Digest nullifiers = 2; - // Network notes created by the transaction. - repeated note.NetworkNote network_notes = 3; - // Changes to a network account, if any. This includes creation of new network accounts. - // - // The account delta is encoded using [winter_utils::Serializable] implementation - // for [miden_protocol::account::delta::AccountDelta]. - optional bytes network_account_delta = 4; - } + // The account delta is encoded using [winter_utils::Serializable] implementation + // for [miden_protocol::account::delta::AccountDelta]. + optional bytes network_account_delta = 4; + } - // A set of transactions was reverted and dropped from the mempool. - // - // This event is sent when a set of transactions are reverted and dropped from the mempool. - message TransactionsReverted { - repeated transaction.TransactionId reverted = 1; - } + // A set of transactions was reverted and dropped from the mempool. + // + // This event is sent when a set of transactions are reverted and dropped from the mempool. 
+ message TransactionsReverted { + repeated transaction.TransactionId reverted = 1; + } - oneof event { - TransactionAdded transaction_added = 1; - BlockCommitted block_committed = 2; - TransactionsReverted transactions_reverted = 3; - }; + oneof event { + TransactionAdded transaction_added = 1; + BlockCommitted block_committed = 2; + TransactionsReverted transactions_reverted = 3; + } } diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index 1a218539ee..7b57bf3cbb 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -2,109 +2,102 @@ syntax = "proto3"; package rpc; +import "google/protobuf/empty.proto"; import "types/account.proto"; import "types/blockchain.proto"; import "types/note.proto"; import "types/primitives.proto"; import "types/transaction.proto"; -import "google/protobuf/empty.proto"; // RPC API // ================================================================================================ // RPC API for the RPC component service Api { - // Returns the status info of the node. - rpc Status(google.protobuf.Empty) returns (RpcStatus) {} - - // Returns the query parameter limits configured for RPC methods. - // - // These define the maximum number of each parameter a method will accept. - // Exceeding the limit will result in the request being rejected and you should instead send - // multiple smaller requests. - rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} - - // Returns a Sparse Merkle Tree opening proof for each requested nullifier - // - // Each proof demonstrates either: - // - **Inclusion**: Nullifier exists in the tree (note was consumed) - // - **Non-inclusion**: Nullifier does not exist (note was not consumed) - // - // The `leaf` field indicates the status: - // * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) - // * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. - // - // Verify proofs against the nullifier tree root in the latest block header. 
- rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} - - // Returns the latest details of the specified account. - rpc GetAccount(AccountRequest) returns (AccountResponse) {} - - // Returns raw block data for the specified block number. - rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} - - // Retrieves block header by given block number. Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(BlockHeaderByNumberRequest) returns (BlockHeaderByNumberResponse) {} - - // Returns a list of notes matching the provided note IDs. - rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} - - // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (MaybeNoteScript) {} - - // TRANSACTION SUBMISSION ENDPOINTS - // -------------------------------------------------------------------------------------------- - - // Submits proven transaction to the Miden network. Returns the node's current block height. - rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} - - // Submits a proven batch of transactions to the Miden network. - // - // The batch may include transactions which were are: - // - // - already in the mempool i.e. previously successfully submitted - // - will be submitted to the mempool in the future - // - won't be submitted to the mempool at all - // - // All transactions in the batch but not in the mempool must build on the current mempool - // state following normal transaction submission rules. - // - // Returns the node's current block height. 
- rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} - - // STATE SYNCHRONIZATION ENDPOINTS - // -------------------------------------------------------------------------------------------- - - // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} - - // Returns info which can be used by the client to sync up to the tip of chain for the notes - // they are interested in. - // - // Client specifies the `note_tags` they are interested in, and the block height from which to - // search for new for matching notes for. The request will then return the next block containing - // any note matching the provided tags. - // - // The response includes each note's metadata and inclusion proof. - // - // A basic note sync can be implemented by repeatedly requesting the previous response's block - // until reaching the tip of the chain. - rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} - - // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - // - // Note that only 16-bit prefixes are supported at this time. - rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} - - // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} - - // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} - - // Returns MMR delta needed to synchronize the chain MMR within the requested block range. - rpc SyncChainMmr(SyncChainMmrRequest) returns (SyncChainMmrResponse) {} + // Returns the status info of the node. 
+ rpc Status(google.protobuf.Empty) returns (RpcStatus) {} + + // Returns the query parameter limits configured for RPC methods. + // + // These define the maximum number of each parameter a method will accept. + // Exceeding the limit will result in the request being rejected and you should instead send + // multiple smaller requests. + rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} + + // Returns a Sparse Merkle Tree opening proof for each requested nullifier + // + // Each proof demonstrates either: + // - **Inclusion**: Nullifier exists in the tree (note was consumed) + // - **Non-inclusion**: Nullifier does not exist (note was not consumed) + // + // The `leaf` field indicates the status: + // * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) + // * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. + // + // Verify proofs against the nullifier tree root in the latest block header. + rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} + + // Returns the latest details of the specified account. + rpc GetAccount(AccountRequest) returns (AccountResponse) {} + + // Returns raw block data for the specified block number. + rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} + + // Retrieves block header by given block number. Optionally, it also returns the MMR path + // and current chain length to authenticate the block's inclusion. + rpc GetBlockHeaderByNumber(BlockHeaderByNumberRequest) returns (BlockHeaderByNumberResponse) {} + + // Returns a list of notes matching the provided note IDs. + rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} + + // Returns the script for a note by its root. 
+  rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (MaybeNoteScript) {}
+
+  // TRANSACTION SUBMISSION ENDPOINTS
+  // --------------------------------------------------------------------------------------------
+
+  // Submits proven transaction to the Miden network. Returns the node's current block height.
+  rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {}
+
+  // Submits a batch of transactions to the Miden network.
+  //
+  // All transactions in this batch will be considered atomic, and be committed together or not at all.
+  //
+  // Returns the node's current block height.
+  rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {}
+
+  // STATE SYNCHRONIZATION ENDPOINTS
+  // --------------------------------------------------------------------------------------------
+
+  // Returns transactions records for specific accounts within a block range.
+  rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {}
+
+  // Returns info which can be used by the client to sync up to the tip of chain for the notes
+  // they are interested in.
+  //
+  // Client specifies the `note_tags` they are interested in, and the block height from which to
+  // search for new matching notes. The request will then return the next block containing
+  // any note matching the provided tags.
+  //
+  // The response includes each note's metadata and inclusion proof.
+  //
+  // A basic note sync can be implemented by repeatedly requesting the previous response's block
+  // until reaching the tip of the chain.
+  rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {}
+
+  // Returns a list of nullifiers that match the specified prefixes and are recorded in the node.
+  //
+  // Note that only 16-bit prefixes are supported at this time.
+  rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {}
+
+  // Returns account vault updates for specified account within a block range.
+ rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} + + // Returns storage map updates for specified account and storage slots within a block range. + rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} + + // Returns MMR delta needed to synchronize the chain MMR within the requested block range. + rpc SyncChainMmr(SyncChainMmrRequest) returns (SyncChainMmrResponse) {} } // RPC STATUS @@ -112,52 +105,50 @@ service Api { // Represents the status of the node. message RpcStatus { - // The rpc component's running version. - string version = 1; + // The rpc component's running version. + string version = 1; - // The genesis commitment. - primitives.Digest genesis_commitment = 2; + // The genesis commitment. + primitives.Digest genesis_commitment = 2; - // The store status. - StoreStatus store = 3; + // The store status. + StoreStatus store = 3; - // The block producer status. - BlockProducerStatus block_producer = 4; + // The block producer status. + BlockProducerStatus block_producer = 4; } - // BLOCK PRODUCER STATUS // ================================================================================================ - // Represents the status of the block producer. message BlockProducerStatus { - // The block producer's running version. - string version = 1; + // The block producer's running version. + string version = 1; - // The block producer's status. - string status = 2; + // The block producer's status. + string status = 2; - // The block producer's current view of the chain tip height. - // - // This is the height of the latest block that the block producer considers - // to be part of the canonical chain. - fixed32 chain_tip = 4; + // The block producer's current view of the chain tip height. + // + // This is the height of the latest block that the block producer considers + // to be part of the canonical chain. + fixed32 chain_tip = 4; - // Statistics about the mempool. 
- MempoolStats mempool_stats = 3; + // Statistics about the mempool. + MempoolStats mempool_stats = 3; } // Statistics about the mempool. message MempoolStats { - // Number of transactions currently in the mempool waiting to be batched. - uint64 unbatched_transactions = 1; + // Number of transactions currently in the mempool waiting to be batched. + uint64 unbatched_transactions = 1; - // Number of batches currently being proven. - uint64 proposed_batches = 2; + // Number of batches currently being proven. + uint64 proposed_batches = 2; - // Number of proven batches waiting for block inclusion. - uint64 proven_batches = 3; + // Number of proven batches waiting for block inclusion. + uint64 proven_batches = 3; } // STORE STATUS @@ -165,14 +156,14 @@ message MempoolStats { // Represents the status of the store. message StoreStatus { - // The store's running version. - string version = 1; + // The store's running version. + string version = 1; - // The store's status. - string status = 2; + // The store's status. + string status = 2; - // Number of the latest block in the chain. - fixed32 chain_tip = 3; + // Number of the latest block in the chain. + fixed32 chain_tip = 3; } // GET BLOCK HEADER BY NUMBER @@ -183,22 +174,22 @@ message StoreStatus { // // The Merkle path is an MMR proof for the block's leaf, based on the current chain length. message BlockHeaderByNumberRequest { - // The target block height, defaults to latest if not provided. - optional uint32 block_num = 1; - // Whether or not to return authentication data for the block header. - optional bool include_mmr_proof = 2; + // The target block height, defaults to latest if not provided. + optional uint32 block_num = 1; + // Whether or not to return authentication data for the block header. + optional bool include_mmr_proof = 2; } // Represents the result of getting a block header by block number. message BlockHeaderByNumberResponse { - // The requested block header. 
- blockchain.BlockHeader block_header = 1; + // The requested block header. + blockchain.BlockHeader block_header = 1; - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. - optional primitives.MerklePath mmr_path = 2; + // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. + optional primitives.MerklePath mmr_path = 2; - // Current chain length. - optional fixed32 chain_length = 3; + // Current chain length. + optional fixed32 chain_length = 3; } // GET NOTE SCRIPT BY ROOT @@ -206,8 +197,8 @@ message BlockHeaderByNumberResponse { // Represents a note script or nothing. message MaybeNoteScript { - // The script for a note by its root. - optional note.NoteScript script = 1; + // The script for a note by its root. + optional note.NoteScript script = 1; } // GET ACCOUNT PROOF @@ -215,147 +206,146 @@ message MaybeNoteScript { // Defines the request for account details. message AccountRequest { - // Request the details for a public account. - message AccountDetailRequest { - // Represents a storage slot index and the associated map keys. - message StorageMapDetailRequest { - // Indirection required for use in `oneof {..}` block. - message MapKeys { - // A list of map keys associated with this storage slot. - repeated primitives.Digest map_keys = 1; - } - // Storage slot name. - string slot_name = 1; - - oneof slot_data { - // Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - // the response will not contain them but must be requested separately. - bool all_entries = 2; - - // A list of map keys associated with the given storage slot identified by `slot_name`. - MapKeys map_keys = 3; - } - } - - // Last known code commitment to the requester. The response will include account code - // only if its commitment is different from this value. - // - // If the field is ommiteed, the response will not include the account code. 
-    optional primitives.Digest code_commitment = 1;
-
-    // Last known asset vault commitment to the requester. The response will include asset vault data
-    // only if its commitment is different from this value. If the value is not present in the
-    // request, the response will not contain one either.
-    // If the number of to-be-returned asset entries exceed a threshold, they have to be requested
-    // separately, which is signaled in the response message with dedicated flag.
-    optional primitives.Digest asset_vault_commitment = 2;
-
-    // Additional request per storage map.
-    repeated StorageMapDetailRequest storage_maps = 3;
+  // Request the details for a public account.
+  message AccountDetailRequest {
+    // Represents a storage slot index and the associated map keys.
+    message StorageMapDetailRequest {
+      // Indirection required for use in `oneof {..}` block.
+      message MapKeys {
+        // A list of map keys associated with this storage slot.
+        repeated primitives.Digest map_keys = 1;
+      }
+      // Storage slot name.
+      string slot_name = 1;
+
+      oneof slot_data {
+        // Request to return all storage map data. If the number exceeds a threshold of 1000 entries,
+        // the response will not contain them but must be requested separately.
+        bool all_entries = 2;
+
+        // A list of map keys associated with the given storage slot identified by `slot_name`.
+        MapKeys map_keys = 3;
+      }
+    }
-    // ID of the account for which we want to get data
-    account.AccountId account_id = 1;
-
-    // Optional block height at which to return the proof.
+    // Last known code commitment to the requester. The response will include account code
+    // only if its commitment is different from this value.
     //
-    // Defaults to current chain tip if unspecified.
-    optional blockchain.BlockNumber block_num = 2;
+    // If the field is omitted, the response will not include the account code.
+    optional primitives.Digest code_commitment = 1;
+
+    // Last known asset vault commitment to the requester.
The response will include asset vault data + // only if its commitment is different from this value. If the value is not present in the + // request, the response will not contain one either. + // If the number of to-be-returned asset entries exceed a threshold, they have to be requested + // separately, which is signaled in the response message with dedicated flag. + optional primitives.Digest asset_vault_commitment = 2; + + // Additional request per storage map. + repeated StorageMapDetailRequest storage_maps = 3; + } + + // ID of the account for which we want to get data + account.AccountId account_id = 1; + + // Optional block height at which to return the proof. + // + // Defaults to current chain tip if unspecified. + optional blockchain.BlockNumber block_num = 2; - // Request for additional account details; valid only for public accounts. - optional AccountDetailRequest details = 3; + // Request for additional account details; valid only for public accounts. + optional AccountDetailRequest details = 3; } // Represents the result of getting account proof. message AccountResponse { + message AccountDetails { + // Account header. + account.AccountHeader header = 1; - message AccountDetails { - // Account header. - account.AccountHeader header = 1; + // Account storage data + AccountStorageDetails storage_details = 2; - // Account storage data - AccountStorageDetails storage_details = 2; + // Account code; empty if code commitments matched or none was requested. + optional bytes code = 3; - // Account code; empty if code commitments matched or none was requested. - optional bytes code = 3; + // Account asset vault data; empty if vault commitments matched or the requester + // omitted it in the request. + optional AccountVaultDetails vault_details = 4; + } - // Account asset vault data; empty if vault commitments matched or the requester - // omitted it in the request. 
- optional AccountVaultDetails vault_details = 4; - } - - // The block number at which the account witness was created and the account details were observed. - blockchain.BlockNumber block_num = 1; + // The block number at which the account witness was created and the account details were observed. + blockchain.BlockNumber block_num = 1; - // Account ID, current state commitment, and SMT path. - account.AccountWitness witness = 2; + // Account ID, current state commitment, and SMT path. + account.AccountWitness witness = 2; - // Additional details for public accounts. - optional AccountDetails details = 3; + // Additional details for public accounts. + optional AccountDetails details = 3; } // Account vault details for AccountResponse message AccountVaultDetails { - // A flag that is set to true if the account contains too many assets. This indicates - // to the user that `SyncAccountVault` endpoint should be used to retrieve the - // account's assets - bool too_many_assets = 1; + // A flag that is set to true if the account contains too many assets. This indicates + // to the user that `SyncAccountVault` endpoint should be used to retrieve the + // account's assets + bool too_many_assets = 1; - // When too_many_assets == false, this will contain the list of assets in the - // account's vault - repeated primitives.Asset assets = 2; + // When too_many_assets == false, this will contain the list of assets in the + // account's vault + repeated primitives.Asset assets = 2; } // Account storage details for AccountResponse message AccountStorageDetails { - message AccountStorageMapDetails { - // Wrapper for repeated storage map entries including their proofs. - // Used when specific keys are requested to enable client-side verification. - message MapEntriesWithProofs { - // Definition of individual storage entries including a proof. 
- message StorageMapEntryWithProof { - primitives.Digest key = 1; - primitives.Digest value = 2; - primitives.SmtOpening proof = 3; - } - - repeated StorageMapEntryWithProof entries = 1; - } - - // Wrapper for repeated storage map entries (without proofs). - // Used when all entries are requested for small maps. - message AllMapEntries { - // Definition of individual storage entries. - message StorageMapEntry { - primitives.Digest key = 1; - primitives.Digest value = 2; - } - - repeated StorageMapEntry entries = 1; - } - - // Storage slot name. - string slot_name = 1; - - // True when the number of entries exceeds the response limit. - // When set, clients should use the `SyncAccountStorageMaps` endpoint. - bool too_many_entries = 2; - - // The map entries (with or without proofs). Empty when too_many_entries is true. - oneof entries { - // All storage entries without proofs (for small maps or full requests). - AllMapEntries all_entries = 3; - - // Specific entries with their SMT proofs (for partial requests). - MapEntriesWithProofs entries_with_proofs = 4; - } + message AccountStorageMapDetails { + // Wrapper for repeated storage map entries including their proofs. + // Used when specific keys are requested to enable client-side verification. + message MapEntriesWithProofs { + // Definition of individual storage entries including a proof. + message StorageMapEntryWithProof { + primitives.Digest key = 1; + primitives.Digest value = 2; + primitives.SmtOpening proof = 3; + } + + repeated StorageMapEntryWithProof entries = 1; + } + + // Wrapper for repeated storage map entries (without proofs). + // Used when all entries are requested for small maps. + message AllMapEntries { + // Definition of individual storage entries. + message StorageMapEntry { + primitives.Digest key = 1; + primitives.Digest value = 2; + } + + repeated StorageMapEntry entries = 1; + } + + // Storage slot name. 
+ string slot_name = 1; + + // True when the number of entries exceeds the response limit. + // When set, clients should use the `SyncAccountStorageMaps` endpoint. + bool too_many_entries = 2; + + // The map entries (with or without proofs). Empty when too_many_entries is true. + oneof entries { + // All storage entries without proofs (for small maps or full requests). + AllMapEntries all_entries = 3; + + // Specific entries with their SMT proofs (for partial requests). + MapEntriesWithProofs entries_with_proofs = 4; } + } - // Account storage header (storage slot info for up to 256 slots) - account.AccountStorageHeader header = 1; + // Account storage header (storage slot info for up to 256 slots) + account.AccountStorageHeader header = 1; - // Additional data for the requested storage maps - repeated AccountStorageMapDetails map_details = 2; + // Additional data for the requested storage maps + repeated AccountStorageMapDetails map_details = 2; } // CHECK NULLIFIERS @@ -363,14 +353,14 @@ message AccountStorageDetails { // List of nullifiers to return proofs for. message NullifierList { - // List of nullifiers to return proofs for. - repeated primitives.Digest nullifiers = 1; + // List of nullifiers to return proofs for. + repeated primitives.Digest nullifiers = 1; } // Represents the result of checking nullifiers. message CheckNullifiersResponse { - // Each requested nullifier has its corresponding nullifier proof at the same position. - repeated primitives.SmtOpening proofs = 1; + // Each requested nullifier has its corresponding nullifier proof at the same position. + repeated primitives.SmtOpening proofs = 1; } // SYNC NULLIFIERS @@ -378,33 +368,33 @@ message CheckNullifiersResponse { // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. message SyncNullifiersRequest { - // Block number from which the nullifiers are requested (inclusive). 
- BlockRange block_range = 1; + // Block number from which the nullifiers are requested (inclusive). + BlockRange block_range = 1; - // Number of bits used for nullifier prefix. Currently the only supported value is 16. - uint32 prefix_len = 2; + // Number of bits used for nullifier prefix. Currently the only supported value is 16. + uint32 prefix_len = 2; - // List of nullifiers to check. Each nullifier is specified by its prefix with length equal - // to `prefix_len`. - repeated uint32 nullifiers = 3; + // List of nullifiers to check. Each nullifier is specified by its prefix with length equal + // to `prefix_len`. + repeated uint32 nullifiers = 3; } // Represents the result of syncing nullifiers. message SyncNullifiersResponse { - // Represents a single nullifier update. - message NullifierUpdate { - // Nullifier ID. - primitives.Digest nullifier = 1; + // Represents a single nullifier update. + message NullifierUpdate { + // Nullifier ID. + primitives.Digest nullifier = 1; - // Block number. - fixed32 block_num = 2; - } + // Block number. + fixed32 block_num = 2; + } - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. + PaginationInfo pagination_info = 1; - // List of nullifiers matching the prefixes specified in the request. - repeated NullifierUpdate nullifiers = 2; + // List of nullifiers matching the prefixes specified in the request. + repeated NullifierUpdate nullifiers = 2; } // SYNC ACCOUNT VAULT @@ -414,37 +404,37 @@ message SyncNullifiersResponse { // // Allows requesters to sync asset values for specific public accounts within a block range. message SyncAccountVaultRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. - BlockRange block_range = 1; + // Block range from which to start synchronizing. 
+ // + // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + // otherwise an error will be returned. + BlockRange block_range = 1; - // Account for which we want to sync asset vault. - account.AccountId account_id = 2; + // Account for which we want to sync asset vault. + account.AccountId account_id = 2; } message SyncAccountVaultResponse { - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. + PaginationInfo pagination_info = 1; - // List of asset updates for the account. - // - // Multiple updates can be returned for a single asset, and the one with a higher `block_num` - // is expected to be retained by the caller. - repeated AccountVaultUpdate updates = 2; + // List of asset updates for the account. + // + // Multiple updates can be returned for a single asset, and the one with a higher `block_num` + // is expected to be retained by the caller. + repeated AccountVaultUpdate updates = 2; } message AccountVaultUpdate { - // Vault key associated with the asset. - primitives.Digest vault_key = 1; + // Vault key associated with the asset. + primitives.Digest vault_key = 1; - // Asset value related to the vault key. - // If not present, the asset was removed from the vault. - optional primitives.Asset asset = 2; + // Asset value related to the vault key. + // If not present, the asset was removed from the vault. + optional primitives.Asset asset = 2; - // Block number at which the above asset was updated in the account vault. - fixed32 block_num = 3; + // Block number at which the above asset was updated in the account vault. + fixed32 block_num = 3; } // SYNC NOTES @@ -455,29 +445,29 @@ message AccountVaultUpdate { // Specifies note tags that requester is interested in. The server will return the first block which // contains a note matching `note_tags` or the chain tip. message SyncNotesRequest { - // Block range from which to start synchronizing. 
- BlockRange block_range = 1; + // Block range from which to start synchronizing. + BlockRange block_range = 1; - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 2; + // Specifies the tags which the requester is interested in. + repeated fixed32 note_tags = 2; } // Represents the result of syncing notes request. message SyncNotesResponse { - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. + PaginationInfo pagination_info = 1; - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; + // Block header of the block with the first note matching the specified criteria. + blockchain.BlockHeader block_header = 2; - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. - // - // An MMR proof can be constructed for the leaf of index `block_header.block_num` of - // an MMR of forest `chain_tip` with this path. - primitives.MerklePath mmr_path = 3; + // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. + // + // An MMR proof can be constructed for the leaf of index `block_header.block_num` of + // an MMR of forest `chain_tip` with this path. + primitives.MerklePath mmr_path = 3; - // List of all notes together with the Merkle paths from `response.block_header.note_root`. - repeated note.NoteSyncRecord notes = 4; + // List of all notes together with the Merkle paths from `response.block_header.note_root`. + repeated note.NoteSyncRecord notes = 4; } // SYNC CHAIN MMR @@ -485,21 +475,21 @@ message SyncNotesResponse { // Chain MMR synchronization request. message SyncChainMmrRequest { - // Block range from which to synchronize the chain MMR. - // - // The response will contain MMR delta starting after `block_range.block_from` up to - // `block_range.block_to` or the chain tip (whichever is lower). 
Set `block_from` to the last - // block already present in the caller's MMR so the delta begins at the next block. - BlockRange block_range = 1; + // Block range from which to synchronize the chain MMR. + // + // The response will contain MMR delta starting after `block_range.block_from` up to + // `block_range.block_to` or the chain tip (whichever is lower). Set `block_from` to the last + // block already present in the caller's MMR so the delta begins at the next block. + BlockRange block_range = 1; } // Represents the result of syncing chain MMR. message SyncChainMmrResponse { - // For which block range the MMR delta is returned. - BlockRange block_range = 1; - // Data needed to update the partial MMR from `request.block_range.block_from + 1` to - // `response.block_range.block_to` or the chain tip. - primitives.MmrDelta mmr_delta = 2; + // For which block range the MMR delta is returned. + BlockRange block_range = 1; + // Data needed to update the partial MMR from `request.block_range.block_from + 1` to + // `response.block_range.block_to` or the chain tip. + primitives.MmrDelta mmr_delta = 2; } // SYNC ACCOUNT STORAGE MAP @@ -510,40 +500,40 @@ message SyncChainMmrResponse { // Allows requesters to sync storage map values for specific public accounts within a block range, // with support for cursor-based pagination to handle large storage maps. message SyncAccountStorageMapsRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. - BlockRange block_range = 1; + // Block range from which to start synchronizing. + // + // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + // otherwise an error will be returned. + BlockRange block_range = 1; - // Account for which we want to sync storage maps. 
- account.AccountId account_id = 3; + // Account for which we want to sync storage maps. + account.AccountId account_id = 3; } message SyncAccountStorageMapsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. + PaginationInfo pagination_info = 1; - // The list of storage map updates. - // - // Multiple updates can be returned for a single slot index and key combination, and the one - // with a higher `block_num` is expected to be retained by the caller. - repeated StorageMapUpdate updates = 2; + // The list of storage map updates. + // + // Multiple updates can be returned for a single slot index and key combination, and the one + // with a higher `block_num` is expected to be retained by the caller. + repeated StorageMapUpdate updates = 2; } // Represents a single storage map update. message StorageMapUpdate { - // Block number in which the slot was updated. - fixed32 block_num = 1; + // Block number in which the slot was updated. + fixed32 block_num = 1; - // Storage slot name. - string slot_name = 2; + // Storage slot name. + string slot_name = 2; - // The storage map key. - primitives.Digest key = 3; + // The storage map key. + primitives.Digest key = 3; - // The storage map value. - primitives.Digest value = 4; + // The storage map value. + primitives.Digest value = 4; } // BLOCK RANGE @@ -551,11 +541,11 @@ message StorageMapUpdate { // Represents a block range. message BlockRange { - // Block number from which to start (inclusive). - fixed32 block_from = 1; + // Block number from which to start (inclusive). + fixed32 block_from = 1; - // Block number up to which to check (inclusive). If not specified, checks up to the latest block. - optional fixed32 block_to = 2; + // Block number up to which to check (inclusive). If not specified, checks up to the latest block. 
+ optional fixed32 block_to = 2; } // PAGINATION INFO @@ -570,15 +560,15 @@ message BlockRange { // To request the next chunk, the requester should use `block_num + 1` from the previous response // as the `block_from` for the next request. message PaginationInfo { - // Current chain tip - fixed32 chain_tip = 1; + // Current chain tip + fixed32 chain_tip = 1; - // The block number of the last check included in this response. - // - // For chunked responses, this may be less than `request.block_range.block_to`. - // If it is less than request.block_range.block_to, the user is expected to make a subsequent request - // starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). - fixed32 block_num = 2; + // The block number of the last check included in this response. + // + // For chunked responses, this may be less than `request.block_range.block_to`. + // If it is less than request.block_range.block_to, the user is expected to make a subsequent request + // starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). + fixed32 block_num = 2; } // SYNC TRANSACTIONS @@ -588,29 +578,29 @@ message PaginationInfo { // // Allows requesters to sync transactions for specific accounts within a block range. message SyncTransactionsRequest { - // Block range from which to start synchronizing. - BlockRange block_range = 1; + // Block range from which to start synchronizing. + BlockRange block_range = 1; - // Accounts to sync transactions for. - repeated account.AccountId account_ids = 2; + // Accounts to sync transactions for. + repeated account.AccountId account_ids = 2; } // Represents the result of syncing transactions request. message SyncTransactionsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. + PaginationInfo pagination_info = 1; - // List of transaction records. - repeated TransactionRecord transactions = 2; + // List of transaction records. 
+ repeated TransactionRecord transactions = 2; } // Represents a transaction record. message TransactionRecord { - // Block number in which the transaction was included. - fixed32 block_num = 1; + // Block number in which the transaction was included. + fixed32 block_num = 1; - // A transaction header. - transaction.TransactionHeader header = 2; + // A transaction header. + transaction.TransactionHeader header = 2; } // RPC LIMITS @@ -618,16 +608,16 @@ message TransactionRecord { // Represents the query parameter limits for RPC endpoints. message RpcLimits { - // Maps RPC endpoint names to their parameter limits. - // Key: endpoint name (e.g., "CheckNullifiers") - // Value: map of parameter names to their limit values - map endpoints = 1; + // Maps RPC endpoint names to their parameter limits. + // Key: endpoint name (e.g., "CheckNullifiers") + // Value: map of parameter names to their limit values + map endpoints = 1; } // Represents the parameter limits for a single endpoint. message EndpointLimits { - // Maps parameter names to their limit values. - // Key: parameter name (e.g., "nullifier", "account_id") - // Value: limit value - map parameters = 1; + // Maps parameter names to their limit values. + // Key: parameter name (e.g., "nullifier", "account_id") + // Value: limit value + map parameters = 1; } diff --git a/proto/proto/types/transaction.proto b/proto/proto/types/transaction.proto index 8be04946d7..b0e25b5470 100644 --- a/proto/proto/types/transaction.proto +++ b/proto/proto/types/transaction.proto @@ -10,36 +10,41 @@ import "types/primitives.proto"; // Submits proven transaction to the Miden network. message ProvenTransaction { - // Transaction encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::transaction::proven_tx::ProvenTransaction]. - bytes transaction = 1; - // Transaction inputs encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::transaction::TransactionInputs]. 
- optional bytes transaction_inputs = 2; + // Transaction encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::transaction::proven_tx::ProvenTransaction]. + bytes transaction = 1; + // Transaction inputs encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::transaction::TransactionInputs]. + optional bytes transaction_inputs = 2; } -message ProvenTransactionBatch { - // Encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::transaction::proven_tx::ProvenTransaction]. - bytes encoded = 1; +message TransactionBatch { + // The proposed batch of transaction encoded using [winter_utils::Serializable] implementation + // for [miden_protocol::batch::ProposedBatch]. + bytes proposed_batch = 1; + // Each transaction's inputs encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::transaction::TransactionInputs]. + // + // Order of inputs should match the transaction order in the batch. + repeated bytes transaction_inputs = 2; } // Represents a transaction ID. message TransactionId { - // The transaction ID. - primitives.Digest id = 1; + // The transaction ID. + primitives.Digest id = 1; } // Represents a transaction summary. message TransactionSummary { - // A unique 32-byte identifier of a transaction. - TransactionId transaction_id = 1; + // A unique 32-byte identifier of a transaction. + TransactionId transaction_id = 1; - // The block number in which the transaction was executed. - fixed32 block_num = 2; + // The block number in which the transaction was executed. + fixed32 block_num = 2; - // The ID of the account affected by the transaction. - account.AccountId account_id = 3; + // The ID of the account affected by the transaction. + account.AccountId account_id = 3; } // Represents a commitment to an input note of a transaction. @@ -47,33 +52,33 @@ message TransactionSummary { // For authenticated notes, only the nullifier is present. 
// For unauthenticated notes, the note header is also included. message InputNoteCommitment { - // The nullifier of the input note. - primitives.Digest nullifier = 1; + // The nullifier of the input note. + primitives.Digest nullifier = 1; - // The note header, present only for unauthenticated input notes. - optional note.NoteHeader header = 2; + // The note header, present only for unauthenticated input notes. + optional note.NoteHeader header = 2; } // Represents a transaction header. message TransactionHeader { - // The unique identifier of the transaction. - TransactionId transaction_id = 1; + // The unique identifier of the transaction. + TransactionId transaction_id = 1; - // ID of the account against which the transaction was executed. - account.AccountId account_id = 2; + // ID of the account against which the transaction was executed. + account.AccountId account_id = 2; - // State commitment of the account before the transaction was executed. - primitives.Digest initial_state_commitment = 3; + // State commitment of the account before the transaction was executed. + primitives.Digest initial_state_commitment = 3; - // State commitment of the account after the transaction was executed. - primitives.Digest final_state_commitment = 4; + // State commitment of the account after the transaction was executed. + primitives.Digest final_state_commitment = 4; - // Input notes of the transaction. - repeated InputNoteCommitment input_notes = 5; + // Input notes of the transaction. + repeated InputNoteCommitment input_notes = 5; - // Output notes of the transaction. - repeated note.NoteHeader output_notes = 6; + // Output notes of the transaction. + repeated note.NoteHeader output_notes = 6; - // The fee paid by the transaction. - primitives.Asset fee = 7; + // The fee paid by the transaction. 
+ primitives.Asset fee = 7; } From 6691da9d144115b4d46bdedbd1beacb141f8c595 Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 25 Mar 2026 17:05:45 +0200 Subject: [PATCH 2/9] Thread through to mempool --- .../block-producer/src/domain/transaction.rs | 6 +-- crates/block-producer/src/errors.rs | 10 ----- .../src/mempool/graph/transaction.rs | 7 ++++ crates/block-producer/src/mempool/mod.rs | 28 +++++++++++++ crates/block-producer/src/server/mod.rs | 42 ++++++++++++------- 5 files changed, 66 insertions(+), 27 deletions(-) diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index 6b59ea3be2..156c3a1fa4 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -46,7 +46,7 @@ impl AuthenticatedTransaction { /// /// Returns an error if any of the transaction's nullifiers are marked as spent by the inputs. pub fn new_unchecked( - tx: ProvenTransaction, + tx: Arc, inputs: TransactionInputs, ) -> Result { let nullifiers_already_spent = tx @@ -58,7 +58,7 @@ impl AuthenticatedTransaction { } Ok(AuthenticatedTransaction { - inner: Arc::new(tx), + inner: tx, notes_authenticated_by_store: inputs.found_unauthenticated_notes, authentication_height: inputs.current_block_height, store_account_state: inputs.account_commitment, @@ -151,7 +151,7 @@ impl AuthenticatedTransaction { current_block_height: 0.into(), }; // SAFETY: nullifiers were set to None aka are definitely unspent. - Self::new_unchecked(inner, inputs).unwrap() + Self::new_unchecked(Arc::new(inner), inputs).unwrap() } /// Overrides the authentication height with the given value. 
diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 9e344e8313..1edbe5caa1 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -93,16 +93,6 @@ pub enum StateConflict { }, } -// Submit proven batch by user errors -// ================================================================================================= - -#[derive(Debug, Error, GrpcError)] -#[grpc(internal)] -pub enum SubmitProvenBatchError { - #[error("batch deserialization failed")] - Deserialization(#[source] miden_protocol::utils::DeserializationError), -} - // Batch building errors // ================================================================================================= diff --git a/crates/block-producer/src/mempool/graph/transaction.rs b/crates/block-producer/src/mempool/graph/transaction.rs index c1ecfd93f4..b96794469d 100644 --- a/crates/block-producer/src/mempool/graph/transaction.rs +++ b/crates/block-producer/src/mempool/graph/transaction.rs @@ -85,6 +85,13 @@ impl TransactionGraph { self.inner.append(tx) } + pub fn append_user_batch( + &mut self, + batch: Vec>, + ) -> Result<(), StateConflict> { + todo!(); + } + pub fn select_batch(&mut self, mut budget: BatchBudget) -> Option { let mut selected = SelectedBatch::builder(); diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 3ac5bb3291..ceffa04bc6 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -245,6 +245,34 @@ impl Mempool { Ok(self.chain_tip) } + #[instrument(target = COMPONENT, name = "mempool.add_user_batch", skip_all)] + pub fn add_user_batch( + &mut self, + txs: &[Arc], + ) -> Result { + assert!(!txs.is_empty(), "Cannot have a batch with no transactions"); + + if self.unbatched_transactions_count() + txs.len() >= self.config.tx_capacity.get() { + return Err(AddTransactionError::CapacityExceeded); + } + + for tx in txs { + 
self.authentication_staleness_check(tx.authentication_height())?; + self.expiration_check(tx.expires_at())?; + } + + self.transactions + .append_user_batch(txs.to_vec()) + .map_err(AddTransactionError::StateConflict)?; + + for tx in txs { + self.subscription.transaction_added(tx); + } + self.inject_telemetry(); + + Ok(self.chain_tip) + } + /// Returns a set of transactions for the next batch. /// /// Transactions are returned in a valid execution ordering. diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index c73729e39d..79c2026885 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -14,6 +14,7 @@ use miden_node_utils::clap::GrpcOptionsInternal; use miden_node_utils::formatting::{format_input_notes, format_output_notes}; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; +use miden_protocol::batch::ProposedBatch; use miden_protocol::block::BlockNumber; use miden_protocol::transaction::ProvenTransaction; use miden_protocol::utils::serde::Deserializable; @@ -28,7 +29,7 @@ use url::Url; use crate::batch_builder::BatchBuilder; use crate::block_builder::BlockBuilder; use crate::domain::transaction::AuthenticatedTransaction; -use crate::errors::{AddTransactionError, BlockProducerError, StoreError, SubmitProvenBatchError}; +use crate::errors::{AddTransactionError, BlockProducerError, StoreError}; use crate::mempool::{BatchBudget, BlockBudget, Mempool, MempoolConfig, SharedMempool}; use crate::store::StoreClient; use crate::validator::BlockProducerValidatorClient; @@ -341,17 +342,11 @@ impl BlockProducerRpcServer { .map_err(AddTransactionError::StoreConnectionFailed)?; // SAFETY: we assume that the rpc component has verified the transaction proof already. 
- let tx = AuthenticatedTransaction::new_unchecked(tx, inputs) + let tx = AuthenticatedTransaction::new_unchecked(Arc::new(tx), inputs) .map(Arc::new) .map_err(AddTransactionError::StateConflict)?; - self.mempool - .lock() - .await - .lock() - .await - .add_transaction(tx) - .map(|block_height| proto::blockchain::BlockNumber { block_num: block_height.as_u32() }) + self.mempool.lock().await.lock().await.add_transaction(tx).map(Into::into) } #[instrument( @@ -362,12 +357,31 @@ impl BlockProducerRpcServer { )] async fn submit_batch( &self, - _request: proto::transaction::TransactionBatch, - ) -> Result { - // let _batch = ProvenBatch::read_from_bytes(&request.encoded) - // .map_err(SubmitProvenBatchError::Deserialization)?; + request: proto::transaction::TransactionBatch, + ) -> Result { + let batch = ProposedBatch::read_from_bytes(&request.proposed_batch) + .map_err(AddTransactionError::TransactionDeserializationFailed)?; + + // We assume that the rpc component has verified everything, including the transaction + // proofs. + + let mut txs = Vec::with_capacity(batch.transactions().len()); + for tx in batch.transactions() { + let inputs = self + .store + .get_tx_inputs(tx) + .await + .map_err(AddTransactionError::StoreConnectionFailed)?; + + // SAFETY: We assume that the rpc component has verified the transaction proofs, as well + // as the batch integrity itself. 
+ let tx = AuthenticatedTransaction::new_unchecked(Arc::clone(tx), inputs) + .map(Arc::new) + .map_err(AddTransactionError::StateConflict)?; + txs.push(tx); + } - todo!(); + self.mempool.lock().await.lock().await.add_user_batch(&txs).map(Into::into) } } From a1f2066b569a881ae887b811b3db23cb7221eb73 Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Wed, 25 Mar 2026 17:20:35 +0200 Subject: [PATCH 3/9] Basic appending to transaction graph --- .../block-producer/src/domain/transaction.rs | 8 +++---- .../src/mempool/graph/transaction.rs | 24 ++++++++++++++++++- 2 files changed, 27 insertions(+), 5 deletions(-) diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index 156c3a1fa4..06e2bb4bde 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -128,6 +128,10 @@ impl AuthenticatedTransaction { pub fn expires_at(&self) -> BlockNumber { self.inner.expiration_block_num() } + + pub fn raw_proven_transaction(&self) -> &ProvenTransaction { + &self.inner + } } #[cfg(test)] @@ -171,8 +175,4 @@ impl AuthenticatedTransaction { self.store_account_state = None; self } - - pub fn raw_proven_transaction(&self) -> &ProvenTransaction { - &self.inner - } } diff --git a/crates/block-producer/src/mempool/graph/transaction.rs b/crates/block-producer/src/mempool/graph/transaction.rs index b96794469d..d4429af8b9 100644 --- a/crates/block-producer/src/mempool/graph/transaction.rs +++ b/crates/block-producer/src/mempool/graph/transaction.rs @@ -1,8 +1,10 @@ use std::collections::{HashMap, HashSet}; +use std::ops::Deref; use std::sync::Arc; use miden_protocol::Word; use miden_protocol::account::AccountId; +use miden_protocol::batch::BatchId; use miden_protocol::block::BlockNumber; use miden_protocol::note::Nullifier; use miden_protocol::transaction::TransactionId; @@ -89,7 +91,27 @@ impl TransactionGraph { &mut self, 
batch: Vec>, ) -> Result<(), StateConflict> { - todo!(); + let batch_id = + BatchId::from_transactions(batch.iter().map(|tx| tx.raw_proven_transaction())); + + // Append each transaction, but revert atomically on error. + for (idx, tx) in batch.iter().enumerate() { + if let Err(err) = self.append(Arc::clone(tx)) { + // We revert in reverse order because inner.revert panics if the node doesn't exist. + for tx in batch.iter().take(idx).rev() { + let reverted = self.inner.revert_node_and_descendants(tx.id()); + assert_eq!(reverted.len(), 1); + assert_eq!(&reverted[0], tx); + } + + return Err(err); + } + } + + // TODO: Create a bidirectional batch <-> transactions mapping. + // TODO: Use mapping to never select these independently. + // TODO: Use mapping when reverting to also revert the rest. + Ok(()) } pub fn select_batch(&mut self, mut budget: BatchBudget) -> Option { From 9786578b7d4d76bd859ad1f01b9788ed919c6e99 Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 26 Mar 2026 14:39:00 +0200 Subject: [PATCH 4/9] Implement user batches within TransactionGraph --- .../block-producer/src/batch_builder/mod.rs | 8 +- crates/block-producer/src/domain/batch.rs | 4 - .../block-producer/src/mempool/graph/batch.rs | 10 +- .../block-producer/src/mempool/graph/dag.rs | 4 +- .../src/mempool/graph/transaction.rs | 137 ++++++++++++++---- crates/block-producer/src/mempool/mod.rs | 15 +- crates/block-producer/src/mempool/tests.rs | 20 +-- 7 files changed, 135 insertions(+), 63 deletions(-) diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index 34dab83a3f..549d76261f 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -200,12 +200,12 @@ impl BatchJob { batch: SelectedBatch, ) -> Result<(SelectedBatch, BatchInputs), BuildBatchError> { let block_references = batch - .txs() + .transactions() .iter() 
.map(Deref::deref) .map(AuthenticatedTransaction::reference_block); let unauthenticated_notes = batch - .txs() + .transactions() .iter() .map(Deref::deref) .flat_map(AuthenticatedTransaction::unauthenticated_note_commitments); @@ -325,10 +325,10 @@ impl BatchProver { impl TelemetryInjectorExt for SelectedBatch { fn inject_telemetry(&self) { Span::current().set_attribute("batch.id", self.id()); - Span::current().set_attribute("transactions.count", self.txs().len()); + Span::current().set_attribute("transactions.count", self.transactions().len()); // Accumulate all telemetry based on transactions. let (tx_ids, input_notes_count, output_notes_count, unauth_notes_count) = - self.txs().iter().fold( + self.transactions().iter().fold( (vec![], 0, 0, 0), |( mut tx_ids, diff --git a/crates/block-producer/src/domain/batch.rs b/crates/block-producer/src/domain/batch.rs index 0fd2029bfc..9c3cbb7e17 100644 --- a/crates/block-producer/src/domain/batch.rs +++ b/crates/block-producer/src/domain/batch.rs @@ -34,10 +34,6 @@ impl SelectedBatch { self.id } - pub(crate) fn txs(&self) -> &[Arc] { - &self.txs - } - pub(crate) fn into_transactions(self) -> Vec> { self.txs } diff --git a/crates/block-producer/src/mempool/graph/batch.rs b/crates/block-producer/src/mempool/graph/batch.rs index 929f824381..be4b264531 100644 --- a/crates/block-producer/src/mempool/graph/batch.rs +++ b/crates/block-producer/src/mempool/graph/batch.rs @@ -18,15 +18,15 @@ impl GraphNode for SelectedBatch { type Id = BatchId; fn nullifiers(&self) -> Box + '_> { - Box::new(self.txs().iter().flat_map(|tx| tx.nullifiers())) + Box::new(self.transactions().iter().flat_map(|tx| tx.nullifiers())) } fn output_notes(&self) -> Box + '_> { - Box::new(self.txs().iter().flat_map(|tx| tx.output_note_commitments())) + Box::new(self.transactions().iter().flat_map(|tx| tx.output_note_commitments())) } fn unauthenticated_notes(&self) -> Box + '_> { - Box::new(self.txs().iter().flat_map(|tx| tx.unauthenticated_note_commitments())) + 
Box::new(self.transactions().iter().flat_map(|tx| tx.unauthenticated_note_commitments())) } fn account_updates( @@ -134,9 +134,9 @@ impl BatchGraph { /// /// Panics if the batch does not exist, or has existing ancestors in the batch /// graph. - pub fn prune(&mut self, batch: BatchId) { - self.inner.prune(batch); + pub fn prune(&mut self, batch: BatchId) -> SelectedBatch { self.proven.remove(&batch); + self.inner.prune(batch) } pub fn proven_count(&self) -> usize { diff --git a/crates/block-producer/src/mempool/graph/dag.rs b/crates/block-producer/src/mempool/graph/dag.rs index 8952f6db53..f17c4f6778 100644 --- a/crates/block-producer/src/mempool/graph/dag.rs +++ b/crates/block-producer/src/mempool/graph/dag.rs @@ -219,14 +219,14 @@ where /// # Panics /// /// Panics if this node has any ancestor nodes, or if this node was not selected. - pub fn prune(&mut self, id: N::Id) { + pub fn prune(&mut self, id: N::Id) -> N { assert!( self.edges.parents_of(&id).is_empty(), "Cannot prune node {id} as it still has ancestors", ); assert!(self.selected.contains(&id), "Cannot prune node {id} as it was not selected"); - self.remove(id); + self.remove(id) } /// Unconditionally removes the given node from the graph, deleting its edges and state. diff --git a/crates/block-producer/src/mempool/graph/transaction.rs b/crates/block-producer/src/mempool/graph/transaction.rs index d4429af8b9..e63d5e4905 100644 --- a/crates/block-producer/src/mempool/graph/transaction.rs +++ b/crates/block-producer/src/mempool/graph/transaction.rs @@ -1,5 +1,4 @@ use std::collections::{HashMap, HashSet}; -use std::ops::Deref; use std::sync::Arc; use miden_protocol::Word; @@ -77,6 +76,9 @@ pub struct TransactionGraph { /// These are batch or block proving errors in which the transaction was a part of. This is /// used to identify potentially buggy transactions that should be evicted. 
failures: HashMap, + + user_batch_txs: HashMap>, + txs_user_batch: HashMap, } impl TransactionGraph { @@ -89,7 +91,7 @@ impl TransactionGraph { pub fn append_user_batch( &mut self, - batch: Vec>, + batch: &[Arc], ) -> Result<(), StateConflict> { let batch_id = BatchId::from_transactions(batch.iter().map(|tx| tx.raw_proven_transaction())); @@ -108,22 +110,79 @@ impl TransactionGraph { } } - // TODO: Create a bidirectional batch <-> transactions mapping. - // TODO: Use mapping to never select these independently. - // TODO: Use mapping when reverting to also revert the rest. + let txs = batch.iter().map(GraphNode::id).collect::>(); + for tx in &txs { + self.txs_user_batch.insert(*tx, batch_id); + } + self.user_batch_txs.insert(batch_id, txs); + Ok(()) } - pub fn select_batch(&mut self, mut budget: BatchBudget) -> Option { + pub fn select_batch(&mut self, budget: BatchBudget) -> Option { + self.select_user_batch().or_else(|| self.select_conventional_batch(budget)) + } + + fn select_user_batch(&mut self) -> Option { + // Comb through all user batch candidates. + let candidate_batches = self + .inner + .selection_candidates() + .values() + .filter_map(|tx| self.txs_user_batch.get(&tx.id())) + .copied() + .collect::>(); + + 'outer: for candidate in candidate_batches { + let mut selected = SelectedBatch::builder(); + + let txs = self + .user_batch_txs + .get(&candidate) + .cloned() + .expect("bi-directional mapping should be coherent"); + + for tx in txs { + let Some(tx) = self.inner.selection_candidates().get(&tx).copied() else { + // Rollback this batch selection since it cannot complete. 
+ for tx in selected.txs.into_iter().rev() { + self.inner.deselect(tx.id()); + } + + continue 'outer; + }; + let tx = Arc::clone(tx); + + self.inner.select_candidate(tx.id()); + selected.push(tx); + } + + assert!(!selected.is_empty(), "User batch should not be empty"); + return Some(selected.build()); + } + + None + } + + fn select_conventional_batch(&mut self, mut budget: BatchBudget) -> Option { let mut selected = SelectedBatch::builder(); - while let Some((id, tx)) = self.inner.selection_candidates().pop_first() { - if budget.check_then_subtract(tx) == BudgetStatus::Exceeded { + loop { + // Select arbitrary candidate which is _not_ part of a user batch. + let candidates = self.inner.selection_candidates(); + let Some(candidate) = + candidates.values().find(|tx| !self.txs_user_batch.contains_key(&tx.id())) + else { + break; + }; + + if budget.check_then_subtract(candidate) == BudgetStatus::Exceeded { break; } - selected.push(Arc::clone(tx)); - self.inner.select_candidate(*id); + let candidate = Arc::clone(candidate); + self.inner.select_candidate(candidate.id()); + selected.push(candidate); } if selected.is_empty() { @@ -156,22 +215,40 @@ impl TransactionGraph { /// /// This includes batches that have been marked as proven. /// - /// Returns the reverted batches in the _reverse_ chronological order they were appended in. + /// Returns the reverted transactions in the _reverse_ chronological order they were appended + /// in. pub fn revert_tx_and_descendants(&mut self, transaction: TransactionId) -> Vec { - // We need this check because `inner.revert..` panics if the node is unknown. - if !self.inner.contains(&transaction) { - return Vec::default(); - } + // This is a bit more involved because we also need to atomically revert user batches. + let mut to_revert = vec![transaction]; + let mut reverted = Vec::new(); + + while let Some(revert) = to_revert.pop() { + // We need this check because `inner.revert..` panics if the node is unknown. 
+ // + // And this transaction might already have been reverted as part of descendents in a + // prior loop. + if !self.inner.contains(&revert) { + continue; + } - let reverted = self - .inner - .revert_node_and_descendants(transaction) - .into_iter() - .map(|tx| tx.id()) - .collect(); + let x = self.inner.revert_node_and_descendants(transaction); - for tx in &reverted { - self.failures.remove(tx); + // Clean up book keeping and also revert transactions from the same user batch, if any. + for tx in &x { + self.failures.remove(&tx.id()); + + // Note that this is a pretty rough shod approach. We just dump the entire batch of + // transactions in, which will result in at least the current + // transaction being duplicated in `to_revert`. This isn't a concern + // though since we skip already processed transactions at the top of the loop. + if let Some(batch) = self.txs_user_batch.remove(&tx.id()) { + if let Some(batch) = self.user_batch_txs.remove(&batch) { + to_revert.extend(batch); + } + } + } + + reverted.extend(x.into_iter().map(|tx| tx.id())); } reverted @@ -216,15 +293,19 @@ impl TransactionGraph { reverted } - /// Prunes the given transaction. + /// Prunes the given given batch's transactions. /// /// # Panics /// - /// Panics if the transaction does not exist, or has existing ancestors in the transaction + /// Panics if the transactions do not exist, or has existing ancestors in the transaction /// graph. - pub fn prune(&mut self, transaction: TransactionId) { - self.inner.prune(transaction); - self.failures.remove(&transaction); + pub fn prune(&mut self, batch: &SelectedBatch) { + for tx in batch.transactions() { + self.inner.prune(tx.id()); + self.failures.remove(&tx.id()); + self.txs_user_batch.remove(&tx.id()); + } + self.user_batch_txs.remove(&batch.id()); } /// Number of transactions which have not been selected for inclusion in a batch. 
diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index ceffa04bc6..f505b887eb 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -256,13 +256,15 @@ impl Mempool { return Err(AddTransactionError::CapacityExceeded); } + // TODO: check budget. + for tx in txs { self.authentication_staleness_check(tx.authentication_height())?; self.expiration_check(tx.expires_at())?; } self.transactions - .append_user_batch(txs.to_vec()) + .append_user_batch(txs) .map_err(AddTransactionError::StateConflict)?; for tx in txs { @@ -506,15 +508,8 @@ impl Mempool { // // The same logic follows for transactions. for batch in block.iter().map(|batch| batch.id()) { - self.batches.prune(batch); - } - - for tx in block - .iter() - .flat_map(|batch| batch.transactions().as_slice()) - .map(TransactionHeader::id) - { - self.transactions.prune(tx); + let batch = self.batches.prune(batch); + self.transactions.prune(&batch); } } diff --git a/crates/block-producer/src/mempool/tests.rs b/crates/block-producer/src/mempool/tests.rs index 4c7059acf1..0e03966936 100644 --- a/crates/block-producer/src/mempool/tests.rs +++ b/crates/block-producer/src/mempool/tests.rs @@ -71,15 +71,15 @@ fn children_of_failed_batches_are_ignored() { let (mut uut, _) = Mempool::for_tests(); uut.add_transaction(txs[0].clone()).unwrap(); let parent_batch = uut.select_batch().unwrap(); - assert_eq!(parent_batch.txs(), vec![txs[0].clone()]); + assert_eq!(parent_batch.transactions(), vec![txs[0].clone()]); uut.add_transaction(txs[1].clone()).unwrap(); let child_batch_a = uut.select_batch().unwrap(); - assert_eq!(child_batch_a.txs(), vec![txs[1].clone()]); + assert_eq!(child_batch_a.transactions(), vec![txs[1].clone()]); uut.add_transaction(txs[2].clone()).unwrap(); let next_batch = uut.select_batch().unwrap(); - assert_eq!(next_batch.txs(), vec![txs[2].clone()]); + assert_eq!(next_batch.transactions(), vec![txs[2].clone()]); // Child 
batch jobs are now dangling. uut.rollback_batch(parent_batch.id()); @@ -118,7 +118,7 @@ fn failed_batch_transactions_are_requeued() { reference.add_transaction(txs[2].clone()).unwrap(); reference .transactions - .increment_failure_count(failed_batch.txs().iter().map(|tx| tx.id())); + .increment_failure_count(failed_batch.transactions().iter().map(|tx| tx.id())); assert_eq!(uut, reference); } @@ -326,9 +326,9 @@ fn pass_through_txs_on_an_empty_account() { // Ensure the batch contains a,b and final. Final should also be the last tx since its order // is required. - assert!(batch.txs().contains(&tx_pass_through_a)); - assert!(batch.txs().contains(&tx_pass_through_b)); - assert_eq!(batch.txs().last().unwrap(), &tx_final); + assert!(batch.transactions().contains(&tx_pass_through_a)); + assert!(batch.transactions().contains(&tx_pass_through_b)); + assert_eq!(batch.transactions().last().unwrap(), &tx_final); } /// Tests that pass through transactions retain parent-child relations based on notes, even though @@ -366,11 +366,11 @@ fn pass_through_txs_with_note_dependencies() { // relationship was correctly inferred by the mempool. uut.add_transaction(tx_pass_through_a.clone()).unwrap(); let batch_a = uut.select_batch().unwrap(); - assert_eq!(batch_a.txs(), std::slice::from_ref(&tx_pass_through_a)); + assert_eq!(batch_a.transactions(), std::slice::from_ref(&tx_pass_through_a)); uut.add_transaction(tx_pass_through_b.clone()).unwrap(); let batch_b = uut.select_batch().unwrap(); - assert_eq!(batch_b.txs(), std::slice::from_ref(&tx_pass_through_b)); + assert_eq!(batch_b.transactions(), std::slice::from_ref(&tx_pass_through_b)); // Rollback (a) and check that (b) also reverted by comparing to the reference. 
uut.rollback_batch(batch_a.id()); @@ -378,7 +378,7 @@ fn pass_through_txs_with_note_dependencies() { reference.add_transaction(tx_pass_through_b).unwrap(); reference .transactions - .increment_failure_count(batch_a.txs().iter().map(|tx| tx.id())); + .increment_failure_count(batch_a.transactions().iter().map(|tx| tx.id())); assert_eq!(uut, reference); } From ba58888817acf4540bb80a8c67753aa054ddd370 Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 26 Mar 2026 16:28:43 +0200 Subject: [PATCH 5/9] Check batch budget --- crates/block-producer/src/mempool/mod.rs | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index f505b887eb..4bcb1dbd05 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -66,6 +66,7 @@ use tracing::instrument; use crate::domain::batch::SelectedBatch; use crate::domain::transaction::AuthenticatedTransaction; use crate::errors::{AddTransactionError, StateConflict}; +use crate::mempool::budget::BudgetStatus; use crate::{ COMPONENT, DEFAULT_MEMPOOL_TX_CAPACITY, @@ -256,7 +257,14 @@ impl Mempool { return Err(AddTransactionError::CapacityExceeded); } - // TODO: check budget. + // Ensure the batch doesn't exceed the mempool budget for batches. + let mut budget = self.config.batch_budget; + for tx in txs { + if budget.check_then_subtract(tx) == BudgetStatus::Exceeded { + // TODO: better error plox. 
+ return Err(AddTransactionError::CapacityExceeded); + } + } for tx in txs { self.authentication_staleness_check(tx.authentication_height())?; From d656f186aee330369cf0a3b4bc9940e77e2b3013 Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 26 Mar 2026 17:13:14 +0200 Subject: [PATCH 6/9] Improve mempool error naming --- crates/block-producer/src/errors.rs | 8 +++--- crates/block-producer/src/mempool/mod.rs | 24 ++++++++--------- .../src/mempool/tests/add_transaction.rs | 26 ++++++++++--------- crates/block-producer/src/server/mod.rs | 18 ++++++------- 4 files changed, 39 insertions(+), 37 deletions(-) diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 1edbe5caa1..7b0cca0246 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -34,12 +34,12 @@ pub enum BlockProducerError { }, } -// Transaction adding errors +// Add transaction and add user batch errors // ================================================================================================= #[derive(Debug, Error, GrpcError)] -pub enum AddTransactionError { - #[error("failed to retrieve transaction inputs from the store")] +pub enum MempoolSubmissionError { + #[error("failed to retrieve inputs from the store")] #[grpc(internal)] StoreConnectionFailed(#[source] StoreError), @@ -55,7 +55,7 @@ pub enum AddTransactionError { stale_limit: BlockNumber, }, - #[error("transaction deserialization failed")] + #[error("request deserialization failed")] TransactionDeserializationFailed(#[source] miden_protocol::utils::DeserializationError), #[error( diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 4bcb1dbd05..7850a12d16 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -65,7 +65,7 @@ use tracing::instrument; use crate::domain::batch::SelectedBatch; use 
crate::domain::transaction::AuthenticatedTransaction; -use crate::errors::{AddTransactionError, StateConflict}; +use crate::errors::{MempoolSubmissionError, StateConflict}; use crate::mempool::budget::BudgetStatus; use crate::{ COMPONENT, @@ -230,16 +230,16 @@ impl Mempool { pub fn add_transaction( &mut self, tx: Arc, - ) -> Result { + ) -> Result { if self.unbatched_transactions_count() >= self.config.tx_capacity.get() { - return Err(AddTransactionError::CapacityExceeded); + return Err(MempoolSubmissionError::CapacityExceeded); } self.authentication_staleness_check(tx.authentication_height())?; self.expiration_check(tx.expires_at())?; self.transactions .append(Arc::clone(&tx)) - .map_err(AddTransactionError::StateConflict)?; + .map_err(MempoolSubmissionError::StateConflict)?; self.subscription.transaction_added(&tx); self.inject_telemetry(); @@ -250,11 +250,11 @@ impl Mempool { pub fn add_user_batch( &mut self, txs: &[Arc], - ) -> Result { + ) -> Result { assert!(!txs.is_empty(), "Cannot have a batch with no transactions"); if self.unbatched_transactions_count() + txs.len() >= self.config.tx_capacity.get() { - return Err(AddTransactionError::CapacityExceeded); + return Err(MempoolSubmissionError::CapacityExceeded); } // Ensure the batch doesn't exceed the mempool budget for batches. @@ -262,7 +262,7 @@ impl Mempool { for tx in txs { if budget.check_then_subtract(tx) == BudgetStatus::Exceeded { // TODO: better error plox. 
- return Err(AddTransactionError::CapacityExceeded); + return Err(MempoolSubmissionError::CapacityExceeded); } } @@ -273,7 +273,7 @@ impl Mempool { self.transactions .append_user_batch(txs) - .map_err(AddTransactionError::StateConflict)?; + .map_err(MempoolSubmissionError::StateConflict)?; for tx in txs { self.subscription.transaction_added(tx); @@ -549,14 +549,14 @@ impl Mempool { fn authentication_staleness_check( &self, authentication_height: BlockNumber, - ) -> Result<(), AddTransactionError> { + ) -> Result<(), MempoolSubmissionError> { let limit = self .chain_tip .checked_sub(self.committed_blocks.len() as u32) .expect("amount of committed blocks cannot exceed the chain tip"); if authentication_height < limit { - return Err(AddTransactionError::StaleInputs { + return Err(MempoolSubmissionError::StaleInputs { input_block: authentication_height, stale_limit: limit, }); @@ -571,10 +571,10 @@ impl Mempool { Ok(()) } - fn expiration_check(&self, expired_at: BlockNumber) -> Result<(), AddTransactionError> { + fn expiration_check(&self, expired_at: BlockNumber) -> Result<(), MempoolSubmissionError> { let limit = self.chain_tip + self.config.expiration_slack; if expired_at <= limit { - return Err(AddTransactionError::Expired { expired_at, limit }); + return Err(MempoolSubmissionError::Expired { expired_at, limit }); } Ok(()) diff --git a/crates/block-producer/src/mempool/tests/add_transaction.rs b/crates/block-producer/src/mempool/tests/add_transaction.rs index 4747f179b3..559c63936b 100644 --- a/crates/block-producer/src/mempool/tests/add_transaction.rs +++ b/crates/block-producer/src/mempool/tests/add_transaction.rs @@ -5,7 +5,7 @@ use miden_protocol::Word; use miden_protocol::block::BlockHeader; use crate::domain::transaction::AuthenticatedTransaction; -use crate::errors::{AddTransactionError, StateConflict}; +use crate::errors::{MempoolSubmissionError, StateConflict}; use crate::mempool::Mempool; use crate::test_utils::{MockProvenTxBuilder, mock_account_id}; @@ 
-105,7 +105,7 @@ mod tx_expiration { assert_matches!( result, - Err(AddTransactionError::Expired { .. }), + Err(MempoolSubmissionError::Expired { .. }), "Failed run with expiration {i} and limit {limit}" ); } @@ -121,7 +121,7 @@ mod tx_expiration { let tx = Arc::new(tx); let result = uut.add_transaction(tx); - assert_matches!(result, Err(AddTransactionError::Expired { .. })); + assert_matches!(result, Err(MempoolSubmissionError::Expired { .. })); } } @@ -235,7 +235,7 @@ fn duplicate_nullifiers_are_rejected() { // We overlap with one nullifier. assert_matches!( result, - Err(AddTransactionError::StateConflict(StateConflict::NullifiersAlreadyExist(..))) + Err(MempoolSubmissionError::StateConflict(StateConflict::NullifiersAlreadyExist(..))) ); } @@ -268,7 +268,9 @@ fn duplicate_output_notes_are_rejected() { assert_matches!( result, - Err(AddTransactionError::StateConflict(StateConflict::OutputNotesAlreadyExist(..))) + Err(MempoolSubmissionError::StateConflict(StateConflict::OutputNotesAlreadyExist( + .. + ))) ); } @@ -301,9 +303,9 @@ fn unknown_unauthenticated_notes_are_rejected() { assert_matches!( result, - Err(AddTransactionError::StateConflict(StateConflict::UnauthenticatedNotesMissing( - .. - ))) + Err(MempoolSubmissionError::StateConflict( + StateConflict::UnauthenticatedNotesMissing(..) + )) ); } @@ -403,7 +405,7 @@ mod account_state { assert_matches!( result, - Err(AddTransactionError::StateConflict( + Err(MempoolSubmissionError::StateConflict( StateConflict::AccountCommitmentMismatch { .. } )) ); @@ -433,7 +435,7 @@ mod account_state { let result = uut.add_transaction(tx); assert_matches!( result, - Err(AddTransactionError::StateConflict( + Err(MempoolSubmissionError::StateConflict( StateConflict::AccountCommitmentMismatch { .. 
} )) ); @@ -483,7 +485,7 @@ mod new_account { let result = uut.add_transaction(tx); assert_matches!( result, - Err(AddTransactionError::StateConflict( + Err(MempoolSubmissionError::StateConflict( StateConflict::AccountCommitmentMismatch { .. } )) ); @@ -509,7 +511,7 @@ mod new_account { let result = uut.add_transaction(tx); assert_matches!( result, - Err(AddTransactionError::StateConflict( + Err(MempoolSubmissionError::StateConflict( StateConflict::AccountCommitmentMismatch { .. } )) ); diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 79c2026885..dd4c63da7b 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -29,7 +29,7 @@ use url::Url; use crate::batch_builder::BatchBuilder; use crate::block_builder::BlockBuilder; use crate::domain::transaction::AuthenticatedTransaction; -use crate::errors::{AddTransactionError, BlockProducerError, StoreError}; +use crate::errors::{BlockProducerError, MempoolSubmissionError, StoreError}; use crate::mempool::{BatchBudget, BlockBudget, Mempool, MempoolConfig, SharedMempool}; use crate::store::StoreClient; use crate::validator::BlockProducerValidatorClient; @@ -314,11 +314,11 @@ impl BlockProducerRpcServer { async fn submit_proven_transaction( &self, request: proto::transaction::ProvenTransaction, - ) -> Result { + ) -> Result { debug!(target: COMPONENT, ?request); let tx = ProvenTransaction::read_from_bytes(&request.transaction) - .map_err(AddTransactionError::TransactionDeserializationFailed)?; + .map_err(MempoolSubmissionError::TransactionDeserializationFailed)?; let tx_id = tx.id(); @@ -339,12 +339,12 @@ impl BlockProducerRpcServer { .store .get_tx_inputs(&tx) .await - .map_err(AddTransactionError::StoreConnectionFailed)?; + .map_err(MempoolSubmissionError::StoreConnectionFailed)?; // SAFETY: we assume that the rpc component has verified the transaction proof already. 
let tx = AuthenticatedTransaction::new_unchecked(Arc::new(tx), inputs) .map(Arc::new) - .map_err(AddTransactionError::StateConflict)?; + .map_err(MempoolSubmissionError::StateConflict)?; self.mempool.lock().await.lock().await.add_transaction(tx).map(Into::into) } @@ -358,9 +358,9 @@ impl BlockProducerRpcServer { async fn submit_batch( &self, request: proto::transaction::TransactionBatch, - ) -> Result { + ) -> Result { let batch = ProposedBatch::read_from_bytes(&request.proposed_batch) - .map_err(AddTransactionError::TransactionDeserializationFailed)?; + .map_err(MempoolSubmissionError::TransactionDeserializationFailed)?; // We assume that the rpc component has verified everything, including the transaction // proofs. @@ -371,13 +371,13 @@ impl BlockProducerRpcServer { .store .get_tx_inputs(tx) .await - .map_err(AddTransactionError::StoreConnectionFailed)?; + .map_err(MempoolSubmissionError::StoreConnectionFailed)?; // SAFETY: We assume that the rpc component has verified the transaction proofs, as well // as the batch integrity itself. 
let tx = AuthenticatedTransaction::new_unchecked(Arc::clone(tx), inputs) .map(Arc::new) - .map_err(AddTransactionError::StateConflict)?; + .map_err(MempoolSubmissionError::StateConflict)?; txs.push(tx); } From 4b264df189a19ff9a65b63e88ca62f742b209eb5 Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 26 Mar 2026 17:49:38 +0200 Subject: [PATCH 7/9] Add tests --- crates/block-producer/src/mempool/tests.rs | 1 + .../src/mempool/tests/add_user_batch.rs | 107 ++++++++++++++++++ 2 files changed, 108 insertions(+) create mode 100644 crates/block-producer/src/mempool/tests/add_user_batch.rs diff --git a/crates/block-producer/src/mempool/tests.rs b/crates/block-producer/src/mempool/tests.rs index 0e03966936..b83af0cc8e 100644 --- a/crates/block-producer/src/mempool/tests.rs +++ b/crates/block-producer/src/mempool/tests.rs @@ -12,6 +12,7 @@ use crate::test_utils::MockProvenTxBuilder; use crate::test_utils::batch::TransactionBatchConstructor; mod add_transaction; +mod add_user_batch; impl Mempool { /// Returns an empty [`Mempool`] and a perfect clone intended for use as the Unit Under Test and diff --git a/crates/block-producer/src/mempool/tests/add_user_batch.rs b/crates/block-producer/src/mempool/tests/add_user_batch.rs new file mode 100644 index 0000000000..6d4aa3aeec --- /dev/null +++ b/crates/block-producer/src/mempool/tests/add_user_batch.rs @@ -0,0 +1,107 @@ +use std::sync::Arc; + +use assert_matches::assert_matches; +use miden_protocol::batch::BatchId; +use pretty_assertions::assert_eq; + +use crate::domain::transaction::AuthenticatedTransaction; +use crate::errors::{MempoolSubmissionError, StateConflict}; +use crate::mempool::Mempool; +use crate::test_utils::MockProvenTxBuilder; + +/// This checks that transactions from a user batch remain as the same batch upon selection. +/// +/// Since the selection process is random, its difficult to test this directly, but this at +/// least acts as a smoke test. 
We select two batches and check that one of them is the user +/// batch. +#[test] +fn user_batch_is_isolated_from_other_transactions() { + let (mut uut, _) = Mempool::for_tests(); + + let conventional_a = build_tx(MockProvenTxBuilder::with_account_index(200)); + let conventional_b = build_tx(MockProvenTxBuilder::with_account_index(201)); + + uut.add_transaction(conventional_a.clone()).unwrap(); + uut.add_transaction(conventional_b.clone()).unwrap(); + + let user_batch_txs = MockProvenTxBuilder::sequential(); + let user_batch_id = + BatchId::from_transactions(user_batch_txs.iter().map(|tx| tx.raw_proven_transaction())); + uut.add_user_batch(&user_batch_txs).unwrap(); + + let batch_a = uut.select_batch().unwrap(); + let batch_b = uut.select_batch().unwrap(); + + let (user, conventional) = if batch_a.id() == user_batch_id { + (batch_a, batch_b) + } else { + (batch_b, batch_a) + }; + + assert_eq!(user.id(), user_batch_id); + assert_eq!(user.transactions(), user_batch_txs.as_slice()); + + assert_eq!(conventional.transactions().len(), 2); + assert!(conventional.transactions().contains(&conventional_a)); + assert!(conventional.transactions().contains(&conventional_b)); +} + +#[test] +fn user_batch_respects_batch_budget() { + let (mut uut, _) = Mempool::for_tests(); + uut.config.batch_budget.transactions = 1; + + let user_batch_txs = MockProvenTxBuilder::sequential(); + let result = uut.add_user_batch(&user_batch_txs[..2]); + + assert_matches!(result, Err(MempoolSubmissionError::CapacityExceeded)); +} + +#[test] +fn user_batch_with_internal_state_conflicts_are_rejected() { + let (mut uut, reference) = Mempool::for_tests(); + + let conflicting_a = tx_with_nullifiers(10, 0..1); + let conflicting_b = tx_with_nullifiers(11, 0..1); + + let result = uut.add_user_batch(&[conflicting_a.clone(), conflicting_b.clone()]); + + assert_matches!( + result, + Err(MempoolSubmissionError::StateConflict(StateConflict::NullifiersAlreadyExist(..))) + ); + + assert_eq!(uut, reference); +} + 
+#[test] +fn user_batch_conflicts_with_existing_state_are_rejected() { + let (mut uut, mut reference) = Mempool::for_tests(); + + let existing = tx_with_nullifiers(20, 5..6); + uut.add_transaction(existing.clone()).unwrap(); + reference.add_transaction(existing.clone()).unwrap(); + + let conflicting = tx_with_nullifiers(21, 5..6); + let companion = tx_with_nullifiers(22, 6..7); + + let result = uut.add_user_batch(&[conflicting.clone(), companion.clone()]); + + assert_matches!( + result, + Err(MempoolSubmissionError::StateConflict(StateConflict::NullifiersAlreadyExist(..))) + ); + + assert_eq!(uut, reference); +} + +fn build_tx(builder: MockProvenTxBuilder) -> Arc { + Arc::new(AuthenticatedTransaction::from_inner(builder.build())) +} + +fn tx_with_nullifiers( + account_index: u32, + range: std::ops::Range, +) -> Arc { + build_tx(MockProvenTxBuilder::with_account_index(account_index).nullifiers_range(range)) +} From 6dd5f53d652200d33c8fc978983153ed7e160d9a Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Thu, 26 Mar 2026 18:26:47 +0200 Subject: [PATCH 8/9] changelog --- CHANGELOG.md | 1 + proto/proto/internal/block_producer.proto | 102 +-- proto/proto/rpc.proto | 728 +++++++++++----------- proto/proto/types/transaction.proto | 80 +-- 4 files changed, 456 insertions(+), 455 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 496312b0cd..0cd5327eb8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -14,6 +14,7 @@ - Fixed `TransactionHeader` serialization for row insertion on database & fixed transaction cursor on retrievals ([#1701](https://github.com/0xMiden/node/issues/1701)). - Added KMS signing support in validator ([#1677](https://github.com/0xMiden/node/pull/1677)). - Added per-IP gRPC rate limiting across services as well as global concurrent connection limit ([#1746](https://github.com/0xMiden/node/issues/1746)). 
+- Users can now submit atomic transaction batches via `SubmitBatch` gRPC endpoint ([#1846](https://github.com/0xMiden/node/pull/1846)). ### Changes diff --git a/proto/proto/internal/block_producer.proto b/proto/proto/internal/block_producer.proto index cfe2b47732..9ab2be140a 100644 --- a/proto/proto/internal/block_producer.proto +++ b/proto/proto/internal/block_producer.proto @@ -13,27 +13,27 @@ import "types/transaction.proto"; // ================================================================================================ service Api { - // Returns the status info. - rpc Status(google.protobuf.Empty) returns (rpc.BlockProducerStatus) {} + // Returns the status info. + rpc Status(google.protobuf.Empty) returns (rpc.BlockProducerStatus) {} - // Submits proven transaction to the Miden network. Returns the node's current block height. - rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} + // Submits proven transaction to the Miden network. Returns the node's current block height. + rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} - // Submits a batch of transactions to the Miden network. - // - // All transactions in this batch will be considered atomic, and be committed together or not all. - // - // Returns the node's current block height. - rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} + // Submits a batch of transactions to the Miden network. + // + // All transactions in this batch will be considered atomic, and be committed together or not at all. + // + // Returns the node's current block height. + rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} - // Subscribe to mempool events. - // - // The event stream will contain all events after the current chain tip. This includes all - // currently inflight events that have not yet been committed to the chain.
- // - // Currently only a single active subscription is supported. Subscription requests will cancel - // the active subscription, if any. - rpc MempoolSubscription(google.protobuf.Empty) returns (stream MempoolEvent) {} + // Subscribe to mempool events. + // + // The event stream will contain all events after the current chain tip. This includes all + // currently inflight events that have not yet been committed to the chain. + // + // Currently only a single active subscription is supported. Subscription requests will cancel + // the active subscription, if any. + rpc MempoolSubscription(google.protobuf.Empty) returns (stream MempoolEvent) {} } // MEMPOOL SUBSCRIPTION @@ -44,41 +44,41 @@ message MempoolSubscriptionRequest {} // Event from the mempool. message MempoolEvent { - // A block was committed. - // - // This event is sent when a block is committed to the chain. - message BlockCommitted { - blockchain.BlockHeader block_header = 1; - repeated transaction.TransactionId transactions = 2; - } + // A block was committed. + // + // This event is sent when a block is committed to the chain. + message BlockCommitted { + blockchain.BlockHeader block_header = 1; + repeated transaction.TransactionId transactions = 2; + } - // A transaction was added to the mempool. - // - // This event is sent when a transaction is added to the mempool. - message TransactionAdded { - // The ID of the transaction. - transaction.TransactionId id = 1; - // Nullifiers consumed by the transaction. - repeated primitives.Digest nullifiers = 2; - // Network notes created by the transaction. - repeated note.NetworkNote network_notes = 3; - // Changes to a network account, if any. This includes creation of new network accounts. + // A transaction was added to the mempool. // - // The account delta is encoded using [winter_utils::Serializable] implementation - // for [miden_protocol::account::delta::AccountDelta]. 
- optional bytes network_account_delta = 4; - } + // This event is sent when a transaction is added to the mempool. + message TransactionAdded { + // The ID of the transaction. + transaction.TransactionId id = 1; + // Nullifiers consumed by the transaction. + repeated primitives.Digest nullifiers = 2; + // Network notes created by the transaction. + repeated note.NetworkNote network_notes = 3; + // Changes to a network account, if any. This includes creation of new network accounts. + // + // The account delta is encoded using [winter_utils::Serializable] implementation + // for [miden_protocol::account::delta::AccountDelta]. + optional bytes network_account_delta = 4; + } - // A set of transactions was reverted and dropped from the mempool. - // - // This event is sent when a set of transactions are reverted and dropped from the mempool. - message TransactionsReverted { - repeated transaction.TransactionId reverted = 1; - } + // A set of transactions was reverted and dropped from the mempool. + // + // This event is sent when a set of transactions are reverted and dropped from the mempool. + message TransactionsReverted { + repeated transaction.TransactionId reverted = 1; + } - oneof event { - TransactionAdded transaction_added = 1; - BlockCommitted block_committed = 2; - TransactionsReverted transactions_reverted = 3; - } + oneof event { + TransactionAdded transaction_added = 1; + BlockCommitted block_committed = 2; + TransactionsReverted transactions_reverted = 3; + } } diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto index 7b57bf3cbb..d98267b9cd 100644 --- a/proto/proto/rpc.proto +++ b/proto/proto/rpc.proto @@ -14,90 +14,90 @@ import "types/transaction.proto"; // RPC API for the RPC component service Api { - // Returns the status info of the node. - rpc Status(google.protobuf.Empty) returns (RpcStatus) {} - - // Returns the query parameter limits configured for RPC methods. 
- // - // These define the maximum number of each parameter a method will accept. - // Exceeding the limit will result in the request being rejected and you should instead send - // multiple smaller requests. - rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} - - // Returns a Sparse Merkle Tree opening proof for each requested nullifier - // - // Each proof demonstrates either: - // - **Inclusion**: Nullifier exists in the tree (note was consumed) - // - **Non-inclusion**: Nullifier does not exist (note was not consumed) - // - // The `leaf` field indicates the status: - // * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) - // * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. - // - // Verify proofs against the nullifier tree root in the latest block header. - rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} - - // Returns the latest details of the specified account. - rpc GetAccount(AccountRequest) returns (AccountResponse) {} - - // Returns raw block data for the specified block number. - rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} - - // Retrieves block header by given block number. Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(BlockHeaderByNumberRequest) returns (BlockHeaderByNumberResponse) {} - - // Returns a list of notes matching the provided note IDs. - rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} - - // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (MaybeNoteScript) {} - - // TRANSACTION SUBMISSION ENDPOINTS - // -------------------------------------------------------------------------------------------- - - // Submits proven transaction to the Miden network. Returns the node's current block height. 
- rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} - - // Submits a batch of transactions to the Miden network. - // - // All transactions in this batch will be considered atomic, and be committed together or not all. - // - // Returns the node's current block height. - rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} - - // STATE SYNCHRONIZATION ENDPOINTS - // -------------------------------------------------------------------------------------------- - - // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} - - // Returns info which can be used by the client to sync up to the tip of chain for the notes - // they are interested in. - // - // Client specifies the `note_tags` they are interested in, and the block height from which to - // search for new for matching notes for. The request will then return the next block containing - // any note matching the provided tags. - // - // The response includes each note's metadata and inclusion proof. - // - // A basic note sync can be implemented by repeatedly requesting the previous response's block - // until reaching the tip of the chain. - rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} - - // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - // - // Note that only 16-bit prefixes are supported at this time. - rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} - - // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} - - // Returns storage map updates for specified account and storage slots within a block range. 
- rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} - - // Returns MMR delta needed to synchronize the chain MMR within the requested block range. - rpc SyncChainMmr(SyncChainMmrRequest) returns (SyncChainMmrResponse) {} + // Returns the status info of the node. + rpc Status(google.protobuf.Empty) returns (RpcStatus) {} + + // Returns the query parameter limits configured for RPC methods. + // + // These define the maximum number of each parameter a method will accept. + // Exceeding the limit will result in the request being rejected and you should instead send + // multiple smaller requests. + rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} + + // Returns a Sparse Merkle Tree opening proof for each requested nullifier + // + // Each proof demonstrates either: + // - **Inclusion**: Nullifier exists in the tree (note was consumed) + // - **Non-inclusion**: Nullifier does not exist (note was not consumed) + // + // The `leaf` field indicates the status: + // * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) + // * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. + // + // Verify proofs against the nullifier tree root in the latest block header. + rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} + + // Returns the latest details of the specified account. + rpc GetAccount(AccountRequest) returns (AccountResponse) {} + + // Returns raw block data for the specified block number. + rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} + + // Retrieves block header by given block number. Optionally, it also returns the MMR path + // and current chain length to authenticate the block's inclusion. + rpc GetBlockHeaderByNumber(BlockHeaderByNumberRequest) returns (BlockHeaderByNumberResponse) {} + + // Returns a list of notes matching the provided note IDs. 
+ rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} + + // Returns the script for a note by its root. + rpc GetNoteScriptByRoot(note.NoteScriptRoot) returns (MaybeNoteScript) {} + + // TRANSACTION SUBMISSION ENDPOINTS + // -------------------------------------------------------------------------------------------- + + // Submits proven transaction to the Miden network. Returns the node's current block height. + rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} + + // Submits a batch of transactions to the Miden network. + // + // All transactions in this batch will be considered atomic, and be committed together or not at all. + // + // Returns the node's current block height. + rpc SubmitBatch(transaction.TransactionBatch) returns (blockchain.BlockNumber) {} + + // STATE SYNCHRONIZATION ENDPOINTS + // -------------------------------------------------------------------------------------------- + + // Returns transaction records for specific accounts within a block range. + rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} + + // Returns info which can be used by the client to sync up to the tip of chain for the notes + // they are interested in. + // + // Client specifies the `note_tags` they are interested in, and the block height from which to + // search for new matching notes. The request will then return the next block containing + // any note matching the provided tags. + // + // The response includes each note's metadata and inclusion proof. + // + // A basic note sync can be implemented by repeatedly requesting the previous response's block + // until reaching the tip of the chain. + rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} + + // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. + // + // Note that only 16-bit prefixes are supported at this time.
+ rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} + + // Returns account vault updates for specified account within a block range. + rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} + + // Returns storage map updates for specified account and storage slots within a block range. + rpc SyncAccountStorageMaps(SyncAccountStorageMapsRequest) returns (SyncAccountStorageMapsResponse) {} + + // Returns MMR delta needed to synchronize the chain MMR within the requested block range. + rpc SyncChainMmr(SyncChainMmrRequest) returns (SyncChainMmrResponse) {} } // RPC STATUS @@ -105,17 +105,17 @@ service Api { // Represents the status of the node. message RpcStatus { - // The rpc component's running version. - string version = 1; + // The rpc component's running version. + string version = 1; - // The genesis commitment. - primitives.Digest genesis_commitment = 2; + // The genesis commitment. + primitives.Digest genesis_commitment = 2; - // The store status. - StoreStatus store = 3; + // The store status. + StoreStatus store = 3; - // The block producer status. - BlockProducerStatus block_producer = 4; + // The block producer status. + BlockProducerStatus block_producer = 4; } // BLOCK PRODUCER STATUS @@ -123,32 +123,32 @@ message RpcStatus { // Represents the status of the block producer. message BlockProducerStatus { - // The block producer's running version. - string version = 1; + // The block producer's running version. + string version = 1; - // The block producer's status. - string status = 2; + // The block producer's status. + string status = 2; - // The block producer's current view of the chain tip height. - // - // This is the height of the latest block that the block producer considers - // to be part of the canonical chain. - fixed32 chain_tip = 4; + // The block producer's current view of the chain tip height. 
+ // + // This is the height of the latest block that the block producer considers + // to be part of the canonical chain. + fixed32 chain_tip = 4; - // Statistics about the mempool. - MempoolStats mempool_stats = 3; + // Statistics about the mempool. + MempoolStats mempool_stats = 3; } // Statistics about the mempool. message MempoolStats { - // Number of transactions currently in the mempool waiting to be batched. - uint64 unbatched_transactions = 1; + // Number of transactions currently in the mempool waiting to be batched. + uint64 unbatched_transactions = 1; - // Number of batches currently being proven. - uint64 proposed_batches = 2; + // Number of batches currently being proven. + uint64 proposed_batches = 2; - // Number of proven batches waiting for block inclusion. - uint64 proven_batches = 3; + // Number of proven batches waiting for block inclusion. + uint64 proven_batches = 3; } // STORE STATUS @@ -156,14 +156,14 @@ message MempoolStats { // Represents the status of the store. message StoreStatus { - // The store's running version. - string version = 1; + // The store's running version. + string version = 1; - // The store's status. - string status = 2; + // The store's status. + string status = 2; - // Number of the latest block in the chain. - fixed32 chain_tip = 3; + // Number of the latest block in the chain. + fixed32 chain_tip = 3; } // GET BLOCK HEADER BY NUMBER @@ -174,22 +174,22 @@ message StoreStatus { // // The Merkle path is an MMR proof for the block's leaf, based on the current chain length. message BlockHeaderByNumberRequest { - // The target block height, defaults to latest if not provided. - optional uint32 block_num = 1; - // Whether or not to return authentication data for the block header. - optional bool include_mmr_proof = 2; + // The target block height, defaults to latest if not provided. + optional uint32 block_num = 1; + // Whether or not to return authentication data for the block header. 
+ optional bool include_mmr_proof = 2; } // Represents the result of getting a block header by block number. message BlockHeaderByNumberResponse { - // The requested block header. - blockchain.BlockHeader block_header = 1; + // The requested block header. + blockchain.BlockHeader block_header = 1; - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. - optional primitives.MerklePath mmr_path = 2; + // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. + optional primitives.MerklePath mmr_path = 2; - // Current chain length. - optional fixed32 chain_length = 3; + // Current chain length. + optional fixed32 chain_length = 3; } // GET NOTE SCRIPT BY ROOT @@ -197,8 +197,8 @@ message BlockHeaderByNumberResponse { // Represents a note script or nothing. message MaybeNoteScript { - // The script for a note by its root. - optional note.NoteScript script = 1; + // The script for a note by its root. + optional note.NoteScript script = 1; } // GET ACCOUNT PROOF @@ -206,146 +206,146 @@ message MaybeNoteScript { // Defines the request for account details. message AccountRequest { - // Request the details for a public account. - message AccountDetailRequest { - // Represents a storage slot index and the associated map keys. - message StorageMapDetailRequest { - // Indirection required for use in `oneof {..}` block. - message MapKeys { - // A list of map keys associated with this storage slot. - repeated primitives.Digest map_keys = 1; - } - // Storage slot name. - string slot_name = 1; - - oneof slot_data { - // Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - // the response will not contain them but must be requested separately. - bool all_entries = 2; - - // A list of map keys associated with the given storage slot identified by `slot_name`. - MapKeys map_keys = 3; - } + // Request the details for a public account. 
+ message AccountDetailRequest { + // Represents a storage slot index and the associated map keys. + message StorageMapDetailRequest { + // Indirection required for use in `oneof {..}` block. + message MapKeys { + // A list of map keys associated with this storage slot. + repeated primitives.Digest map_keys = 1; + } + // Storage slot name. + string slot_name = 1; + + oneof slot_data { + // Request to return all storage map data. If the number exceeds a threshold of 1000 entries, + // the response will not contain them but must be requested separately. + bool all_entries = 2; + + // A list of map keys associated with the given storage slot identified by `slot_name`. + MapKeys map_keys = 3; + } + } + + // Last known code commitment to the requester. The response will include account code + // only if its commitment is different from this value. + // + // If the field is omitted, the response will not include the account code. + optional primitives.Digest code_commitment = 1; + + // Last known asset vault commitment to the requester. The response will include asset vault data + // only if its commitment is different from this value. If the value is not present in the + // request, the response will not contain one either. + // If the number of to-be-returned asset entries exceed a threshold, they have to be requested + // separately, which is signaled in the response message with dedicated flag. + optional primitives.Digest asset_vault_commitment = 2; + + // Additional request per storage map. + repeated StorageMapDetailRequest storage_maps = 3; + } - // Last known code commitment to the requester. The response will include account code - // only if its commitment is different from this value. - // - // If the field is ommiteed, the response will not include the account code. - optional primitives.Digest code_commitment = 1; - - // Last known asset vault commitment to the requester.
The response will include asset vault data - // only if its commitment is different from this value. If the value is not present in the - // request, the response will not contain one either. - // If the number of to-be-returned asset entries exceed a threshold, they have to be requested - // separately, which is signaled in the response message with dedicated flag. - optional primitives.Digest asset_vault_commitment = 2; - - // Additional request per storage map. - repeated StorageMapDetailRequest storage_maps = 3; - } + // ID of the account for which we want to get data + account.AccountId account_id = 1; - // ID of the account for which we want to get data - account.AccountId account_id = 1; - - // Optional block height at which to return the proof. - // - // Defaults to current chain tip if unspecified. - optional blockchain.BlockNumber block_num = 2; + // Optional block height at which to return the proof. + // + // Defaults to current chain tip if unspecified. + optional blockchain.BlockNumber block_num = 2; - // Request for additional account details; valid only for public accounts. - optional AccountDetailRequest details = 3; + // Request for additional account details; valid only for public accounts. + optional AccountDetailRequest details = 3; } // Represents the result of getting account proof. message AccountResponse { - message AccountDetails { - // Account header. - account.AccountHeader header = 1; + message AccountDetails { + // Account header. + account.AccountHeader header = 1; - // Account storage data - AccountStorageDetails storage_details = 2; + // Account storage data + AccountStorageDetails storage_details = 2; - // Account code; empty if code commitments matched or none was requested. - optional bytes code = 3; + // Account code; empty if code commitments matched or none was requested. + optional bytes code = 3; - // Account asset vault data; empty if vault commitments matched or the requester - // omitted it in the request. 
- optional AccountVaultDetails vault_details = 4; - } + // Account asset vault data; empty if vault commitments matched or the requester + // omitted it in the request. + optional AccountVaultDetails vault_details = 4; + } - // The block number at which the account witness was created and the account details were observed. - blockchain.BlockNumber block_num = 1; + // The block number at which the account witness was created and the account details were observed. + blockchain.BlockNumber block_num = 1; - // Account ID, current state commitment, and SMT path. - account.AccountWitness witness = 2; + // Account ID, current state commitment, and SMT path. + account.AccountWitness witness = 2; - // Additional details for public accounts. - optional AccountDetails details = 3; + // Additional details for public accounts. + optional AccountDetails details = 3; } // Account vault details for AccountResponse message AccountVaultDetails { - // A flag that is set to true if the account contains too many assets. This indicates - // to the user that `SyncAccountVault` endpoint should be used to retrieve the - // account's assets - bool too_many_assets = 1; + // A flag that is set to true if the account contains too many assets. This indicates + // to the user that `SyncAccountVault` endpoint should be used to retrieve the + // account's assets + bool too_many_assets = 1; - // When too_many_assets == false, this will contain the list of assets in the - // account's vault - repeated primitives.Asset assets = 2; + // When too_many_assets == false, this will contain the list of assets in the + // account's vault + repeated primitives.Asset assets = 2; } // Account storage details for AccountResponse message AccountStorageDetails { - message AccountStorageMapDetails { - // Wrapper for repeated storage map entries including their proofs. - // Used when specific keys are requested to enable client-side verification. 
- message MapEntriesWithProofs { - // Definition of individual storage entries including a proof. - message StorageMapEntryWithProof { - primitives.Digest key = 1; - primitives.Digest value = 2; - primitives.SmtOpening proof = 3; - } - - repeated StorageMapEntryWithProof entries = 1; + message AccountStorageMapDetails { + // Wrapper for repeated storage map entries including their proofs. + // Used when specific keys are requested to enable client-side verification. + message MapEntriesWithProofs { + // Definition of individual storage entries including a proof. + message StorageMapEntryWithProof { + primitives.Digest key = 1; + primitives.Digest value = 2; + primitives.SmtOpening proof = 3; + } + + repeated StorageMapEntryWithProof entries = 1; + } + + // Wrapper for repeated storage map entries (without proofs). + // Used when all entries are requested for small maps. + message AllMapEntries { + // Definition of individual storage entries. + message StorageMapEntry { + primitives.Digest key = 1; + primitives.Digest value = 2; + } + + repeated StorageMapEntry entries = 1; + } + + // Storage slot name. + string slot_name = 1; + + // True when the number of entries exceeds the response limit. + // When set, clients should use the `SyncAccountStorageMaps` endpoint. + bool too_many_entries = 2; + + // The map entries (with or without proofs). Empty when too_many_entries is true. + oneof entries { + // All storage entries without proofs (for small maps or full requests). + AllMapEntries all_entries = 3; + + // Specific entries with their SMT proofs (for partial requests). + MapEntriesWithProofs entries_with_proofs = 4; + } } - // Wrapper for repeated storage map entries (without proofs). - // Used when all entries are requested for small maps. - message AllMapEntries { - // Definition of individual storage entries. 
- message StorageMapEntry { - primitives.Digest key = 1; - primitives.Digest value = 2; - } + // Account storage header (storage slot info for up to 256 slots) + account.AccountStorageHeader header = 1; - repeated StorageMapEntry entries = 1; - } - - // Storage slot name. - string slot_name = 1; - - // True when the number of entries exceeds the response limit. - // When set, clients should use the `SyncAccountStorageMaps` endpoint. - bool too_many_entries = 2; - - // The map entries (with or without proofs). Empty when too_many_entries is true. - oneof entries { - // All storage entries without proofs (for small maps or full requests). - AllMapEntries all_entries = 3; - - // Specific entries with their SMT proofs (for partial requests). - MapEntriesWithProofs entries_with_proofs = 4; - } - } - - // Account storage header (storage slot info for up to 256 slots) - account.AccountStorageHeader header = 1; - - // Additional data for the requested storage maps - repeated AccountStorageMapDetails map_details = 2; + // Additional data for the requested storage maps + repeated AccountStorageMapDetails map_details = 2; } // CHECK NULLIFIERS @@ -353,14 +353,14 @@ message AccountStorageDetails { // List of nullifiers to return proofs for. message NullifierList { - // List of nullifiers to return proofs for. - repeated primitives.Digest nullifiers = 1; + // List of nullifiers to return proofs for. + repeated primitives.Digest nullifiers = 1; } // Represents the result of checking nullifiers. message CheckNullifiersResponse { - // Each requested nullifier has its corresponding nullifier proof at the same position. - repeated primitives.SmtOpening proofs = 1; + // Each requested nullifier has its corresponding nullifier proof at the same position. + repeated primitives.SmtOpening proofs = 1; } // SYNC NULLIFIERS @@ -368,33 +368,33 @@ message CheckNullifiersResponse { // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. 
message SyncNullifiersRequest { - // Block number from which the nullifiers are requested (inclusive). - BlockRange block_range = 1; + // Block number from which the nullifiers are requested (inclusive). + BlockRange block_range = 1; - // Number of bits used for nullifier prefix. Currently the only supported value is 16. - uint32 prefix_len = 2; + // Number of bits used for nullifier prefix. Currently the only supported value is 16. + uint32 prefix_len = 2; - // List of nullifiers to check. Each nullifier is specified by its prefix with length equal - // to `prefix_len`. - repeated uint32 nullifiers = 3; + // List of nullifiers to check. Each nullifier is specified by its prefix with length equal + // to `prefix_len`. + repeated uint32 nullifiers = 3; } // Represents the result of syncing nullifiers. message SyncNullifiersResponse { - // Represents a single nullifier update. - message NullifierUpdate { - // Nullifier ID. - primitives.Digest nullifier = 1; + // Represents a single nullifier update. + message NullifierUpdate { + // Nullifier ID. + primitives.Digest nullifier = 1; - // Block number. - fixed32 block_num = 2; - } + // Block number. + fixed32 block_num = 2; + } - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. + PaginationInfo pagination_info = 1; - // List of nullifiers matching the prefixes specified in the request. - repeated NullifierUpdate nullifiers = 2; + // List of nullifiers matching the prefixes specified in the request. + repeated NullifierUpdate nullifiers = 2; } // SYNC ACCOUNT VAULT @@ -404,37 +404,37 @@ message SyncNullifiersResponse { // // Allows requesters to sync asset values for specific public accounts within a block range. message SyncAccountVaultRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. 
- BlockRange block_range = 1; + // Block range from which to start synchronizing. + // + // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + // otherwise an error will be returned. + BlockRange block_range = 1; - // Account for which we want to sync asset vault. - account.AccountId account_id = 2; + // Account for which we want to sync asset vault. + account.AccountId account_id = 2; } message SyncAccountVaultResponse { - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. + PaginationInfo pagination_info = 1; - // List of asset updates for the account. - // - // Multiple updates can be returned for a single asset, and the one with a higher `block_num` - // is expected to be retained by the caller. - repeated AccountVaultUpdate updates = 2; + // List of asset updates for the account. + // + // Multiple updates can be returned for a single asset, and the one with a higher `block_num` + // is expected to be retained by the caller. + repeated AccountVaultUpdate updates = 2; } message AccountVaultUpdate { - // Vault key associated with the asset. - primitives.Digest vault_key = 1; + // Vault key associated with the asset. + primitives.Digest vault_key = 1; - // Asset value related to the vault key. - // If not present, the asset was removed from the vault. - optional primitives.Asset asset = 2; + // Asset value related to the vault key. + // If not present, the asset was removed from the vault. + optional primitives.Asset asset = 2; - // Block number at which the above asset was updated in the account vault. - fixed32 block_num = 3; + // Block number at which the above asset was updated in the account vault. + fixed32 block_num = 3; } // SYNC NOTES @@ -445,29 +445,29 @@ message AccountVaultUpdate { // Specifies note tags that requester is interested in. The server will return the first block which // contains a note matching `note_tags` or the chain tip. 
message SyncNotesRequest { - // Block range from which to start synchronizing. - BlockRange block_range = 1; + // Block range from which to start synchronizing. + BlockRange block_range = 1; - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 2; + // Specifies the tags which the requester is interested in. + repeated fixed32 note_tags = 2; } // Represents the result of syncing notes request. message SyncNotesResponse { - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. + PaginationInfo pagination_info = 1; - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; + // Block header of the block with the first note matching the specified criteria. + blockchain.BlockHeader block_header = 2; - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. - // - // An MMR proof can be constructed for the leaf of index `block_header.block_num` of - // an MMR of forest `chain_tip` with this path. - primitives.MerklePath mmr_path = 3; + // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. + // + // An MMR proof can be constructed for the leaf of index `block_header.block_num` of + // an MMR of forest `chain_tip` with this path. + primitives.MerklePath mmr_path = 3; - // List of all notes together with the Merkle paths from `response.block_header.note_root`. - repeated note.NoteSyncRecord notes = 4; + // List of all notes together with the Merkle paths from `response.block_header.note_root`. + repeated note.NoteSyncRecord notes = 4; } // SYNC CHAIN MMR @@ -475,21 +475,21 @@ message SyncNotesResponse { // Chain MMR synchronization request. message SyncChainMmrRequest { - // Block range from which to synchronize the chain MMR. 
- // - // The response will contain MMR delta starting after `block_range.block_from` up to - // `block_range.block_to` or the chain tip (whichever is lower). Set `block_from` to the last - // block already present in the caller's MMR so the delta begins at the next block. - BlockRange block_range = 1; + // Block range from which to synchronize the chain MMR. + // + // The response will contain MMR delta starting after `block_range.block_from` up to + // `block_range.block_to` or the chain tip (whichever is lower). Set `block_from` to the last + // block already present in the caller's MMR so the delta begins at the next block. + BlockRange block_range = 1; } // Represents the result of syncing chain MMR. message SyncChainMmrResponse { - // For which block range the MMR delta is returned. - BlockRange block_range = 1; - // Data needed to update the partial MMR from `request.block_range.block_from + 1` to - // `response.block_range.block_to` or the chain tip. - primitives.MmrDelta mmr_delta = 2; + // For which block range the MMR delta is returned. + BlockRange block_range = 1; + // Data needed to update the partial MMR from `request.block_range.block_from + 1` to + // `response.block_range.block_to` or the chain tip. + primitives.MmrDelta mmr_delta = 2; } // SYNC ACCOUNT STORAGE MAP @@ -500,40 +500,40 @@ message SyncChainMmrResponse { // Allows requesters to sync storage map values for specific public accounts within a block range, // with support for cursor-based pagination to handle large storage maps. message SyncAccountStorageMapsRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. - BlockRange block_range = 1; + // Block range from which to start synchronizing. 
+ // + // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), + // otherwise an error will be returned. + BlockRange block_range = 1; - // Account for which we want to sync storage maps. - account.AccountId account_id = 3; + // Account for which we want to sync storage maps. + account.AccountId account_id = 3; } message SyncAccountStorageMapsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. + PaginationInfo pagination_info = 1; - // The list of storage map updates. - // - // Multiple updates can be returned for a single slot index and key combination, and the one - // with a higher `block_num` is expected to be retained by the caller. - repeated StorageMapUpdate updates = 2; + // The list of storage map updates. + // + // Multiple updates can be returned for a single slot index and key combination, and the one + // with a higher `block_num` is expected to be retained by the caller. + repeated StorageMapUpdate updates = 2; } // Represents a single storage map update. message StorageMapUpdate { - // Block number in which the slot was updated. - fixed32 block_num = 1; + // Block number in which the slot was updated. + fixed32 block_num = 1; - // Storage slot name. - string slot_name = 2; + // Storage slot name. + string slot_name = 2; - // The storage map key. - primitives.Digest key = 3; + // The storage map key. + primitives.Digest key = 3; - // The storage map value. - primitives.Digest value = 4; + // The storage map value. + primitives.Digest value = 4; } // BLOCK RANGE @@ -541,11 +541,11 @@ message StorageMapUpdate { // Represents a block range. message BlockRange { - // Block number from which to start (inclusive). - fixed32 block_from = 1; + // Block number from which to start (inclusive). + fixed32 block_from = 1; - // Block number up to which to check (inclusive). If not specified, checks up to the latest block. 
- optional fixed32 block_to = 2; + // Block number up to which to check (inclusive). If not specified, checks up to the latest block. + optional fixed32 block_to = 2; } // PAGINATION INFO @@ -560,15 +560,15 @@ message BlockRange { // To request the next chunk, the requester should use `block_num + 1` from the previous response // as the `block_from` for the next request. message PaginationInfo { - // Current chain tip - fixed32 chain_tip = 1; + // Current chain tip + fixed32 chain_tip = 1; - // The block number of the last check included in this response. - // - // For chunked responses, this may be less than `request.block_range.block_to`. - // If it is less than request.block_range.block_to, the user is expected to make a subsequent request - // starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). - fixed32 block_num = 2; + // The block number of the last check included in this response. + // + // For chunked responses, this may be less than `request.block_range.block_to`. + // If it is less than request.block_range.block_to, the user is expected to make a subsequent request + // starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). + fixed32 block_num = 2; } // SYNC TRANSACTIONS @@ -578,29 +578,29 @@ message PaginationInfo { // // Allows requesters to sync transactions for specific accounts within a block range. message SyncTransactionsRequest { - // Block range from which to start synchronizing. - BlockRange block_range = 1; + // Block range from which to start synchronizing. + BlockRange block_range = 1; - // Accounts to sync transactions for. - repeated account.AccountId account_ids = 2; + // Accounts to sync transactions for. + repeated account.AccountId account_ids = 2; } // Represents the result of syncing transactions request. message SyncTransactionsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; + // Pagination information. 
+ PaginationInfo pagination_info = 1; - // List of transaction records. - repeated TransactionRecord transactions = 2; + // List of transaction records. + repeated TransactionRecord transactions = 2; } // Represents a transaction record. message TransactionRecord { - // Block number in which the transaction was included. - fixed32 block_num = 1; + // Block number in which the transaction was included. + fixed32 block_num = 1; - // A transaction header. - transaction.TransactionHeader header = 2; + // A transaction header. + transaction.TransactionHeader header = 2; } // RPC LIMITS @@ -608,16 +608,16 @@ message TransactionRecord { // Represents the query parameter limits for RPC endpoints. message RpcLimits { - // Maps RPC endpoint names to their parameter limits. - // Key: endpoint name (e.g., "CheckNullifiers") - // Value: map of parameter names to their limit values - map endpoints = 1; + // Maps RPC endpoint names to their parameter limits. + // Key: endpoint name (e.g., "CheckNullifiers") + // Value: map of parameter names to their limit values + map endpoints = 1; } // Represents the parameter limits for a single endpoint. message EndpointLimits { - // Maps parameter names to their limit values. - // Key: parameter name (e.g., "nullifier", "account_id") - // Value: limit value - map parameters = 1; + // Maps parameter names to their limit values. + // Key: parameter name (e.g., "nullifier", "account_id") + // Value: limit value + map parameters = 1; } diff --git a/proto/proto/types/transaction.proto b/proto/proto/types/transaction.proto index b0e25b5470..0895565934 100644 --- a/proto/proto/types/transaction.proto +++ b/proto/proto/types/transaction.proto @@ -10,41 +10,41 @@ import "types/primitives.proto"; // Submits proven transaction to the Miden network. message ProvenTransaction { - // Transaction encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::transaction::proven_tx::ProvenTransaction]. 
- bytes transaction = 1; - // Transaction inputs encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::transaction::TransactionInputs]. - optional bytes transaction_inputs = 2; + // Transaction encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::transaction::proven_tx::ProvenTransaction]. + bytes transaction = 1; + // Transaction inputs encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::transaction::TransactionInputs]. + optional bytes transaction_inputs = 2; } message TransactionBatch { - // The proposed batch of transaction encoded using [winter_utils::Serializable] implementation - // for [miden_protocol::batch::ProposedBatch]. - bytes proposed_batch = 1; - // Each transaction's inputs encoded using [winter_utils::Serializable] implementation for - // [miden_protocol::transaction::TransactionInputs]. - // - // Order of inputs should match the transaction order in the batch. - repeated bytes transaction_inputs = 2; + // The proposed batch of transaction encoded using [winter_utils::Serializable] implementation + // for [miden_protocol::batch::ProposedBatch]. + bytes proposed_batch = 1; + // Each transaction's inputs encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::transaction::TransactionInputs]. + // + // Order of inputs should match the transaction order in the batch. + repeated bytes transaction_inputs = 2; } // Represents a transaction ID. message TransactionId { - // The transaction ID. - primitives.Digest id = 1; + // The transaction ID. + primitives.Digest id = 1; } // Represents a transaction summary. message TransactionSummary { - // A unique 32-byte identifier of a transaction. - TransactionId transaction_id = 1; + // A unique 32-byte identifier of a transaction. + TransactionId transaction_id = 1; - // The block number in which the transaction was executed. 
- fixed32 block_num = 2; + // The block number in which the transaction was executed. + fixed32 block_num = 2; - // The ID of the account affected by the transaction. - account.AccountId account_id = 3; + // The ID of the account affected by the transaction. + account.AccountId account_id = 3; } // Represents a commitment to an input note of a transaction. @@ -52,33 +52,33 @@ message TransactionSummary { // For authenticated notes, only the nullifier is present. // For unauthenticated notes, the note header is also included. message InputNoteCommitment { - // The nullifier of the input note. - primitives.Digest nullifier = 1; + // The nullifier of the input note. + primitives.Digest nullifier = 1; - // The note header, present only for unauthenticated input notes. - optional note.NoteHeader header = 2; + // The note header, present only for unauthenticated input notes. + optional note.NoteHeader header = 2; } // Represents a transaction header. message TransactionHeader { - // The unique identifier of the transaction. - TransactionId transaction_id = 1; + // The unique identifier of the transaction. + TransactionId transaction_id = 1; - // ID of the account against which the transaction was executed. - account.AccountId account_id = 2; + // ID of the account against which the transaction was executed. + account.AccountId account_id = 2; - // State commitment of the account before the transaction was executed. - primitives.Digest initial_state_commitment = 3; + // State commitment of the account before the transaction was executed. + primitives.Digest initial_state_commitment = 3; - // State commitment of the account after the transaction was executed. - primitives.Digest final_state_commitment = 4; + // State commitment of the account after the transaction was executed. + primitives.Digest final_state_commitment = 4; - // Input notes of the transaction. - repeated InputNoteCommitment input_notes = 5; + // Input notes of the transaction. 
+ repeated InputNoteCommitment input_notes = 5; - // Output notes of the transaction. - repeated note.NoteHeader output_notes = 6; + // Output notes of the transaction. + repeated note.NoteHeader output_notes = 6; - // The fee paid by the transaction. - primitives.Asset fee = 7; + // The fee paid by the transaction. + primitives.Asset fee = 7; } From 681eb8740ecc859c0e4af70d6daec5271ee3d8db Mon Sep 17 00:00:00 2001 From: Mirko von Leipzig <48352201+Mirko-von-Leipzig@users.noreply.github.com> Date: Fri, 27 Mar 2026 14:58:19 +0200 Subject: [PATCH 9/9] Serge review --- crates/block-producer/src/mempool/graph/transaction.rs | 6 +++--- crates/block-producer/src/mempool/mod.rs | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/crates/block-producer/src/mempool/graph/transaction.rs b/crates/block-producer/src/mempool/graph/transaction.rs index e63d5e4905..0672ecc40c 100644 --- a/crates/block-producer/src/mempool/graph/transaction.rs +++ b/crates/block-producer/src/mempool/graph/transaction.rs @@ -231,10 +231,10 @@ impl TransactionGraph { continue; } - let x = self.inner.revert_node_and_descendants(transaction); + let reverted_now = self.inner.revert_node_and_descendants(revert); // Clean up book keeping and also revert transactions from the same user batch, if any. - for tx in &x { + for tx in &reverted_now { self.failures.remove(&tx.id()); // Note that this is a pretty rough shod approach. 
We just dump the entire batch of @@ -248,7 +248,7 @@ impl TransactionGraph { } } - reverted.extend(x.into_iter().map(|tx| tx.id())); + reverted.extend(reverted_now.into_iter().map(|tx| tx.id())); } reverted diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 7850a12d16..dbdc64ae4a 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -253,7 +253,7 @@ impl Mempool { ) -> Result { assert!(!txs.is_empty(), "Cannot have a batch with no transactions"); - if self.unbatched_transactions_count() + txs.len() >= self.config.tx_capacity.get() { + if self.unbatched_transactions_count() + txs.len() > self.config.tx_capacity.get() { return Err(MempoolSubmissionError::CapacityExceeded); }