diff --git a/Cargo.lock b/Cargo.lock index 38063686b..71be1e71e 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2719,7 +2719,6 @@ dependencies = [ "assert_matches", "futures", "itertools 0.14.0", - "miden-block-prover", "miden-lib", "miden-node-proto", "miden-node-proto-build", @@ -2858,12 +2857,14 @@ dependencies = [ "fs-err", "hex", "indexmap 2.12.1", + "miden-block-prover", "miden-lib", "miden-node-proto", "miden-node-proto-build", "miden-node-test-macro", "miden-node-utils", "miden-objects", + "miden-remote-prover-client", "pretty_assertions", "rand 0.9.2", "rand_chacha 0.9.0", @@ -2888,7 +2889,6 @@ dependencies = [ "fs-err", "futures", "miden-air", - "miden-block-prover", "miden-lib", "miden-node-block-producer", "miden-node-proto", diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index c099a7002..c1359473d 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -88,7 +88,6 @@ impl BlockProducerCommand { store_url, validator_url, batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_txs_per_batch: block_producer.max_txs_per_batch, @@ -128,7 +127,6 @@ mod tests { validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: 8, @@ -152,7 +150,6 @@ mod tests { validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, - block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: miden_objects::MAX_ACCOUNTS_PER_BATCH + 1, /* Use protocol diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index bf66f6c04..417601273 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -7,7 +7,7 @@ use anyhow::Context; use miden_node_block_producer::BlockProducer; use miden_node_ntx_builder::NetworkTransactionBuilder; use miden_node_rpc::Rpc; -use miden_node_store::Store; +use miden_node_store::{BlockProver, Store}; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; use miden_objects::block::BlockSigner; @@ -22,6 +22,7 @@ use super::{ENV_DATA_DIRECTORY, ENV_RPC_URL}; use crate::commands::{ BlockProducerConfig, DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, ENV_VALIDATOR_INSECURE_SECRET_KEY, @@ -70,6 +71,10 @@ pub enum BundledCommand { #[arg(long = "rpc.url", env = ENV_RPC_URL, value_name = "URL")] rpc_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option<Url>, + /// Directory in which the Store component should store the database and raw block data.
#[arg(long = "data-directory", env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -130,6 +135,7 @@ impl BundledCommand { }, BundledCommand::Start { rpc_url, + block_prover_url, data_directory, block_producer, ntx_builder, @@ -142,6 +148,7 @@ impl BundledCommand { let signer = SecretKey::read_from_bytes(hex::decode(secret_key_hex)?.as_ref())?; Self::start( rpc_url, + block_prover_url, data_directory, ntx_builder, block_producer, @@ -156,6 +163,7 @@ impl BundledCommand { #[allow(clippy::too_many_lines)] async fn start( rpc_url: Url, + block_prover_url: Option, data_directory: PathBuf, ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, @@ -172,6 +180,13 @@ impl BundledCommand { .await .context("Failed to bind to RPC gRPC endpoint")?; + // Initialize local or remote block prover. + let block_prover = if let Some(url) = block_prover_url { + Arc::new(BlockProver::new_remote(url)) + } else { + Arc::new(BlockProver::new_local(None)) + }; + let block_producer_address = TcpListener::bind("127.0.0.1:0") .await .context("Failed to bind to block-producer gRPC endpoint")? @@ -214,6 +229,7 @@ impl BundledCommand { block_producer_listener: store_block_producer_listener, ntx_builder_listener: store_ntx_builder_listener, data_directory: data_directory_clone, + block_prover, grpc_timeout, } .serve() @@ -245,7 +261,6 @@ impl BundledCommand { store_url, validator_url, batch_prover_url: block_producer.batch_prover_url, - block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, block_interval: block_producer.block_interval, max_batches_per_block: block_producer.max_batches_per_block, diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index ecfee995f..ea2356c8b 100644 --- a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -93,11 +93,6 @@ pub struct BlockProducerConfig { #[arg(long = "batch-prover.url", env = ENV_BATCH_PROVER_URL, value_name = "URL")] pub batch_prover_url: Option, - /// The remote block prover's gRPC url. If unset, will default to running a prover - /// in-process which is expensive. - #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] - pub block_prover_url: Option, - /// The number of transactions per batch. #[arg( long = "max-txs-per-batch", diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 4ba41e9eb..a5b29e04b 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -1,9 +1,10 @@ use std::path::{Path, PathBuf}; +use std::sync::Arc; use std::time::Duration; use anyhow::Context; -use miden_node_store::Store; use miden_node_store::genesis::config::{AccountFileWithName, GenesisConfig}; +use miden_node_store::{BlockProver, Store}; use miden_node_utils::grpc::UrlExt; use miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey; use miden_objects::utils::Deserializable; @@ -17,6 +18,7 @@ use super::{ }; use crate::commands::{ DEFAULT_TIMEOUT, + ENV_BLOCK_PROVER_URL, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, ENV_VALIDATOR_INSECURE_SECRET_KEY, @@ -72,6 +74,10 @@ pub enum StoreCommand { #[arg(long = "block-producer.url", env = ENV_STORE_BLOCK_PRODUCER_URL, value_name = "URL")] block_producer_url: Url, + /// The remote block prover's gRPC url. If not provided, a local block prover will be used. + #[arg(long = "block-prover.url", env = ENV_BLOCK_PROVER_URL, value_name = "URL")] + block_prover_url: Option, + /// Directory in which to store the database and raw block data. 
#[arg(long, env = ENV_DATA_DIRECTORY, value_name = "DIR")] data_directory: PathBuf, @@ -115,6 +121,7 @@ impl StoreCommand { rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, enable_otel: _, grpc_timeout, @@ -123,6 +130,7 @@ rpc_url, ntx_builder_url, block_producer_url, + block_prover_url, data_directory, grpc_timeout, ) @@ -143,6 +151,7 @@ rpc_url: Url, ntx_builder_url: Url, block_producer_url: Url, + block_prover_url: Option<Url>, data_directory: PathBuf, grpc_timeout: Duration, ) -> anyhow::Result<()> { @@ -167,8 +176,17 @@ .await .context("Failed to bind to store's block-producer gRPC URL")?; + let block_prover = { + if let Some(url) = block_prover_url { + Arc::new(BlockProver::new_remote(url)) + } else { + Arc::new(BlockProver::new_local(None)) + } + }; + Store { rpc_listener, + block_prover, ntx_builder_listener, block_producer_listener, data_directory, diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index fa0bbec82..d78e3df8a 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -21,7 +21,6 @@ clap = { features = ["derive"], version = "4.5" } fs-err = { workspace = true } futures = { workspace = true } miden-air = { features = ["testing"], workspace = true } -miden-block-prover = { features = ["testing"], workspace = true } miden-lib = { workspace = true } miden-node-block-producer = { workspace = true } miden-node-proto = { workspace = true } diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index 924710a09..9b4490765 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -5,7 +5,6 @@ use std::time::{Duration, Instant}; use metrics::SeedingMetrics; use miden_air::ExecutionProof; -use miden_block_prover::LocalBlockProver; use miden_lib::account::auth::AuthRpoFalcon512; use miden_lib::account::faucets::BasicFungibleFaucet; use miden_lib::account::wallets::BasicWallet; @@ -15,7 +14,7 @@ use miden_lib::utils::Serializable; use miden_node_block_producer::store::StoreClient; use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::generated::store::rpc_client::RpcClient; -use miden_node_store::{DataDirectory, GenesisState, Store}; +use miden_node_store::{BlockProver, DataDirectory, GenesisState, Store}; use miden_node_utils::tracing::grpc::OtelInterceptor; use miden_objects::account::delta::AccountUpdateDetails; use miden_objects::account::{ @@ -161,7 +160,7 @@ async fn generate_blocks( SecretKey::with_rng(&mut *rng) }; - let mut prev_block = genesis_block.clone(); + let mut prev_block_header = genesis_block.header().clone(); let mut current_anchor_header = genesis_block.header().clone(); for i in 0..total_blocks { @@ -193,7 +192,7 @@ note_nullifiers.extend(notes.iter().map(|n| n.nullifier().prefix())); // create the tx that creates the notes - let emit_note_tx = create_emit_note_tx(prev_block.header(), &mut faucet, notes.clone()); + let emit_note_tx = create_emit_note_tx(&prev_block_header, &mut faucet, notes.clone()); // collect all the txs block_txs.push(emit_note_tx); @@ -202,27 +201,23 @@ // create the batches with [TRANSACTIONS_PER_BATCH] txs each let batches: Vec<ProvenBatch> = block_txs .par_chunks(TRANSACTIONS_PER_BATCH) - .map(|txs| create_batch(txs, prev_block.header())) + .map(|txs| create_batch(txs, &prev_block_header)) .collect(); // create the block and send it to the store let block_inputs = get_block_inputs(store_client,
&batches, &mut metrics).await; // update blocks - prev_block = apply_block(batches, block_inputs, store_client, &mut metrics).await; - if current_anchor_header.block_epoch() != prev_block.header().block_epoch() { - current_anchor_header = prev_block.header().clone(); + prev_block_header = apply_block(batches, block_inputs, store_client, &mut metrics).await; + if current_anchor_header.block_epoch() != prev_block_header.block_epoch() { + current_anchor_header = prev_block_header.clone(); } // create the consume notes txs to be used in the next block let batch_inputs = - get_batch_inputs(store_client, prev_block.header(), ¬es, &mut metrics).await; - consume_notes_txs = create_consume_note_txs( - prev_block.header(), - accounts, - notes, - &batch_inputs.note_proofs, - ); + get_batch_inputs(store_client, &prev_block_header, ¬es, &mut metrics).await; + consume_notes_txs = + create_consume_note_txs(&prev_block_header, accounts, notes, &batch_inputs.note_proofs); // track store size every 50 blocks if i % 50 == 0 { @@ -239,30 +234,30 @@ async fn generate_blocks( metrics } -/// Given a list of batches and block inputs, creates a `ProvenBlock` and sends it to the store. -/// Tracks the insertion time on the metrics. +/// Sends block data to the store for committal. /// -/// Returns the the inserted block. +/// Returns the the applied block header. async fn apply_block( batches: Vec, block_inputs: BlockInputs, store_client: &StoreClient, metrics: &mut SeedingMetrics, -) -> ProvenBlock { +) -> BlockHeader { let proposed_block = ProposedBlock::new(block_inputs.clone(), batches).unwrap(); let (header, body) = build_block(proposed_block.clone()).unwrap(); - let block_proof = LocalBlockProver::new(0) - .prove_dummy(proposed_block.batches().clone(), header.clone(), block_inputs) - .unwrap(); let signature = EcdsaSecretKey::new().sign(header.commitment()); - let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - let block_size: usize = proven_block.to_bytes().len(); + let block_size: usize = header.to_bytes().len() + body.to_bytes().len(); + let ordered_batches = proposed_block.batches().clone(); let start = Instant::now(); - store_client.apply_block(&proven_block).await.unwrap(); + + store_client + .apply_block(ordered_batches, block_inputs, header.clone(), body, signature) + .await + .unwrap(); metrics.track_block_insertion(start.elapsed(), block_size); - proven_block + header } // HELPER FUNCTIONS @@ -524,6 +519,15 @@ async fn get_block_inputs( /// - the URL of the store pub async fn start_store( data_directory: PathBuf, +) -> (RpcClient>, Url) { + start_store_with_prover(data_directory, None).await +} + +/// Starts the store with an optional remote block prover URL. +/// If `block_prover_url` is None, the store will use a local block prover. 
+pub async fn start_store_with_prover( + data_directory: PathBuf, + block_prover_url: Option<Url>, ) -> (RpcClient<InterceptedService<Channel, OtelInterceptor>>, Url) { let rpc_listener = TcpListener::bind("127.0.0.1:0") .await @@ -541,10 +545,19 @@ let dir = data_directory.clone(); task::spawn(async move { + let block_prover = { + if let Some(url) = block_prover_url { + Arc::new(BlockProver::new_remote(url)) + } else { + Arc::new(BlockProver::new_local(None)) + } + }; + Store { rpc_listener, ntx_builder_listener, block_producer_listener, + block_prover, data_directory: dir, grpc_timeout: Duration::from_secs(30), } diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index 3b0c37300..26d2fdbed 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -22,7 +22,6 @@ anyhow = { workspace = true } futures = { workspace = true } itertools = { workspace = true } -miden-block-prover = { workspace = true } miden-lib = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index 8d5c9b806..4d862ebff 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -1,30 +1,24 @@ -use std::ops::{Deref, Range}; +use std::ops::Deref; use std::sync::Arc; use futures::FutureExt; use futures::never::Never; -use miden_block_prover::LocalBlockProver; use miden_lib::block::build_block; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::MIN_PROOF_SECURITY_LEVEL; use miden_objects::batch::{OrderedBatches, ProvenBatch}; use miden_objects::block::{ BlockBody, BlockHeader, BlockInputs, BlockNumber, - BlockProof, ProposedBlock, ProvenBlock, }; use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_objects::note::NoteHeader; -use miden_objects::transaction::{OrderedTransactionHeaders, TransactionHeader}; -use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; -use rand::Rng; +use miden_objects::transaction::TransactionHeader; use tokio::time::Duration; -use tracing::{Span, info, instrument}; -use url::Url; +use tracing::{Span, instrument}; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; @@ -37,8 +31,6 @@ pub struct BlockBuilder { pub block_interval: Duration, - /// Used to simulate block proving by sleeping for a random duration selected from this range. - pub simulated_proof_time: Range<Duration>, /// Simulated block failure rate as a percentage. /// @@ -48,9 +40,6 @@ pub struct BlockBuilder { pub store: StoreClient, pub validator: BlockProducerValidatorClient, - - /// The prover used to prove a proposed block into a proven block. - pub block_prover: BlockProver, } impl BlockBuilder { @@ -60,20 +49,12 @@ pub fn new( store: StoreClient, validator: BlockProducerValidatorClient, - block_prover_url: Option<Url>, block_interval: Duration, ) -> Self { - let block_prover = match block_prover_url { - Some(url) => BlockProver::new_remote(url), - None => BlockProver::new_local(MIN_PROOF_SECURITY_LEVEL), - }; - Self { block_interval, // Note: The range cannot be empty.
- simulated_proof_time: Duration::ZERO..Duration::from_millis(1), failure_rate: 0.0, - block_prover, store, validator, } } @@ -131,12 +112,8 @@ impl BlockBuilder { ProposedBlock::inject_telemetry(proposed_block); }) .and_then(|(proposed_block, inputs)| self.validate_block(proposed_block, inputs)) - .and_then(|(proposed_block, inputs, header, signature, body)| self.prove_block(proposed_block, inputs, header, signature, body)) - .inspect_ok(ProvenBlock::inject_telemetry) - // Failure must be injected before the final pipeline stage i.e. before commit is called. The system cannot - // handle errors after it considers the process complete (which makes sense). - .and_then(|proven_block| async { self.inject_failure(proven_block) }) - .and_then(|proven_block| self.commit_block(mempool, proven_block)) + // TODO(sergerad): Add SignedBlock to miden-base and update validate_block to return it. + .and_then(|(ordered_batches, block_inputs, header, body, signature)| self.commit_block(mempool, ordered_batches, block_inputs, header, body, signature)) // Handle errors by propagating the error to the root span and rolling back the block. .inspect_err(|err| Span::current().set_error(err)) .or_else(|_err| self.rollback_block(mempool, block_num).never_error()) @@ -230,7 +207,7 @@ &self, proposed_block: ProposedBlock, block_inputs: BlockInputs, - ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, Signature, BlockBody), BuildBlockError> + ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, BlockBody, Signature), BuildBlockError> { // Concurrently build the block and validate it via the validator. let build_result = tokio::task::spawn_blocking({ @@ -254,53 +231,25 @@ } let (ordered_batches, ..) = proposed_block.into_parts(); - Ok((ordered_batches, block_inputs, header, signature, body)) + Ok((ordered_batches, block_inputs, header, body, signature)) } - #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] - async fn prove_block( + #[instrument(target = COMPONENT, name = "block_builder.commit_block", skip_all, err)] + async fn commit_block( &self, + mempool: &SharedMempool, ordered_batches: OrderedBatches, block_inputs: BlockInputs, header: BlockHeader, - signature: Signature, body: BlockBody, - ) -> Result<ProvenBlock, BuildBlockError> { - // Prove block using header and body from validator. - let block_proof = self - .block_prover - .prove(ordered_batches.clone(), header.clone(), block_inputs) - .await?; - self.simulate_proving().await; - - // SAFETY: The header and body are assumed valid and consistent with the proof. - let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); - if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { - return Err(BuildBlockError::SecurityLevelTooLow( - proven_block.proof_security_level(), - MIN_PROOF_SECURITY_LEVEL, - )); - } - // TODO(sergerad): Consider removing this validation. Once block proving is implemented, - // this would be replaced with verifying the proof returned from the prover against - // the block header.
- validate_tx_headers(&proven_block, &ordered_batches.to_transactions())?; - - Ok(proven_block) - } - - #[instrument(target = COMPONENT, name = "block_builder.commit_block", skip_all, err)] - async fn commit_block( - &self, - mempool: &SharedMempool, - built_block: ProvenBlock, + signature: Signature, ) -> Result<(), BuildBlockError> { self.store - .apply_block(&built_block) + .apply_block(ordered_batches, block_inputs, header.clone(), body, signature) .await .map_err(BuildBlockError::StoreApplyBlockFailed)?; - mempool.lock().await.commit_block(built_block.header().clone()); + mempool.lock().await.commit_block(header); Ok(()) } @@ -309,31 +258,6 @@ impl BlockBuilder { async fn rollback_block(&self, mempool: &SharedMempool, block: BlockNumber) { mempool.lock().await.rollback_block(block); } - - #[instrument(target = COMPONENT, name = "block_builder.simulate_proving", skip_all)] - async fn simulate_proving(&self) { - let proving_duration = rand::rng().random_range(self.simulated_proof_time.clone()); - - Span::current().set_attribute("range.min_s", self.simulated_proof_time.start); - Span::current().set_attribute("range.max_s", self.simulated_proof_time.end); - Span::current().set_attribute("dice_roll_s", proving_duration); - - tokio::time::sleep(proving_duration).await; - } - - #[instrument(target = COMPONENT, name = "block_builder.inject_failure", skip_all, err)] - fn inject_failure(&self, value: T) -> Result { - let roll = rand::rng().random::(); - - Span::current().set_attribute("failure_rate", self.failure_rate); - Span::current().set_attribute("dice_roll", roll); - - if roll < self.failure_rate { - Err(BuildBlockError::InjectedFailure) - } else { - Ok(value) - } - } } /// A wrapper around batches selected for inlucion in a block, primarily used to be able to inject @@ -430,76 +354,3 @@ impl TelemetryInjectorExt for ProvenBlock { span.set_attribute("block.commitments.transaction", header.tx_commitment()); } } - -// BLOCK PROVER -// ================================================================================================ - -pub enum BlockProver { - Local(LocalBlockProver), - Remote(RemoteBlockProver), -} - -impl BlockProver { - pub fn new_local(security_level: u32) -> Self { - info!(target: COMPONENT, "Using local block prover"); - Self::Local(LocalBlockProver::new(security_level)) - } - - pub fn new_remote(endpoint: impl Into) -> Self { - info!(target: COMPONENT, "Using remote block prover"); - Self::Remote(RemoteBlockProver::new(endpoint)) - } - - #[instrument(target = COMPONENT, skip_all, err)] - async fn prove( - &self, - tx_batches: OrderedBatches, - block_header: BlockHeader, - block_inputs: BlockInputs, - ) -> Result { - match self { - Self::Local(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .map_err(BuildBlockError::ProveBlockFailed), - Self::Remote(prover) => prover - .prove(tx_batches, block_header, block_inputs) - .await - .map_err(BuildBlockError::RemoteProverClientError), - } - } -} - -/// Validates that the proven block's transaction headers are consistent with the transactions -/// passed in the proposed block. -/// -/// This expects that transactions from the proposed block and proven block are in the same -/// order, as defined by [`OrderedTransactionHeaders`]. 
-fn validate_tx_headers( - proven_block: &ProvenBlock, - proposed_txs: &OrderedTransactionHeaders, -) -> Result<(), BuildBlockError> { - if proposed_txs.as_slice().len() != proven_block.body().transactions().as_slice().len() { - return Err(BuildBlockError::other(format!( - "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", - proven_block.body().transactions().as_slice().len(), - proposed_txs.as_slice().len() - ))); - } - - // Because we checked the length matches we can zip the iterators up. - // We expect the transaction headers to be in the same order. - for (proposed_header, proven_header) in proposed_txs - .as_slice() - .iter() - .zip(proven_block.body().transactions().as_slice()) - { - if proposed_header != proven_header { - return Err(BuildBlockError::other(format!( - "transaction header with id {} does not match header of the transaction in the proposed block", - proposed_header.id() - ))); - } - } - - Ok(()) -} diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index d53a5ead4..87cfdf8d0 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -1,6 +1,5 @@ use core::error::Error as CoreError; -use miden_block_prover::BlockProverError; use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_objects::account::AccountId; use miden_objects::block::BlockNumber; @@ -215,16 +214,6 @@ pub enum BuildBlockError { ValidateBlockFailed(#[source] Box), #[error("block signature is invalid")] InvalidSignature, - #[error("failed to prove block")] - ProveBlockFailed(#[source] BlockProverError), - /// We sometimes randomly inject errors into the batch building process to test our failure - /// responses. - #[error("nothing actually went wrong, failure was injected on purpose")] - InjectedFailure, - #[error("failed to prove block with remote prover")] - RemoteProverClientError(#[source] RemoteProverClientError), - #[error("block proof security level is too low: {0} < {1}")] - SecurityLevelTooLow(u32, u32), /// Custom error variant for errors not covered by the other variants. #[error("{error_msg}")] Other { diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 73b5a7b3c..2a109fc41 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -55,8 +55,6 @@ pub struct BlockProducer { pub validator_url: Url, /// The address of the batch prover component. pub batch_prover_url: Option<Url>, - /// The address of the block prover component. - pub block_prover_url: Option<Url>, /// The interval at which to produce batches. pub batch_interval: Duration, /// The interval at which to produce blocks.
@@ -125,8 +123,7 @@ impl BlockProducer { info!(target: COMPONENT, "Server initialized"); - let block_builder = - BlockBuilder::new(store.clone(), validator, self.block_prover_url, self.block_interval); + let block_builder = BlockBuilder::new(store.clone(), validator, self.block_interval); let batch_builder = BatchBuilder::new( store.clone(), SERVER_NUM_BATCH_BUILDERS, diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index 5ea3089cb..1787306e8 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -10,7 +10,9 @@ use miden_node_proto::{AccountState, generated as proto}; use miden_node_utils::formatting::format_opt; use miden_objects::Word; use miden_objects::account::AccountId; -use miden_objects::block::{BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; +use miden_objects::batch::OrderedBatches; +use miden_objects::block::{BlockBody, BlockHeader, BlockInputs, BlockNumber}; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_objects::note::Nullifier; use miden_objects::transaction::ProvenTransaction; use miden_objects::utils::Serializable; @@ -238,8 +240,21 @@ impl StoreClient { } #[instrument(target = COMPONENT, name = "store.client.apply_block", skip_all, err)] - pub async fn apply_block(&self, block: &ProvenBlock) -> Result<(), StoreError> { - let request = tonic::Request::new(proto::blockchain::Block { block: block.to_bytes() }); + pub async fn apply_block( + &self, + ordered_batches: OrderedBatches, + block_inputs: BlockInputs, + header: BlockHeader, + body: BlockBody, + signature: Signature, + ) -> Result<(), StoreError> { + let request = tonic::Request::new(proto::store::ApplyBlockRequest { + ordered_batches: ordered_batches.to_bytes(), + block_inputs: block_inputs.to_bytes(), + header: header.to_bytes(), + body: body.to_bytes(), + signature: signature.to_bytes(), + }); self.client.clone().apply_block(request).await.map(|_| ()).map_err(Into::into) } diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs index 927eadb05..cd67fcf02 100644 --- a/crates/proto/src/generated/blockchain.rs +++ b/crates/proto/src/generated/blockchain.rs @@ -1,12 +1,4 @@ // This file is @generated by prost-build. -/// Represents a block. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct Block { - /// Block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::block::Block\]. - #[prost(bytes = "vec", tag = "1")] - pub block: ::prost::alloc::vec::Vec<u8>, -} /// Represents a proposed block. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProposedBlock { diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs index 33703e88a..91dc4229d 100644 --- a/crates/proto/src/generated/store.rs +++ b/crates/proto/src/generated/store.rs @@ -1,4 +1,28 @@ // This file is @generated by prost-build. +/// Applies a block to the state. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ApplyBlockRequest { + /// Ordered batches encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_objects::batch::OrderedBatches\]. + #[prost(bytes = "vec", tag = "1")] + pub ordered_batches: ::prost::alloc::vec::Vec<u8>, + /// Block inputs encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_objects::block::BlockInputs\].
+ #[prost(bytes = "vec", tag = "2")] + pub block_inputs: ::prost::alloc::vec::Vec, + /// Block header encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_objects::block::BlockHeader\]. + #[prost(bytes = "vec", tag = "3")] + pub header: ::prost::alloc::vec::Vec, + /// Block header encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_objects::block::BlockBody\]. + #[prost(bytes = "vec", tag = "4")] + pub body: ::prost::alloc::vec::Vec, + /// Signature encoded using \[winter_utils::Serializable\] implementation for + /// \[crypto::dsa::ecdsa_k256_keccak::Signature\]. + #[prost(bytes = "vec", tag = "5")] + pub signature: ::prost::alloc::vec::Vec, +} /// Returns data required to prove the next block. #[derive(Clone, PartialEq, ::prost::Message)] pub struct BlockInputsRequest { @@ -1680,7 +1704,7 @@ pub mod block_producer_client { /// Applies changes of a new block to the DB and in-memory data structures. pub async fn apply_block( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result, tonic::Status> { self.inner .ready() @@ -1816,7 +1840,7 @@ pub mod block_producer_server { /// Applies changes of a new block to the DB and in-memory data structures. async fn apply_block( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result, tonic::Status>; /// Retrieves block header by given block number. Optionally, it also returns the MMR path /// and current chain length to authenticate the block's inclusion. @@ -1928,7 +1952,7 @@ pub mod block_producer_server { struct ApplyBlockSvc(pub Arc); impl< T: BlockProducer, - > tonic::server::UnaryService + > tonic::server::UnaryService for ApplyBlockSvc { type Response = (); type Future = BoxFuture< @@ -1937,7 +1961,7 @@ pub mod block_producer_server { >; fn call( &mut self, - request: tonic::Request, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 05e9ee263..36b4b3efc 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -1,4 +1,5 @@ use std::net::SocketAddr; +use std::sync::Arc; use std::time::Duration; use http::header::{ACCEPT, CONTENT_TYPE}; @@ -7,8 +8,8 @@ use miden_lib::account::wallets::BasicWallet; use miden_node_proto::clients::{Builder, RpcClient}; use miden_node_proto::generated::rpc::api_client::ApiClient as ProtoClient; use miden_node_proto::generated::{self as proto}; -use miden_node_store::Store; use miden_node_store::genesis::config::GenesisConfig; +use miden_node_store::{BlockProver, Store}; use miden_node_utils::fee::test_fee; use miden_objects::Word; use miden_objects::account::delta::AccountUpdateDetails; @@ -141,11 +142,15 @@ async fn rpc_startup_is_robust_to_network_failures() { .expect("Failed to bind store ntx-builder gRPC endpoint"); let block_producer_listener = TcpListener::bind("127.0.0.1:0").await.expect("store should bind a port"); + + let block_prover = Arc::new(BlockProver::new_local(None)); + task::spawn(async move { Store { rpc_listener, ntx_builder_listener, block_producer_listener, + block_prover, data_directory: data_directory.path().to_path_buf(), grpc_timeout: Duration::from_secs(10), } @@ -443,11 +448,13 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { // kill the runtime. 
let store_runtime = runtime::Builder::new_multi_thread().enable_time().enable_io().build().unwrap(); + let block_prover = Arc::new(BlockProver::new_local(None)); store_runtime.spawn(async move { Store { rpc_listener, ntx_builder_listener, block_producer_listener, + block_prover, data_directory: dir, grpc_timeout: Duration::from_secs(30), } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index 41d82fd96..47c9381eb 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -29,19 +29,21 @@ miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { workspace = true } # TODO remove `testing` from `miden-objects`, required for `BlockProof::new_dummy` -miden-objects = { features = ["std", "testing"], workspace = true } -pretty_assertions = { workspace = true } -rand = { workspace = true } -rand_chacha = { workspace = true } -serde = { features = ["derive"], version = "1" } -thiserror = { workspace = true } -tokio = { features = ["fs", "rt-multi-thread"], workspace = true } -tokio-stream = { features = ["net"], workspace = true } -toml = { version = "0.9" } -tonic = { default-features = true, workspace = true } -tonic-reflection = { workspace = true } -tower-http = { features = ["util"], workspace = true } -tracing = { workspace = true } +miden-block-prover = { workspace = true } +miden-objects = { features = ["std", "testing"], workspace = true } +miden-remote-prover-client = { features = ["block-prover"], workspace = true } +pretty_assertions = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +serde = { features = ["derive"], version = "1" } +thiserror = { workspace = true } +tokio = { features = ["fs", "rt-multi-thread"], workspace = true } +tokio-stream = { features = ["net"], workspace = true } +toml = { version = "0.9" } +tonic = { default-features = true, workspace = true } +tonic-reflection = { workspace = true } +tower-http = { features = ["util"], workspace = true } +tracing = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 7b48684ed..6227e86c0 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -10,7 +10,8 @@ use miden_node_proto::generated as proto; use miden_objects::Word; use miden_objects::account::AccountId; use miden_objects::asset::{Asset, AssetVaultKey}; -use miden_objects::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; +use miden_objects::block::{BlockBody, BlockHeader, BlockNoteIndex, BlockNumber}; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_objects::crypto::merkle::SparseMerklePath; use miden_objects::note::{ NoteDetails, @@ -248,6 +249,7 @@ impl Db { models::queries::apply_block( conn, genesis.header(), + genesis.signature().clone(), &[], &[], genesis.body().updated_accounts(), @@ -509,7 +511,9 @@ &self, allow_acquire: oneshot::Sender<()>, acquire_done: oneshot::Receiver<()>, - block: ProvenBlock, + block_header: BlockHeader, + block_body: BlockBody, + signature: Signature, notes: Vec<(NoteRecord, Option<Nullifier>)>, ) -> Result<()> { self.transact("apply block", move |conn| -> Result<()> { @@ -518,11 +522,12 @@ models::queries::apply_block( conn, - block.header(), + &block_header, + signature, &notes, - block.body().created_nullifiers(), - block.body().updated_accounts(), - block.body().transactions(), + block_body.created_nullifiers(), +
block_body.updated_accounts(), + block_body.transactions(), )?; // XXX FIXME TODO free floating mutex MUST NOT exist diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs index 42ec3b0e5..3388b8328 100644 --- a/crates/store/src/db/models/queries/block_headers.rs +++ b/crates/store/src/db/models/queries/block_headers.rs @@ -14,6 +14,7 @@ use diesel::{ ... }; use miden_lib::utils::{Deserializable, Serializable}; use miden_node_utils::limiter::{QueryParamBlockLimit, QueryParamLimiter}; use miden_objects::block::{BlockHeader, BlockNumber}; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; use super::DatabaseError; use crate::db::models::conv::SqlTypeConvert; @@ -130,6 +131,7 @@ pub struct BlockHeaderRawRow { #[allow(dead_code)] pub block_num: i64, pub block_header: Vec<u8>, + pub signature: Vec<u8>, } impl TryInto<BlockHeader> for BlockHeaderRawRow { type Error = DatabaseError; @@ -145,14 +147,7 @@ pub struct BlockHeaderInsert { pub block_num: i64, pub block_header: Vec<u8>, -} -impl From<&BlockHeader> for BlockHeaderInsert { - fn from(block_header: &BlockHeader) -> Self { - Self { - block_num: block_header.block_num().to_raw_sql(), - block_header: block_header.to_bytes(), - } - } + pub signature: Vec<u8>, } /// Insert a [`BlockHeader`] to the DB using the given [`SqliteConnection`]. @@ -168,10 +163,15 @@ pub(crate) fn insert_block_header( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: &Signature, ) -> Result { - let block_header = BlockHeaderInsert::from(block_header); + let block_header_insert = BlockHeaderInsert { + block_num: block_header.block_num().to_raw_sql(), + block_header: block_header.to_bytes(), + signature: signature.to_bytes(), + }; let count = diesel::insert_into(schema::block_headers::table) - .values(&[block_header]) + .values(&[block_header_insert]) .execute(conn)?; Ok(count) } diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs index 0d40dd8c4..4234f240b 100644 --- a/crates/store/src/db/models/queries/mod.rs +++ b/crates/store/src/db/models/queries/mod.rs @@ -33,6 +33,7 @@ use diesel::SqliteConnection; use miden_objects::account::AccountId; use miden_objects::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_objects::note::Nullifier; use miden_objects::transaction::OrderedTransactionHeaders; @@ -59,6 +60,7 @@ pub(crate) use notes::*; pub(crate) fn apply_block( conn: &mut SqliteConnection, block_header: &BlockHeader, + signature: Signature, notes: &[(NoteRecord, Option<Nullifier>)], nullifiers: &[Nullifier], accounts: &[BlockAccountUpdate], ) -> Result { let mut count = 0; // Note: ordering here is important as the relevant tables have FK dependencies. - count += insert_block_header(conn, block_header)?; + count += insert_block_header(conn, block_header, &signature)?; count += upsert_accounts(conn, accounts, block_header.block_num())?; count += insert_scripts(conn, notes.iter().map(|(note, _)| note))?; count += insert_notes(conn, notes)?; diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs index 6f36594b9..91aa54175 100644 --- a/crates/store/src/db/schema.rs +++ b/crates/store/src/db/schema.rs @@ -1,5 +1,12 @@ // @generated automatically by Diesel CLI. +diesel::table!
{ + account_codes (code_commitment) { + code_commitment -> Binary, + code -> Binary, + } +} + diesel::table! { account_storage_map_values (account_id, block_num, slot_name, key) { account_id -> Binary, @@ -25,27 +32,21 @@ diesel::table! { accounts (account_id, block_num) { account_id -> Binary, network_account_id_prefix -> Nullable, + block_num -> BigInt, account_commitment -> Binary, code_commitment -> Nullable, storage -> Nullable, vault -> Nullable, nonce -> Nullable, - block_num -> BigInt, is_latest -> Bool, } } -diesel::table! { - account_codes (code_commitment) { - code_commitment -> Binary, - code -> Binary, - } -} - diesel::table! { block_headers (block_num) { block_num -> BigInt, block_header -> Binary, + signature -> Binary, } } @@ -100,21 +101,11 @@ diesel::table! { } } -diesel::joinable!(accounts -> account_codes (code_commitment)); -diesel::joinable!(accounts -> block_headers (block_num)); -// Note: Cannot use diesel::joinable! with accounts table due to composite primary key -// diesel::joinable!(notes -> accounts (sender)); -// diesel::joinable!(transactions -> accounts (account_id)); -diesel::joinable!(notes -> block_headers (committed_at)); -diesel::joinable!(notes -> note_scripts (script_root)); -diesel::joinable!(nullifiers -> block_headers (block_num)); -diesel::joinable!(transactions -> block_headers (block_num)); - diesel::allow_tables_to_appear_in_same_query!( account_codes, account_storage_map_values, - accounts, account_vault_assets, + accounts, block_headers, note_scripts, notes, diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index e203e217c..87e0b5758 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -96,7 +96,9 @@ fn create_block(conn: &mut SqliteConnection, block_num: BlockNumber) { 11_u8.into(), ); - conn.transaction(|conn| queries::insert_block_header(conn, &block_header)) + let dummy_signature = miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey::new() + .sign(block_header.commitment()); + conn.transaction(|conn| queries::insert_block_header(conn, &block_header, &dummy_signature)) .unwrap(); } @@ -863,7 +865,9 @@ fn db_block_header() { ); // test insertion - queries::insert_block_header(conn, &block_header).unwrap(); + let dummy_signature = miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey::new() + .sign(block_header.commitment()); + queries::insert_block_header(conn, &block_header, &dummy_signature).unwrap(); // test fetch unknown block header let block_number = 1; @@ -894,7 +898,9 @@ fn db_block_header() { 21_u8.into(), ); - queries::insert_block_header(conn, &block_header2).unwrap(); + let dummy_signature = miden_objects::crypto::dsa::ecdsa_k256_keccak::SecretKey::new() + .sign(block_header2.commitment()); + queries::insert_block_header(conn, &block_header2, &dummy_signature).unwrap(); let res = queries::select_block_header_by_block_num(conn, None).unwrap(); assert_eq!(res.unwrap(), block_header2); diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs index ce4956470..50cb264b2 100644 --- a/crates/store/src/lib.rs +++ b/crates/store/src/lib.rs @@ -10,7 +10,7 @@ pub mod state; pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree}; pub use genesis::GenesisState; -pub use server::{DataDirectory, Store}; +pub use server::{BlockProver, DataDirectory, Store}; // CONSTANTS // ================================================================================================= diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs index 
b266feb59..5f1f42329 100644 --- a/crates/store/src/server/api.rs +++ b/crates/store/src/server/api.rs @@ -1,24 +1,43 @@ use std::collections::BTreeSet; use std::sync::Arc; +use std::time::Duration; +use miden_block_prover::BlockProverError; use miden_node_proto::errors::ConversionError; use miden_node_proto::generated as proto; use miden_node_utils::ErrorReport; +use miden_node_utils::tracing::OpenTelemetrySpanExt; use miden_objects::Word; use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; +use miden_objects::batch::OrderedBatches; +use miden_objects::block::{BlockBody, BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_objects::note::Nullifier; +use miden_remote_prover_client::RemoteProverClientError; +use rand::Rng; use tonic::{Request, Response, Status}; -use tracing::{info, instrument}; +use tracing::{Span, info, instrument}; use crate::COMPONENT; +pub use crate::server::block_prover::BlockProver; use crate::state::State; +// TODO(currentpr): move error + +#[derive(Debug, thiserror::Error)] +pub enum StoreProverError { + #[error("local proving failed")] + LocalProvingFailed(#[from] BlockProverError), + #[error("remote proving failed")] + RemoteProvingFailed(#[from] RemoteProverClientError), +} + // STORE API // ================================================================================================ pub struct StoreApi { pub(super) state: Arc<State>, + pub(super) block_prover: Arc<BlockProver>, } impl StoreApi { @@ -42,6 +61,42 @@ impl StoreApi { mmr_path: mmr_proof.map(|p| Into::into(&p.merkle_path)), })) } + + #[instrument(target = COMPONENT, name = "store.prove_block", skip_all, err)] + pub async fn prove_block( + &self, + ordered_batches: OrderedBatches, + block_inputs: BlockInputs, + header: BlockHeader, + signature: Signature, + body: BlockBody, + ) -> Result<ProvenBlock, StoreProverError> { + // Prove block. + let block_proof = self + .block_prover + .prove(ordered_batches.clone(), header.clone(), block_inputs) + .await?; + + // TODO: remove simulation when block proving is implemented. + self.simulate_proving().await; + + // SAFETY: The header and body are assumed valid and consistent with the proof.
+ let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); + + Ok(proven_block) + } + + #[instrument(target = COMPONENT, name = "store.simulate_proving", skip_all)] + async fn simulate_proving(&self) { + let simulated_proof_time = Duration::ZERO..Duration::from_millis(1); + let proving_duration = rand::rng().random_range(simulated_proof_time.clone()); + + Span::current().set_attribute("range.min_s", simulated_proof_time.start); + Span::current().set_attribute("range.max_s", simulated_proof_time.end); + Span::current().set_attribute("dice_roll_s", proving_duration); + + tokio::time::sleep(proving_duration).await; + } } // UTILITIES diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs index 91b595aad..17f8104e6 100644 --- a/crates/store/src/server/block_producer.rs +++ b/crates/store/src/server/block_producer.rs @@ -5,7 +5,9 @@ use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; use miden_node_utils::ErrorReport; use miden_objects::Word; -use miden_objects::block::{BlockNumber, ProvenBlock}; +use miden_objects::batch::OrderedBatches; +use miden_objects::block::{BlockBody, BlockHeader, BlockInputs, BlockNumber}; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_objects::utils::Deserializable; use tonic::{Request, Response, Status}; use tracing::{debug, info, instrument}; @@ -56,28 +58,48 @@ impl block_producer_server::BlockProducer for StoreApi { )] async fn apply_block( &self, - request: Request<proto::blockchain::Block>, + request: Request<proto::store::ApplyBlockRequest>, ) -> Result<Response<()>, Status> { + // Read the request. let request = request.into_inner(); - debug!(target: COMPONENT, ?request); - - let block = ProvenBlock::read_from_bytes(&request.block).map_err(|err| { - Status::invalid_argument(err.as_report_context("block deserialization error")) + let ordered_batches = + OrderedBatches::read_from_bytes(&request.ordered_batches).map_err(|err| { + Status::invalid_argument( + err.as_report_context("failed to deserialize ordered batches"), + ) + })?; + let block_inputs = BlockInputs::read_from_bytes(&request.block_inputs).map_err(|err| { + Status::invalid_argument(err.as_report_context("failed to deserialize block inputs")) + })?; + let header = BlockHeader::read_from_bytes(&request.header).map_err(|err| { + Status::invalid_argument(err.as_report_context("failed to deserialize block header")) + })?; + let body = BlockBody::read_from_bytes(&request.body).map_err(|err| { + Status::invalid_argument(err.as_report_context("failed to deserialize block body")) + })?; + let signature = Signature::read_from_bytes(&request.signature).map_err(|err| { + Status::invalid_argument(err.as_report_context("failed to deserialize signature")) + })?; - let block_num = block.header().block_num().as_u32(); - + let block_num = header.block_num().as_u32(); info!( target: COMPONENT, block_num, - block_commitment = %block.header().commitment(), - account_count = block.body().updated_accounts().len(), - note_count = block.body().output_notes().count(), - nullifier_count = block.body().created_nullifiers().len(), + block_commitment = %header.commitment(), + account_count = body.updated_accounts().len(), + note_count = body.output_notes().count(), + nullifier_count = body.created_nullifiers().len(), ); - self.state.apply_block(block).await?; + // Apply the block to the state. + self.state.apply_block(header.clone(), body.clone(), signature.clone()).await?; + + // TODO(sergerad): Make block proving async/deferred, i.e.
return from this fn before block + // is proven. Prove the block. + self.prove_block(ordered_batches, block_inputs, header, signature, body) + .await + .map_err(|err| Status::internal(err.as_report_context("failed to prove block")))?; Ok(Response::new(())) } diff --git a/crates/store/src/server/block_prover.rs b/crates/store/src/server/block_prover.rs new file mode 100644 index 000000000..f6052c7a5 --- /dev/null +++ b/crates/store/src/server/block_prover.rs @@ -0,0 +1,49 @@ +use miden_block_prover::LocalBlockProver; +use miden_objects::MIN_PROOF_SECURITY_LEVEL; +use miden_objects::batch::OrderedBatches; +use miden_objects::block::{BlockHeader, BlockInputs, BlockProof}; +use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; +use tracing::{info, instrument}; + +use crate::COMPONENT; +use crate::server::api::StoreProverError; + +// BLOCK PROVER +// ================================================================================================ + +/// Block prover which allows for proving via either a local or a remote backend. +/// +/// The local proving variant is intended for development and testing purposes. +/// The remote proving variant is intended for production use. +pub enum BlockProver { + Local(LocalBlockProver), + Remote(RemoteBlockProver), +} + +impl BlockProver { + pub fn new_local(security_level: Option<u32>) -> Self { + info!(target: COMPONENT, "Using local block prover"); + let security_level = security_level.unwrap_or(MIN_PROOF_SECURITY_LEVEL); + Self::Local(LocalBlockProver::new(security_level)) + } + + pub fn new_remote(endpoint: impl Into<String>) -> Self { + info!(target: COMPONENT, "Using remote block prover"); + Self::Remote(RemoteBlockProver::new(endpoint)) + } + + #[instrument(target = COMPONENT, skip_all, err)] + pub async fn prove( + &self, + tx_batches: OrderedBatches, + block_header: BlockHeader, + block_inputs: BlockInputs, + ) -> Result<BlockProof, StoreProverError> { + match self { + Self::Local(prover) => Ok(prover.prove(tx_batches, block_header, block_inputs)?), + Self::Remote(prover) => { + Ok(prover.prove(tx_batches, block_header, block_inputs).await?) + }, + } + } +} diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index 036727a88..408459fb7 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -21,12 +21,14 @@ use tracing::{info, instrument}; use crate::blocks::BlockStore; use crate::db::Db; +pub use crate::server::api::BlockProver; use crate::server::db_maintenance::DbMaintenance; use crate::state::State; use crate::{COMPONENT, DATABASE_MAINTENANCE_INTERVAL, GenesisState}; mod api; mod block_producer; +mod block_prover; mod db_maintenance; mod ntx_builder; mod rpc_api; @@ -36,6 +38,7 @@ pub struct Store { pub rpc_listener: TcpListener, pub ntx_builder_listener: TcpListener, pub block_producer_listener: TcpListener, + pub block_prover: Arc<BlockProver>, pub data_directory: PathBuf, /// Server-side timeout for an individual gRPC request.
/// @@ -98,14 +101,18 @@ impl Store { let db_maintenance_service = DbMaintenance::new(Arc::clone(&state), DATABASE_MAINTENANCE_INTERVAL); - let rpc_service = - store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); + let rpc_service = store::rpc_server::RpcServer::new(api::StoreApi { + state: Arc::clone(&state), + block_prover: self.block_prover.clone(), + }); let ntx_builder_service = store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: self.block_prover.clone(), }); let block_producer_service = store::block_producer_server::BlockProducerServer::new(api::StoreApi { state: Arc::clone(&state), + block_prover: self.block_prover.clone(), }); let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(store_rpc_api_descriptor()) diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index c9225d147..dbf658c04 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -26,7 +26,8 @@ use miden_node_utils::formatting::format_array; use miden_objects::account::{AccountHeader, AccountId, StorageSlot, StorageSlotContent}; use miden_objects::block::account_tree::{AccountTree, AccountWitness, account_id_to_smt_key}; use miden_objects::block::nullifier_tree::{NullifierTree, NullifierWitness}; -use miden_objects::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; +use miden_objects::block::{BlockBody, BlockHeader, BlockInputs, BlockNumber, Blockchain}; +use miden_objects::crypto::dsa::ecdsa_k256_keccak::Signature; use miden_objects::crypto::merkle::{ Forest, LargeSmt, @@ -188,12 +189,15 @@ impl State { // TODO: This span is logged in a root span, we should connect it to the parent span. #[allow(clippy::too_many_lines)] #[instrument(target = COMPONENT, skip_all, err)] - pub async fn apply_block(&self, block: ProvenBlock) -> Result<(), ApplyBlockError> { + pub async fn apply_block( + &self, + header: BlockHeader, + body: BlockBody, + signature: Signature, + ) -> Result<(), ApplyBlockError> { let _lock = self.writer.try_lock().map_err(|_| ApplyBlockError::ConcurrentWrite)?; - let header = block.header(); - - let tx_commitment = block.body().transactions().commitment(); + let tx_commitment = body.transactions().commitment(); if header.tx_commitment() != tx_commitment { return Err(InvalidBlockError::InvalidBlockTxCommitment { @@ -204,7 +208,7 @@ impl State { } let block_num = header.block_num(); - let block_commitment = block.header().commitment(); + let block_commitment = header.commitment(); // ensures the right block header is being processed let prev_block = self @@ -225,7 +229,7 @@ impl State { return Err(InvalidBlockError::NewBlockInvalidPrevCommitment.into()); } - let block_data = block.to_bytes(); + let block_data = body.to_bytes(); // TODO(currentpr): is this correct? // Save the block to the block store. 
In a case of a rolled-back DB transaction, the // in-memory state will be unchanged, but the block might still be written into the @@ -249,8 +253,7 @@ impl State { let _span = info_span!(target: COMPONENT, "update_in_memory_structs").entered(); // nullifiers can be produced only once - let duplicate_nullifiers: Vec<_> = block - .body() + let duplicate_nullifiers: Vec<_> = body .created_nullifiers() .iter() .filter(|&n| inner.nullifier_tree.get_block_num(n).is_some()) @@ -272,11 +275,7 @@ impl State { let nullifier_tree_update = inner .nullifier_tree .compute_mutations( - block - .body() - .created_nullifiers() - .iter() - .map(|nullifier| (*nullifier, block_num)), + body.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), ) .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; @@ -288,9 +287,7 @@ impl State { let account_tree_update = inner .account_tree .compute_mutations( - block - .body() - .updated_accounts() + body.updated_accounts() .iter() .map(|update| (update.account_id(), update.final_state_commitment())), ) @@ -316,13 +313,12 @@ impl State { }; // build note tree - let note_tree = block.body().compute_block_note_tree(); + let note_tree = body.compute_block_note_tree(); if note_tree.root() != header.note_root() { return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } - let notes = block - .body() + let notes = body .output_notes() .map(|(note_index, note)| { let (details, nullifier) = match note { @@ -363,10 +359,10 @@ impl State { // in-memory write lock. This requires the DB update to run concurrently, so a new task is // spawned. let db = Arc::clone(&self.db); - let db_update_task = - tokio::spawn( - async move { db.apply_block(allow_acquire, acquire_done, block, notes).await }, - ); + let db_update_task = tokio::spawn(async move { + db.apply_block(allow_acquire, acquire_done, header, body, signature, notes) + .await + }); // Wait for the message from the DB update task, that we ready to commit the DB transaction acquired_allowed.await.map_err(ApplyBlockError::ClosedChannel)?; diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto index 05f515ccf..91ef42c7a 100644 --- a/proto/proto/internal/store.proto +++ b/proto/proto/internal/store.proto @@ -89,7 +89,7 @@ service Rpc { // Store API for the BlockProducer component service BlockProducer { // Applies changes of a new block to the DB and in-memory data structures. - rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} + rpc ApplyBlock(ApplyBlockRequest) returns (google.protobuf.Empty) {} // Retrieves block header by given block number. Optionally, it also returns the MMR path // and current chain length to authenticate the block's inclusion. @@ -105,6 +105,28 @@ service BlockProducer { rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} } +// APPLY BLOCK REQUEST +// ================================================================================================ + +// Applies a block to the state. +message ApplyBlockRequest { + // Ordered batches encoded using [winter_utils::Serializable] implementation for + // [miden_objects::batch::OrderedBatches]. + bytes ordered_batches = 1; + // Block inputs encoded using [winter_utils::Serializable] implementation for + // [miden_objects::block::BlockInputs]. + bytes block_inputs = 2; + // Block header encoded using [winter_utils::Serializable] implementation for + // [miden_objects::block::BlockHeader]. 
+ bytes header = 3; + // Block body encoded using [winter_utils::Serializable] implementation for + // [miden_objects::block::BlockBody]. + bytes body = 4; + // Signature encoded using [winter_utils::Serializable] implementation for + // [crypto::dsa::ecdsa_k256_keccak::Signature]. + bytes signature = 5; +} + // GET BLOCK INPUTS // ================================================================================================ diff --git a/proto/proto/types/blockchain.proto b/proto/proto/types/blockchain.proto index 619ccf1cf..f233f8640 100644 --- a/proto/proto/types/blockchain.proto +++ b/proto/proto/types/blockchain.proto @@ -7,13 +7,6 @@ import "types/primitives.proto"; // BLOCK // ================================================================================================ -// Represents a block. -message Block { - // Block data encoded using [winter_utils::Serializable] implementation for - // [miden_objects::block::Block]. - bytes block = 1; -} - // Represents a proposed block. message ProposedBlock { // Block data encoded using [winter_utils::Serializable] implementation for