diff --git a/CHANGELOG.md b/CHANGELOG.md
index befd00df0..6d297d231 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -34,6 +34,7 @@
 - Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)).
 - Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)).
 - [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)).
+- [BREAKING] Changed the account representation in the database ([#1481](https://github.com/0xMiden/miden-node/pull/1481)).
 
 ### Fixes
 
diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs
index 2ef2be02c..4330a82de 100644
--- a/crates/proto/src/domain/account.rs
+++ b/crates/proto/src/domain/account.rs
@@ -99,11 +99,35 @@ impl From<&AccountInfo> for proto::account::AccountDetails {
     fn from(AccountInfo { summary, details }: &AccountInfo) -> Self {
         Self {
             summary: Some(summary.into()),
-            details: details.as_ref().map(miden_protocol::utils::Serializable::to_bytes),
+            details: details.as_ref().map(Serializable::to_bytes),
         }
     }
 }
 
+// ACCOUNT STORAGE HEADER
+//================================================================================================
+
+impl TryFrom<proto::account::AccountStorageHeader> for AccountStorageHeader {
+    type Error = ConversionError;
+
+    fn try_from(value: proto::account::AccountStorageHeader) -> Result<Self, Self::Error> {
+        let proto::account::AccountStorageHeader { slots } = value;
+
+        let slot_headers = slots
+            .into_iter()
+            .map(|slot| {
+                let slot_name = StorageSlotName::new(slot.slot_name)?;
+                let slot_type = storage_slot_type_from_raw(slot.slot_type)?;
+                let commitment =
+                    slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?;
+                Ok(StorageSlotHeader::new(slot_name, slot_type, commitment))
+            })
+            .collect::<Result<Vec<_>, ConversionError>>()?;
+
+        Ok(AccountStorageHeader::new(slot_headers)?)
+    }
+}
+
 // ACCOUNT PROOF REQUEST
 // ================================================================================================
 
@@ -163,72 +187,6 @@ impl TryFrom for Accoun
     }
 }
 
-impl TryFrom<proto::account::AccountStorageHeader> for AccountStorageHeader {
-    type Error = ConversionError;
-
-    fn try_from(value: proto::account::AccountStorageHeader) -> Result<Self, Self::Error> {
-        let proto::account::AccountStorageHeader { slots } = value;
-
-        let slot_headers = slots
-            .into_iter()
-            .map(|slot| {
-                let slot_name = StorageSlotName::new(slot.slot_name)?;
-                let slot_type = storage_slot_type_from_raw(slot.slot_type)?;
-                let commitment =
-                    slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?;
-                Ok(StorageSlotHeader::new(slot_name, slot_type, commitment))
-            })
-            .collect::<Result<Vec<_>, ConversionError>>()?;
-
-        Ok(AccountStorageHeader::new(slot_headers)?)
-    }
-}
-
-impl TryFrom<proto::rpc::account_storage_details::AccountStorageMapDetails>
-    for AccountStorageMapDetails
-{
-    type Error = ConversionError;
-
-    fn try_from(
-        value: proto::rpc::account_storage_details::AccountStorageMapDetails,
-    ) -> Result<Self, Self::Error> {
-        let proto::rpc::account_storage_details::AccountStorageMapDetails {
-            slot_name,
-            too_many_entries,
-            entries,
-        } = value;
-
-        let slot_name = StorageSlotName::new(slot_name)?;
-
-        // Extract map_entries from the MapEntries message
-        let map_entries = if let Some(entries) = entries {
-            entries
-                .entries
-                .into_iter()
-                .map(|entry| {
-                    let key = entry
-                        .key
-                        .ok_or(proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry::missing_field(
-                            stringify!(key),
-                        ))?
- .try_into()?; - let value = entry - .value - .ok_or(proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry::missing_field( - stringify!(value), - ))? - .try_into()?; - Ok((key, value)) - }) - .collect::, ConversionError>>()? - } else { - Vec::new() - }; - - Ok(Self { slot_name, too_many_entries, map_entries }) - } -} - #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapRequest { pub slot_name: StorageSlotName, @@ -346,36 +304,48 @@ impl From for proto::account::AccountStorageHeader { } } +// ACCOUNT VAULT DETAILS +//================================================================================================ + +/// Account vault details +/// +/// When an account contains a large number of assets (> +/// [`AccountVaultDetails::MAX_RETURN_ENTRIES`]), including all assets in a single RPC response +/// creates performance issues. In such cases, the `LimitExceeded` variant indicates to the client +/// to use the `SyncAccountVault` endpoint instead. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct AccountVaultDetails { - pub too_many_assets: bool, - pub assets: Vec, +pub enum AccountVaultDetails { + /// The vault has too many assets to return inline. + /// Clients must use `SyncAccountVault` endpoint instead. + LimitExceeded, + + /// The assets in the vault (up to `MAX_RETURN_ENTRIES`). + Assets(Vec), } + impl AccountVaultDetails { - const MAX_RETURN_ENTRIES: usize = 1000; + /// Maximum number of vault entries that can be returned in a single response. + /// Accounts with more assets will have `LimitExceeded` variant. + pub const MAX_RETURN_ENTRIES: usize = 1000; pub fn new(vault: &AssetVault) -> Self { if vault.assets().nth(Self::MAX_RETURN_ENTRIES).is_some() { - Self::too_many() + Self::LimitExceeded } else { - Self { - too_many_assets: false, - assets: Vec::from_iter(vault.assets()), - } + Self::Assets(Vec::from_iter(vault.assets())) } } pub fn empty() -> Self { - Self { - too_many_assets: false, - assets: Vec::new(), - } + Self::Assets(Vec::new()) } - fn too_many() -> Self { - Self { - too_many_assets: true, - assets: Vec::new(), + /// Creates `AccountVaultDetails` from a list of assets. 
+ pub fn from_assets(assets: Vec) -> Self { + if assets.len() > Self::MAX_RETURN_ENTRIES { + Self::LimitExceeded + } else { + Self::Assets(assets) } } } @@ -386,40 +356,69 @@ impl TryFrom for AccountVaultDetails { fn try_from(value: proto::rpc::AccountVaultDetails) -> Result { let proto::rpc::AccountVaultDetails { too_many_assets, assets } = value; - let assets = - Result::, ConversionError>::from_iter(assets.into_iter().map(|asset| { - let asset = asset - .asset - .ok_or(proto::primitives::Asset::missing_field(stringify!(asset)))?; - let asset = Word::try_from(asset)?; - Asset::try_from(asset).map_err(ConversionError::AssetError) - }))?; - Ok(Self { too_many_assets, assets }) + if too_many_assets { + Ok(Self::LimitExceeded) + } else { + let parsed_assets = + Result::, ConversionError>::from_iter(assets.into_iter().map(|asset| { + let asset = asset + .asset + .ok_or(proto::primitives::Asset::missing_field(stringify!(asset)))?; + let asset = Word::try_from(asset)?; + Asset::try_from(asset).map_err(ConversionError::AssetError) + }))?; + Ok(Self::Assets(parsed_assets)) + } } } impl From for proto::rpc::AccountVaultDetails { fn from(value: AccountVaultDetails) -> Self { - let AccountVaultDetails { too_many_assets, assets } = value; - - Self { - too_many_assets, - assets: Vec::from_iter(assets.into_iter().map(|asset| proto::primitives::Asset { - asset: Some(proto::primitives::Digest::from(Word::from(asset))), - })), + match value { + AccountVaultDetails::LimitExceeded => Self { + too_many_assets: true, + assets: Vec::new(), + }, + AccountVaultDetails::Assets(assets) => Self { + too_many_assets: false, + assets: Vec::from_iter(assets.into_iter().map(|asset| proto::primitives::Asset { + asset: Some(proto::primitives::Digest::from(Word::from(asset))), + })), + }, } } } +// ACCOUNT STORAGE MAP DETAILS +//================================================================================================ + +/// Details about an account storage map slot. #[derive(Debug, Clone, PartialEq, Eq)] pub struct AccountStorageMapDetails { pub slot_name: StorageSlotName, - pub too_many_entries: bool, - pub map_entries: Vec<(Word, Word)>, + pub entries: StorageMapEntries, +} + +/// Storage map entries for an account storage slot. +/// +/// When a storage map contains many entries (> [`AccountStorageMapDetails::MAX_RETURN_ENTRIES`]), +/// returning all entries in a single RPC response creates performance issues. In such cases, +/// the `LimitExceeded` variant indicates to the client to use the `SyncStorageMaps` endpoint +/// instead. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StorageMapEntries { + /// The map has too many entries to return inline. + /// Clients must use `SyncStorageMaps` endpoint instead. + LimitExceeded, + + /// The storage map entries (key-value pairs), up to `MAX_RETURN_ENTRIES`. + /// TODO: For partial responses, also include Merkle proofs and inner SMT nodes. + Entries(Vec<(Word, Word)>), } impl AccountStorageMapDetails { - const MAX_RETURN_ENTRIES: usize = 1000; + /// Maximum number of storage map entries that can be returned in a single response. 
+    pub const MAX_RETURN_ENTRIES: usize = 1000;
 
     pub fn new(slot_name: StorageSlotName, slot_data: SlotData, storage_map: &StorageMap) -> Self {
         match slot_data {
@@ -430,13 +429,15 @@ impl AccountStorageMapDetails {
 
     fn from_all_entries(slot_name: StorageSlotName, storage_map: &StorageMap) -> Self {
         if storage_map.num_entries() > Self::MAX_RETURN_ENTRIES {
-            Self::too_many_entries(slot_name)
+            Self {
+                slot_name,
+                entries: StorageMapEntries::LimitExceeded,
+            }
         } else {
             let map_entries = Vec::from_iter(storage_map.entries().map(|(k, v)| (*k, *v)));
             Self {
                 slot_name,
-                too_many_entries: false,
-                map_entries,
+                entries: StorageMapEntries::Entries(map_entries),
             }
         }
     }
@@ -447,22 +448,54 @@ impl AccountStorageMapDetails {
         storage_map: &StorageMap,
     ) -> Self {
         if keys.len() > Self::MAX_RETURN_ENTRIES {
-            Self::too_many_entries(slot_name)
+            Self {
+                slot_name,
+                entries: StorageMapEntries::LimitExceeded,
+            }
         } else {
             // TODO For now, we return all entries instead of specific keys with proofs
             Self::from_all_entries(slot_name, storage_map)
         }
     }
+}
 
-    pub fn too_many_entries(slot_name: StorageSlotName) -> Self {
-        Self {
-            slot_name,
-            too_many_entries: true,
-            map_entries: Vec::new(),
+impl From<AccountStorageMapDetails>
+    for proto::rpc::account_storage_details::AccountStorageMapDetails
+{
+    fn from(value: AccountStorageMapDetails) -> Self {
+        use proto::rpc::account_storage_details::account_storage_map_details;
+
+        let AccountStorageMapDetails { slot_name, entries } = value;
+
+        match entries {
+            StorageMapEntries::LimitExceeded => Self {
+                slot_name: slot_name.to_string(),
+                too_many_entries: true,
+                entries: Some(account_storage_map_details::MapEntries { entries: Vec::new() }),
+            },
+            StorageMapEntries::Entries(map_entries) => {
+                let entries = Some(account_storage_map_details::MapEntries {
+                    entries: Vec::from_iter(map_entries.into_iter().map(|(key, value)| {
+                        account_storage_map_details::map_entries::StorageMapEntry {
+                            key: Some(key.into()),
+                            value: Some(value.into()),
+                        }
+                    })),
+                });
+
+                Self {
+                    slot_name: slot_name.to_string(),
+                    too_many_entries: false,
+                    entries,
+                }
+            },
         }
     }
 }
 
+// ACCOUNT STORAGE DETAILS
+//================================================================================================
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct AccountStorageDetails {
     pub header: AccountStorageHeader,
@@ -498,27 +531,68 @@ impl From for proto::rpc::AccountStorageDetails {
 
 const fn storage_slot_type_from_raw(slot_type: u32) -> Result<StorageSlotType, ConversionError> {
     Ok(match slot_type {
-        0 => StorageSlotType::Map,
-        1 => StorageSlotType::Value,
+        0 => StorageSlotType::Value,
+        1 => StorageSlotType::Map,
         _ => return Err(ConversionError::EnumDiscriminantOutOfRange),
     })
 }
 
 const fn storage_slot_type_to_raw(slot_type: StorageSlotType) -> u32 {
     match slot_type {
-        StorageSlotType::Map => 0,
-        StorageSlotType::Value => 1,
+        StorageSlotType::Value => 0,
+        StorageSlotType::Map => 1,
     }
 }
 
-/// Represents account details returned in response to an account proof request.
-pub struct AccountDetails {
-    pub account_header: AccountHeader,
-    pub account_code: Option<Vec<u8>>,
-    pub vault_details: AccountVaultDetails,
-    pub storage_details: AccountStorageDetails,
+impl TryFrom<proto::rpc::account_storage_details::AccountStorageMapDetails>
+    for AccountStorageMapDetails
+{
+    type Error = ConversionError;
+
+    fn try_from(
+        value: proto::rpc::account_storage_details::AccountStorageMapDetails,
+    ) -> Result<Self, Self::Error> {
+        use proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry;
+        let proto::rpc::account_storage_details::AccountStorageMapDetails {
+            slot_name,
+            too_many_entries,
+            entries,
+        } = value;
+
+        let slot_name = StorageSlotName::new(slot_name)?;
+
+        let entries = if too_many_entries {
+            StorageMapEntries::LimitExceeded
+        } else {
+            let map_entries = if let Some(entries) = entries {
+                entries
+                    .entries
+                    .into_iter()
+                    .map(|entry| {
+                        let key = entry
+                            .key
+                            .ok_or(StorageMapEntry::missing_field(stringify!(key)))?
+                            .try_into()?;
+                        let value = entry
+                            .value
+                            .ok_or(StorageMapEntry::missing_field(stringify!(value)))?
+                            .try_into()?;
+                        Ok((key, value))
+                    })
+                    .collect::<Result<Vec<_>, ConversionError>>()?
+            } else {
+                Vec::new()
+            };
+            StorageMapEntries::Entries(map_entries)
+        };
+
+        Ok(Self { slot_name, entries })
+    }
 }
 
+// ACCOUNT PROOF RESPONSE
+//================================================================================================
+
 /// Represents the response to an account proof request.
 pub struct AccountProofResponse {
     pub block_num: BlockNumber,
@@ -558,6 +632,17 @@ impl From for proto::rpc::AccountProofResponse {
     }
 }
 
+// ACCOUNT DETAILS
+//================================================================================================
+
+/// Represents account details returned in response to an account proof request.
+pub struct AccountDetails {
+    pub account_header: AccountHeader,
+    pub account_code: Option<Vec<u8>>,
+    pub vault_details: AccountVaultDetails,
+    pub storage_details: AccountStorageDetails,
+}
+
 impl TryFrom for AccountDetails {
     type Error = ConversionError;
 
@@ -622,31 +707,6 @@ impl From for proto::rpc::account_proof_response::AccountDetails
     }
 }
 
-impl From<AccountStorageMapDetails>
-    for proto::rpc::account_storage_details::AccountStorageMapDetails
-{
-    fn from(value: AccountStorageMapDetails) -> Self {
-        use proto::rpc::account_storage_details::account_storage_map_details;
-
-        let AccountStorageMapDetails { slot_name, too_many_entries, map_entries } = value;
-
-        let entries = Some(account_storage_map_details::MapEntries {
-            entries: Vec::from_iter(map_entries.into_iter().map(|(key, value)| {
-                account_storage_map_details::map_entries::StorageMapEntry {
-                    key: Some(key.into()),
-                    value: Some(value.into()),
-                }
-            })),
-        });
-
-        Self {
-            slot_name: slot_name.to_string(),
-            too_many_entries,
-            entries,
-        }
-    }
-}
-
 // ACCOUNT WITNESS
 // ================================================================================================
 
diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql
index aaafb91a8..38745e610 100644
--- a/crates/store/src/db/migrations/2025062000000_setup/up.sql
+++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql
@@ -18,17 +18,17 @@ CREATE TABLE accounts (
     block_num INTEGER NOT NULL,
     account_commitment BLOB NOT NULL,
     code_commitment BLOB,
-    storage BLOB,
-    vault BLOB,
     nonce INTEGER,
+    storage_header BLOB, -- Serialized AccountStorageHeader from miden-objects
+    vault_root BLOB, -- Vault root commitment
     is_latest BOOLEAN NOT NULL DEFAULT 0, -- Indicates if this is the latest state for this account_id
PRIMARY KEY (account_id, block_num), CONSTRAINT all_null_or_none_null CHECK ( - (code_commitment IS NOT NULL AND storage IS NOT NULL AND vault IS NOT NULL AND nonce IS NOT NULL) + (code_commitment IS NOT NULL AND nonce IS NOT NULL AND storage_header IS NOT NULL AND vault_root IS NOT NULL) OR - (code_commitment IS NULL AND storage IS NULL AND vault IS NULL AND nonce IS NULL) + (code_commitment IS NULL AND nonce IS NULL AND storage_header IS NULL AND vault_root IS NULL) ) ) WITHOUT ROWID; diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index 9083089f3..2e727113e 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -7,7 +7,7 @@ use diesel::{Connection, RunQueryDsl, SqliteConnection}; use miden_node_proto::domain::account::{AccountInfo, AccountSummary, NetworkAccountPrefix}; use miden_node_proto::generated as proto; use miden_protocol::Word; -use miden_protocol::account::AccountId; +use miden_protocol::account::{AccountHeader, AccountId, AccountStorage}; use miden_protocol::asset::{Asset, AssetVaultKey}; use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; use miden_protocol::crypto::merkle::SparseMerklePath; @@ -392,7 +392,7 @@ impl Db { .await } - /// Loads all the account commitments from the DB. + /// TODO marked for removal, replace with paged version #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_all_account_commitments(&self) -> Result> { self.transact("read all account commitments", move |conn| { @@ -401,6 +401,16 @@ impl Db { .await } + /// Returns all account IDs that have public state. + #[allow(dead_code)] // Will be used by InnerForest in next PR + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_public_account_ids(&self) -> Result> { + self.transact("read all public account IDs", move |conn| { + queries::select_all_public_account_ids(conn) + }) + .await + } + /// Loads public account details from the DB. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_account(&self, id: AccountId) -> Result { @@ -408,19 +418,6 @@ impl Db { .await } - /// Loads account details at a specific block number from the DB. - #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_historical_account_at( - &self, - id: AccountId, - block_num: BlockNumber, - ) -> Result { - self.transact("Get historical account details", move |conn| { - queries::select_historical_account_at(conn, id, block_num) - }) - .await - } - /// Loads public account details from the DB based on the account ID's prefix. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_network_account_by_prefix( @@ -440,6 +437,64 @@ impl Db { .await } + /// Reconstructs account storage at a specific block from the database + /// + /// This method queries the decomposed storage tables and reconstructs the full + /// `AccountStorage` with SMT backing for Map slots. 
+ // TODO split querying the header from the content + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_account_storage_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result { + self.transact("Get account storage at block", move |conn| { + queries::select_account_storage_at_block(conn, account_id, block_num) + }) + .await + } + + /// Queries vault assets at a specific block + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_account_vault_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result> { + self.transact("Get account vault at block", move |conn| { + queries::select_account_vault_at_block(conn, account_id, block_num) + }) + .await + } + + /// Queries the account code by its commitment hash. + /// + /// Returns `None` if no code exists with that commitment. + pub async fn select_account_code_by_commitment( + &self, + code_commitment: Word, + ) -> Result>> { + self.transact("Get account code by commitment", move |conn| { + queries::select_account_code_by_commitment(conn, code_commitment) + }) + .await + } + + /// Queries the account header for a specific account at a specific block number. + /// + /// Returns `None` if the account doesn't exist at that block. + pub async fn select_account_header_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result> { + self.transact("Get account header at block", move |conn| { + queries::select_account_header_at_block(conn, account_id, block_num) + .map(|opt| opt.map(|(header, _storage_header)| header)) + }) + .await + } + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn get_state_sync( &self, diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index 48013b370..37a9b019f 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -34,7 +34,7 @@ use miden_node_proto::domain::account::NetworkAccountPrefix; use miden_protocol::Felt; -use miden_protocol::account::StorageSlotName; +use miden_protocol::account::{StorageSlotName, StorageSlotType}; use miden_protocol::block::BlockNumber; use miden_protocol::note::{NoteExecutionMode, NoteTag}; @@ -131,6 +131,33 @@ impl SqlTypeConvert for NoteTag { } } +impl SqlTypeConvert for StorageSlotType { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result { + #[derive(Debug, thiserror::Error)] + #[error("invalid storage slot type value {0}")] + struct ValueError(i32); + + Ok(match raw { + 0 => StorageSlotType::Value, + 1 => StorageSlotType::Map, + invalid => { + return Err(Self::map_err(ValueError(invalid))); + }, + }) + } + + #[inline(always)] + fn to_raw_sql(self) -> Self::Raw { + match self { + StorageSlotType::Value => 0, + StorageSlotType::Map => 1, + } + } +} + impl SqlTypeConvert for StorageSlotName { type Raw = String; @@ -157,9 +184,9 @@ pub(crate) fn nullifier_prefix_to_raw_sql(prefix: u16) -> i32 { } #[inline(always)] -pub(crate) fn raw_sql_to_nonce(raw: i64) -> u64 { +pub(crate) fn raw_sql_to_nonce(raw: i64) -> Felt { debug_assert!(raw >= 0); - raw as u64 + Felt::new(raw as u64) } #[inline(always)] pub(crate) fn nonce_to_raw_sql(nonce: Felt) -> i64 { diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index 913adfc40..3d2f66b05 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ 
b/crates/store/src/db/models/queries/accounts.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeMap; use std::ops::RangeInclusive; use diesel::prelude::{Queryable, QueryableByName}; @@ -8,8 +9,6 @@ use diesel::{ BoolExpressionMethods, ExpressionMethods, Insertable, - JoinOnDsl, - NullableExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, @@ -24,6 +23,7 @@ use miden_node_utils::limiter::{ QueryParamAccountIdLimit, QueryParamLimiter, }; +use miden_protocol::Word; use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, @@ -31,73 +31,75 @@ use miden_protocol::account::{ AccountDelta, AccountId, AccountStorage, + AccountStorageHeader, NonFungibleDeltaAction, + StorageMap, + StorageSlot, StorageSlotContent, StorageSlotName, + StorageSlotType, }; use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; use miden_protocol::block::{BlockAccountUpdate, BlockNumber}; use miden_protocol::utils::{Deserializable, Serializable}; -use miden_protocol::{Felt, Word}; use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; +mod at_block; +pub(crate) use at_block::{ + select_account_header_at_block, + select_account_storage_at_block, + select_account_vault_at_block, +}; + +#[cfg(test)] +mod tests; + type StorageMapValueRow = (i64, String, Vec, Vec); -/// Select the latest account details by account id from the DB using the given -/// [`SqliteConnection`]. +// ACCOUNT CODE +// ================================================================================================ + +/// Select account code by its commitment hash from the `account_codes` table. /// /// # Returns /// -/// The latest account details, or an error. +/// The account code bytes if found, or `None` if no code exists with that commitment. /// /// # Raw SQL /// /// ```sql -/// SELECT -/// accounts.account_id, -/// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code -/// FROM -/// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment -/// WHERE -/// account_id = ?1 -/// AND is_latest = 1 +/// SELECT code FROM account_codes WHERE code_commitment = ?1 /// ``` -pub(crate) fn select_account( +pub(crate) fn select_account_code_by_commitment( conn: &mut SqliteConnection, - account_id: AccountId, -) -> Result { - let raw = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), + code_commitment: Word, +) -> Result>, DatabaseError> { + use schema::account_codes; + + let code_commitment_bytes = code_commitment.to_bytes(); + + let result: Option> = SelectDsl::select( + account_codes::table.filter(account_codes::code_commitment.eq(&code_commitment_bytes)), + account_codes::code, ) - .filter(schema::accounts::account_id.eq(account_id.to_bytes())) - .filter(schema::accounts::is_latest.eq(true)) - .get_result::<(AccountRaw, Option>)>(conn) - .optional()? 
- .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; - let info = AccountWithCodeRawJoined::from(raw).try_into()?; - Ok(info) + .first(conn) + .optional()?; + + Ok(result) } -/// Select account details as they are at the given block height. +// ACCOUNT RETRIEVAL +// ================================================================================================ + +/// Select account by ID from the DB using the given [`SqliteConnection`]. /// /// # Returns /// -/// The account details at the specified block, or an error. +/// The latest account info, or an error. /// /// # Raw SQL /// @@ -105,58 +107,107 @@ pub(crate) fn select_account( /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// account_id = ?1 -/// AND block_num <= ?2 -/// ORDER BY -/// block_num DESC -/// LIMIT -/// 1 +/// AND is_latest = 1 /// ``` -pub(crate) fn select_historical_account_at( +pub(crate) fn select_account( conn: &mut SqliteConnection, account_id: AccountId, - block_num: BlockNumber, ) -> Result { - let raw = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter( - schema::accounts::account_id - .eq(account_id.to_bytes()) - .and(schema::accounts::block_num.le(block_num.to_raw_sql())), + let raw = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .get_result::(conn) + .optional()? + .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; + + let summary: AccountSummary = raw.try_into()?; + + // Backfill account details from database + // For private accounts, we don't store full details in the database + let details = if account_id.has_public_state() { + Some(select_full_account(conn, account_id)?) + } else { + None + }; + + Ok(AccountInfo { summary, details }) +} + +/// Reconstruct full Account from database tables for the latest account state +/// +/// This function queries the database tables to reconstruct a complete Account object: +/// - Code from `account_codes` table +/// - Nonce and storage header from `accounts` table +/// - Storage map entries from `account_storage_map_values` table +/// - Vault from `account_vault_assets` table +/// +/// # Note +/// +/// A stop-gap solution to retain store API and construct `AccountInfo` types. +/// The function should ultimately be removed, and any queries be served from the +/// `State` which contains an `SmtForest` to serve the latest and most recent +/// historical data. 
+// TODO: remove eventually once refactoring is complete +fn select_full_account( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { + // Get account metadata (nonce, code_commitment) and code in a single join query + let (nonce, code_bytes): (Option, Vec) = SelectDsl::select( + schema::accounts::table.inner_join(schema::account_codes::table), + (schema::accounts::nonce, schema::account_codes::code), ) - .order_by(schema::accounts::block_num.desc()) - .limit(1) - .get_result::<(AccountRaw, Option>)>(conn) + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .get_result(conn) .optional()? .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; - let info = AccountWithCodeRawJoined::from(raw).try_into()?; - Ok(info) + + let nonce = raw_sql_to_nonce(nonce.ok_or_else(|| { + DatabaseError::DataCorrupted(format!("No nonce found for account {account_id}")) + })?); + + let code = AccountCode::read_from_bytes(&code_bytes)?; + + // Reconstruct storage using existing helper function + let storage = select_latest_account_storage(conn, account_id)?; + + // Reconstruct vault from account_vault_assets table + let vault_entries: Vec<(Vec, Option>)> = SelectDsl::select( + schema::account_vault_assets::table, + (schema::account_vault_assets::vault_key, schema::account_vault_assets::asset), + ) + .filter(schema::account_vault_assets::account_id.eq(account_id.to_bytes())) + .filter(schema::account_vault_assets::is_latest.eq(true)) + .load(conn)?; + + let mut assets = Vec::new(); + for (_key_bytes, maybe_asset_bytes) in vault_entries { + if let Some(asset_bytes) = maybe_asset_bytes { + let asset = Asset::read_from_bytes(&asset_bytes)?; + assets.push(asset); + } + } + + let vault = AssetVault::new(&assets)?; + + Ok(Account::new(account_id, vault, storage, code, nonce, None)?) } -/// Select the latest account details by account ID prefix from the DB using the given -/// [`SqliteConnection`] This method is meant to be used by the network transaction builder. Because -/// network notes get matched through accounts through the account's 30-bit prefix, it is possible -/// that multiple accounts match against a single prefix. In this scenario, the first account is -/// returned. +/// Select the latest account info by account ID prefix from the DB using the given +/// [`SqliteConnection`]. Meant to be used by the network transaction builder. +/// Because network notes get matched through accounts through the account's 30-bit prefix, it is +/// possible that multiple accounts match against a single prefix. In this scenario, the first +/// account is returned. /// /// # Returns /// -/// The latest account details, `None` if the account was not found, or an error. +/// The latest account info, `None` if the account was not found, or an error. 
/// /// # Raw SQL /// @@ -164,41 +215,34 @@ pub(crate) fn select_historical_account_at( /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// network_account_id_prefix = ?1 +/// AND is_latest = 1 /// ``` pub(crate) fn select_account_by_id_prefix( conn: &mut SqliteConnection, id_prefix: u32, ) -> Result, DatabaseError> { - let maybe_info = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::is_latest.eq(true)) - .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) - .get_result::<(AccountRaw, Option>)>(conn) - .optional() - .map_err(DatabaseError::Diesel)?; - - let result: Result, DatabaseError> = maybe_info - .map(AccountWithCodeRawJoined::from) - .map(std::convert::TryInto::::try_into) - .transpose(); - - result + let maybe_summary = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) + .get_result::(conn) + .optional() + .map_err(DatabaseError::Diesel)?; + + match maybe_summary { + None => Ok(None), + Some(raw) => { + let summary: AccountSummary = raw.try_into()?; + let account_id = summary.account_id; + // Backfill account details from database + let details = select_full_account(conn, account_id).ok(); + Ok(Some(AccountInfo { summary, details })) + }, + } } /// Select all account commitments from the DB using the given [`SqliteConnection`]. @@ -238,6 +282,48 @@ pub(crate) fn select_all_account_commitments( )) } +/// Select all account IDs that have public state. +/// +/// This filters accounts in-memory after loading only the account IDs (not commitments), +/// which is more efficient than loading full commitments when only IDs are needed. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// account_id +/// FROM +/// accounts +/// WHERE +/// is_latest = 1 +/// ORDER BY +/// block_num ASC +/// ``` +#[allow(dead_code)] // Will be used by InnerForest in next PR +pub(crate) fn select_all_public_account_ids( + conn: &mut SqliteConnection, +) -> Result, DatabaseError> { + // We could technically use a `LIKE` constraint for both postgres and sqlite backends, + // but diesel doesn't expose that. + let raw: Vec> = + SelectDsl::select(schema::accounts::table, schema::accounts::account_id) + .filter(schema::accounts::is_latest.eq(true)) + .order_by(schema::accounts::block_num.asc()) + .load::>(conn)?; + + Result::from_iter( + raw.into_iter() + .map(|bytes| { + AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) + }) + .filter_map(|result| match result { + Ok(id) if id.has_public_state() => Some(Ok(id)), + Ok(_) => None, + Err(e) => Some(Err(e)), + }), + ) +} + /// Select account vault assets within a block range (inclusive). 
/// /// # Parameters @@ -379,16 +465,11 @@ pub fn select_accounts_by_block_range( /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment +/// WHERE +/// is_latest = 1 /// ORDER BY /// block_num ASC /// ``` @@ -396,17 +477,23 @@ pub fn select_accounts_by_block_range( pub(crate) fn select_all_accounts( conn: &mut SqliteConnection, ) -> Result, DatabaseError> { - let accounts_raw = QueryDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::is_latest.eq(true)) - .load::<(AccountRaw, Option>)>(conn)?; - let account_infos = vec_raw_try_into::( - accounts_raw.into_iter().map(AccountWithCodeRawJoined::from), - )?; + let raw = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::is_latest.eq(true)) + .order_by(schema::accounts::block_num.asc()) + .load::(conn)?; + + let summaries: Vec = vec_raw_try_into(raw)?; + + // Backfill account details from database + let account_infos = summaries + .into_iter() + .map(|summary| { + let account_id = summary.account_id; + let details = select_full_account(conn, account_id).ok(); + AccountInfo { summary, details } + }) + .collect(); + Ok(account_infos) } @@ -565,6 +652,76 @@ pub(crate) fn select_account_storage_map_values( Ok(StorageMapValuesPage { last_block_included, values }) } +/// Select latest account storage by querying `accounts.storage_header` where `is_latest=true` +/// and reconstructing full storage from the header plus map values from +/// `account_storage_map_values`. +pub(crate) fn select_latest_account_storage( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result { + use schema::account_storage_map_values as t; + + let account_id_bytes = account_id.to_bytes(); + + // Query storage header blob for this account where is_latest = true + let storage_blob: Option> = + SelectDsl::select(schema::accounts::table, schema::accounts::storage_header) + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::is_latest.eq(true)) + .first(conn) + .optional()? 
+ .flatten(); + + let Some(blob) = storage_blob else { + // No storage means empty storage + return Ok(AccountStorage::new(Vec::new())?); + }; + + // Deserialize the AccountStorageHeader from the blob + let header = AccountStorageHeader::read_from_bytes(&blob)?; + + // Query all latest map values for this account + let map_values: Vec<(String, Vec, Vec)> = + SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::is_latest.eq(true)) + .load(conn)?; + + // Group map values by slot name + let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + for (slot_name_str, key_bytes, value_bytes) in map_values { + let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { + DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) + })?; + let key = Word::read_from_bytes(&key_bytes)?; + let value = Word::read_from_bytes(&value_bytes)?; + map_entries_by_slot.entry(slot_name).or_default().push((key, value)); + } + + // Reconstruct StorageSlots from header slots + map entries + let mut slots = Vec::new(); + for slot_header in header.slots() { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + // For value slots, the header value IS the slot value + StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + // For map slots, reconstruct from map entries + let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries)?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + slots.push(slot); + } + + Ok(AccountStorage::new(slots)?) +} + +// ACCOUNT MUTATION +// ================================================================================================ + #[derive(Queryable, Selectable)] #[diesel(table_name = crate::db::schema::account_vault_assets)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] @@ -586,73 +743,6 @@ impl TryFrom for AccountVaultValue { } } -#[derive(Debug, Clone, Queryable, QueryableByName, Selectable)] -#[diesel(table_name = schema::accounts)] -#[diesel(check_for_backend(diesel::sqlite::Sqlite))] -pub struct AccountRaw { - pub account_id: Vec, - pub account_commitment: Vec, - pub block_num: i64, - pub storage: Option>, - pub vault: Option>, - pub nonce: Option, -} - -#[derive(Debug, Clone, QueryableByName)] -pub struct AccountWithCodeRawJoined { - #[diesel(embed)] - pub account: AccountRaw, - #[diesel(embed)] - pub code: Option>, -} - -impl From<(AccountRaw, Option>)> for AccountWithCodeRawJoined { - fn from((account, code): (AccountRaw, Option>)) -> Self { - Self { account, code } - } -} - -impl TryInto for AccountWithCodeRawJoined { - type Error = DatabaseError; - fn try_into(self) -> Result { - use proto::domain::account::{AccountInfo, AccountSummary}; - - let account_id = AccountId::read_from_bytes(&self.account.account_id[..])?; - let account_commitment = Word::read_from_bytes(&self.account.account_commitment[..])?; - let block_num = BlockNumber::from_raw_sql(self.account.block_num)?; - let summary = AccountSummary { - account_id, - account_commitment, - block_num, - }; - let maybe_account = self.try_into()?; - Ok(AccountInfo { summary, details: maybe_account }) - } -} - -impl TryInto> for AccountWithCodeRawJoined { - type Error = DatabaseError; - fn try_into(self) -> Result, Self::Error> { - let account_id = AccountId::read_from_bytes(&self.account.account_id[..])?; - - let details = if let (Some(vault), 
Some(storage), Some(nonce), Some(code)) = - (self.account.vault, self.account.storage, self.account.nonce, self.code) - { - let vault = AssetVault::read_from_bytes(&vault)?; - let storage = AccountStorage::read_from_bytes(&storage)?; - let code = AccountCode::read_from_bytes(&code)?; - let nonce = raw_sql_to_nonce(nonce); - let nonce = Felt::new(nonce); - let account = Account::new_unchecked(account_id, vault, storage, code, nonce, None); - Some(account) - } else { - // a private account - None - }; - Ok(details) - } -} - #[derive(Debug, Clone, PartialEq, Eq, Selectable, Queryable, QueryableByName)] #[diesel(table_name = schema::accounts)] #[diesel(check_for_backend(Sqlite))] @@ -679,7 +769,7 @@ impl TryInto for AccountSummaryRaw { /// Insert an account vault asset row into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest=true` for the new row and update any existing +/// Sets `is_latest=true` for the new row and updates any existing /// row with the same `(account_id, vault_key)` tuple to `is_latest=false`. /// /// # Returns @@ -719,8 +809,8 @@ pub(crate) fn insert_account_vault_asset( /// Insert an account storage map value into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest=true` for the new row and update any existing -/// row with the same `(account_id, slot, key)` tuple to `is_latest=false`. +/// Sets `is_latest=true` for the new row and updates any existing +/// row with the same `(account_id, slot_index, key)` tuple to `is_latest=false`. /// /// # Returns /// @@ -774,32 +864,6 @@ pub(crate) fn upsert_accounts( ) -> Result { use proto::domain::account::NetworkAccountPrefix; - fn select_details_stmt( - conn: &mut SqliteConnection, - account_id: AccountId, - ) -> Result, DatabaseError> { - let account_id = account_id.to_bytes(); - let accounts = SelectDsl::select( - schema::accounts::table.left_join( - schema::account_codes::table.on(schema::accounts::code_commitment - .eq(schema::account_codes::code_commitment.nullable())), - ), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::account_id.eq(account_id)) - .filter(schema::accounts::is_latest.eq(true)) - .get_results::<(AccountRaw, Option>)>(conn)?; - - // SELECT .. 
FROM accounts LEFT JOIN account_codes - // ON accounts.code_commitment == account_codes.code_commitment - - let accounts = Result::from_iter(accounts.into_iter().filter_map(|x| { - let account_with_code = AccountWithCodeRawJoined::from(x); - account_with_code.try_into().transpose() - }))?; - Ok(accounts) - } - let mut count = 0; for update in accounts { let account_id = update.account_id(); @@ -856,10 +920,8 @@ pub(crate) fn upsert_accounts( }, AccountUpdateDetails::Delta(delta) => { - let mut rows = select_details_stmt(conn, account_id)?.into_iter(); - let Some(account_before) = rows.next() else { - return Err(DatabaseError::AccountNotFoundInDb(account_id)); - }; + // Reconstruct the full account from database tables + let account = select_full_account(conn, account_id)?; // --- collect storage map updates ---------------------------- @@ -873,8 +935,7 @@ pub(crate) fn upsert_accounts( // apply delta to the account; we need to do this before we process asset updates // because we currently need to get the current value of fungible assets from the // account - let account_after = - apply_delta(account_before, delta, &update.final_state_commitment())?; + let account_after = apply_delta(account, delta, &update.final_state_commitment())?; // --- process asset updates ---------------------------------- @@ -929,11 +990,14 @@ pub(crate) fn upsert_accounts( account_commitment: update.final_state_commitment().to_bytes(), block_num: block_num.to_raw_sql(), nonce: full_account.as_ref().map(|account| nonce_to_raw_sql(account.nonce())), - storage: full_account.as_ref().map(|account| account.storage().to_bytes()), - vault: full_account.as_ref().map(|account| account.vault().to_bytes()), code_commitment: full_account .as_ref() .map(|account| account.code().commitment().to_bytes()), + // Store only the header (slot metadata + map roots), not full storage with map contents + storage_header: full_account + .as_ref() + .map(|account| account.storage().to_header().to_bytes()), + vault_root: full_account.as_ref().map(|account| account.vault().root().to_bytes()), is_latest: true, }; @@ -946,7 +1010,6 @@ pub(crate) fn upsert_accounts( insert_account_storage_map_value(conn, acc_id, block_num, slot_name, key, value)?; } - // insert pending vault-asset entries for (acc_id, vault_key, update) in pending_asset_inserts { insert_account_vault_asset(conn, acc_id, block_num, vault_key, update)?; } @@ -991,9 +1054,9 @@ pub(crate) struct AccountRowInsert { pub(crate) block_num: i64, pub(crate) account_commitment: Vec, pub(crate) code_commitment: Option>, - pub(crate) storage: Option>, - pub(crate) vault: Option>, pub(crate) nonce: Option, + pub(crate) storage_header: Option>, + pub(crate) vault_root: Option>, pub(crate) is_latest: bool, } diff --git a/crates/store/src/db/models/queries/accounts/at_block.rs b/crates/store/src/db/models/queries/accounts/at_block.rs new file mode 100644 index 000000000..dc613a9c6 --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/at_block.rs @@ -0,0 +1,269 @@ +use std::collections::BTreeMap; + +use diesel::prelude::Queryable; +use diesel::query_dsl::methods::SelectDsl; +use diesel::{ + BoolExpressionMethods, + ExpressionMethods, + OptionalExtension, + QueryDsl, + RunQueryDsl, + SqliteConnection, +}; +use miden_protocol::account::{ + AccountHeader, + AccountId, + AccountStorage, + AccountStorageHeader, + StorageMap, + StorageSlot, + StorageSlotName, + StorageSlotType, +}; +use miden_protocol::asset::Asset; +use miden_protocol::block::BlockNumber; +use 
miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{Felt, FieldElement, Word}; + +use crate::db::models::conv::{SqlTypeConvert, raw_sql_to_nonce}; +use crate::db::schema; +use crate::errors::DatabaseError; + +// ACCOUNT HEADER +// ================================================================================================ + +#[derive(Debug, Clone, Queryable)] +struct AccountHeaderDataRaw { + code_commitment: Option>, + nonce: Option, + storage_header: Option>, + vault_root: Option>, +} + +/// Queries the account header for a specific account at a specific block number. +/// +/// This reconstructs the `AccountHeader` by reading from the `accounts` table: +/// - `account_id`, `nonce`, `code_commitment`, `storage_header`, `vault_root` +/// +/// Returns `None` if the account doesn't exist at that block. +/// +/// # Arguments +/// +/// * `conn` - Database connection +/// * `account_id` - The account ID to query +/// * `block_num` - The block number at which to query the account header +/// +/// # Returns +/// +/// * `Ok(Some(AccountHeader))` - The account header if found +/// * `Ok(None)` - If account doesn't exist at that block +/// * `Err(DatabaseError)` - If there's a database error +pub(crate) fn select_account_header_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result, DatabaseError> { + use schema::accounts; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + let account_data: Option = SelectDsl::select( + accounts::table + .filter(accounts::account_id.eq(&account_id_bytes)) + .filter(accounts::block_num.le(block_num_sql)) + .order(accounts::block_num.desc()) + .limit(1), + ( + accounts::code_commitment, + accounts::nonce, + accounts::storage_header, + accounts::vault_root, + ), + ) + .first(conn) + .optional()?; + + let Some(AccountHeaderDataRaw { + code_commitment: code_commitment_bytes, + nonce: nonce_raw, + storage_header: storage_header_blob, + vault_root: vault_root_bytes, + }) = account_data + else { + return Ok(None); + }; + + let (storage_commitment, storage_header) = match storage_header_blob { + Some(blob) => { + let header = AccountStorageHeader::read_from_bytes(&blob)?; + let commitment = header.to_commitment(); + (commitment, header) + }, + None => (Word::default(), AccountStorageHeader::new(Vec::new())?), + }; + + let code_commitment = code_commitment_bytes + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? + .unwrap_or(Word::default()); + + let nonce = nonce_raw.map_or(Felt::ZERO, raw_sql_to_nonce); + + let vault_root = vault_root_bytes + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? + .unwrap_or(Word::default()); + + Ok(Some(( + AccountHeader::new(account_id, nonce, vault_root, storage_commitment, code_commitment), + storage_header, + ))) +} + +// ACCOUNT VAULT +// ================================================================================================ + +/// Query vault assets at a specific block by finding the most recent update for each `vault_key`. 
+pub(crate) fn select_account_vault_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result, DatabaseError> { + use schema::account_vault_assets as t; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + // Since Diesel doesn't support composite keys in subqueries easily, we use a two-step approach: + // Step 1: Get max block_num for each vault_key + let latest_blocks_per_vault_key = Vec::from_iter( + QueryDsl::select( + t::table + .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::block_num.le(block_num_sql)) + .group_by(t::vault_key), + (t::vault_key, diesel::dsl::max(t::block_num)), + ) + .load::<(Vec, Option)>(conn)? + .into_iter() + .filter_map(|(key, maybe_block)| maybe_block.map(|block| (key, block))), + ); + + if latest_blocks_per_vault_key.is_empty() { + return Ok(Vec::new()); + } + + // Step 2: Fetch the full rows matching (vault_key, block_num) pairs + let mut assets = Vec::new(); + for (vault_key_bytes, max_block) in latest_blocks_per_vault_key { + // TODO we should not make a query per vault key, but query many at once or + // or find an alternative approach + let result: Option>> = QueryDsl::select( + t::table.filter( + t::account_id + .eq(&account_id_bytes) + .and(t::vault_key.eq(&vault_key_bytes)) + .and(t::block_num.eq(max_block)), + ), + t::asset, + ) + .first(conn) + .optional()?; + if let Some(Some(asset_bytes)) = result { + let asset = Asset::read_from_bytes(&asset_bytes)?; + assets.push(asset); + } + } + + // Sort by vault_key for consistent ordering + assets.sort_by_key(Asset::vault_key); + + Ok(assets) +} + +// ACCOUNT STORAGE +// ================================================================================================ + +/// Returns account storage at a given block by reading from `accounts.storage_header` +/// (which contains the `AccountStorageHeader`) and reconstructing full storage from +/// map values in `account_storage_map_values` table. +pub(crate) fn select_account_storage_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result { + use schema::account_storage_map_values as t; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + // Query storage header blob for this account at or before this block + let storage_blob: Option> = + SelectDsl::select(schema::accounts::table, schema::accounts::storage_header) + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::block_num.le(block_num_sql)) + .order(schema::accounts::block_num.desc()) + .limit(1) + .first(conn) + .optional()? + .flatten(); + + let Some(blob) = storage_blob else { + // No storage means empty storage + return Ok(AccountStorage::new(Vec::new())?); + }; + + // Deserialize the AccountStorageHeader from the blob + let header = AccountStorageHeader::read_from_bytes(&blob)?; + + // Query all map values for this account up to and including this block. + // For each (slot_name, key), we need the latest value at or before block_num. 
+ // First, get all entries up to block_num + let map_values: Vec<(i64, String, Vec, Vec)> = + SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id_bytes).and(t::block_num.le(block_num_sql))) + .order((t::slot_name.asc(), t::key.asc(), t::block_num.desc())) + .load(conn)?; + + // For each (slot_name, key) pair, keep only the latest entry (highest block_num) + let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new(); + + for (_, slot_name_str, key_bytes, value_bytes) in map_values { + let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { + DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) + })?; + let key = Word::read_from_bytes(&key_bytes)?; + let value = Word::read_from_bytes(&value_bytes)?; + + // Only insert if we haven't seen this (slot_name, key) yet + // (since results are ordered by block_num desc, first one is latest) + latest_map_entries.entry((slot_name, key)).or_insert(value); + } + + // Group entries by slot name + let mut map_entries_by_slot: BTreeMap> = BTreeMap::new(); + for ((slot_name, key), value) in latest_map_entries { + map_entries_by_slot.entry(slot_name).or_default().push((key, value)); + } + + // Reconstruct StorageSlots from header slots + map entries + let mut slots = Vec::new(); + for slot_header in header.slots() { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + // For value slots, the header value IS the slot value + StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + // For map slots, reconstruct from map entries + let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries)?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + slots.push(slot); + } + + Ok(AccountStorage::new(slots)?) +} diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs new file mode 100644 index 000000000..67eb24c1f --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/tests.rs @@ -0,0 +1,552 @@ +//! Tests for the `accounts` module, specifically for account storage and historical queries. 
+ +use diesel::query_dsl::methods::SelectDsl; +use diesel::{Connection, OptionalExtension, QueryDsl, RunQueryDsl}; +use diesel_migrations::MigrationHarness; +use miden_node_utils::fee::test_fee_params; +use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ + Account, + AccountBuilder, + AccountComponent, + AccountDelta, + AccountId, + AccountIdVersion, + AccountStorageMode, + AccountType, + StorageSlot, + StorageSlotName, +}; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Serializable; +use miden_protocol::{EMPTY_WORD, Felt, Word}; +use miden_standards::account::auth::AuthRpoFalcon512; +use miden_standards::code_builder::CodeBuilder; + +use super::*; +use crate::db::migrations::MIGRATIONS; + +fn setup_test_db() -> SqliteConnection { + let mut conn = + SqliteConnection::establish(":memory:").expect("Failed to create in-memory database"); + + conn.run_pending_migrations(MIGRATIONS).expect("Failed to run migrations"); + + conn +} + +fn create_test_account_with_storage() -> (Account, AccountId) { + // Create a simple public account with one value storage slot + let account_id = AccountId::dummy( + [1u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let storage_value = Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]); + let component_storage = vec![StorageSlot::with_value(StorageSlotName::mock(0), storage_value)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + (account, account_id) +} + +fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { + use crate::db::schema::block_headers; + + let block_header = BlockHeader::new( + 1_u8.into(), + Word::default(), + block_num, + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + SecretKey::new().public_key(), + test_fee_params(), + 0_u8.into(), + ); + + diesel::insert_into(block_headers::table) + .values(( + block_headers::block_num.eq(i64::from(block_num.as_u32())), + block_headers::block_header.eq(block_header.to_bytes()), + )) + .execute(conn) + .expect("Failed to insert block header"); +} + +// ACCOUNT HEADER AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_header_at_block_returns_none_for_nonexistent() { + let mut conn = setup_test_db(); + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let account_id = AccountId::dummy( + [99u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + // Query for a non-existent account + let result = select_account_header_at_block(&mut conn, account_id, block_num) + 
.expect("Query should succeed"); + + assert!(result.is_none(), "Should return None for non-existent account"); +} + +#[test] +fn test_select_account_header_at_block_returns_correct_header() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + // Insert the account + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query the account header + let (header, _storage_header) = + select_account_header_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed") + .expect("Header should exist"); + + assert_eq!(header.id(), account_id, "Account ID should match"); + assert_eq!(header.nonce(), account.nonce(), "Nonce should match"); + assert_eq!( + header.code_commitment(), + account.code().commitment(), + "Code commitment should match" + ); +} + +#[test] +fn test_select_account_header_at_block_historical_query() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num_1 = BlockNumber::from_epoch(0); + let block_num_2 = BlockNumber::from_epoch(1); + insert_block_header(&mut conn, block_num_1); + insert_block_header(&mut conn, block_num_2); + + // Insert the account at block 1 + let nonce_1 = account.nonce(); + let delta_1 = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_1 = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta_1), + ); + + upsert_accounts(&mut conn, &[account_update_1], block_num_1).expect("First upsert failed"); + + // Query at block 1 - should return the account + let (header_1, _) = select_account_header_at_block(&mut conn, account_id, block_num_1) + .expect("Query should succeed") + .expect("Header should exist at block 1"); + + assert_eq!(header_1.nonce(), nonce_1, "Nonce at block 1 should match"); + + // Query at block 2 - should return the same account (most recent before block 2) + let (header_2, _) = select_account_header_at_block(&mut conn, account_id, block_num_2) + .expect("Query should succeed") + .expect("Header should exist at block 2"); + + assert_eq!(header_2.nonce(), nonce_1, "Nonce at block 2 should match block 1"); +} + +// ACCOUNT VAULT AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_vault_at_block_empty() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + // Insert account without vault assets + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query vault - should return empty (the test account has no assets) + let assets = select_account_vault_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); + + assert!(assets.is_empty(), "Account should have no 
assets"); +} + +// ACCOUNT STORAGE AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_storage_at_block_returns_storage() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let original_storage_commitment = account.storage().to_commitment(); + + // Insert the account + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query storage + let storage = select_account_storage_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); + + assert_eq!( + storage.to_commitment(), + original_storage_commitment, + "Storage commitment should match" + ); +} + +#[test] +fn test_upsert_accounts_inserts_storage_header() { + let mut conn = setup_test_db(); + let (account, account_id) = create_test_account_with_storage(); + + // Block 1 + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let storage_commitment_original = account.storage().to_commitment(); + let storage_slots_len = account.storage().slots().len(); + let account_commitment = account.commitment(); + + // Create full state delta from the account + let delta = AccountDelta::try_from(account).unwrap(); + assert!(delta.is_full_state(), "Delta should be full state"); + + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + // Upsert account + let result = upsert_accounts(&mut conn, &[account_update], block_num); + assert!(result.is_ok(), "upsert_accounts failed: {:?}", result.err()); + assert_eq!(result.unwrap(), 1, "Expected 1 account to be inserted"); + + // Query storage header back + let queried_storage = select_latest_account_storage(&mut conn, account_id) + .expect("Failed to query storage header"); + + // Verify storage commitment matches + assert_eq!( + queried_storage.to_commitment(), + storage_commitment_original, + "Storage commitment mismatch" + ); + + // Verify number of slots matches + assert_eq!(queried_storage.slots().len(), storage_slots_len, "Storage slots count mismatch"); + + // Verify exactly 1 latest account with storage exists + let header_count: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::storage_header.is_not_null()) + .count() + .get_result(&mut conn) + .expect("Failed to count accounts with storage"); + + assert_eq!(header_count, 1, "Expected exactly 1 latest account with storage"); +} + +#[test] +fn test_upsert_accounts_updates_is_latest_flag() { + let mut conn = setup_test_db(); + let (account, account_id) = create_test_account_with_storage(); + + // Block 1 and 2 + let block_num_1 = BlockNumber::from_epoch(0); + let block_num_2 = BlockNumber::from_epoch(1); + + insert_block_header(&mut conn, block_num_1); + insert_block_header(&mut conn, block_num_2); + + // Save storage commitment before moving account + let storage_commitment_1 = account.storage().to_commitment(); + let account_commitment_1 = account.commitment(); + + // First update with original account - full 
state delta + let delta_1 = AccountDelta::try_from(account).unwrap(); + + let account_update_1 = BlockAccountUpdate::new( + account_id, + account_commitment_1, + AccountUpdateDetails::Delta(delta_1), + ); + + upsert_accounts(&mut conn, &[account_update_1], block_num_1).expect("First upsert failed"); + + // Create modified account with different storage value + let storage_value_modified = + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]); + let component_storage_modified = + vec![StorageSlot::with_value(StorageSlotName::mock(0), storage_value_modified)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component_2 = AccountComponent::new(account_component_code, component_storage_modified) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account_2 = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component_2) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let storage_commitment_2 = account_2.storage().to_commitment(); + let account_commitment_2 = account_2.commitment(); + + // Second update with modified account - full state delta + let delta_2 = AccountDelta::try_from(account_2).unwrap(); + + let account_update_2 = BlockAccountUpdate::new( + account_id, + account_commitment_2, + AccountUpdateDetails::Delta(delta_2), + ); + + upsert_accounts(&mut conn, &[account_update_2], block_num_2).expect("Second upsert failed"); + + // Verify 2 total account rows exist (both historical records) + let total_accounts: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .count() + .get_result(&mut conn) + .expect("Failed to count total accounts"); + + assert_eq!(total_accounts, 2, "Expected 2 total account records"); + + // Verify only 1 is marked as latest + let latest_accounts: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .count() + .get_result(&mut conn) + .expect("Failed to count latest accounts"); + + assert_eq!(latest_accounts, 1, "Expected exactly 1 latest account"); + + // Verify latest storage matches second update + let latest_storage = select_latest_account_storage(&mut conn, account_id) + .expect("Failed to query latest storage"); + + assert_eq!( + latest_storage.to_commitment(), + storage_commitment_2, + "Latest storage should match second update" + ); + + // Verify historical query returns first update + let storage_at_block_1 = select_account_storage_at_block(&mut conn, account_id, block_num_1) + .expect("Failed to query storage at block 1"); + + assert_eq!( + storage_at_block_1.to_commitment(), + storage_commitment_1, + "Storage at block 1 should match first update" + ); +} + +#[test] +fn test_upsert_accounts_with_multiple_storage_slots() { + let mut conn = setup_test_db(); + + // Create account with 3 storage slots + let account_id = AccountId::dummy( + [2u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let slot_value_1 = Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]); + let slot_value_2 = Word::from([Felt::new(5), Felt::new(6), Felt::new(7), Felt::new(8)]); + let slot_value_3 = Word::from([Felt::new(9), 
Felt::new(10), Felt::new(11), Felt::new(12)]); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), slot_value_1), + StorageSlot::with_value(StorageSlotName::mock(1), slot_value_2), + StorageSlot::with_value(StorageSlotName::mock(2), slot_value_3), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let storage_commitment = account.storage().to_commitment(); + let account_commitment = account.commitment(); + let delta = AccountDelta::try_from(account).unwrap(); + + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_num) + .expect("Upsert with multiple storage slots failed"); + + // Query back and verify + let queried_storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + assert_eq!( + queried_storage.to_commitment(), + storage_commitment, + "Storage commitment mismatch" + ); + + // Note: Auth component adds 1 storage slot, so 3 component slots + 1 auth = 4 total + assert_eq!( + queried_storage.slots().len(), + 4, + "Expected 4 storage slots (3 component + 1 auth)" + ); + + // The storage commitment matching proves that all values are correctly preserved. + // We don't check individual slot values by index since slot ordering may vary. 
+}
+
+#[test]
+fn test_upsert_accounts_with_empty_storage() {
+    let mut conn = setup_test_db();
+
+    // Create account with no component storage slots (only auth slot)
+    let account_id = AccountId::dummy(
+        [3u8; 15],
+        AccountIdVersion::Version0,
+        AccountType::RegularAccountImmutableCode,
+        AccountStorageMode::Public,
+    );
+
+    let account_component_code = CodeBuilder::default()
+        .compile_component_code("test::interface", "pub proc foo push.1 end")
+        .unwrap();
+
+    let component = AccountComponent::new(account_component_code, vec![])
+        .unwrap()
+        .with_supported_type(AccountType::RegularAccountImmutableCode);
+
+    let account = AccountBuilder::new([3u8; 32])
+        .account_type(AccountType::RegularAccountImmutableCode)
+        .storage_mode(AccountStorageMode::Public)
+        .with_component(component)
+        .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD)))
+        .build_existing()
+        .unwrap();
+
+    let block_num = BlockNumber::from_epoch(0);
+    insert_block_header(&mut conn, block_num);
+
+    let storage_commitment = account.storage().to_commitment();
+    let account_commitment = account.commitment();
+    let delta = AccountDelta::try_from(account).unwrap();
+
+    let account_update =
+        BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta));
+
+    upsert_accounts(&mut conn, &[account_update], block_num)
+        .expect("Upsert with empty storage failed");
+
+    // Query back and verify
+    let queried_storage =
+        select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage");
+
+    assert_eq!(
+        queried_storage.to_commitment(),
+        storage_commitment,
+        "Storage commitment mismatch for empty storage"
+    );
+
+    // Note: Auth component adds 1 storage slot, so even "empty" accounts have 1 slot
+    assert_eq!(queried_storage.slots().len(), 1, "Expected 1 storage slot (auth component)");
+
+    // Verify the storage header blob exists in database
+    let storage_header_exists: Option<bool> = SelectDsl::select(
+        schema::accounts::table
+            .filter(schema::accounts::account_id.eq(account_id.to_bytes()))
+            .filter(schema::accounts::is_latest.eq(true)),
+        schema::accounts::storage_header.is_not_null(),
+    )
+    .first(&mut conn)
+    .optional()
+    .expect("Failed to check storage header existence");
+
+    assert_eq!(
+        storage_header_exists,
+        Some(true),
+        "Storage header blob should exist even for empty storage"
+    );
+}
diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs
index 6f36594b9..90c48380d 100644
--- a/crates/store/src/db/schema.rs
+++ b/crates/store/src/db/schema.rs
@@ -27,9 +27,9 @@ diesel::table!
{ network_account_id_prefix -> Nullable, account_commitment -> Binary, code_commitment -> Nullable, - storage -> Nullable, - vault -> Nullable, nonce -> Nullable, + storage_header -> Nullable, + vault_root -> Nullable, block_num -> BigInt, is_latest -> Bool, } diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index 413f8a524..3988e160d 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -12,6 +12,7 @@ use miden_protocol::account::delta::AccountUpdateDetails; use miden_protocol::account::{ Account, AccountBuilder, + AccountCode, AccountComponent, AccountDelta, AccountId, @@ -21,6 +22,7 @@ use miden_protocol::account::{ AccountType, AccountVaultDelta, StorageSlot, + StorageSlotContent, StorageSlotDelta, StorageSlotName, }; @@ -61,6 +63,7 @@ use miden_protocol::transaction::{ TransactionHeader, TransactionId, }; +use miden_protocol::utils::{Deserializable, Serializable}; use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word, ZERO}; use miden_standards::account::auth::AuthRpoFalcon512; use miden_standards::code_builder::CodeBuilder; @@ -464,27 +467,25 @@ fn sql_unconsumed_network_notes() { create_block(&mut conn, 1.into()); // Create an unconsumed note in each block. - let notes = (0..2) - .map(|i: u32| { - let note = NoteRecord { - block_num: 0.into(), // Created on same block. - note_index: BlockNoteIndex::new(0, i as usize).unwrap(), - note_id: num_to_word(i.into()), - note_commitment: num_to_word(i.into()), - metadata: NoteMetadata::new( - account_note.0, - NoteType::Public, - NoteTag::from_account_id(account_note.0), - NoteExecutionHint::none(), - Felt::default(), - ) - .unwrap(), - details: None, - inclusion_path: SparseMerklePath::default(), - }; - (note, Some(num_to_nullifier(i.into()))) - }) - .collect::>(); + let notes = Vec::from_iter((0..2).map(|i: u32| { + let note = NoteRecord { + block_num: 0.into(), // Created on same block. 
+ note_index: BlockNoteIndex::new(0, i as usize).unwrap(), + note_id: num_to_word(i.into()), + note_commitment: num_to_word(i.into()), + metadata: NoteMetadata::new( + account_note.0, + NoteType::Public, + NoteTag::from_account_id(account_note.0), + NoteExecutionHint::none(), + Felt::default(), + ) + .unwrap(), + details: None, + inclusion_path: SparseMerklePath::default(), + }; + (note, Some(num_to_nullifier(i.into()))) + })); queries::insert_scripts(&mut conn, notes.iter().map(|(note, _)| note)).unwrap(); queries::insert_notes(&mut conn, ¬es).unwrap(); @@ -1165,8 +1166,7 @@ fn sql_account_storage_map_values_insertion() { let mut map1 = StorageMapDelta::default(); map1.insert(key1, value1); map1.insert(key2, value2); - let delta1: BTreeMap<_, _> = - [(slot_name.clone(), StorageSlotDelta::Map(map1))].into_iter().collect(); + let delta1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map1))]); let storage1 = AccountStorageDelta::from_raw(delta1); let delta1 = AccountDelta::new(account_id, storage1, AccountVaultDelta::default(), Felt::ONE).unwrap(); @@ -1326,6 +1326,30 @@ fn mock_block_account_update(account_id: AccountId, num: u64) -> BlockAccountUpd BlockAccountUpdate::new(account_id, num_to_word(num), AccountUpdateDetails::Private) } +// Helper function to create account with specific code for tests +fn create_account_with_code(code_str: &str, seed: [u8; 32]) -> Account { + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(1), num_to_word(1)), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", code_str) + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountUpdatableCode); + + AccountBuilder::new(seed) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap() +} + fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader { let initial_state_commitment = Word::try_from([num, 0, 0, 0]).unwrap(); let final_account_commitment = Word::try_from([0, num, 0, 0]).unwrap(); @@ -1428,6 +1452,137 @@ fn mock_account_code_and_storage( .unwrap() } +// ACCOUNT CODE TESTS +// ================================================================================================ + +#[test] +fn test_select_account_code_by_commitment() { + let mut conn = create_db(); + + let block_num_1 = BlockNumber::from(1); + + // Create block 1 + create_block(&mut conn, block_num_1); + + // Create an account with code at block 1 using the existing mock function + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + None, + ); + + // Get the code commitment and bytes before inserting + let code_commitment = account.code().commitment(); + let expected_code = account.code().to_bytes(); + + // Insert the account at block 1 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account.id(), + account.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account).unwrap()), + )], + block_num_1, + ) + .unwrap(); + + // Query code by commitment - should return the code + let code = queries::select_account_code_by_commitment(&mut conn, code_commitment) + 
.unwrap() + .expect("Code should exist"); + assert_eq!(code, expected_code); + + // Query code for non-existent commitment - should return None + let non_existent_commitment = [0u8; 32]; + let non_existent_commitment = Word::read_from_bytes(&non_existent_commitment).unwrap(); + let code_other = + queries::select_account_code_by_commitment(&mut conn, non_existent_commitment).unwrap(); + assert!(code_other.is_none(), "Code should not exist for non-existent commitment"); +} + +#[test] +fn test_select_account_code_by_commitment_multiple_codes() { + let mut conn = create_db(); + + let block_num_1 = BlockNumber::from(1); + let block_num_2 = BlockNumber::from(2); + + // Create blocks + create_block(&mut conn, block_num_1); + create_block(&mut conn, block_num_2); + + // Create account with code v1 at block 1 + let code_v1_str = "\ + pub proc account_procedure_1 + push.1.2 + add + end + "; + let account_v1 = create_account_with_code(code_v1_str, [1u8; 32]); + let code_v1_commitment = account_v1.code().commitment(); + let code_v1 = account_v1.code().to_bytes(); + + // Insert the account at block 1 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account_v1.id(), + account_v1.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account_v1).unwrap()), + )], + block_num_1, + ) + .unwrap(); + + // Create account with different code v2 at block 2 + let code_v2_str = "\ + pub proc account_procedure_1 + push.3.4 + mul + end + "; + let account_v2 = create_account_with_code(code_v2_str, [1u8; 32]); // Same seed to keep same account_id + let code_v2_commitment = account_v2.code().commitment(); + let code_v2 = account_v2.code().to_bytes(); + + // Verify that the codes are actually different + assert_ne!( + code_v1, code_v2, + "Test setup error: codes should be different for different code strings" + ); + assert_ne!( + code_v1_commitment, code_v2_commitment, + "Test setup error: code commitments should be different" + ); + + // Insert the updated account at block 2 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account_v2.id(), + account_v2.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account_v2).unwrap()), + )], + block_num_2, + ) + .unwrap(); + + // Both codes should be retrievable by their respective commitments + let code_from_v1_commitment = + queries::select_account_code_by_commitment(&mut conn, code_v1_commitment) + .unwrap() + .expect("v1 code should exist"); + assert_eq!(code_from_v1_commitment, code_v1, "v1 commitment should return v1 code"); + + let code_from_v2_commitment = + queries::select_account_code_by_commitment(&mut conn, code_v2_commitment) + .unwrap() + .expect("v2 code should exist"); + assert_eq!(code_from_v2_commitment, code_v2, "v2 commitment should return v2 code"); +} + // GENESIS REGRESSION TESTS // ================================================================================================ @@ -1682,3 +1837,514 @@ fn regression_1461_full_state_delta_inserts_vault_assets() { assert_eq!(vault_asset.asset, Some(expected_asset)); assert_eq!(vault_asset.vault_key, expected_asset.vault_key()); } + +// SERIALIZATION SYMMETRY TESTS +// ================================================================================================ +// +// These tests ensure that `to_bytes` and `from_bytes`/`read_from_bytes` are symmetric for all +// types used in database operations. This guarantees that data inserted into the database can +// always be correctly retrieved. 
+ +#[test] +fn serialization_symmetry_core_types() { + // AccountId + let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + let bytes = account_id.to_bytes(); + let restored = AccountId::read_from_bytes(&bytes).unwrap(); + assert_eq!(account_id, restored, "AccountId serialization must be symmetric"); + + // Word + let word = num_to_word(0x1234_5678_9ABC_DEF0); + let bytes = word.to_bytes(); + let restored = Word::read_from_bytes(&bytes).unwrap(); + assert_eq!(word, restored, "Word serialization must be symmetric"); + + // Nullifier + let nullifier = num_to_nullifier(0xDEAD_BEEF); + let bytes = nullifier.to_bytes(); + let restored = Nullifier::read_from_bytes(&bytes).unwrap(); + assert_eq!(nullifier, restored, "Nullifier serialization must be symmetric"); + + // TransactionId + let tx_id = TransactionId::new(num_to_word(1), num_to_word(2), num_to_word(3), num_to_word(4)); + let bytes = tx_id.to_bytes(); + let restored = TransactionId::read_from_bytes(&bytes).unwrap(); + assert_eq!(tx_id, restored, "TransactionId serialization must be symmetric"); + + // NoteId + let note_id = NoteId::new(num_to_word(1), num_to_word(2)); + let bytes = note_id.to_bytes(); + let restored = NoteId::read_from_bytes(&bytes).unwrap(); + assert_eq!(note_id, restored, "NoteId serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_block_header() { + let block_header = BlockHeader::new( + 1_u8.into(), + num_to_word(2), + 3.into(), + num_to_word(4), + num_to_word(5), + num_to_word(6), + num_to_word(7), + num_to_word(8), + num_to_word(9), + SecretKey::new().public_key(), + test_fee_params(), + 11_u8.into(), + ); + + let bytes = block_header.to_bytes(); + let restored = BlockHeader::read_from_bytes(&bytes).unwrap(); + assert_eq!(block_header, restored, "BlockHeader serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_assets() { + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // FungibleAsset + let fungible = FungibleAsset::new(faucet_id, 1000).unwrap(); + let asset: Asset = fungible.into(); + let bytes = asset.to_bytes(); + let restored = Asset::read_from_bytes(&bytes).unwrap(); + assert_eq!(asset, restored, "Asset (fungible) serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_account_code() { + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + None, + ); + + let code = account.code(); + let bytes = code.to_bytes(); + let restored = AccountCode::read_from_bytes(&bytes).unwrap(); + assert_eq!(*code, restored, "AccountCode serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_sparse_merkle_path() { + let path = SparseMerklePath::default(); + let bytes = path.to_bytes(); + let restored = SparseMerklePath::read_from_bytes(&bytes).unwrap(); + assert_eq!(path, restored, "SparseMerklePath serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_note_metadata() { + let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + // Use a tag that roundtrips properly - NoteTag::LocalAny stores the full u32 including type + // bits + let tag = NoteTag::from_account_id(sender); + let metadata = NoteMetadata::new( + sender, + NoteType::Public, + tag, + NoteExecutionHint::always(), + Felt::new(42), + ) + .unwrap(); + + let bytes = metadata.to_bytes(); + let restored = NoteMetadata::read_from_bytes(&bytes).unwrap(); + assert_eq!(metadata, restored, "NoteMetadata serialization must be 
symmetric"); +} + +#[test] +fn serialization_symmetry_nullifier_vec() { + let nullifiers: Vec = (0..5).map(num_to_nullifier).collect(); + let bytes = nullifiers.to_bytes(); + let restored: Vec = Deserializable::read_from_bytes(&bytes).unwrap(); + assert_eq!(nullifiers, restored, "Vec serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_note_id_vec() { + let note_ids: Vec = + (0..5).map(|i| NoteId::new(num_to_word(i), num_to_word(i + 100))).collect(); + let bytes = note_ids.to_bytes(); + let restored: Vec = Deserializable::read_from_bytes(&bytes).unwrap(); + assert_eq!(note_ids, restored, "Vec serialization must be symmetric"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_block_header() { + let mut conn = create_db(); + + let block_header = BlockHeader::new( + 1_u8.into(), + num_to_word(2), + BlockNumber::from(42), + num_to_word(4), + num_to_word(5), + num_to_word(6), + num_to_word(7), + num_to_word(8), + num_to_word(9), + SecretKey::new().public_key(), + test_fee_params(), + 11_u8.into(), + ); + + // Insert + queries::insert_block_header(&mut conn, &block_header).unwrap(); + + // Retrieve + let retrieved = + queries::select_block_header_by_block_num(&mut conn, Some(block_header.block_num())) + .unwrap() + .expect("Block header should exist"); + + assert_eq!(block_header, retrieved, "BlockHeader DB roundtrip must be symmetric"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_nullifiers() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let nullifiers: Vec = (0..5).map(|i| num_to_nullifier(i << 48)).collect(); + + // Insert + queries::insert_nullifiers_for_block(&mut conn, &nullifiers, block_num).unwrap(); + + // Retrieve + let retrieved = queries::select_all_nullifiers(&mut conn).unwrap(); + + assert_eq!(nullifiers.len(), retrieved.len(), "Should retrieve same number of nullifiers"); + for (orig, info) in nullifiers.iter().zip(retrieved.iter()) { + assert_eq!(*orig, info.nullifier, "Nullifier DB roundtrip must be symmetric"); + assert_eq!(block_num, info.block_num, "Block number must match"); + } +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_account() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + Some([99u8; 32]), + ); + let account_id = account.id(); + let account_commitment = account.commitment(); + + // Insert with full delta (like genesis) + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + let block_update = BlockAccountUpdate::new( + account_id, + account_commitment, + AccountUpdateDetails::Delta(account_delta), + ); + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + // Retrieve + let retrieved = queries::select_all_accounts(&mut conn).unwrap(); + assert_eq!(retrieved.len(), 1, "Should have one account"); + + let retrieved_info = &retrieved[0]; + assert_eq!( + retrieved_info.summary.account_id, account_id, + "AccountId DB roundtrip must be symmetric" + ); + assert_eq!( + retrieved_info.summary.account_commitment, account_commitment, + "Account commitment DB roundtrip must be symmetric" + ); + assert_eq!(retrieved_info.summary.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_notes() { + let mut conn = 
create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(sender, 0)], block_num) + .unwrap(); + + let new_note = create_note(sender); + let note_index = BlockNoteIndex::new(0, 0).unwrap(); + + let note = NoteRecord { + block_num, + note_index, + note_id: new_note.id().as_word(), + note_commitment: new_note.commitment(), + metadata: *new_note.metadata(), + details: Some(NoteDetails::from(&new_note)), + inclusion_path: SparseMerklePath::default(), + }; + + // Insert + queries::insert_scripts(&mut conn, [¬e]).unwrap(); + queries::insert_notes(&mut conn, &[(note.clone(), None)]).unwrap(); + + // Retrieve + let note_ids = vec![NoteId::from_raw(note.note_id)]; + let retrieved = queries::select_notes_by_id(&mut conn, ¬e_ids).unwrap(); + + assert_eq!(retrieved.len(), 1, "Should have one note"); + let retrieved_note = &retrieved[0]; + + assert_eq!(note.note_id, retrieved_note.note_id, "NoteId DB roundtrip must be symmetric"); + assert_eq!( + note.note_commitment, retrieved_note.note_commitment, + "Note commitment DB roundtrip must be symmetric" + ); + assert_eq!( + note.metadata, retrieved_note.metadata, + "Metadata DB roundtrip must be symmetric" + ); + assert_eq!( + note.inclusion_path, retrieved_note.inclusion_path, + "Inclusion path DB roundtrip must be symmetric" + ); + assert_eq!( + note.details, retrieved_note.details, + "Note details DB roundtrip must be symmetric" + ); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_transactions() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block_num) + .unwrap(); + + let tx = mock_block_transaction(account_id, 1); + let ordered_tx = OrderedTransactionHeaders::new_unchecked(vec![tx.clone()]); + + // Insert + queries::insert_transactions(&mut conn, block_num, &ordered_tx).unwrap(); + + // Retrieve + let retrieved = queries::select_transactions_by_accounts_and_block_range( + &mut conn, + &[account_id], + BlockNumber::from(0)..=BlockNumber::from(2), + ) + .unwrap(); + + assert_eq!(retrieved.len(), 1, "Should have one transaction"); + let retrieved_tx = &retrieved[0]; + + assert_eq!( + tx.account_id(), + retrieved_tx.account_id, + "AccountId DB roundtrip must be symmetric" + ); + assert_eq!( + tx.id(), + retrieved_tx.transaction_id, + "TransactionId DB roundtrip must be symmetric" + ); + assert_eq!(block_num, retrieved_tx.block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_vault_assets() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + + // Create account first + queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num) + .unwrap(); + + let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); + let asset: Asset = fungible_asset.into(); + let vault_key = asset.vault_key(); + + // Insert vault asset + queries::insert_account_vault_asset(&mut conn, account_id, block_num, vault_key, Some(asset)) + 
.unwrap(); + + // Retrieve + let (_, vault_assets) = queries::select_account_vault_assets( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + assert_eq!(vault_assets.len(), 1, "Should have one vault asset"); + let retrieved = &vault_assets[0]; + + assert_eq!(retrieved.asset, Some(asset), "Asset DB roundtrip must be symmetric"); + assert_eq!(retrieved.vault_key, vault_key, "VaultKey DB roundtrip must be symmetric"); + assert_eq!(retrieved.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_storage_map_values() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = StorageSlotName::mock(5); + let key = num_to_word(12345); + let value = num_to_word(67890); + + // Insert + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block_num, + slot_name.clone(), + key, + value, + ) + .unwrap(); + + // Retrieve + let page = queries::select_account_storage_map_values( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + assert_eq!(page.values.len(), 1, "Should have one storage map value"); + let retrieved = &page.values[0]; + + assert_eq!(retrieved.slot_name, slot_name, "StorageSlotName DB roundtrip must be symmetric"); + assert_eq!(retrieved.key, key, "Key (Word) DB roundtrip must be symmetric"); + assert_eq!(retrieved.value, value, "Value (Word) DB roundtrip must be symmetric"); + assert_eq!(retrieved.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_account_storage_with_maps() { + use miden_protocol::account::StorageMap; + + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + // Create storage with both value slots and map slots + let storage_map = StorageMap::with_entries(vec![ + ( + Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), + ), + ( + Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), + ), + ]) + .unwrap(); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), num_to_word(42)), + StorageSlot::with_map(StorageSlotName::mock(1), storage_map), + StorageSlot::with_empty_value(StorageSlotName::mock(2)), + ]; + + let component_code = "pub proc foo push.1 end"; + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); + + let account = AccountBuilder::new([50u8; 32]) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let account_id = account.id(); + let original_storage = account.storage().clone(); + let original_commitment = original_storage.to_commitment(); + + // Insert the account (this should store header + map values separately) + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + let block_update = 
BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(account_delta), + ); + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + // Retrieve the storage using select_latest_account_storage (reconstructs from header + map + // values) + let retrieved_storage = queries::select_latest_account_storage(&mut conn, account_id).unwrap(); + let retrieved_commitment = retrieved_storage.to_commitment(); + + // Verify the commitment matches (this proves the reconstruction is correct) + assert_eq!( + original_commitment, retrieved_commitment, + "Storage commitment must match after DB roundtrip" + ); + + // Verify slot count matches + assert_eq!( + original_storage.slots().len(), + retrieved_storage.slots().len(), + "Number of slots must match" + ); + + // Verify each slot + for (original_slot, retrieved_slot) in + original_storage.slots().iter().zip(retrieved_storage.slots().iter()) + { + assert_eq!(original_slot.name(), retrieved_slot.name(), "Slot names must match"); + assert_eq!(original_slot.slot_type(), retrieved_slot.slot_type(), "Slot types must match"); + + match (original_slot.content(), retrieved_slot.content()) { + (StorageSlotContent::Value(orig), StorageSlotContent::Value(retr)) => { + assert_eq!(orig, retr, "Value slot contents must match"); + }, + (StorageSlotContent::Map(orig_map), StorageSlotContent::Map(retr_map)) => { + assert_eq!(orig_map.root(), retr_map.root(), "Map slot roots must match"); + for (key, value) in orig_map.entries() { + let retrieved_value = retr_map.get(key); + assert_eq!(*value, retrieved_value, "Map entry for key {:?} must match", key); + } + }, + // The slot_type assertion above guarantees matching variants, so this is unreachable + _ => unreachable!(), + } + } + + // Also verify full account reconstruction via select_account (which calls select_full_account) + let account_info = queries::select_account(&mut conn, account_id).unwrap(); + assert!(account_info.details.is_some(), "Public account should have details"); + let retrieved_account = account_info.details.unwrap(); + assert_eq!( + account.commitment(), + retrieved_account.commitment(), + "Full account commitment must match after DB roundtrip" + ); +} diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 4399918ba..42a0fe32d 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -8,9 +8,10 @@ use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_node_utils::limiter::QueryLimitError; use miden_protocol::account::AccountId; use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::MerkleError; use miden_protocol::crypto::merkle::mmr::MmrError; use miden_protocol::crypto::utils::DeserializationError; -use miden_protocol::note::Nullifier; +use miden_protocol::note::{NoteId, Nullifier}; use miden_protocol::transaction::OutputNote; use miden_protocol::{ AccountDeltaError, @@ -21,6 +22,7 @@ use miden_protocol::{ FeeError, NoteError, NullifierTreeError, + StorageMapError, Word, }; use thiserror::Error; @@ -56,11 +58,13 @@ pub enum DatabaseError { #[error("I/O error")] IoError(#[from] io::Error), #[error("merkle error")] - MerkleError(#[from] miden_protocol::crypto::merkle::MerkleError), + MerkleError(#[from] MerkleError), #[error("network account error")] NetworkAccountError(#[from] NetworkAccountError), #[error("note error")] NoteError(#[from] NoteError), + #[error("storage map error")] + StorageMapError(#[from] StorageMapError), #[error("setup deadpool connection pool 
failed")] Deadpool(#[from] deadpool::managed::PoolError), #[error("setup deadpool connection pool failed")] @@ -98,16 +102,18 @@ pub enum DatabaseError { AccountNotFoundInDb(AccountId), #[error("account {0} state at block height {1} not found")] AccountAtBlockHeightNotFoundInDb(AccountId, BlockNumber), + #[error("block {0} not found in database")] + BlockNotFound(BlockNumber), #[error("historical block {block_num} not available: {reason}")] HistoricalBlockNotAvailable { block_num: BlockNumber, reason: String }, #[error("accounts {0:?} not found")] AccountsNotFoundInDb(Vec), #[error("account {0} is not on the chain")] AccountNotPublic(AccountId), - #[error("account {0} details missing")] - AccountDetailsMissing(AccountId), #[error("invalid block parameters: block_from ({from}) > block_to ({to})")] InvalidBlockRange { from: BlockNumber, to: BlockNumber }, + #[error("invalid storage slot type: {0}")] + InvalidStorageSlotType(i32), #[error("data corrupted: {0}")] DataCorrupted(String), #[error("SQLite pool interaction failed: {0}")] @@ -175,6 +181,8 @@ impl From for Status { pub enum StateInitializationError { #[error("account tree IO error: {0}")] AccountTreeIoError(String), + #[error("nullifier tree IO error: {0}")] + NullifierTreeIoError(String), #[error("database error")] DatabaseError(#[from] DatabaseError), #[error("failed to create nullifier tree")] @@ -248,6 +256,8 @@ pub enum InvalidBlockError { NewBlockNullifierAlreadySpent(#[source] NullifierTreeError), #[error("duplicate account ID prefix in new block")] NewBlockDuplicateAccountIdPrefix(#[source] AccountTreeError), + #[error("failed to build note tree: {0}")] + FailedToBuildNoteTree(String), } #[derive(Error, Debug)] @@ -447,9 +457,9 @@ pub enum GetNotesByIdError { #[error("malformed note ID")] DeserializationFailed(#[from] ConversionError), #[error("note {0} not found")] - NoteNotFound(miden_protocol::note::NoteId), + NoteNotFound(NoteId), #[error("note {0} is not public")] - NoteNotPublic(miden_protocol::note::NoteId), + NoteNotPublic(NoteId), } // GET NOTE SCRIPT BY ROOT ERRORS diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index ab308569f..aa21c8fd1 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -23,11 +23,11 @@ use miden_node_proto::domain::account::{ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; -use miden_protocol::account::{AccountHeader, AccountId, StorageSlot, StorageSlotContent}; +use miden_protocol::account::{AccountId, StorageSlotContent}; use miden_protocol::block::account_tree::{AccountTree, AccountWitness, account_id_to_smt_key}; use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; -use miden_protocol::crypto::merkle::mmr::{Forest, Mmr, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; use miden_protocol::crypto::merkle::smt::{ LargeSmt, LargeSmtError, @@ -100,7 +100,10 @@ where } } -/// The rollup state +// CHAIN STATE +// ================================================================================================ + +/// The rollup state. pub struct State { /// The database which stores block headers, nullifiers, notes, and the latest states of /// accounts. 
@@ -120,6 +123,9 @@ pub struct State { } impl State { + // CONSTRUCTOR + // -------------------------------------------------------------------------------------------- + /// Loads the state from the `db`. #[instrument(target = COMPONENT, skip_all)] pub async fn load(data_path: &Path) -> Result { @@ -136,21 +142,12 @@ impl State { .await .map_err(StateInitializationError::DatabaseLoadError)?; - let chain_mmr = load_mmr(&mut db).await?; - let block_headers = db.select_all_block_headers().await?; - let latest_block_num = block_headers - .last() - .map_or(BlockNumber::GENESIS, miden_protocol::block::BlockHeader::block_num); + let blockchain = load_mmr(&mut db).await?; + let latest_block_num = blockchain.chain_tip().unwrap_or(BlockNumber::GENESIS); let account_tree = load_account_tree(&mut db, latest_block_num).await?; let nullifier_tree = load_nullifier_tree(&mut db).await?; - let inner = RwLock::new(InnerState { - nullifier_tree, - // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX - // entries. - blockchain: Blockchain::from_mmr_unchecked(chain_mmr), - account_tree, - }); + let inner = RwLock::new(InnerState { nullifier_tree, blockchain, account_tree }); let writer = Mutex::new(()); let db = Arc::new(db); @@ -158,6 +155,9 @@ impl State { Ok(Self { db, block_store, inner, writer }) } + // STATE MUTATOR + // -------------------------------------------------------------------------------------------- + /// Apply changes of a new block to the DB and in-memory data structures. /// /// ## Note on state consistency @@ -200,7 +200,7 @@ impl State { } let block_num = header.block_num(); - let block_commitment = block.header().commitment(); + let block_commitment = header.commitment(); // ensures the right block header is being processed let prev_block = self @@ -249,7 +249,7 @@ impl State { .body() .created_nullifiers() .iter() - .filter(|&n| inner.nullifier_tree.get_block_num(n).is_some()) + .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) .copied() .collect(); if !duplicate_nullifiers.is_empty() { @@ -418,6 +418,9 @@ impl State { Ok(()) } + // STATE ACCESSORS + // -------------------------------------------------------------------------------------------- + /// Queries a [BlockHeader] from the database, and returns it alongside its inclusion proof. /// /// If [None] is given as the value of `block_num`, the data for the latest [BlockHeader] is @@ -926,7 +929,7 @@ impl State { return Err(DatabaseError::AccountNotPublic(account_id)); } - let (block_num, witness) = self.get_block_witness(block_num, account_id).await?; + let (block_num, witness) = self.get_account_witness(block_num, account_id).await?; let details = if let Some(request) = details { Some(self.fetch_public_account_details(account_id, block_num, request).await?) @@ -941,7 +944,7 @@ impl State { /// /// If `block_num` is provided, returns the witness at that historical block, /// if not present, returns the witness at the latest block. - async fn get_block_witness( + async fn get_account_witness( &self, block_num: Option, account_id: AccountId, @@ -987,67 +990,71 @@ impl State { storage_requests, } = detail_request; - let account_info = self.db.select_historical_account_at(account_id, block_num).await?; + if !account_id.has_public_state() { + return Err(DatabaseError::AccountNotPublic(account_id)); + } - // If we get a query for a public account but the details are missing from the database, - // it indicates an inconsistent state in the database. 
- let Some(account) = account_info.details else { - return Err(DatabaseError::AccountDetailsMissing(account_id)); + // Validate block exists in the blockchain before querying the database + self.validate_block_exists(block_num).await?; + + let account_header = self + .db + .select_account_header_at_block(account_id, block_num) + .await? + .ok_or_else(|| DatabaseError::AccountNotPublic(account_id))?; + + let account_code = match code_commitment { + Some(commitment) if commitment == account_header.code_commitment() => None, + Some(_) => { + self.db + .select_account_code_by_commitment(account_header.code_commitment()) + .await? + }, + None => None, }; - let storage_header = account.storage().to_header(); + let vault_details = match asset_vault_commitment { + Some(commitment) if commitment == account_header.vault_root() => { + AccountVaultDetails::empty() + }, + Some(_) => { + let vault_assets = + self.db.select_account_vault_at_block(account_id, block_num).await?; + AccountVaultDetails::from_assets(vault_assets) + }, + None => AccountVaultDetails::empty(), + }; + // TODO: don't load the entire storage at once, load what is required + let storage = self.db.select_account_storage_at_block(account_id, block_num).await?; + let storage_header = storage.to_header(); let mut storage_map_details = Vec::::with_capacity(storage_requests.len()); for StorageMapRequest { slot_name, slot_data } in storage_requests { - let Some(StorageSlotContent::Map(storage_map)) = - account.storage().get(&slot_name).map(StorageSlot::content) - else { - return Err(AccountError::StorageSlotNotMap(slot_name).into()); + let Some(slot) = storage.slots().iter().find(|s| s.name() == &slot_name) else { + continue; }; + + let storage_map = match slot.content() { + StorageSlotContent::Map(map) => map, + StorageSlotContent::Value(_) => { + return Err(AccountError::StorageSlotNotMap(slot_name).into()); + }, + }; + let details = AccountStorageMapDetails::new(slot_name, slot_data, storage_map); storage_map_details.push(details); } - // Only include unknown account code blobs, which is equal to a account code digest - // mismatch. If `None` was requested, don't return any. - let account_code = code_commitment - .is_some_and(|code_commitment| code_commitment != account.code().commitment()) - .then(|| account.code().to_bytes()); - - // storage details - let storage_details = AccountStorageDetails { - header: storage_header, - map_details: storage_map_details, - }; - - // Handle vault details based on the `asset_vault_commitment`. - // Similar to `code_commitment`, if the provided commitment matches, we don't return - // vault data. If no commitment is provided or it doesn't match, we return - // the vault data. If the number of vault contained assets are exceeding a - // limit, we signal this back in the response and the user must handle that - // in follow-up request. 
-        let vault_details = match asset_vault_commitment {
-            Some(commitment) if commitment == account.vault().root() => {
-                // The client already has the correct vault data
-                AccountVaultDetails::empty()
-            },
-            Some(_) => {
-                // The commitment doesn't match, so return vault data
-                AccountVaultDetails::new(account.vault())
-            },
-            None => {
-                // No commitment provided, so don't return vault data
-                AccountVaultDetails::empty()
-            },
-        };
-
         Ok(AccountDetails {
-            account_header: AccountHeader::from(account),
+            account_header,
             account_code,
             vault_details,
-            storage_details,
+            storage_details: AccountStorageDetails {
+                header: storage_header,
+                map_details: storage_map_details,
+            },
         })
     }
 
@@ -1076,6 +1083,26 @@
         self.inner.read().await.latest_block_num()
     }
 
+    /// Validates that a block exists in the blockchain
+    ///
+    /// # Attention
+    ///
+    /// Acquires a **read lock** on `self.inner`.
+    ///
+    /// # Errors
+    ///
+    /// Returns `DatabaseError::BlockNotFound` if the block doesn't exist in the blockchain.
+    async fn validate_block_exists(&self, block_num: BlockNumber) -> Result<(), DatabaseError> {
+        let inner = self.inner.read().await;
+        let latest_block_num = inner.latest_block_num();
+
+        if block_num > latest_block_num {
+            return Err(DatabaseError::BlockNotFound(block_num));
+        }
+
+        Ok(())
+    }
+
     /// Runs database optimization.
     pub async fn optimize_db(&self) -> Result<(), DatabaseError> {
         self.db.optimize().await
@@ -1122,9 +1149,25 @@ impl State {
     }
 }
 
-// UTILITIES
+// INNER STATE LOADING
 // ================================================================================================
 
+#[instrument(level = "info", target = COMPONENT, skip_all)]
+async fn load_mmr(db: &mut Db) -> Result<Blockchain, StateInitializationError> {
+    let block_commitments: Vec<Word> = db
+        .select_all_block_headers()
+        .await?
+        .iter()
+        .map(BlockHeader::commitment)
+        .collect();
+
+    // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX
+    // entries.
+    let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into());
+
+    Ok(chain_mmr)
+}
+
 #[instrument(level = "info", target = COMPONENT, skip_all)]
 async fn load_nullifier_tree(
     db: &mut Db,
@@ -1139,24 +1182,12 @@ async fn load_nullifier_tree(
     .map_err(StateInitializationError::FailedToCreateNullifierTree)
 }
 
-#[instrument(level = "info", target = COMPONENT, skip_all)]
-async fn load_mmr(db: &mut Db) -> Result<Mmr, StateInitializationError> {
-    let block_commitments: Vec<Word> = db
-        .select_all_block_headers()
-        .await?
-        .iter()
-        .map(BlockHeader::commitment)
-        .collect();
-
-    Ok(block_commitments.into())
-}
-
 #[instrument(level = "info", target = COMPONENT, skip_all)]
 async fn load_account_tree(
     db: &mut Db,
     block_number: BlockNumber,
 ) -> Result, StateInitializationError> {
-    let account_data = db.select_all_account_commitments().await?.into_iter().collect::<Vec<_>>();
+    let account_data = Vec::from_iter(db.select_all_account_commitments().await?);
 
     let smt_entries = account_data
         .into_iter()