diff --git a/CHANGELOG.md b/CHANGELOG.md
index fe5bc0492..fbb1a68d0 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -20,6 +20,7 @@
 - Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/miden-node/pull/1410)).
 - Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)).
 - Add DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)).
+- Improve DB query performance for account queries ([#1496](https://github.com/0xMiden/miden-node/pull/1496)).
 
 ### Changes
 
diff --git a/Cargo.lock b/Cargo.lock
index e911e94d0..a3a7a95d2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1114,9 +1114,9 @@ dependencies = [
 
 [[package]]
 name = "diesel"
-version = "2.3.4"
+version = "2.3.5"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0c415189028b232660655e4893e8bc25ca7aee8e96888db66d9edb400535456a"
+checksum = "e130c806dccc85428c564f2dc5a96e05b6615a27c9a28776bd7761a9af4bb552"
 dependencies = [
  "bigdecimal",
  "diesel_derives",
@@ -3385,7 +3385,7 @@ version = "0.50.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5"
 dependencies = [
- "windows-sys 0.61.2",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
@@ -4274,7 +4274,7 @@ version = "0.14.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1"
 dependencies = [
- "heck 0.5.0",
+ "heck 0.4.1",
  "itertools 0.10.5",
  "log",
  "multimap",
@@ -5382,7 +5382,7 @@ version = "1.2.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1"
 dependencies = [
- "windows-sys 0.61.2",
+ "windows-sys 0.59.0",
 ]
 
 [[package]]
diff --git a/crates/store/src/db/models/queries/accounts/at_block.rs b/crates/store/src/db/models/queries/accounts/at_block.rs
index dc613a9c6..307edd0b7 100644
--- a/crates/store/src/db/models/queries/accounts/at_block.rs
+++ b/crates/store/src/db/models/queries/accounts/at_block.rs
@@ -1,15 +1,8 @@
 use std::collections::BTreeMap;
 
-use diesel::prelude::Queryable;
+use diesel::prelude::{Queryable, QueryableByName};
 use diesel::query_dsl::methods::SelectDsl;
-use diesel::{
-    BoolExpressionMethods,
-    ExpressionMethods,
-    OptionalExtension,
-    QueryDsl,
-    RunQueryDsl,
-    SqliteConnection,
-};
+use diesel::{ExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, SqliteConnection};
 use miden_protocol::account::{
     AccountHeader,
     AccountId,
@@ -125,63 +118,64 @@ pub(crate) fn select_account_header_at_block(
 // ================================================================================================
 
 /// Query vault assets at a specific block by finding the most recent update for each `vault_key`.
+///
+/// Uses a single raw SQL query with a subquery join:
+/// ```sql
+/// SELECT a.asset FROM account_vault_assets a
+/// INNER JOIN (
+///     SELECT vault_key, MAX(block_num) as max_block
+///     FROM account_vault_assets
+///     WHERE account_id = ? AND block_num <= ?
+///     GROUP BY vault_key
+/// ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block
+/// WHERE a.account_id = ?
+/// ```
 pub(crate) fn select_account_vault_at_block(
     conn: &mut SqliteConnection,
     account_id: AccountId,
     block_num: BlockNumber,
 ) -> Result<Vec<Asset>, DatabaseError> {
-    use schema::account_vault_assets as t;
+    use diesel::sql_types::{BigInt, Binary};
 
     let account_id_bytes = account_id.to_bytes();
     let block_num_sql = block_num.to_raw_sql();
 
-    // Since Diesel doesn't support composite keys in subqueries easily, we use a two-step approach:
-    // Step 1: Get max block_num for each vault_key
-    let latest_blocks_per_vault_key = Vec::from_iter(
-        QueryDsl::select(
-            t::table
-                .filter(t::account_id.eq(&account_id_bytes))
-                .filter(t::block_num.le(block_num_sql))
-                .group_by(t::vault_key),
-            (t::vault_key, diesel::dsl::max(t::block_num)),
-        )
-        .load::<(Vec<u8>, Option<i64>)>(conn)?
-        .into_iter()
-        .filter_map(|(key, maybe_block)| maybe_block.map(|block| (key, block))),
-    );
-
-    if latest_blocks_per_vault_key.is_empty() {
-        return Ok(Vec::new());
-    }
-
-    // Step 2: Fetch the full rows matching (vault_key, block_num) pairs
+    let entries: Vec<Option<Vec<u8>>> = diesel::sql_query(
+        r"
+        SELECT a.asset FROM account_vault_assets a
+        INNER JOIN (
+            SELECT vault_key, MAX(block_num) as max_block
+            FROM account_vault_assets
+            WHERE account_id = ? AND block_num <= ?
+            GROUP BY vault_key
+        ) latest ON a.vault_key = latest.vault_key AND a.block_num = latest.max_block
+        WHERE a.account_id = ?
+        ",
+    )
+    .bind::<Binary, _>(&account_id_bytes)
+    .bind::<BigInt, _>(block_num_sql)
+    .bind::<Binary, _>(&account_id_bytes)
+    .load::<AssetRow>(conn)?
+    .into_iter()
+    .map(|row| row.asset)
+    .collect();
+
+    // Convert to assets, filtering out deletions (None values)
     let mut assets = Vec::new();
-    for (vault_key_bytes, max_block) in latest_blocks_per_vault_key {
-        // TODO we should not make a query per vault key, but query many at once or
-        // or find an alternative approach
-        let result: Option<Option<Vec<u8>>> = QueryDsl::select(
-            t::table.filter(
-                t::account_id
-                    .eq(&account_id_bytes)
-                    .and(t::vault_key.eq(&vault_key_bytes))
-                    .and(t::block_num.eq(max_block)),
-            ),
-            t::asset,
-        )
-        .first(conn)
-        .optional()?;
-        if let Some(Some(asset_bytes)) = result {
-            let asset = Asset::read_from_bytes(&asset_bytes)?;
-            assets.push(asset);
-        }
+    for asset_bytes in entries.into_iter().flatten() {
+        let asset = Asset::read_from_bytes(&asset_bytes)?;
+        assets.push(asset);
     }
 
-    // Sort by vault_key for consistent ordering
-    assets.sort_by_key(Asset::vault_key);
-
     Ok(assets)
 }
 
+#[derive(QueryableByName)]
+struct AssetRow {
+    #[diesel(sql_type = diesel::sql_types::Nullable<diesel::sql_types::Binary>)]
+    asset: Option<Vec<u8>>,
+}
+
 // ACCOUNT STORAGE
 // ================================================================================================
 
@@ -218,18 +212,19 @@ pub(crate) fn select_account_storage_at_block(
     let header = AccountStorageHeader::read_from_bytes(&blob)?;
 
     // Query all map values for this account up to and including this block.
-    // For each (slot_name, key), we need the latest value at or before block_num.
-    // First, get all entries up to block_num
-    let map_values: Vec<(i64, String, Vec<u8>, Vec<u8>)> =
-        SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value))
-            .filter(t::account_id.eq(&account_id_bytes).and(t::block_num.le(block_num_sql)))
+    // Order by (slot_name, key) ascending, then block_num descending so the first entry
+    // for each (slot_name, key) pair is the latest one.
+    let map_values: Vec<(String, Vec<u8>, Vec<u8>)> =
+        SelectDsl::select(t::table, (t::slot_name, t::key, t::value))
+            .filter(t::account_id.eq(&account_id_bytes))
+            .filter(t::block_num.le(block_num_sql))
             .order((t::slot_name.asc(), t::key.asc(), t::block_num.desc()))
             .load(conn)?;
 
-    // For each (slot_name, key) pair, keep only the latest entry (highest block_num)
+    // For each (slot_name, key) pair, keep only the latest entry (first one due to ordering)
     let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new();
-    for (_, slot_name_str, key_bytes, value_bytes) in map_values {
+    for (slot_name_str, key_bytes, value_bytes) in map_values {
         let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| {
             DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}"))
         })?;
diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs
index 67eb24c1f..a0a23f3b5 100644
--- a/crates/store/src/db/models/queries/accounts/tests.rs
+++ b/crates/store/src/db/models/queries/accounts/tests.rs
@@ -550,3 +550,181 @@ fn test_upsert_accounts_with_empty_storage() {
         "Storage header blob should exist even for empty storage"
     );
 }
+
+// VAULT AT BLOCK HISTORICAL QUERY TESTS
+// ================================================================================================
+
+/// Tests that querying the vault at an older block returns the correct historical state,
+/// even when the same `vault_key` has been updated in later blocks.
+///
+/// Focuses on the deduplication logic that keeps, for each `vault_key`, only the entry with
+/// the highest `block_num` at or before the queried block.
+#[test]
+fn test_select_account_vault_at_block_historical_with_updates() {
+    use assert_matches::assert_matches;
+    use miden_protocol::asset::{AssetVaultKey, FungibleAsset};
+    use miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET;
+
+    let mut conn = setup_test_db();
+    let (account, _) = create_test_account_with_storage();
+    let account_id = account.id();
+
+    // Faucet ID is needed for creating FungibleAssets
+    let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap();
+
+    let block_1 = BlockNumber::from_epoch(0);
+    let block_2 = BlockNumber::from_epoch(1);
+    let block_3 = BlockNumber::from_epoch(2);
+
+    insert_block_header(&mut conn, block_1);
+    insert_block_header(&mut conn, block_2);
+    insert_block_header(&mut conn, block_3);
+
+    // Insert account at block 1
+    let delta = AccountDelta::try_from(account.clone()).unwrap();
+    let account_update = BlockAccountUpdate::new(
+        account_id,
+        account.commitment(),
+        AccountUpdateDetails::Delta(delta),
+    );
+    upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed");
+
+    // Insert vault asset at block 1: vault_key_1 = 1000 tokens
+    let vault_key_1 = AssetVaultKey::new_unchecked(Word::from([
+        Felt::new(1),
+        Felt::new(0),
+        Felt::new(0),
+        Felt::new(0),
+    ]));
+    let asset_v1 = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap());
+
+    insert_account_vault_asset(&mut conn, account_id, block_1, vault_key_1, Some(asset_v1))
+        .expect("insert vault asset failed");
+
+    // Update vault asset at block 2: vault_key_1 = 2000 tokens (updated value)
+    let asset_v2 = Asset::Fungible(FungibleAsset::new(faucet_id, 2000).unwrap());
+    insert_account_vault_asset(&mut conn, account_id, block_2, vault_key_1, Some(asset_v2))
+        .expect("insert vault asset update failed");
+
+    // Add a second vault_key at block 2
+    let vault_key_2 = AssetVaultKey::new_unchecked(Word::from([
+        Felt::new(2),
+        Felt::new(0),
+        Felt::new(0),
+        Felt::new(0),
+    ]));
+    let asset_key2 = Asset::Fungible(FungibleAsset::new(faucet_id, 500).unwrap());
+    insert_account_vault_asset(&mut conn, account_id, block_2, vault_key_2, Some(asset_key2))
+        .expect("insert second vault asset failed");
+
+    // Update vault_key_1 again at block 3: vault_key_1 = 3000 tokens
+    let asset_v3 = Asset::Fungible(FungibleAsset::new(faucet_id, 3000).unwrap());
+    insert_account_vault_asset(&mut conn, account_id, block_3, vault_key_1, Some(asset_v3))
+        .expect("insert vault asset update 2 failed");
+
+    // Query at block 1: should only see vault_key_1 with 1000 tokens
+    let assets_at_block_1 = select_account_vault_at_block(&mut conn, account_id, block_1)
+        .expect("Query at block 1 should succeed");
+
+    assert_eq!(assets_at_block_1.len(), 1, "Should have 1 asset at block 1");
+    assert_matches!(&assets_at_block_1[0], Asset::Fungible(f) if f.amount() == 1000);
+
+    // Query at block 2: should see vault_key_1 with 2000 tokens AND vault_key_2 with 500 tokens
+    let assets_at_block_2 = select_account_vault_at_block(&mut conn, account_id, block_2)
+        .expect("Query at block 2 should succeed");
+
+    assert_eq!(assets_at_block_2.len(), 2, "Should have 2 assets at block 2");
+
+    // Find the amounts (order may vary)
+    let amounts: Vec<u64> = assets_at_block_2
+        .iter()
+        .map(|a| assert_matches!(a, Asset::Fungible(f) => f.amount()))
+        .collect();
+
+    assert!(amounts.contains(&2000), "Block 2 should have vault_key_1 with 2000 tokens");
+    assert!(amounts.contains(&500), "Block 2 should have vault_key_2 with 500 tokens");
+
+    // Query at block 3: should see vault_key_1 with 3000 tokens AND vault_key_2 with 500 tokens
+    let assets_at_block_3 = select_account_vault_at_block(&mut conn, account_id, block_3)
+        .expect("Query at block 3 should succeed");
+
+    assert_eq!(assets_at_block_3.len(), 2, "Should have 2 assets at block 3");
+
+    let amounts: Vec<u64> = assets_at_block_3
+        .iter()
+        .map(|a| assert_matches!(a, Asset::Fungible(f) => f.amount()))
+        .collect();
+
+    assert!(amounts.contains(&3000), "Block 3 should have vault_key_1 with 3000 tokens");
+    assert!(amounts.contains(&500), "Block 3 should have vault_key_2 with 500 tokens");
+}
+
+/// Tests that deleted vault assets (asset = None) are correctly excluded from results,
+/// and that the deduplication handles deletion entries properly.
+#[test] +fn test_select_account_vault_at_block_with_deletion() { + use assert_matches::assert_matches; + use miden_protocol::asset::{AssetVaultKey, FungibleAsset}; + use miden_protocol::testing::account_id::ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET; + + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + // Faucet ID is needed for creating FungibleAssets + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + let block_1 = BlockNumber::from_epoch(0); + let block_2 = BlockNumber::from_epoch(1); + let block_3 = BlockNumber::from_epoch(2); + + insert_block_header(&mut conn, block_1); + insert_block_header(&mut conn, block_2); + insert_block_header(&mut conn, block_3); + + // Insert account at block 1 + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + upsert_accounts(&mut conn, &[account_update], block_1).expect("upsert_accounts failed"); + + // Insert vault asset at block 1 + let vault_key = AssetVaultKey::new_unchecked(Word::from([ + Felt::new(1), + Felt::new(0), + Felt::new(0), + Felt::new(0), + ])); + let asset = Asset::Fungible(FungibleAsset::new(faucet_id, 1000).unwrap()); + + insert_account_vault_asset(&mut conn, account_id, block_1, vault_key, Some(asset)) + .expect("insert vault asset failed"); + + // Delete the vault asset at block 2 (insert with asset = None) + insert_account_vault_asset(&mut conn, account_id, block_2, vault_key, None) + .expect("delete vault asset failed"); + + // Re-add the vault asset at block 3 with different amount + let asset_v3 = Asset::Fungible(FungibleAsset::new(faucet_id, 2000).unwrap()); + insert_account_vault_asset(&mut conn, account_id, block_3, vault_key, Some(asset_v3)) + .expect("re-add vault asset failed"); + + // Query at block 1: should see the asset + let assets_at_block_1 = select_account_vault_at_block(&mut conn, account_id, block_1) + .expect("Query at block 1 should succeed"); + assert_eq!(assets_at_block_1.len(), 1, "Should have 1 asset at block 1"); + + // Query at block 2: should NOT see the asset (it was deleted) + let assets_at_block_2 = select_account_vault_at_block(&mut conn, account_id, block_2) + .expect("Query at block 2 should succeed"); + assert!(assets_at_block_2.is_empty(), "Should have no assets at block 2 (deleted)"); + + // Query at block 3: should see the re-added asset with new amount + let assets_at_block_3 = select_account_vault_at_block(&mut conn, account_id, block_3) + .expect("Query at block 3 should succeed"); + assert_eq!(assets_at_block_3.len(), 1, "Should have 1 asset at block 3"); + assert_matches!(&assets_at_block_3[0], Asset::Fungible(f) if f.amount() == 2000); +}
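Both queries in this patch reduce to the same pattern: for each key, keep only the most recent row at or before the target block, and treat a missing (NULL/`None`) payload as a deletion. The standalone Rust sketch below is illustrative only; it is not part of the patch, and names such as `latest_per_key` are hypothetical. It assumes the rows are already filtered to `block_num <= target_block` and mirrors the "order by key ascending, block descending, keep the first row per key" deduplication used by the storage query, while the vault query pushes the same deduplication into SQLite via the `MAX(block_num)` subquery join.

```rust
use std::collections::{BTreeMap, BTreeSet};

/// Keep only the most recent value per key, treating `None` as a deletion.
///
/// Rows are `(key, block_num, value)` and are assumed to be pre-filtered to
/// `block_num <= target_block` (the queries in the patch do this in SQL).
fn latest_per_key(rows: &[(&str, u32, Option<u64>)]) -> BTreeMap<String, u64> {
    // Key ascending, block_num descending: the first row seen per key is the latest one.
    let mut sorted = rows.to_vec();
    sorted.sort_by(|a, b| a.0.cmp(b.0).then(b.1.cmp(&a.1)));

    let mut latest = BTreeMap::new();
    let mut seen: BTreeSet<&str> = BTreeSet::new();
    for (key, _block, value) in sorted {
        // Later entries for an already-seen key are stale history; skip them.
        if !seen.insert(key) {
            continue;
        }
        // A `None` value marks a deletion, so the key is omitted from the result.
        if let Some(v) = value {
            latest.insert(key.to_string(), v);
        }
    }
    latest
}

fn main() {
    let rows = [
        ("vault_key_1", 1, Some(1000)),
        ("vault_key_1", 2, Some(2000)), // later update wins
        ("vault_key_2", 2, Some(500)),
        ("vault_key_2", 3, None), // deleted in its latest update
    ];
    let latest = latest_per_key(&rows);
    assert_eq!(latest.get("vault_key_1"), Some(&2000));
    assert_eq!(latest.get("vault_key_2"), None);
}
```

The two approaches compute the same result; doing the deduplication in SQL simply avoids materializing stale history rows on the Rust side.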