diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 12e76f99c..8cebcbe97 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -16,6 +16,10 @@ concurrency: permissions: contents: read +env: + # Reduce cache usage by removing debug information. + CARGO_PROFILE_DEV_DEBUG: 0 + jobs: typos: runs-on: Linux-ARM64-Runner @@ -89,7 +93,7 @@ jobs: with: save-if: ${{ github.event_name == 'push' && github.ref == 'refs/heads/next' }} - name: Build docs - run: make doc + run: cargo doc --no-deps --workspace --all-features --locked unused_deps: name: check for unused dependencies diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 354e2afa9..046ca7663 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -14,6 +14,10 @@ concurrency: permissions: contents: read +env: + # Reduce cache usage by removing debug information. + CARGO_PROFILE_DEV_DEBUG: 0 + jobs: # Check MSRV (aka `rust-version`) in `Cargo.toml` is valid for workspace members msrv: diff --git a/.github/workflows/network-monitor.yml b/.github/workflows/network-monitor.yml index 507980803..1a6921617 100644 --- a/.github/workflows/network-monitor.yml +++ b/.github/workflows/network-monitor.yml @@ -16,6 +16,10 @@ concurrency: permissions: contents: read +env: + # Reduce cache usage by removing debug information. + CARGO_PROFILE_DEV_DEBUG: 0 + jobs: check: name: check diff --git a/.github/workflows/stress-test-check.yml b/.github/workflows/stress-test-check.yml index 488a2c068..47182f8f9 100644 --- a/.github/workflows/stress-test-check.yml +++ b/.github/workflows/stress-test-check.yml @@ -16,6 +16,10 @@ concurrency: group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" cancel-in-progress: true +env: + # Reduce cache usage by removing debug information. + CARGO_PROFILE_DEV_DEBUG: 0 + jobs: stress-test-check: name: stress-test-check diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml index 662fd3d44..cfee5fc3c 100644 --- a/.github/workflows/test.yml +++ b/.github/workflows/test.yml @@ -16,6 +16,11 @@ concurrency: group: "${{ github.workflow }} @ ${{ github.event.pull_request.head.label || github.head_ref || github.ref }}" cancel-in-progress: true +env: + # Reduce cache usage by removing debug information. + # This works for tests as well because TEST inherits from DEV. + CARGO_PROFILE_DEV_DEBUG: 0 + jobs: test: name: test @@ -31,3 +36,6 @@ jobs: - uses: taiki-e/install-action@nextest - name: Run tests run: make test + - name: Doc tests + run: cargo test --doc --workspace --all-features + diff --git a/.release-plz.toml b/.release-plz.toml deleted file mode 100644 index c3dfed33d..000000000 --- a/.release-plz.toml +++ /dev/null @@ -1,6 +0,0 @@ -[workspace] -changelog_update = false # For now we have our own changelog. -release_always = true # Without the tracking PR, it would never trigger unless `true`. - -git_release_enable = false -git_tag_enable = false diff --git a/CHANGELOG.md b/CHANGELOG.md index aa29f3a8c..62b85c441 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,6 +1,56 @@ # Changelog -## v0.12.6 (TBD) +## v0.13.0 (TBD) + +### Enhancements + +- Added support for timeouts in the WASM remote prover clients ([#1383](https://github.com/0xMiden/miden-node/pull/1383)). +- Added block validation endpoint to validator and integrated with block producer ([#1382](https://github.com/0xMiden/miden-node/pull/1381)). 
+- Added support for caching mempool statistics in the block producer server ([#1388](https://github.com/0xMiden/miden-node/pull/1388)).
+- Added mempool statistics to the block producer status in the `miden-network-monitor` binary ([#1392](https://github.com/0xMiden/miden-node/pull/1392)).
+- Added success rate to the `miden-network-monitor` binary ([#1420](https://github.com/0xMiden/miden-node/pull/1420)).
+- Added chain tip to the block producer status ([#1419](https://github.com/0xMiden/miden-node/pull/1419)).
+- The mempool's transaction capacity is now configurable ([#1433](https://github.com/0xMiden/miden-node/pull/1433)).
+- Renamed card names in the `miden-network-monitor` binary ([#1441](https://github.com/0xMiden/miden-node/pull/1441)).
+- Added pagination to `GetNetworkAccountIds` endpoint ([#1452](https://github.com/0xMiden/miden-node/pull/1452)).
+- Improved tracing in `miden-network-monitor` binary ([#1366](https://github.com/0xMiden/miden-node/pull/1366)).
+- Integrated RPC stack with Validator component for transaction validation ([#1457](https://github.com/0xMiden/miden-node/pull/1457)).
+- Added validated transactions check to block validation logic in the Validator ([#1460](https://github.com/0xMiden/miden-node/pull/1460)).
+- Added explorer status to the `miden-network-monitor` binary ([#1450](https://github.com/0xMiden/miden-node/pull/1450)).
+- Added `GetLimits` endpoint to the RPC server ([#1410](https://github.com/0xMiden/miden-node/pull/1410)).
+- Added gRPC-Web probe support to the `miden-network-monitor` binary ([#1484](https://github.com/0xMiden/miden-node/pull/1484)).
+- Add DB schema change check ([#1268](https://github.com/0xMiden/miden-node/pull/1485)).
+
+### Changes
+
+- [BREAKING] Renamed `SyncTransactions` response fields ([#1357](https://github.com/0xMiden/miden-node/pull/1357)).
+- Normalize response size in endpoints to 4 MB ([#1357](https://github.com/0xMiden/miden-node/pull/1357)).
+- [BREAKING] Renamed `ProxyWorkerStatus::address` to `ProxyWorkerStatus::name` ([#1348](https://github.com/0xMiden/miden-node/pull/1348)).
+- Added `SyncTransactions` stress test to `miden-node-stress-test` binary ([#1294](https://github.com/0xMiden/miden-node/pull/1294)).
+- Remove `trait AccountTreeStorage` ([#1352](https://github.com/0xMiden/miden-node/issues/1352)).
+- [BREAKING] `SubmitProvenTransaction` now **requires** that the network's genesis commitment is set in the request's `ACCEPT` header ([#1298](https://github.com/0xMiden/miden-node/pull/1298), [#1436](https://github.com/0xMiden/miden-node/pull/1436)).
+- Add `S` generic to `NullifierTree` to allow usage with `LargeSmt`s ([#1353](https://github.com/0xMiden/miden-node/issues/1353)).
+- Removed internal errors from the `miden-network-monitor` ([#1424](https://github.com/0xMiden/miden-node/pull/1424)).
+- Track network transaction latency in `miden-network-monitor` ([#1430](https://github.com/0xMiden/miden-node/pull/1430)).
+- [BREAKING] Re-organized RPC protobuf schema to be independent of internal schema ([#1401](https://github.com/0xMiden/miden-node/pull/1401)).
+- Increased the maximum query limit for the store ([#1443](https://github.com/0xMiden/miden-node/pull/1443)).
+- [BREAKING] Added block signing capabilities to Validator component and updated genesis bootstrap to sign blocks with the configured signer ([#1426](https://github.com/0xMiden/miden-node/pull/1426)).
+- Reduced default block interval from `5s` to `2s` ([#1438](https://github.com/0xMiden/miden-node/pull/1438)).
+- Increased retained account tree history from 33 to 100 blocks to account for the reduced block interval ([#1438](https://github.com/0xMiden/miden-node/pull/1438)).
+- [BREAKING] Migrated to version `v0.20` of the VM ([#1476](https://github.com/0xMiden/miden-node/pull/1476)).
+- [BREAKING] Changed the database representation of accounts ([#1481](https://github.com/0xMiden/miden-node/pull/1481)).
+- Remove the cyclic database optimization ([#1497](https://github.com/0xMiden/miden-node/pull/1497)).
+
+### Fixes
+
+- RPC client now correctly sets `genesis` value in `ACCEPT` header if `version` is unspecified ([#1370](https://github.com/0xMiden/miden-node/pull/1370)).
+- Pin protobuf (`protox`) dependencies to avoid breaking changes in transitive dependency ([#1403](https://github.com/0xMiden/miden-node/pull/1403)).
+- Fixed no-std compatibility for remote prover clients ([#1407](https://github.com/0xMiden/miden-node/pull/1407)).
+- Fixed `AccountProofRequest` to retrieve the latest known state in case the specified block number (or chain tip) does not contain account updates ([#1422](https://github.com/0xMiden/miden-node/issues/1422)).
+- Fixed missing asset setup for full account initialization ([#1461](https://github.com/0xMiden/miden-node/pull/1461)).
+- Fixed `GetNetworkAccountIds` pagination to return the chain tip ([#1489](https://github.com/0xMiden/miden-node/pull/1489)).
+
+## v0.12.6

 ### Enhancements

@@ -55,6 +105,7 @@
 - Add optional `TransactionInputs` field to `SubmitProvenTransaction` endpoint for transaction re-execution (#[1278](https://github.com/0xMiden/miden-node/pull/1278)).
 - Added `validator` crate with initial protobuf, gRPC server, and sub-command (#[1293](https://github.com/0xMiden/miden-node/pull/1293)).
 - [BREAKING] Added `AccountTreeWithHistory` and integrate historical queries into `GetAccountProof` ([#1292](https://github.com/0xMiden/miden-node/pull/1292)).
+- [BREAKING] Handle past/historical `AccountProof` requests ([#1333](https://github.com/0xMiden/miden-node/pull/1333)).
 - Implement `DataStore::get_note_script()` for `NtxDataStore` (#[1332](https://github.com/0xMiden/miden-node/pull/1332)).
 - Started validating notes by their commitment instead of ID before entering the mempool ([#1338](https://github.com/0xMiden/miden-node/pull/1338)).

@@ -94,6 +145,7 @@
 - [BREAKING] Refactored protobuf messages ([#1045](https://github.com/0xMiden/miden-node/pull/#1045)).
 - Added `SyncStorageMaps` gRPC endpoint for retrieving account storage maps ([#1140](https://github.com/0xMiden/miden-node/pull/1140), [#1132](https://github.com/0xMiden/miden-node/pull/1132)).
 - Added `SyncAccountVault` gRPC endpoints for retrieving account assets ([#1176](https://github.com/0xMiden/miden-node/pull/1176)).
+- Refactored Network Transaction Builder to manage dedicated tasks for every network account in the chain ([#1219](https://github.com/0xMiden/miden-node/pull/1219)).

 ### Changes

diff --git a/Cargo.lock b/Cargo.lock
index 3842cfeff..a3a7a95d2 100644
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -2,16 +2,6 @@
 # It is not intended for manual editing.
version = 4 -[[package]] -name = "Inflector" -version = "0.11.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fe438c63458706e03479442743baae6c88256498e6431708f6dfc520a26515d3" -dependencies = [ - "lazy_static", - "regex", -] - [[package]] name = "addr2line" version = "0.25.1" @@ -37,6 +27,17 @@ dependencies = [ "generic-array", ] +[[package]] +name = "ahash" +version = "0.7.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "891477e0c6a8957309ee5c45a6368af3ae14bb510732d2684ffa19af310920f9" +dependencies = [ + "getrandom 0.2.16", + "once_cell", + "version_check", +] + [[package]] name = "ahash" version = "0.8.12" @@ -206,7 +207,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -217,7 +218,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -342,15 +343,15 @@ checksum = "72b3254f16251a8381aa12e40e3c4d2f0199f8c6508fbecb9d91f575e0fbb8c6" [[package]] name = "base64ct" -version = "1.8.0" +version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" +checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a" [[package]] name = "bech32" -version = "0.11.0" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d965446196e3b7decd44aa7ee49e31d630118f90ef12f97900f262eb915c951d" +checksum = "32637268377fc7b10a8c6d51de3e7fba1ce5dd371a96e342b34e6078db558e7f" [[package]] name = "beef" @@ -514,9 +515,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5" [[package]] name = "cc" -version = "1.2.46" +version = "1.2.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b97463e1064cb1b1c1384ad0a0b9c8abd0988e2a91f52606c80ef14aadb63e36" +checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215" dependencies = [ "find-msvc-tools", "jobserver", @@ -652,9 +653,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.51" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4c26d721170e0295f191a69bd9a1f93efcdb0aff38684b61ab5750468972e5f5" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", "clap_derive 4.5.49", @@ -662,9 +663,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.51" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75835f0c7bf681bfd05abe44e965760fea999a5286c6eb2d59883634fd02011a" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstream", "anstyle", @@ -694,7 +695,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -792,7 +793,7 @@ dependencies = [ "anes", "cast", "ciborium", - "clap 4.5.51", + "clap 4.5.53", "criterion-plot", "is-terminal", "itertools 0.10.5", @@ -906,7 +907,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -949,7 +950,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -963,7 
+964,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -974,7 +975,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -985,7 +986,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1077,7 +1078,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1087,34 +1088,35 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "derive_more" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "093242cf7570c207c83073cf82f79706fe7b8317e98620a47d5be7c3d8497678" +checksum = "10b768e943bed7bf2cab53df09f4bc34bfd217cdb57d971e769874c9a6710618" dependencies = [ "derive_more-impl", ] [[package]] name = "derive_more-impl" -version = "2.0.1" +version = "2.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" +checksum = "6d286bfdaf75e988b4a78e013ecd79c581e06399ab53fbacd2d916c2f904f30b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "rustc_version 0.4.1", + "syn 2.0.111", ] [[package]] name = "diesel" -version = "2.3.3" +version = "2.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e7624a3bb9fffd82fff016be9a7f163d20e5a89eb8d28f9daaa6b30fff37500" +checksum = "e130c806dccc85428c564f2dc5a96e05b6615a27c9a28776bd7761a9af4bb552" dependencies = [ "bigdecimal", "diesel_derives", @@ -1129,22 +1131,22 @@ dependencies = [ [[package]] name = "diesel_derives" -version = "2.3.4" +version = "2.3.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9daac6489a36e42570da165a10c424f3edcefdff70c5fd55e1847c23f3dd7562" +checksum = "8587cbca3c929fb198e7950d761d31ca72b80aa6e07c1b7bec5879d187720436" dependencies = [ "diesel_table_macro_syntax", "dsl_auto_type", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "diesel_migrations" -version = "2.3.0" +version = "2.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ee060f709c3e3b1cadd83fcd0f61711f7a8cf493348f758d3a1c1147d70b3c97" +checksum = "745fd255645f0f1135f9ec55c7b00e0882192af9683ab4731e4bba3da82b8f9c" dependencies = [ "diesel", "migrations_internals", @@ -1157,7 +1159,7 @@ version = "0.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fe2444076b48641147115697648dc743c2c00b61adade0f01ce67133c7babe8c" dependencies = [ - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1186,7 +1188,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1212,7 +1214,7 @@ dependencies = [ "heck 0.5.0", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1297,18 +1299,6 @@ dependencies = [ "cfg-if", ] -[[package]] -name = "enum_dispatch" -version = "0.3.13" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"aa18ce2bc66555b3218614519ac839ddb759a7d6720732f979ef8d13be147ecd" -dependencies = [ - "once_cell", - "proc-macro2", - "quote", - "syn 2.0.110", -] - [[package]] name = "env_filter" version = "0.1.4" @@ -1345,7 +1335,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -1528,7 +1518,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -1619,6 +1609,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "getset" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cf0fc11e47561d47397154977bc219f4cf809b2974facc3ccb3b89e2436f912" +dependencies = [ + "proc-macro-error2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "gimli" version = "0.32.3" @@ -1654,7 +1656,7 @@ dependencies = [ "futures-core", "futures-sink", "http", - "indexmap 2.12.0", + "indexmap 2.12.1", "slab", "tokio", "tokio-util", @@ -1677,6 +1679,9 @@ name = "hashbrown" version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" +dependencies = [ + "ahash 0.7.8", +] [[package]] name = "hashbrown" @@ -1691,15 +1696,16 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.16.0" +version = "0.16.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" +checksum = "841d1cc9bed7f9236f321df977030373f4a4163ae1a7dbfe1a51a2c1a51d9100" dependencies = [ "allocator-api2", "equivalent", "foldhash 0.2.0", "rayon", "serde", + "serde_core", ] [[package]] @@ -1755,23 +1761,22 @@ dependencies = [ [[package]] name = "hostname" -version = "0.4.1" +version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a56f203cd1c76362b69e3863fd987520ac36cf70a8c92627449b2f64a8cf7d65" +checksum = "617aaa3557aef3810a6369d0a99fac8a080891b68bd9f9812a1eeda0c0730cbd" dependencies = [ "cfg-if", "libc", - "windows-link 0.1.3", + "windows-link 0.2.1", ] [[package]] name = "http" -version = "1.3.1" +version = "1.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f4a85d31aea989eead29a3aaf9e1115a180df8282431156e533de47660892565" +checksum = "e3ba2a386d7f85a81f119ad7498ebe444d2e22c2af0b86b069416ace48b3311a" dependencies = [ "bytes", - "fnv", "itoa", ] @@ -1886,9 +1891,9 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.18" +version = "0.1.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "52e9a2a24dc5c6821e71a7030e1e14b7b632acac55c40e9d2e082c621261bb56" +checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f" dependencies = [ "base64", "bytes", @@ -2060,12 +2065,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.0" +version = "2.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6717a8d2a5a929a1a2eb43a12812498ed141a0bcfb7e8f7844fbdbe4303bba9f" +checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" dependencies = [ "equivalent", - "hashbrown 0.16.0", + "hashbrown 0.16.1", ] [[package]] @@ -2107,7 +2112,7 @@ checksum = "3640c1c38b8e4e43584d8df18be5fc6b0aa314ce6ebf51b53313d4306cca8e46" 
dependencies = [ "hermit-abi 0.5.2", "libc", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -2167,7 +2172,7 @@ checksum = "980af8b43c3ad5d8d349ace167ec8170839f753a42d233ba19e08afe1850fa69" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2182,9 +2187,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.82" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b011eec8cc36da2aab2d5cff675ec18454fad408585853910a202391cf9f8e65" +checksum = "464a3709c7f55f1f721e5389aa6ea4e3bc6aba669353300af094b29ffbdde1d8" dependencies = [ "once_cell", "wasm-bindgen", @@ -2251,9 +2256,9 @@ checksum = "bbd2bcb4c963f2ddae06a2efc7e9f3591312473c50c6685e1f298068316e66fe" [[package]] name = "libc" -version = "0.2.177" +version = "0.2.178" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2874a2af47a2325c2001a6e6fad9b16a53b802102b528163885171cf92b15976" +checksum = "37c93d8daa9d8a012fd8ab92f088405fb202ea0b6ab73ee2482ae66af4f42091" [[package]] name = "libm" @@ -2307,14 +2312,14 @@ checksum = "6373607a59f0be73a39b6fe456b8192fcc3585f602af20751600e974dd455e77" [[package]] name = "local-ip-address" -version = "0.6.5" +version = "0.6.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "656b3b27f8893f7bbf9485148ff9a65f019e3f33bd5cdc87c83cab16b3fd9ec8" +checksum = "786c72d9739fc316a7acf9b22d9c2794ac9cb91074e9668feb04304ab7219783" dependencies = [ "libc", "neli", "thiserror 2.0.17", - "windows-sys 0.59.0", + "windows-sys 0.61.2", ] [[package]] @@ -2328,9 +2333,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.28" +version = "0.4.29" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" +checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "logos" @@ -2354,7 +2359,7 @@ dependencies = [ "quote", "regex-syntax", "rustc_version 0.4.1", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -2432,9 +2437,9 @@ dependencies = [ [[package]] name = "miden-air" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "06acfd2ddc25b68f9d23d2add3f15c0ec3f9890ce6418409d71bea9dc6590bd0" +checksum = "e663337017ed028dff8c18a0ce1db64aad0e850996e3214f137f98317533c2e1" dependencies = [ "miden-core", "miden-utils-indexing", @@ -2445,10 +2450,11 @@ dependencies = [ [[package]] name = "miden-assembly" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1219b9e48bb286b58a23bb65cf74baa1b24ddbcb462ca625b38186674571047" +checksum = "001249195c227624695529c82ebf51c390ec1c28e99a567549ce3a272a2aedf3" dependencies = [ + "env_logger", "log", "miden-assembly-syntax", "miden-core", @@ -2459,11 +2465,12 @@ dependencies = [ [[package]] name = "miden-assembly-syntax" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1eeaef2853061c54527bb2664c0c832ce3d1f80847c79512455fec3b93057f2a" +checksum = "1963cfa667aa6a157c99982df340a7bd42b054652e6f33d5e3513217531eca73" dependencies = [ "aho-corasick", + "env_logger", "lalrpop", "lalrpop-util", "log", @@ -2472,6 +2479,7 @@ dependencies = [ "miden-utils-diagnostics", "midenc-hir-type", "proptest", + "proptest-derive", "regex", "rustc_version 0.4.1", "semver 1.0.27", @@ -2481,46 +2489,66 @@ dependencies = [ [[package]] 
name = "miden-block-prover" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ec766587e838664ded55fa926d0611244cac2fe23b7cec202d8db0a85d9e536e" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" dependencies = [ - "miden-lib", - "miden-objects", + "miden-protocol", "thiserror 2.0.17", ] [[package]] name = "miden-core" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "452a00429d05c416001ec0578291eb88e115cf94fc22b3308267abfdcd813440" +checksum = "136debf5474190dc584df3252710dac07a0e45315740c9538a7fc0b72c596365" dependencies = [ - "enum_dispatch", + "derive_more", + "itertools 0.14.0", "miden-crypto", "miden-debug-types", "miden-formatting", + "miden-utils-core-derive", "miden-utils-indexing", "num-derive", "num-traits", + "proptest", + "proptest-derive", "thiserror 2.0.17", "winter-math", "winter-utils", ] +[[package]] +name = "miden-core-lib" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dcec9fb9a256d2fae347162d9a94653a1790dd33b4af73ad29686475b63deb34" +dependencies = [ + "env_logger", + "fs-err", + "miden-assembly", + "miden-core", + "miden-crypto", + "miden-processor", + "miden-utils-sync", + "sha2", + "thiserror 2.0.17", +] + [[package]] name = "miden-crypto" -version = "0.18.2" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eb82051002f9c64878d3b105a7b924de1ee92019231923380cf4ecd7b824f9a" +checksum = "dc7981c1d907bb9864e24f2bd6304c4fca03a41fc4606c09edd6a7f5a8fc80fc" dependencies = [ "blake3", "cc", "chacha20poly1305", + "curve25519-dalek", "ed25519-dalek", "flume", "glob", - "hashbrown 0.16.0", + "hashbrown 0.16.1", "hkdf", "k256", "miden-crypto-derive", @@ -2531,6 +2559,7 @@ dependencies = [ "rand_core 0.9.3", "rand_hc", "rayon", + "sha2", "sha3", "subtle", "thiserror 2.0.17", @@ -2542,19 +2571,19 @@ dependencies = [ [[package]] name = "miden-crypto-derive" -version = "0.18.2" +version = "0.19.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2222f37355ea975f40acd3c098a437574a31a4d8a2c193cf4e9fead2beede577" +checksum = "83479e7af490784c6f2d2e02cec5210fd6e5bc6ce3d4427734e36a773bca72d2" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "miden-debug-types" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97eed62ac0ca7420e49148fd306c74786b23a8d31df6da6277c671ba3e5c619a" +checksum = "6dc25083822c3d582c42ad10aeee0138dec15a130f3017b05495bb91e31fde4a" dependencies = [ "memchr", "miden-crypto", @@ -2577,30 +2606,11 @@ dependencies = [ "unicode-width 0.1.14", ] -[[package]] -name = "miden-lib" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "598582071e5b0ec835d06288857d4ddc0090a98bd4c17e408fa56b2c43f45d73" -dependencies = [ - "Inflector", - "fs-err", - "miden-assembly", - "miden-core", - "miden-objects", - "miden-processor", - "miden-stdlib", - "rand 0.9.2", - "regex", - "thiserror 2.0.17", - "walkdir", -] - [[package]] name = "miden-mast-package" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1d13e6ba2b357551598f13396ed52f8f21aa99979aa3b338bb5521feeda19c8a" +checksum = "da35f2fc1eacbfd0b6b995e888c2b778bd646acebf34dab27f9f7ed9b3effaa2" dependencies = [ 
"derive_more", "miden-assembly-syntax", @@ -2631,7 +2641,7 @@ dependencies = [ "supports-color", "supports-hyperlinks", "supports-unicode", - "syn 2.0.110", + "syn 2.0.111", "terminal_size 0.3.0", "textwrap", "thiserror 2.0.17", @@ -2647,22 +2657,22 @@ checksum = "86a905f3ea65634dd4d1041a4f0fd0a3e77aa4118341d265af1a94339182222f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "miden-network-monitor" -version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", "axum", - "clap 4.5.51", + "clap 4.5.53", "hex", "humantime", - "miden-lib", "miden-node-proto", "miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "miden-testing", "miden-tx", "rand 0.9.2", @@ -2679,12 +2689,13 @@ dependencies = [ [[package]] name = "miden-node" -version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", - "clap 4.5.51", + "clap 4.5.53", "figment", "fs-err", + "hex", "humantime", "miden-node-block-producer", "miden-node-ntx-builder", @@ -2692,28 +2703,28 @@ dependencies = [ "miden-node-store", "miden-node-utils", "miden-node-validator", - "miden-objects", + "miden-protocol", "tokio", "url", ] [[package]] name = "miden-node-block-producer" -version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", "assert_matches", "futures", "itertools 0.14.0", "miden-block-prover", - "miden-lib", "miden-node-proto", "miden-node-proto-build", "miden-node-store", "miden-node-test-macro", "miden-node-utils", - "miden-objects", + "miden-protocol", "miden-remote-prover-client", + "miden-standards", "miden-tx", "miden-tx-batch-prover", "pretty_assertions", @@ -2735,29 +2746,31 @@ dependencies = [ [[package]] name = "miden-node-grpc-error-macro" -version = "0.12.5" +version = "0.13.0" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "miden-node-ntx-builder" -version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", "futures", - "lru 0.16.2", + "indexmap 2.12.1", "miden-node-proto", "miden-node-test-macro", "miden-node-utils", - "miden-objects", + "miden-protocol", "miden-remote-prover-client", + "miden-standards", "miden-tx", "rstest", "thiserror 2.0.17", "tokio", "tokio-stream", + "tokio-util", "tonic", "tracing", "url", @@ -2765,7 +2778,7 @@ dependencies = [ [[package]] name = "miden-node-proto" -version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", "fs-err", @@ -2774,7 +2787,7 @@ dependencies = [ "miden-node-grpc-error-macro", "miden-node-proto-build", "miden-node-utils", - "miden-objects", + "miden-protocol", "miette", "proptest", "prost", @@ -2787,7 +2800,7 @@ dependencies = [ [[package]] name = "miden-node-proto-build" -version = "0.12.5" +version = "0.13.0" dependencies = [ "fs-err", "miette", @@ -2797,19 +2810,19 @@ dependencies = [ [[package]] name = "miden-node-rpc" -version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", "futures", "http", "mediatype", "miden-air", - "miden-lib", "miden-node-proto", "miden-node-proto-build", "miden-node-store", "miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "miden-tx", "reqwest", "rstest", @@ -2829,7 +2842,7 @@ dependencies = [ [[package]] name = "miden-node-store" -version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", "assert_matches", @@ -2841,13 +2854,13 @@ dependencies = [ "diesel_migrations", "fs-err", "hex", - "indexmap 2.12.0", - "miden-lib", + "indexmap 2.12.1", "miden-node-proto", "miden-node-proto-build", "miden-node-test-macro", "miden-node-utils", - "miden-objects", + 
"miden-protocol", + "miden-standards", "pretty_assertions", "rand 0.9.2", "rand_chacha 0.9.0", @@ -2866,19 +2879,19 @@ dependencies = [ [[package]] name = "miden-node-stress-test" -version = "0.12.5" +version = "0.13.0" dependencies = [ - "clap 4.5.51", + "clap 4.5.53", "fs-err", "futures", "miden-air", "miden-block-prover", - "miden-lib", "miden-node-block-producer", "miden-node-proto", "miden-node-store", "miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "rand 0.9.2", "rayon", "tokio", @@ -2891,20 +2904,22 @@ name = "miden-node-test-macro" version = "0.1.0" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "miden-node-utils" -version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", "bytes", "figment", + "fs-err", "http", "http-body-util", "itertools 0.14.0", - "miden-objects", + "lru 0.16.2", + "miden-protocol", "opentelemetry", "opentelemetry-otlp", "opentelemetry_sdk", @@ -2925,12 +2940,15 @@ dependencies = [ [[package]] name = "miden-node-validator" -version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", "miden-node-proto", "miden-node-proto-build", "miden-node-utils", + "miden-protocol", + "miden-tx", + "thiserror 2.0.17", "tokio", "tokio-stream", "tonic", @@ -2940,56 +2958,70 @@ dependencies = [ ] [[package]] -name = "miden-objects" -version = "0.12.4" +name = "miden-processor" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace4018bb2d6cdbcff4d86d8af5ade8efca9f0479f7e5775c7f09cfab5f91ebe" +checksum = "2eb298dbdda739080497c18eace4d56c58f3e8d257676c9b2f407be441131ecd" +dependencies = [ + "itertools 0.14.0", + "miden-air", + "miden-core", + "miden-debug-types", + "miden-utils-diagnostics", + "miden-utils-indexing", + "paste", + "rayon", + "thiserror 2.0.17", + "tokio", + "tracing", + "winter-prover", +] + +[[package]] +name = "miden-protocol" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" dependencies = [ "bech32", + "fs-err", "getrandom 0.3.4", "miden-assembly", "miden-assembly-syntax", "miden-core", + "miden-core-lib", "miden-crypto", "miden-mast-package", "miden-processor", - "miden-stdlib", + "miden-protocol-macros", "miden-utils-sync", "miden-verifier", "rand 0.9.2", + "rand_chacha 0.9.0", "rand_xoshiro", + "regex", "semver 1.0.27", "serde", "thiserror 2.0.17", "toml 0.9.8", + "walkdir", "winter-rand-utils", ] [[package]] -name = "miden-processor" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2ef77929651b8755965cde8f589bd38e2345a619d54cab6427f91aa23c47f6a" +name = "miden-protocol-macros" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" dependencies = [ - "itertools 0.14.0", - "miden-air", - "miden-core", - "miden-debug-types", - "miden-utils-diagnostics", - "miden-utils-indexing", - "paste", - "rayon", - "thiserror 2.0.17", - "tokio", - "tracing", - "winter-prover", + "proc-macro2", + "quote", + "syn 2.0.111", ] [[package]] name = "miden-prover" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "84c30a5d10baeec17b9336de8544cb7f9b96b32de757c4cfb8d95ee0521bb5cd" +checksum = "8506c8eb4d980134c0145887af50bd4631df4010eb23d6e454764cb1ee28836c" dependencies = [ "miden-air", "miden-debug-types", @@ -3001,20 +3033,21 @@ dependencies = [ [[package]] name = "miden-remote-prover" 
-version = "0.12.5" +version = "0.13.0" dependencies = [ "anyhow", "async-trait", "axum", "bytes", - "clap 4.5.51", + "clap 4.5.53", "http", "humantime", "miden-block-prover", - "miden-lib", + "miden-node-proto", "miden-node-proto-build", "miden-node-utils", - "miden-objects", + "miden-protocol", + "miden-standards", "miden-testing", "miden-tx", "miden-tx-batch-prover", @@ -3046,11 +3079,12 @@ dependencies = [ [[package]] name = "miden-remote-prover-client" -version = "0.12.5" +version = "0.13.0" dependencies = [ + "fs-err", "getrandom 0.3.4", "miden-node-proto-build", - "miden-objects", + "miden-protocol", "miden-tx", "miette", "prost", @@ -3064,72 +3098,78 @@ dependencies = [ ] [[package]] -name = "miden-stdlib" -version = "0.19.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5e90a5de45a1e6213ff17b66fff8accde0bbc64264e2c22bbcb9a895f8f3b767" +name = "miden-standards" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" dependencies = [ - "env_logger", "fs-err", "miden-assembly", "miden-core", - "miden-crypto", + "miden-core-lib", "miden-processor", - "miden-utils-sync", + "miden-protocol", + "rand 0.9.2", + "regex", "thiserror 2.0.17", + "walkdir", ] [[package]] name = "miden-testing" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cda0d572d7415682ed168f616becf006825aa04b89692f9907cbb3e3586bf46a" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" dependencies = [ "anyhow", "itertools 0.14.0", "miden-block-prover", - "miden-lib", - "miden-objects", "miden-processor", + "miden-protocol", + "miden-standards", "miden-tx", "miden-tx-batch-prover", "rand 0.9.2", "rand_chacha 0.9.0", - "thiserror 2.0.17", "winterfell", ] [[package]] name = "miden-tx" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2d959064f99ce09fc38e9b6b4dc24c3fa80a63072bf5840a1074ca4ed5e9c911" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" dependencies = [ - "miden-lib", - "miden-objects", "miden-processor", + "miden-protocol", "miden-prover", + "miden-standards", "miden-verifier", - "rand 0.9.2", "thiserror 2.0.17", - "tokio", ] [[package]] name = "miden-tx-batch-prover" -version = "0.12.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f5029810b106654a1ec5d7d7123945db91b96bc4f4187715d0c2cfe0b0a53af4" +version = "0.13.0" +source = "git+https://github.com/0xMiden/miden-base.git?branch=next#eb0c396d48506c30c3fdc859ddaabbb8bfaf6b00" dependencies = [ - "miden-objects", + "miden-protocol", "miden-tx", ] +[[package]] +name = "miden-utils-core-derive" +version = "0.20.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0807840c07a4491a292153258cfae27914333e1a7240777a77c22d8ca3b55873" +dependencies = [ + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "miden-utils-diagnostics" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a3ff4c019d96539a7066626efb4dce5c9fb7b0e44e961b0c2571e78f34236d5" +checksum = "1b28b1b29e300b471b0f1cbc286997a1326c900814a73b0b28338d5926ce192c" dependencies = [ "miden-crypto", "miden-debug-types", @@ -3140,18 +3180,18 @@ dependencies = [ [[package]] name = "miden-utils-indexing" -version = 
"0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c798250bee4e856d4f18c161e91cdcbef1906f6614d00cf0063b47031c0f8cc6" +checksum = "f8bd0c1966de07d48a4ed0b2821466919c061f4866296be87afc56970a49716a" dependencies = [ "thiserror 2.0.17", ] [[package]] name = "miden-utils-sync" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "feebe7d896c013ea74dbc98de978836606356a044d4ed3b61ded54d3b319d89f" +checksum = "a1fa7e37db2fbf2dee6ba6e411b3570ef48d52ec780b9c8125623f9ddca30da3" dependencies = [ "lock_api", "loom", @@ -3160,9 +3200,9 @@ dependencies = [ [[package]] name = "miden-verifier" -version = "0.19.1" +version = "0.20.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8f8e47b78bba1fe1b31faee8f12aafd95385f6d6a8b108b03e92f5d743bb29f" +checksum = "383c934eed92f89be4c1e3dbc97ccf37b48433a0b33727c92a5abbfa2d45f420" dependencies = [ "miden-air", "miden-core", @@ -3209,7 +3249,7 @@ checksum = "db5b29714e950dbb20d5e6f74f9dcec4edbcc1067bb7f8ed198c097b8c1a818b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3251,9 +3291,9 @@ dependencies = [ [[package]] name = "mio" -version = "1.1.0" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69d83b0086dc8ecf3ce9ae2874b2d1290252e2a30720bea58a5c6639b0092873" +checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc" dependencies = [ "libc", "wasi", @@ -3294,27 +3334,31 @@ dependencies = [ [[package]] name = "neli" -version = "0.6.5" +version = "0.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93062a0dce6da2517ea35f301dfc88184ce18d3601ec786a727a87bf535deca9" +checksum = "87fe4204517c0dafc04a1d99ecb577d52c0ffc81e1bbe5cf322769aa8fbd1b05" dependencies = [ + "bitflags 2.10.0", "byteorder", + "derive_builder", + "getset", "libc", "log", "neli-proc-macros", + "parking_lot", ] [[package]] name = "neli-proc-macros" -version = "0.1.4" +version = "0.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0c8034b7fbb6f9455b2a96c19e6edf8dc9fc34c70449938d8ee3b4df363f61fe" +checksum = "90e502fe5db321c6e0ae649ccda600675680125a8e8dee327744fe1910b19332" dependencies = [ "either", "proc-macro2", "quote", "serde", - "syn 1.0.109", + "syn 2.0.111", ] [[package]] @@ -3341,7 +3385,7 @@ version = "0.50.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7957b9740744892f114936ab4a57b3f487491bbeafaf8083688b16841a4240e5" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -3391,7 +3435,7 @@ checksum = "ed3955f1a9c7c0c15e092f9c887db08b1fc683305fdf6eb6684f22555355e202" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3510,7 +3554,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3652,7 +3696,7 @@ dependencies = [ "proc-macro2", "proc-macro2-diagnostics", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3668,7 +3712,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.12.0", + "indexmap 2.12.1", ] [[package]] @@ -3697,7 +3741,7 @@ checksum = 
"6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -3731,7 +3775,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ef622051fbb2cb98a524df3a8112f02d0919ccda600a44d705ec550f1a28fe2" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "blake2", "bytes", @@ -3767,7 +3811,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76f63d3f67d99c95a1f85623fc43242fd644dd12ccbaa18c38a54e1580c6846a" dependencies = [ - "ahash", + "ahash 0.8.12", "async-trait", "brotli", "bytes", @@ -3857,7 +3901,7 @@ version = "0.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b93c897e8cc04ff0d077ee2a655142910618222aeefc83f7f99f5b9fc59ccb13" dependencies = [ - "ahash", + "ahash 0.8.12", ] [[package]] @@ -3889,7 +3933,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ba89e4400cb978f0d7be1c14bd7ab4168c8e2c00d97ff19f964fc0048780237c" dependencies = [ "arrayvec", - "hashbrown 0.16.0", + "hashbrown 0.12.3", "parking_lot", "rand 0.8.5", ] @@ -4074,7 +4118,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -4083,7 +4127,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit 0.23.7", + "toml_edit 0.23.9", ] [[package]] @@ -4110,6 +4154,28 @@ dependencies = [ "version_check", ] +[[package]] +name = "proc-macro-error-attr2" +version = "2.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "96de42df36bb9bba5542fe9f1a054b8cc87e172759a1868aa05c1f3acc89dfc5" +dependencies = [ + "proc-macro2", + "quote", +] + +[[package]] +name = "proc-macro-error2" +version = "2.0.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "11ec05c52be0a07b08061f7dd003e7d7092e0472bc731b4af7bb1ef876109802" +dependencies = [ + "proc-macro-error-attr2", + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "proc-macro2" version = "1.0.103" @@ -4127,7 +4193,7 @@ checksum = "af066a9c399a26e020ada66a034357a868728e72cd426f3adcd35f80d88d88c8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "version_check", "yansi", ] @@ -4181,6 +4247,17 @@ dependencies = [ "unarray", ] +[[package]] +name = "proptest-derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb6dc647500e84a25a85b100e76c85b8ace114c209432dc174f20aac11d4ed6c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.111", +] + [[package]] name = "prost" version = "0.14.1" @@ -4197,8 +4274,8 @@ version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ - "heck 0.5.0", - "itertools 0.14.0", + "heck 0.4.1", + "itertools 0.10.5", "log", "multimap", "once_cell", @@ -4209,7 +4286,7 @@ dependencies = [ "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.110", + "syn 2.0.111", "tempfile", ] @@ -4220,17 +4297,17 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", - "itertools 0.14.0", + "itertools 0.10.5", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "prost-reflect" -version = "0.16.2" +version = "0.16.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89a3ac73ec9a9118131a4594c9d336631a07852220a1d0ae03ee36b04503a063" +checksum = "b89455ef41ed200cafc47c76c552ee7792370ac420497e551f16123a9135f76e" dependencies = [ "logos", "miette", @@ -4602,7 +4679,7 @@ dependencies = [ "regex", "relative-path", "rustc_version 0.4.1", - "syn 2.0.110", + "syn 2.0.111", "unicode-ident", ] @@ -4650,7 +4727,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", + "windows-sys 0.52.0", ] [[package]] @@ -4663,7 +4740,7 @@ dependencies = [ "errno", "libc", "linux-raw-sys 0.11.0", - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -4695,9 +4772,9 @@ dependencies = [ [[package]] name = "rustls-pki-types" -version = "1.13.0" +version = "1.13.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "94182ad936a0c91c324cd46c6511b9510ed16af436d7b5bab34beab0afd55f7a" +checksum = "708c0f9d5f54ba0272468c1d306a52c495b31fa155e91bc25371e6df7996908c" dependencies = [ "zeroize", ] @@ -4884,7 +4961,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -4986,7 +5063,7 @@ checksum = "5d69265a08751de7844521fd15003ae0a888e035773ba05695c5c759a6f89eef" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -4996,7 +5073,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3fa1f336066b758b7c9df34ed049c0e693a426afe2b27ff7d5b14f410ab1a132" dependencies = [ "base64", - "indexmap 2.12.0", + "indexmap 2.12.1", "rust_decimal", ] @@ -5038,9 +5115,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.6" +version = "1.4.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" +checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad" dependencies = [ "libc", ] @@ -5116,9 +5193,9 @@ dependencies = [ [[package]] name = "sqlite-wasm-rs" -version = "0.4.7" +version = "0.4.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "35c6d746902bca4ddf16592357eacf0473631ea26b36072f0dd0b31fa5ccd1f4" +checksum = "60bdd87fcb4c9764b024805fb2df5f1d659bea6e629fdbdcdcfc4042b9a640d0" dependencies = [ "js-sys", "once_cell", @@ -5187,7 +5264,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5230,9 +5307,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.110" +version = "2.0.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a99801b5bd34ede4cf3fc688c5919368fea4e4814a4664359503e6015b280aea" +checksum = "390cc9a294ab71bdb1aa2e99d13be9c753cd2d7bd6560c77118597410c4d2e87" dependencies = [ "proc-macro2", "quote", @@ -5256,7 +5333,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5296,16 +5373,16 @@ dependencies = [ "getrandom 0.3.4", "once_cell", "rustix 1.1.2", - "windows-sys 
0.61.2", + "windows-sys 0.52.0", ] [[package]] name = "term" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2111ef44dae28680ae9752bb89409e7310ca33a8c621ebe7b106cf5c928b3ac0" +checksum = "d8c27177b12a6399ffc08b98f76f7c9a1f4fe9fc967c784c5a071fa8d93cf7e1" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.59.0", ] [[package]] @@ -5380,7 +5457,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5391,7 +5468,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5491,7 +5568,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5570,7 +5647,7 @@ version = "0.9.8" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f0dc8b1fb61449e27716ec0e1bdf0f6b8f3e8f6b05391e8497b8b6d7804ea6d8" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "serde_core", "serde_spanned 1.0.3", "toml_datetime 0.7.3", @@ -5603,7 +5680,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "serde", "serde_spanned 0.6.9", "toml_datetime 0.6.11", @@ -5613,11 +5690,11 @@ dependencies = [ [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "5d7cbc3b4b49633d57a0509303158ca50de80ae32c265093b24c414705807832" dependencies = [ - "indexmap 2.12.0", + "indexmap 2.12.1", "toml_datetime 0.7.3", "toml_parser", "winnow", @@ -5684,7 +5761,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -5722,7 +5799,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.110", + "syn 2.0.111", "tempfile", "tonic-build", ] @@ -5792,7 +5869,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.0", + "indexmap 2.12.1", "pin-project-lite", "slab", "sync_wrapper", @@ -5805,9 +5882,9 @@ dependencies = [ [[package]] name = "tower-http" -version = "0.6.6" +version = "0.6.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2" +checksum = "9cf146f99d442e8e68e585f5d798ccd3cad9a7835b917e09728880a862706456" dependencies = [ "bitflags 2.10.0", "bytes", @@ -5837,9 +5914,9 @@ checksum = "8df9b6e13f2d32c91b9bd719c00d1958837bc7dec474d94952798cc8e69eeec3" [[package]] name = "tracing" -version = "0.1.41" +version = "0.1.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "784e0ac535deb450455cbfa28a6f0df145ea1bb7ae51b821cf5e7927fdcfbdd0" +checksum = "2d15d90a0b5c19378952d479dc858407149d7bb45a14de0142f6c534b16fc647" dependencies = [ "log", "pin-project-lite", @@ -5849,20 +5926,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.30" +version = "0.1.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" +checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] name = "tracing-core" -version = "0.1.34" +version = "0.1.35" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" +checksum = "7a04e24fab5c89c6a36eb8558c9656f30d81de51dfa4d3b45f26b21d61fa0a6c" dependencies = [ "once_cell", "valuable", @@ -5923,9 +6000,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "matchers", "nu-ansi-term", @@ -6082,9 +6159,9 @@ checksum = "06abde3611657adf66d383f00b093d7faecc7fa57071cce2578660c9f1010821" [[package]] name = "uuid" -version = "1.18.1" +version = "1.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" +checksum = "e2e054861b4bd027cd373e18e8d8d8e6548085000e41290d95ce0c373a654b4a" dependencies = [ "getrandom 0.3.4", "js-sys", @@ -6203,9 +6280,9 @@ dependencies = [ [[package]] name = "wasm-bindgen" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "da95793dfc411fbbd93f5be7715b0578ec61fe87cb1a42b12eb625caa5c5ea60" +checksum = "0d759f433fa64a2d763d1340820e46e111a7a5ab75f993d1852d70b03dbb80fd" dependencies = [ "cfg-if", "once_cell", @@ -6216,9 +6293,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-futures" -version = "0.4.55" +version = "0.4.56" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "551f88106c6d5e7ccc7cd9a16f312dd3b5d36ea8b4954304657d5dfba115d4a0" +checksum = "836d9622d604feee9e5de25ac10e3ea5f2d65b41eac0d9ce72eb5deae707ce7c" dependencies = [ "cfg-if", "js-sys", @@ -6229,9 +6306,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04264334509e04a7bf8690f2384ef5265f05143a4bff3889ab7a3269adab59c2" +checksum = "48cb0d2638f8baedbc542ed444afc0644a29166f1595371af4fecf8ce1e7eeb3" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -6239,22 +6316,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "420bc339d9f322e562942d52e115d57e950d12d88983a14c79b86859ee6c7ebc" +checksum = "cefb59d5cd5f92d9dcf80e4683949f15ca4b511f4ac0a6e14d4e1ac60c6ecd40" dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.105" +version = "0.2.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76f218a38c84bcb33c25ec7059b07847d465ce0e0a76b995e134a45adcb6af76" +checksum = "cbc538057e648b67f72a982e708d485b2efa771e1ac05fec311f9f63e5800db4" dependencies = [ "unicode-ident", ] @@ -6274,9 +6351,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.82" +version = "0.3.83" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"3a1f95c0d03a47f4ae1f7a64643a6bb97465d9b740f0fa8f90ea33915c99a9a1" +checksum = "9b32828d774c412041098d182a8b38b16ea816958e07cf40eec2bc080ae137ac" dependencies = [ "js-sys", "wasm-bindgen", @@ -6314,7 +6391,7 @@ version = "0.1.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c2a7b1c03c876122aa43f3020e6c3c3ee5c05081c9a00739faf7503aeba10d22" dependencies = [ - "windows-sys 0.61.2", + "windows-sys 0.52.0", ] [[package]] @@ -6390,7 +6467,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6401,7 +6478,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6715,9 +6792,9 @@ checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" [[package]] name = "winnow" -version = "0.7.13" +version = "0.7.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "21a0236b59786fed61e2a80582dd500fe61f18b5dca67a4a067d0bc9039339cf" +checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" dependencies = [ "memchr", ] @@ -6774,7 +6851,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6d31a19dae58475d019850e25b0170e94b16d382fbf6afee9c0e80fdc935e73e" dependencies = [ "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6891,28 +6968,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "synstructure", ] [[package]] name = "zerocopy" -version = "0.8.27" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" +checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.27" +version = "0.8.31" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" +checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] @@ -6932,7 +7009,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", "synstructure", ] @@ -6972,7 +7049,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.110", + "syn 2.0.111", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index a29f4f361..53e5182bb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,7 +28,7 @@ license = "MIT" readme = "README.md" repository = "https://github.com/0xMiden/miden-node" rust-version = "1.90" -version = "0.12.5" +version = "0.13.0" # Optimize the cryptography for faster tests involving account creation. [profile.test.package.miden-crypto] @@ -36,45 +36,47 @@ opt-level = 2 [workspace.dependencies] # Workspace crates. 
-miden-node-block-producer = { path = "crates/block-producer", version = "0.12" } -miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.12" } -miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.12" } -miden-node-proto = { path = "crates/proto", version = "0.12" } -miden-node-proto-build = { path = "proto", version = "0.12" } -miden-node-rpc = { path = "crates/rpc", version = "0.12" } -miden-node-store = { path = "crates/store", version = "0.12" } +miden-node-block-producer = { path = "crates/block-producer", version = "0.13" } +miden-node-grpc-error-macro = { path = "crates/grpc-error-macro", version = "0.13" } +miden-node-ntx-builder = { path = "crates/ntx-builder", version = "0.13" } +miden-node-proto = { path = "crates/proto", version = "0.13" } +miden-node-proto-build = { path = "proto", version = "0.13" } +miden-node-rpc = { path = "crates/rpc", version = "0.13" } +miden-node-store = { path = "crates/store", version = "0.13" } miden-node-test-macro = { path = "crates/test-macro" } -miden-node-utils = { path = "crates/utils", version = "0.12" } -miden-node-validator = { path = "crates/validator", version = "0.12" } -miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.12" } +miden-node-utils = { path = "crates/utils", version = "0.13" } +miden-node-validator = { path = "crates/validator", version = "0.13" } +miden-remote-prover-client = { path = "crates/remote-prover-client", version = "0.13" } # miden-base aka protocol dependencies. These should be updated in sync. -miden-block-prover = { version = "0.12.4" } -miden-lib = { version = "0.12.4" } -miden-objects = { default-features = false, version = "0.12.4" } -miden-testing = { version = "0.12.4" } -miden-tx = { default-features = false, version = "0.12.4" } -miden-tx-batch-prover = { version = "0.12.4" } +miden-block-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-protocol = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } +miden-standards = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-testing = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } +miden-tx = { branch = "next", default-features = false, git = "https://github.com/0xMiden/miden-base.git" } +miden-tx-batch-prover = { branch = "next", git = "https://github.com/0xMiden/miden-base.git" } # Other miden dependencies. These should align with those expected by miden-base. 
-miden-air = { features = ["std", "testing"], version = "0.19" } +miden-air = { features = ["std", "testing"], version = "0.20" } # External dependencies -anyhow = { version = "1.0" } -assert_matches = { version = "1.5" } -async-trait = { version = "0.1" } -clap = { features = ["derive"], version = "4.5" } -fs-err = { version = "3" } -futures = { version = "0.3" } -hex = { version = "0.4" } -http = { version = "1.3" } -humantime = { version = "2.2" } -indexmap = { version = "2.12" } -itertools = { version = "0.14" } -lru = { default-features = false, version = "0.16" } -pretty_assertions = { version = "1.4" } -prost = { version = "0.14" } -protox = { version = "0.9" } +anyhow = { version = "1.0" } +assert_matches = { version = "1.5" } +async-trait = { version = "0.1" } +clap = { features = ["derive"], version = "4.5" } +fs-err = { version = "3" } +futures = { version = "0.3" } +hex = { version = "0.4" } +http = { version = "1.3" } +humantime = { version = "2.2" } +indexmap = { version = "2.12" } +itertools = { version = "0.14" } +lru = { default-features = false, version = "0.16" } +pretty_assertions = { version = "1.4" } +# breaking change `DecodeError::new` is not exposed anymore +# but is assumed public by some internal dependency +prost = { default-features = false, version = "=0.14.1" } +protox = { version = "=0.9.0" } rand = { version = "0.9" } rand_chacha = { version = "0.9" } rstest = { version = "0.26" } @@ -83,7 +85,7 @@ thiserror = { default-features = false, version = "2.0" } tokio = { features = ["rt-multi-thread"], version = "1.46" } tokio-stream = { version = "0.1" } toml = { version = "0.9" } -tonic = { version = "0.14" } +tonic = { default-features = false, version = "0.14" } tonic-prost = { version = "0.14" } tonic-prost-build = { version = "0.14" } tonic-reflection = { version = "0.14" } diff --git a/Makefile b/Makefile index 7a968862c..5522c2d63 100644 --- a/Makefile +++ b/Makefile @@ -15,6 +15,7 @@ BUILD_PROTO=BUILD_PROTO=1 clippy: ## Runs Clippy with configs cargo clippy --locked --all-targets --all-features --workspace -- -D warnings cargo clippy --locked --all-targets --all-features -p miden-remote-prover -- -D warnings + cargo clippy --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover -- -D warnings .PHONY: fix @@ -90,7 +91,7 @@ check: ## Check all targets and features for errors without code generation .PHONY: build build: ## Builds all crates and re-builds protobuf bindings for proto crates ${BUILD_PROTO} cargo build --locked --workspace - ${BUILD_PROTO} cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features # no-std compatible build + ${BUILD_PROTO} cargo build --locked -p miden-remote-prover-client --target wasm32-unknown-unknown --no-default-features --features batch-prover,block-prover,tx-prover # no-std compatible build # --- installing ---------------------------------------------------------------------------------- diff --git a/bin/network-monitor/.env b/bin/network-monitor/.env index c5779257d..8474b0681 100644 --- a/bin/network-monitor/.env +++ b/bin/network-monitor/.env @@ -3,16 +3,19 @@ MIDEN_MONITOR_PORT=3001 MIDEN_MONITOR_ENABLE_OTEL=true MIDEN_MONITOR_REQUEST_TIMEOUT=10s # rpc checks -MIDEN_MONITOR_RPC_URL=http://0.0.0.0:57291 +MIDEN_MONITOR_RPC_URL=https://rpc.devnet.miden.io/ MIDEN_MONITOR_STATUS_CHECK_INTERVAL=30s # remote prover checks 
MIDEN_MONITOR_REMOTE_PROVER_URLS=https://tx-prover.devnet.miden.io/,https://batch-prover.devnet.miden.io/ MIDEN_MONITOR_REMOTE_PROVER_TEST_INTERVAL=2m # faucet checks -MIDEN_MONITOR_FAUCET_URL=http://localhost:8080 +MIDEN_MONITOR_FAUCET_URL=https://faucet-api.devnet.miden.io/ MIDEN_MONITOR_FAUCET_TEST_INTERVAL=2m # network transaction checks MIDEN_MONITOR_DISABLE_NTX_SERVICE=false MIDEN_MONITOR_COUNTER_FILEPATH=counter_account.mac MIDEN_MONITOR_WALLET_FILEPATH=wallet_account.mac MIDEN_MONITOR_COUNTER_INCREMENT_INTERVAL=30s +MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT=2m +# explorer checks +MIDEN_MONITOR_EXPLORER_URL=https://scan-backend-devnet-miden.eu-central-8.gateway.fm/graphql diff --git a/bin/network-monitor/Cargo.toml b/bin/network-monitor/Cargo.toml index 772032b27..64a1f19e1 100644 --- a/bin/network-monitor/Cargo.toml +++ b/bin/network-monitor/Cargo.toml @@ -20,10 +20,10 @@ axum = { version = "0.8" } clap = { features = ["env"], workspace = true } hex = { version = "0.4" } humantime = { workspace = true } -miden-lib = { workspace = true } miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { features = ["std", "testing"], workspace = true } +miden-protocol = { features = ["std", "testing"], workspace = true } +miden-standards = { workspace = true } miden-testing = { workspace = true } miden-tx = { features = ["std"], workspace = true } rand = { version = "0.9" } diff --git a/bin/network-monitor/README.md b/bin/network-monitor/README.md index 2bab71c90..47063b923 100644 --- a/bin/network-monitor/README.md +++ b/bin/network-monitor/README.md @@ -30,6 +30,7 @@ miden-network-monitor start --faucet-url http://localhost:8080 --enable-otel - `--rpc-url`: RPC service URL (default: `http://localhost:50051`) - `--remote-prover-urls`: Comma-separated list of remote prover URLs. If omitted or empty, prover tasks are disabled. - `--faucet-url`: Faucet service URL for testing. If omitted, faucet testing is disabled. +- `--explorer-url`: Explorer service GraphQL endpoint. If omitted, explorer checks are disabled. - `--disable-ntx-service`: Disable the network transaction service checks (enabled by default). The network transaction service consists of two components: counter increment (sending increment transactions) and counter tracking (monitoring counter value changes). - `--remote-prover-test-interval`: Interval at which to test the remote provers services (default: `2m`) - `--faucet-test-interval`: Interval at which to test the faucet services (default: `2m`) @@ -40,6 +41,7 @@ miden-network-monitor start --faucet-url http://localhost:8080 --enable-otel - `--wallet-filepath`: Path where the wallet account is located (default: `wallet_account.mac`) - `--counter-filepath`: Path where the network account is located (default: `counter_program.mac`) - `--counter-increment-interval`: Interval at which to send the increment counter transaction (default: `30s`) +- `--counter-latency-timeout`: Maximum time to wait for a counter update after submitting a transaction (default: `2m`) - `--help, -h`: Show help information - `--version, -V`: Show version information @@ -50,6 +52,7 @@ If command-line arguments are not provided, the application falls back to enviro - `MIDEN_MONITOR_RPC_URL`: RPC service URL - `MIDEN_MONITOR_REMOTE_PROVER_URLS`: Comma-separated list of remote prover URLs. If unset or empty, prover tasks are disabled. - `MIDEN_MONITOR_FAUCET_URL`: Faucet service URL for testing. If unset, faucet testing is disabled. 
+- `MIDEN_MONITOR_EXPLORER_URL`: Explorer service GraphQL endpoint. If unset, explorer checks are disabled.
 - `MIDEN_MONITOR_DISABLE_NTX_SERVICE`: Set to `true` to disable the network transaction service checks (enabled by default). This affects both counter increment and tracking components.
 - `MIDEN_MONITOR_REMOTE_PROVER_TEST_INTERVAL`: Interval at which to test the remote provers services
 - `MIDEN_MONITOR_FAUCET_TEST_INTERVAL`: Interval at which to test the faucet services
@@ -60,6 +63,7 @@ If command-line arguments are not provided, the application falls back to enviro
 - `MIDEN_MONITOR_WALLET_FILEPATH`: Path where the wallet account is located
 - `MIDEN_MONITOR_COUNTER_FILEPATH`: Path where the network account is located
 - `MIDEN_MONITOR_COUNTER_INCREMENT_INTERVAL`: Interval at which to send the increment counter transaction
+- `MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT`: Maximum time to wait for a counter update after submitting a transaction
 
 ## Commands
 
@@ -151,6 +155,14 @@ The monitor application provides real-time status monitoring for the following M
 - **Block Producer Status**:
   - Block producer version and health
 
+### Explorer
+- **Service Health**: Explorer availability and freshness of the latest block
+- **Latest Block Metadata**:
+  - Block height and timestamp
+  - Transaction, nullifier, note, and account update counts
+  - Block, chain, and proof commitments (shortened display with copy-to-clipboard)
+- **Block Delta**: The difference between the explorer's block height and the RPC's chain tip. If the difference exceeds a fixed tolerance (20 blocks), a warning is displayed. This check is performed in the frontend.
+
 ### Remote Provers
 - **Service Health**: Individual remote prover availability and status
 - **Version Information**: Remote prover service version
@@ -175,18 +187,19 @@ The monitor application provides real-time status monitoring for the following M
 - Transaction and note ID tracking from successful mints
 - Automated testing on a configurable interval to verify faucet functionality
 
-### Counter Increment Service
-- **Service Health**: End-to-end transaction submission for counter increment
+### Local Transactions (Counter Increment)
+- **Service Health**: End-to-end local transaction submission for counter increment
 - **Metrics**:
   - Success/Failure counts for increment transactions
   - Last TX ID with copy-to-clipboard
+  - Latency in blocks from submission to observed counter update (with pending measurement tracking)
 
-### Counter Tracking Service
-- **Service Health**: Real-time monitoring of counter value changes
+### Network Transactions (Counter Tracking)
+- **Service Health**: Real-time monitoring of on-chain counter value changes
 - **Metrics**:
   - Current network account counter value (queried from RPC periodically)
   - Expected counter value based on successful increments sent
-  - Pending increments: How many transactions are queued/unprocessed
+  - Pending notes: How many increment transactions are queued/unprocessed
   - Last updated timestamp
 
 ## User Interface
 
@@ -201,6 +214,31 @@ The web dashboard provides a clean, responsive interface with the following feat
 - **Interactive Elements**: Copy-to-clipboard functionality for genesis commitments, transaction IDs, and note IDs
 - **Responsive Design**: Optimized for both desktop and mobile viewing
 
+### gRPC-Web Browser Probe
+
+The dashboard automatically probes the RPC and Remote Prover services every 30 seconds using the gRPC-Web protocol. This tests whether the browser can successfully communicate with these services.
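+
+As a rough illustration of what is on the wire (a sketch, not the dashboard's actual implementation in `assets/index.js`; the helper names `empty_grpc_web_frame` and `grpc_status` are hypothetical, and Rust is used here only for brevity):
+
+```rust
+/// The 5-byte gRPC-Web frame for an empty protobuf request:
+/// 1 flag byte (0x00 = uncompressed) + a 4-byte big-endian length (zero).
+fn empty_grpc_web_frame() -> [u8; 5] {
+    [0x00, 0x00, 0x00, 0x00, 0x00]
+}
+
+/// Scan a gRPC-Web response body for the trailer frame (flag bit 0x80)
+/// and extract the `grpc-status` value, if present.
+fn grpc_status(body: &[u8]) -> Option<String> {
+    let mut offset = 0;
+    while offset + 5 <= body.len() {
+        let flag = body[offset];
+        let len = u32::from_be_bytes(body[offset + 1..offset + 5].try_into().ok()?) as usize;
+        offset += 5;
+        if offset + len > body.len() {
+            break;
+        }
+        if flag & 0x80 != 0 {
+            // Trailers are plain "key: value" lines, e.g. "grpc-status: 0".
+            let trailers = String::from_utf8_lossy(&body[offset..offset + len]);
+            for line in trailers.lines() {
+                if let Some(v) = line.strip_prefix("grpc-status:") {
+                    return Some(v.trim().to_string());
+                }
+            }
+        }
+        offset += len;
+    }
+    None
+}
+```
+
+A `grpc-status` of `0` (or a missing trailer) is treated as success; anything else is surfaced as a probe failure.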
+ +**What it checks:** +- Browser connectivity to the service endpoint +- CORS configuration (the probe is a real cross-origin request from the browser) +- gRPC-Web protocol handling (proper framing and trailers) +- Basic service availability (calls the `Status` endpoint) + +**Results displayed:** +- **gRPC-Web: OK** / **gRPC-Web: FAILED** status +- Response latency in milliseconds +- Error details (if failed) +- Time since last probe + +**Common failure scenarios:** +- **CORS / Network error**: The service is not configured to accept cross-origin requests from the browser, or the service is unreachable +- **HTTP 4xx/5xx**: The service returned an HTTP error (check server logs) +- **grpc-status non-zero**: The gRPC call failed at the application level + +**Note:** The probe uses the same URLs configured for `--rpc-url` and `--remote-prover-urls`. For the probe to work from a browser, these services must: +1. Have gRPC-Web support enabled (e.g., via Envoy, grpc-web proxy, or native tonic-web) +2. Allow CORS requests from the monitor's origin (or use `Access-Control-Allow-Origin: *`) + ## Account Management When the network transaction service is enabled, the monitor manages the necessary Miden accounts: diff --git a/bin/network-monitor/assets/index.css b/bin/network-monitor/assets/index.css index b375f10e1..26213b717 100644 --- a/bin/network-monitor/assets/index.css +++ b/bin/network-monitor/assets/index.css @@ -383,7 +383,7 @@ body { font-family: "DM Mono", monospace; } -.worker-address { +.worker-name { font-weight: 500; color: #333; } @@ -450,6 +450,24 @@ body { font-weight: 500; } +.metric-value.warning-delta, +.warning-text { + color: #ff8c00; +} + +.warning-text { + font-weight: 500; + font-size: 12px; +} + +.warning-banner { + margin-top: 8px; + padding: 8px 12px; + border-radius: 4px; + background: rgba(255, 85, 0, 0.08); + border-left: 3px solid #ff8c00; +} + .test-metrics.healthy .metric-value { color: #22C55D; } @@ -499,3 +517,100 @@ body { grid-template-columns: repeat(auto-fit, minmax(280px, 1fr)); } } + +/* gRPC-Web Probe Styles */ +.probe-section { + margin-top: 12px; + padding-top: 8px; + border-top: 1px dashed #e0e0e0; +} + +.probe-spinner { + width: 12px; + height: 12px; + border: 2px solid #ccc; + border-top-color: #666; + border-radius: 50%; + animation: spin 0.8s linear infinite; +} + +@keyframes spin { + to { transform: rotate(360deg); } +} + +.probe-result { + margin-top: 8px; + padding: 6px 10px; + border-radius: 4px; + font-size: 11px; + display: flex; + flex-wrap: wrap; + align-items: center; + gap: 8px; +} + +.probe-result.probe-ok { + background-color: rgba(34, 197, 93, 0.1); + border-left: 3px solid #22C55D; +} + +.probe-result.probe-failed { + background-color: rgba(255, 85, 0, 0.1); + border-left: 3px solid #ff5500; +} + +.probe-result.probe-pending { + background-color: rgba(150, 150, 150, 0.1); + border-left: 3px solid #999; +} + +.probe-pending .probe-status-badge { + color: #666; + text-transform: none; +} + +.probe-status-badge { + font-weight: 600; + text-transform: uppercase; + letter-spacing: 0.5px; +} + +.probe-ok .probe-status-badge { + color: #22C55D; +} + +.probe-failed .probe-status-badge { + color: #ff5500; +} + +.probe-latency { + font-family: "DM Mono", monospace; + color: #666; +} + +.probe-error { + color: #dc2626; + font-size: 10px; + max-width: 200px; + overflow: hidden; + text-overflow: ellipsis; + white-space: nowrap; +} + +.probe-time { + color: #999; + font-size: 10px; + margin-left: auto; +} + +@media (max-width: 768px) { + 
.probe-result { + flex-direction: column; + align-items: flex-start; + gap: 4px; + } + + .probe-time { + margin-left: 0; + } +} diff --git a/bin/network-monitor/assets/index.html b/bin/network-monitor/assets/index.html index ffa773abd..9b66d6c18 100644 --- a/bin/network-monitor/assets/index.html +++ b/bin/network-monitor/assets/index.html @@ -45,451 +45,6 @@ - + diff --git a/bin/network-monitor/assets/index.js b/bin/network-monitor/assets/index.js new file mode 100644 index 000000000..049de239d --- /dev/null +++ b/bin/network-monitor/assets/index.js @@ -0,0 +1,798 @@ +// Miden Network Monitor - Frontend JavaScript +// ================================================================================================ + +let statusData = null; +let updateInterval = null; +const EXPLORER_LAG_TOLERANCE = 20; // max allowed block delta vs RPC, roughly 1 minute + +// Store gRPC-Web probe results keyed by service URL +const grpcWebProbeResults = new Map(); + +// gRPC-Web probe implementation +// ================================================================================================ + +/** + * Performs a gRPC-Web probe to the given URL and path. + * This sends a real browser-originated gRPC-Web request to test connectivity, + * CORS configuration, and gRPC-Web protocol handling. + * + * @param {string} baseUrl - The base URL of the service (e.g., "https://prover.example.com:443") + * @param {string} grpcPath - The gRPC method path (e.g., "/remote_prover.ProxyStatusApi/Status") + * @returns {Promise<{ok: boolean, latencyMs: number, error: string|null}>} + */ +async function probeGrpcWeb(baseUrl, grpcPath) { + const startTime = performance.now(); + + // Normalize URL: remove trailing slash from baseUrl + const normalizedUrl = baseUrl.replace(/\/+$/, ''); + const fullUrl = `${normalizedUrl}${grpcPath}`; + + // gRPC-Web frame for google.protobuf.Empty: + // - 1 byte compressed flag = 0x00 (not compressed) + // - 4 bytes big-endian length = 0x00000000 (empty message) + const emptyGrpcWebFrame = new Uint8Array([0x00, 0x00, 0x00, 0x00, 0x00]); + + // Build headers - RPC service requires custom Accept header + const headers = { + 'Content-Type': 'application/grpc-web+proto', + 'X-Grpc-Web': '1', + }; + + // The RPC service requires 'application/vnd.miden' in Accept header + // (this is the custom media type used by the Miden gRPC clients) + // The remote prover accepts standard gRPC-Web content types + if (grpcPath.startsWith('/rpc.')) { + headers['Accept'] = 'application/vnd.miden'; + } else { + headers['Accept'] = 'application/grpc-web+proto'; + } + + try { + const response = await fetch(fullUrl, { + method: 'POST', + headers, + body: emptyGrpcWebFrame, + }); + + const latencyMs = Math.round(performance.now() - startTime); + + if (!response.ok) { + return { + ok: false, + latencyMs, + error: `HTTP ${response.status}: ${response.statusText}`, + }; + } + + // Read the response body as bytes + const responseBytes = new Uint8Array(await response.arrayBuffer()); + + // Parse gRPC-Web response to extract grpc-status from trailers + const grpcStatus = parseGrpcWebTrailers(responseBytes); + + if (grpcStatus === '0' || grpcStatus === null) { + // grpc-status 0 means OK; null means no trailer found (might still be OK) + return { ok: true, latencyMs, error: null }; + } else { + return { + ok: false, + latencyMs, + error: `grpc-status: ${grpcStatus}`, + }; + } + } catch (err) { + const latencyMs = Math.round(performance.now() - startTime); + + // TypeError: Failed to fetch usually indicates CORS or network 
error + if (err instanceof TypeError) { + return { + ok: false, + latencyMs, + error: 'CORS / Network error: ' + err.message, + }; + } + + return { + ok: false, + latencyMs, + error: err.message || String(err), + }; + } +} + +/** + * Parses gRPC-Web response bytes to extract the grpc-status from trailers. + * gRPC-Web trailers are sent as a frame with flag 0x80. + * + * @param {Uint8Array} data - The response body bytes + * @returns {string|null} - The grpc-status value, or null if not found + */ +function parseGrpcWebTrailers(data) { + let offset = 0; + + while (offset + 5 <= data.length) { + const flag = data[offset]; + const length = (data[offset + 1] << 24) | + (data[offset + 2] << 16) | + (data[offset + 3] << 8) | + data[offset + 4]; + + offset += 5; + + if (offset + length > data.length) break; + + // Flag 0x80 indicates trailers + if (flag === 0x80) { + const trailerBytes = data.slice(offset, offset + length); + const trailerText = new TextDecoder().decode(trailerBytes); + + // Parse trailer headers (format: "key: value\r\n") + const lines = trailerText.split(/\r?\n/); + for (const line of lines) { + const match = line.match(/^grpc-status:\s*(\d+)/i); + if (match) { + return match[1]; + } + } + } + + offset += length; + } + + return null; +} + +// Interval for periodic gRPC-Web probing +let grpcWebProbeInterval = null; +const GRPC_WEB_PROBE_INTERVAL_MS = 30000; // Probe every 30 seconds + +/** + * Collects all gRPC-Web endpoints that need to be probed from the current status data. + * + * @returns {Array<{serviceKey: string, baseUrl: string, grpcPath: string}>} + */ +function collectGrpcWebEndpoints() { + if (!statusData || !statusData.services) return []; + + const endpoints = []; + + for (const service of statusData.services) { + if (service.details) { + // RPC service + if (service.details.RpcStatus && service.details.RpcStatus.url) { + endpoints.push({ + serviceKey: service.details.RpcStatus.url, + baseUrl: service.details.RpcStatus.url, + grpcPath: '/rpc.Api/Status', + }); + } + // Remote Prover service + if (service.details.RemoteProverStatus && service.details.RemoteProverStatus.url) { + endpoints.push({ + serviceKey: service.details.RemoteProverStatus.url, + baseUrl: service.details.RemoteProverStatus.url, + grpcPath: '/remote_prover.ProxyStatusApi/Status', + }); + } + } + } + + return endpoints; +} + +/** + * Runs gRPC-Web probes for all collected endpoints. + * Results are stored in grpcWebProbeResults and display is updated. + */ +async function runGrpcWebProbes() { + const endpoints = collectGrpcWebEndpoints(); + if (endpoints.length === 0) return; + + // Run all probes in parallel + const probePromises = endpoints.map(async ({ serviceKey, baseUrl, grpcPath }) => { + const result = await probeGrpcWeb(baseUrl, grpcPath); + grpcWebProbeResults.set(serviceKey, { + ...result, + timestamp: Date.now(), + }); + }); + + await Promise.all(probePromises); + + // Re-render to show updated results + updateDisplay(); +} + +/** + * Renders the probe result badge for a service. + * + * @param {string} serviceKey - Unique key for the service + * @returns {string} - HTML string for the probe result + */ +function renderProbeResult(serviceKey) { + const result = grpcWebProbeResults.get(serviceKey); + if (!result) return ''; + + const statusClass = result.ok ? 'probe-ok' : 'probe-failed'; + const statusText = result.ok ? 'OK' : 'FAILED'; + const seconds = Math.floor((Date.now() - result.timestamp) / 1000); + const timeAgo = seconds < 60 ? `${seconds}s ago` : seconds < 3600 ? 
`${Math.floor(seconds / 60)}m ago` : `${Math.floor(seconds / 3600)}h ago`;
+    const errorDisplay = result.error && result.error.length > 40 ? result.error.substring(0, 40) + '...' : result.error;
+
+    return `
+        <div class="probe-result ${statusClass}">
+            <span class="probe-status-badge">gRPC-Web: ${statusText}</span>
+            <span class="probe-latency">${result.latencyMs}ms</span>
+            ${result.error ? `<span class="probe-error">${errorDisplay}</span>` : ''}
+            <span class="probe-time">${timeAgo}</span>
+        </div>
+    `;
+}
+
+/**
+ * Renders the gRPC-Web probe result section for a service.
+ * Shows "Checking..." if no result yet, otherwise shows the probe result.
+ *
+ * @param {string} serviceKey - Unique key for the service (the URL)
+ * @returns {string} - HTML string for the probe result section
+ */
+function renderGrpcWebProbeSection(serviceKey) {
+    const result = grpcWebProbeResults.get(serviceKey);
+
+    if (!result) {
+        return `
+            <div class="probe-section">
+                <div class="probe-result probe-pending">
+                    <div class="probe-spinner"></div>
+                    <span class="probe-status-badge">gRPC-Web: Checking...</span>
+                </div>
+            </div>
+        `;
+    }
+
+    return `
+        <div class="probe-section">
+            ${renderProbeResult(serviceKey)}
+        </div>
+ `; +} + + +const COPY_ICON = ` + + + + +`; + +function renderCopyButton(value, label) { + if (!value) return ''; + const escapedValue = JSON.stringify(value); + return ` + + `; +} + +function formatSuccessRate(successCount, failureCount) { + const total = successCount + failureCount; + if (!total) { + return 'N/A'; + } + + return `${((successCount / total) * 100).toFixed(1)}%`; +} + +async function fetchStatus() { + try { + const response = await fetch('/status'); + if (!response.ok) { + throw new Error(`HTTP error! status: ${response.status}`); + } + statusData = await response.json(); + updateDisplay(); + } catch (error) { + console.error('Error fetching status:', error); + showError('Failed to fetch network status: ' + error.message); + } +} + +// Merge Remote Prover status and test entries into a single card per prover. +function mergeProverStatusAndTests(services) { + const testsByName = new Map(); + const merged = []; + const usedTests = new Set(); + + services.forEach(service => { + if (service.details && service.details.RemoteProverTest) { + testsByName.set(service.name, service); + } + }); + + services.forEach(service => { + if (service.details && service.details.RemoteProverStatus) { + const test = testsByName.get(service.name); + if (test) { + usedTests.add(service.name); + } + merged.push({ + ...service, + testDetails: test?.details?.RemoteProverTest ?? null, + testStatus: test?.status ?? null, + testError: test?.error ?? null + }); + } else if (!(service.details && service.details.RemoteProverTest)) { + // Non-prover entries pass through unchanged + merged.push(service); + } + }); + + // Add orphaned tests (in case a test arrives before a status) + testsByName.forEach((test, name) => { + if (!usedTests.has(name)) { + merged.push({ + name, + status: test.status, + last_checked: test.last_checked, + error: test.error, + details: null, + testDetails: test.details.RemoteProverTest, + testStatus: test.status, + testError: test.error + }); + } + }); + + return merged; +} + +function updateDisplay() { + if (!statusData) return; + + const container = document.getElementById('status-container'); + const lastUpdated = document.getElementById('last-updated'); + const overallStatus = document.getElementById('overall-status'); + const servicesCount = document.getElementById('services-count'); + + // Update last updated time + const lastUpdateTime = new Date(statusData.last_updated * 1000); + lastUpdated.textContent = lastUpdateTime.toLocaleString(); + + // Group remote prover status + test into single cards + const processedServices = mergeProverStatusAndTests(statusData.services); + const rpcService = processedServices.find(s => s.details && s.details.RpcStatus); + const rpcChainTip = + rpcService?.details?.RpcStatus?.store_status?.chain_tip ?? + rpcService?.details?.RpcStatus?.block_producer_status?.chain_tip ?? + null; + + // Count healthy vs unhealthy services + const healthyServices = processedServices.filter(s => s.status === 'Healthy').length; + const totalServices = processedServices.length; + const allHealthy = healthyServices === totalServices; + + // Update footer + overallStatus.textContent = allHealthy ? 'All Systems Operational' : `${healthyServices}/${totalServices} Services Healthy`; + overallStatus.style.color = allHealthy ? 
'#22C55D' : '#ff5500'; + servicesCount.textContent = `${totalServices} Services`; + + // Generate status cards + const serviceCardsHtml = processedServices.map(service => { + const isHealthy = service.status === 'Healthy'; + const statusColor = isHealthy ? '#22C55D' : '#ff5500'; + const statusIcon = isHealthy ? '✓' : '✗'; + const numOrDash = value => isHealthy ? (value?.toLocaleString?.() ?? value ?? '-') : '-'; + const timeOrDash = ts => { + if (!isHealthy) return '-'; + return ts ? new Date(ts * 1000).toLocaleString() : '-'; + }; + const commitmentOrDash = (value, label) => isHealthy && value + ? ` + ${value.substring(0, 20)}... + ${renderCopyButton(value, label)} + ` + : '-'; + + const explorerStats = service.details?.ExplorerStatus; + const isExplorerService = service.name?.toLowerCase().includes('explorer'); + const deltaBlock = (isHealthy && explorerStats && rpcChainTip !== null) + ? explorerStats.block_number - rpcChainTip + : null; + const deltaWarning = + deltaBlock !== null && Math.abs(deltaBlock) > EXPLORER_LAG_TOLERANCE + ? `Explorer tip is ${Math.abs(deltaBlock)} blocks ${deltaBlock > 0 ? 'ahead' : 'behind'}` + : null; + let explorerWarningHtml = ''; + + let detailsHtml = ''; + if (service.details) { + const details = service.details; + detailsHtml = ` +
+ ${details.RpcStatus ? ` +
Version: ${details.RpcStatus.version}
+ ${details.RpcStatus.genesis_commitment ? ` +
+ Genesis: + 0x${details.RpcStatus.genesis_commitment.substring(0, 20)}... + ${renderCopyButton(details.RpcStatus.genesis_commitment, 'genesis commitment')} +
+ ` : ''} + ${details.RpcStatus.url ? renderGrpcWebProbeSection(details.RpcStatus.url) : ''} + ${details.RpcStatus.store_status ? ` +
+
Store
+
+ Version: + ${details.RpcStatus.store_status.version} +
+
+ Status: + ${details.RpcStatus.store_status.status} +
+
+ Chain Tip: + ${details.RpcStatus.store_status.chain_tip} +
+
+ ` : ''} + ${details.RpcStatus.block_producer_status ? ` +
+
Block Producer
+
+ Version: + ${details.RpcStatus.block_producer_status.version} +
+
+ Status: + ${details.RpcStatus.block_producer_status.status} +
+
+ Chain Tip: + ${details.RpcStatus.block_producer_status.chain_tip} +
+
+ Mempool stats: +
+ Unbatched TXs: + ${details.RpcStatus.block_producer_status.mempool.unbatched_transactions} +
+
+ Proposed Batches: + ${details.RpcStatus.block_producer_status.mempool.proposed_batches} +
+
+ Proven Batches: + ${details.RpcStatus.block_producer_status.mempool.proven_batches} +
+
+
+ ` : ''} + ` : ''} + ${details.RemoteProverStatus ? ` +
+ Prover Status (${details.RemoteProverStatus.url}): +
Version: ${details.RemoteProverStatus.version}
+
+ Supported Proof Type: ${details.RemoteProverStatus.supported_proof_type} +
+ ${details.RemoteProverStatus.workers && details.RemoteProverStatus.workers.length > 0 ? ` +
+ Workers (${details.RemoteProverStatus.workers.length}): + ${details.RemoteProverStatus.workers.map(worker => ` +
+ ${worker.name} - + ${worker.version} - + ${worker.status} +
+ `).join('')} +
+ ` : ''} + ${renderGrpcWebProbeSection(details.RemoteProverStatus.url)} +
+ ` : ''} + ${details.FaucetTest ? ` +
+ Faucet: +
+
+ Success Rate: + ${formatSuccessRate(details.FaucetTest.success_count, details.FaucetTest.failure_count)} +
+
+ Last Response Time: + ${details.FaucetTest.test_duration_ms}ms +
+ ${details.FaucetTest.last_tx_id ? ` +
+ Last TX ID: + ${details.FaucetTest.last_tx_id.substring(0, 16)}...${renderCopyButton(details.FaucetTest.last_tx_id, 'TX ID')} +
+ ` : ''} + ${details.FaucetTest.challenge_difficulty ? ` +
+ Last Challenge Difficulty: + ~${details.FaucetTest.challenge_difficulty} bits +
+ ` : ''} +
+
+ ${details.FaucetTest.faucet_metadata ? ` +
+ Faucet Token Info: +
+
+ Token ID: + ${details.FaucetTest.faucet_metadata.id.substring(0, 16)}...${renderCopyButton(details.FaucetTest.faucet_metadata.id, 'token ID')} +
+
+ Version: + ${details.FaucetTest.faucet_metadata.version || '-'} +
+
+ Current Issuance: + ${details.FaucetTest.faucet_metadata.issuance.toLocaleString()} +
+
+ Max Supply: + ${details.FaucetTest.faucet_metadata.max_supply.toLocaleString()} +
+
+ Decimals: + ${details.FaucetTest.faucet_metadata.decimals} +
+
+ Base Amount: + ${details.FaucetTest.faucet_metadata.base_amount.toLocaleString()} +
+
+ PoW Difficulty: + ${details.FaucetTest.faucet_metadata.pow_load_difficulty} +
+ +
+
+ ` : ''} + ` : ''} + ${details.NtxIncrement ? ` +
+ Local Transactions: +
+
+ Success Rate: + ${formatSuccessRate(details.NtxIncrement.success_count, details.NtxIncrement.failure_count)} +
+ ${details.NtxIncrement.last_latency_blocks !== null && details.NtxIncrement.last_latency_blocks !== undefined ? ` +
+ Latency: + ${details.NtxIncrement.last_latency_blocks} blocks +
+ ` : ''} + ${details.NtxIncrement.last_tx_id ? ` +
+ Last TX ID: + ${details.NtxIncrement.last_tx_id.substring(0, 16)}...${renderCopyButton(details.NtxIncrement.last_tx_id, 'TX ID')} +
+ ` : ''} +
+
+ ` : ''} + ${details.NtxTracking ? ` +
+ Network Transactions: +
+
+ Current Value: + ${details.NtxTracking.current_value ?? '-'} +
+ ${details.NtxTracking.expected_value ? ` +
+ Expected Value: + ${details.NtxTracking.expected_value} +
+ ` : ''} + ${details.NtxTracking.pending_increments !== null && details.NtxTracking.pending_increments !== undefined ? ` +
+ Pending Notes: + ${details.NtxTracking.pending_increments} +
+ ` : ''} + ${details.NtxTracking.last_updated ? ` +
+ Last Updated: + ${new Date(details.NtxTracking.last_updated * 1000).toLocaleString()} +
+ ` : ''} +
+
+ ` : ''} + ${service.testDetails ? ` +
+ Proof Generation Testing (${service.testDetails.proof_type}): +
+
+ Success Rate: + ${formatSuccessRate(service.testDetails.success_count, service.testDetails.failure_count)} +
+
+ Last Response Time: + ${service.testDetails.test_duration_ms}ms +
+
+ Last Proof Size: + ${(service.testDetails.proof_size_bytes / 1024).toFixed(2)} KB +
+
+
+ ` : ''} +
+ `; + } + + // Always render explorer block for explorer services, even if stats are missing. + if (isExplorerService) { + detailsHtml += ` +
+
+ Explorer: +
+ Block Height: + ${explorerStats ? numOrDash(explorerStats.block_number) : '-'} +
+
+ RPC Chain Tip: + ${isHealthy && rpcChainTip !== null ? rpcChainTip : '-'} +
+
+ Block Time: + ${explorerStats ? timeOrDash(explorerStats.timestamp) : '-'} +
+
+ Block Commitment: + ${explorerStats ? commitmentOrDash(explorerStats.block_commitment, 'block commitment') : '-'} +
+
+ Chain Commitment: + ${explorerStats ? commitmentOrDash(explorerStats.chain_commitment, 'chain commitment') : '-'} +
+
+ Proof Commitment: + ${explorerStats ? commitmentOrDash(explorerStats.proof_commitment, 'proof commitment') : '-'} +
+
+ Transactions: + ${explorerStats ? numOrDash(explorerStats.number_of_transactions) : '-'} +
+
+ Nullifiers: + ${explorerStats ? numOrDash(explorerStats.number_of_nullifiers) : '-'} +
+
+ Notes: + ${explorerStats ? numOrDash(explorerStats.number_of_notes) : '-'} +
+
+ Account Updates: + ${explorerStats ? numOrDash(explorerStats.number_of_account_updates) : '-'} +
+
+
+ `; + + if (deltaWarning) { + explorerWarningHtml = ` +
+
+ Explorer vs RPC +
+
${deltaWarning}
+
+ `; + } + } + + return ` +
+
+
${service.name}
+
+ ${statusIcon} ${service.status.toUpperCase()} +
+
+
+ ${detailsHtml} + ${explorerWarningHtml} +
+
+ Last checked: ${new Date(service.last_checked * 1000).toLocaleString()} +
+
+ `; + }).join(''); + + container.innerHTML = serviceCardsHtml; + + // Add refresh button that spans the full grid + container.innerHTML += ` +
+ +
+ `; +} + +function showError(message) { + const container = document.getElementById('status-container'); + container.innerHTML = ` +
+ ${message} +
+
+ +
+ `; +} + +async function copyToClipboard(text, event) { + const button = event.target.closest('.copy-button'); + if (!button) return; + + try { + await navigator.clipboard.writeText(text); + // Show a brief success indicator + const originalContent = button.innerHTML; + button.innerHTML = ''; + button.style.color = '#22C55D'; + + setTimeout(() => { + button.innerHTML = originalContent; + button.style.color = ''; + }, 2000); + } catch (err) { + console.error('Failed to copy to clipboard:', err); + // Show error feedback on button + button.style.color = '#ff5500'; + setTimeout(() => { + button.style.color = ''; + }, 2000); + } +} + +// Initialize on DOM ready +document.addEventListener('DOMContentLoaded', () => { + // Initial load and set up auto-refresh + fetchStatus().then(() => { + // Start gRPC-Web probing after initial status fetch + runGrpcWebProbes(); + grpcWebProbeInterval = setInterval(runGrpcWebProbes, GRPC_WEB_PROBE_INTERVAL_MS); + }); + updateInterval = setInterval(fetchStatus, 10000); // Refresh every 10 seconds +}); + +// Clean up on page unload +window.addEventListener('beforeunload', () => { + if (updateInterval) { + clearInterval(updateInterval); + } + if (grpcWebProbeInterval) { + clearInterval(grpcWebProbeInterval); + } +}); + diff --git a/bin/network-monitor/src/assets/counter_program.masm b/bin/network-monitor/src/assets/counter_program.masm index 60cd146ba..175e7e969 100644 --- a/bin/network-monitor/src/assets/counter_program.masm +++ b/bin/network-monitor/src/assets/counter_program.masm @@ -3,17 +3,20 @@ # - Slot 0: counter value (u64) # - Slot 1: authorized wallet account id as [prefix, suffix, 0, 0] -use.miden::active_account -use.miden::native_account -use.miden::active_note -use.miden::account_id -use.miden::tx +use miden::core::sys +use miden::protocol::active_account +use miden::protocol::native_account +use miden::protocol::active_note +use miden::protocol::account_id +use miden::protocol::tx -use.std::sys + +# The slot in this component's storage layout where the counter is stored. +const COUNTER_SLOT = word("miden::monitor::counter_contract::counter") # Increment function with note authentication # => [] -export.increment +pub proc increment # Ensure the note sender matches the authorized wallet stored in slot 1. push.1 exec.active_account::get_item # => [owner_prefix, owner_suffix, 0, 0] @@ -27,13 +30,13 @@ export.increment assert.err="Note sender not authorized" drop drop # => [] - push.0 exec.active_account::get_item + push.COUNTER_SLOT[0..2] exec.active_account::get_item # => [count, 0, 0, 0] - + push.1 add # => [count+1] - push.0 exec.native_account::set_item + push.COUNTER_SLOT[0..2] exec.native_account::set_item # => [count, 0, 0, 0] dropw @@ -42,8 +45,8 @@ end # Get the counter (no auth required) # => [count] -export.get_count - push.0 exec.active_account::get_item +pub proc get_count + push.COUNTER_SLOT[0..2] exec.active_account::get_item # => [count, 0, 0, 0] exec.sys::truncate_stack diff --git a/bin/network-monitor/src/assets/increment_counter.masm b/bin/network-monitor/src/assets/increment_counter.masm index 76c4bdcb6..4a835bdd8 100644 --- a/bin/network-monitor/src/assets/increment_counter.masm +++ b/bin/network-monitor/src/assets/increment_counter.masm @@ -2,7 +2,7 @@ # This script is executed as a note and calls the # `counter_contract::increment` entrypoint. 
-use.external_contract::counter_contract +use external_contract::counter_contract begin call.counter_contract::increment diff --git a/bin/network-monitor/src/commands/start.rs b/bin/network-monitor/src/commands/start.rs index 3f1cbca6b..4262db445 100644 --- a/bin/network-monitor/src/commands/start.rs +++ b/bin/network-monitor/src/commands/start.rs @@ -4,7 +4,7 @@ use anyhow::Result; use miden_node_utils::logging::OpenTelemetry; -use tracing::{info, instrument, warn}; +use tracing::{debug, info, instrument, warn}; use crate::COMPONENT; use crate::config::MonitorConfig; @@ -15,7 +15,16 @@ use crate::monitor::tasks::Tasks; /// /// This function initializes all monitoring tasks including RPC status checking, /// remote prover testing, faucet testing, and the web frontend. -#[instrument(target = COMPONENT, name = "start-monitor", skip_all, fields(port = %config.port))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.start_monitor", + skip_all, + level = "info", + fields(port = %config.port), + ret(level = "debug"), + err +)] pub async fn start_monitor(config: MonitorConfig) -> Result<()> { // Load configuration from command-line arguments and environment variables info!("Loaded configuration: {:?}", config); @@ -29,17 +38,28 @@ pub async fn start_monitor(config: MonitorConfig) -> Result<()> { let mut tasks = Tasks::new(); // Initialize the RPC Status endpoint checker task. + debug!(target: COMPONENT, "Initializing RPC status checker"); let rpc_rx = tasks.spawn_rpc_checker(&config).await?; + // Initialize the explorer status checker task. + let explorer_rx = if config.explorer_url.is_some() { + Some(tasks.spawn_explorer_checker(&config).await?) + } else { + None + }; + // Initialize the prover checkers & tests tasks, only if URLs were provided. let prover_rxs = if config.remote_prover_urls.is_empty() { + debug!(target: COMPONENT, "No remote prover URLs configured, skipping prover tasks"); Vec::new() } else { + debug!(target: COMPONENT, "Initializing prover checkers and tests"); tasks.spawn_prover_tasks(&config).await? }; // Initialize the faucet testing task. let faucet_rx = if config.faucet_url.is_some() { + debug!(target: COMPONENT, "Initializing faucet testing task"); Some(tasks.spawn_faucet(&config)) } else { warn!("Faucet URL not configured, skipping faucet testing"); @@ -48,19 +68,23 @@ pub async fn start_monitor(config: MonitorConfig) -> Result<()> { // Initialize the counter increment and tracking tasks only if enabled. let (ntx_increment_rx, ntx_tracking_rx) = if config.disable_ntx_service { + debug!(target: COMPONENT, "NTX service disabled, skipping counter increment task"); (None, None) } else { + debug!(target: COMPONENT, "Initializing counter increment task"); let (increment_rx, tracking_rx) = tasks.spawn_ntx_service(&config).await?; (Some(increment_rx), Some(tracking_rx)) }; // Initialize HTTP server. + debug!(target: COMPONENT, "Initializing HTTP server"); let server_state = ServerState { rpc: rpc_rx, provers: prover_rxs, faucet: faucet_rx, ntx_increment: ntx_increment_rx, ntx_tracking: ntx_tracking_rx, + explorer: explorer_rx, }; tasks.spawn_http_server(server_state, &config); diff --git a/bin/network-monitor/src/config.rs b/bin/network-monitor/src/config.rs index fa2af59e2..c30735c02 100644 --- a/bin/network-monitor/src/config.rs +++ b/bin/network-monitor/src/config.rs @@ -138,6 +138,16 @@ pub struct MonitorConfig { )] pub counter_increment_interval: Duration, + /// Maximum time to wait for the counter update after submitting a transaction. 
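+    ///
+    /// Parsed with `humantime`, so suffixed durations work as-is; a couple of
+    /// illustrative (hypothetical) settings:
+    ///
+    /// ```text
+    /// --counter-latency-timeout 2m    # 120 seconds, the default
+    /// --counter-latency-timeout 90s   # tighter bound for a fast network
+    /// ```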
+    #[arg(
+        long = "counter-latency-timeout",
+        env = "MIDEN_MONITOR_COUNTER_LATENCY_TIMEOUT",
+        default_value = "2m",
+        value_parser = humantime::parse_duration,
+        help = "Maximum time to wait for a counter update after submitting a transaction"
+    )]
+    pub counter_latency_timeout: Duration,
+
     /// The timeout for the outgoing requests.
     #[arg(
         long = "request-timeout",
@@ -147,4 +157,12 @@
         help = "The timeout for the outgoing requests"
     )]
     pub request_timeout: Duration,
+
+    /// The URL of the explorer service.
+    #[arg(
+        long = "explorer-url",
+        env = "MIDEN_MONITOR_EXPLORER_URL",
+        help = "The URL of the explorer service"
+    )]
+    pub explorer_url: Option<Url>,
 }
diff --git a/bin/network-monitor/src/counter.rs b/bin/network-monitor/src/counter.rs
index 7bf16618b..4c89c74f8 100644
--- a/bin/network-monitor/src/counter.rs
+++ b/bin/network-monitor/src/counter.rs
@@ -6,20 +6,18 @@ use std::path::Path;
 use std::sync::Arc;
 use std::sync::atomic::{AtomicU64, Ordering};
+use std::time::Instant;
 
 use anyhow::{Context, Result};
-use miden_lib::AuthScheme;
-use miden_lib::account::interface::AccountInterface;
-use miden_lib::utils::ScriptBuilder;
-use miden_node_proto::clients::{Builder, Rpc, RpcClient};
-use miden_node_proto::generated::shared::BlockHeaderByNumberRequest;
+use miden_node_proto::clients::RpcClient;
+use miden_node_proto::generated::rpc::BlockHeaderByNumberRequest;
 use miden_node_proto::generated::transaction::ProvenTransaction;
-use miden_objects::account::auth::AuthSecretKey;
-use miden_objects::account::{Account, AccountFile, AccountHeader, AccountId};
-use miden_objects::assembly::Library;
-use miden_objects::block::{BlockHeader, BlockNumber};
-use miden_objects::crypto::dsa::rpo_falcon512::SecretKey;
-use miden_objects::note::{
+use miden_protocol::account::auth::AuthSecretKey;
+use miden_protocol::account::{Account, AccountFile, AccountHeader, AccountId};
+use miden_protocol::assembly::Library;
+use miden_protocol::block::{BlockHeader, BlockNumber};
+use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey;
+use miden_protocol::note::{
     Note,
     NoteAssets,
     NoteExecutionHint,
@@ -30,38 +28,36 @@ use miden_objects::note::{
     NoteTag,
     NoteType,
 };
-use miden_objects::transaction::{InputNotes, PartialBlockchain, TransactionArgs};
-use miden_objects::utils::Deserializable;
-use miden_objects::{Felt, Word, ZERO};
+use miden_protocol::transaction::{InputNotes, PartialBlockchain, TransactionArgs};
+use miden_protocol::utils::Deserializable;
+use miden_protocol::{Felt, Word, ZERO};
+use miden_standards::account::interface::{AccountInterface, AccountInterfaceExt};
+use miden_standards::code_builder::CodeBuilder;
 use miden_tx::auth::BasicAuthenticator;
 use miden_tx::utils::Serializable;
 use miden_tx::{LocalTransactionProver, TransactionExecutor};
 use rand::{Rng, SeedableRng};
 use rand_chacha::ChaCha20Rng;
-use tokio::sync::watch;
+use tokio::sync::{Mutex, watch};
 use tracing::{error, info, instrument, warn};
 
 use crate::COMPONENT;
 use crate::config::MonitorConfig;
-use crate::deploy::{MonitorDataStore, get_counter_library};
+use crate::deploy::{MonitorDataStore, create_genesis_aware_rpc_client, get_counter_library};
 use crate::status::{
     CounterTrackingDetails,
     IncrementDetails,
+    PendingLatencyDetails,
     ServiceDetails,
     ServiceStatus,
     Status,
 };
 
-async fn create_rpc_client(config: &MonitorConfig) -> Result<RpcClient> {
-    Builder::new(config.rpc_url.clone())
-        .with_tls()
-        .context("Failed to configure TLS for RPC client")
-        .expect("TLS is enabled")
-        .with_timeout(config.request_timeout)
-        .without_metadata_version()
-        .without_metadata_genesis()
-        .connect::<Rpc>()
-        .await
+#[derive(Debug, Default, Clone)]
+pub struct LatencyState {
+    pending: Option<PendingLatencyDetails>,
+    pending_started: Option<Instant>,
+    last_latency_blocks: Option<u32>,
 }
 
 /// Get the genesis block header.
@@ -207,14 +203,24 @@ async fn setup_increment_task(
 /// # Returns
 ///
 /// This function runs indefinitely, only returning on error.
-#[instrument(target = COMPONENT, name = "run-increment-task", skip_all, ret(level = "debug"))]
+#[instrument(
+    parent = None,
+    target = COMPONENT,
+    name = "network_monitor.counter.run_increment_task",
+    skip_all,
+    level = "info",
+    ret(level = "debug"),
+    err
+)]
 pub async fn run_increment_task(
     config: MonitorConfig,
     tx: watch::Sender<ServiceStatus>,
     expected_counter_value: Arc<AtomicU64>,
+    latency_state: Arc<Mutex<LatencyState>>,
 ) -> Result<()> {
     // Create RPC client
-    let mut rpc_client = create_rpc_client(&config).await?;
+    let mut rpc_client =
+        create_genesis_aware_rpc_client(&config.rpc_url, config.request_timeout).await?;
 
     let (
         mut details,
@@ -232,7 +238,9 @@
     loop {
         interval.tick().await;
 
-        let last_error = match create_and_submit_network_note(
+        let mut last_error = None;
+
+        match create_and_submit_network_note(
             &wallet_account,
             &counter_account,
             &secret_key,
@@ -244,16 +252,34 @@
         )
         .await
         {
-            Ok((tx_id, final_account, _block_height)) => handle_increment_success(
-                &mut wallet_account,
-                &final_account,
-                &mut data_store,
-                &mut details,
-                tx_id,
-                &expected_counter_value,
-            )?,
-            Err(e) => Some(handle_increment_failure(&mut details, &e)),
-        };
+            Ok((tx_id, final_account, block_height)) => {
+                let target_value = handle_increment_success(
+                    &mut wallet_account,
+                    &final_account,
+                    &mut data_store,
+                    &mut details,
+                    tx_id,
+                    &expected_counter_value,
+                )?;
+
+                {
+                    let mut guard = latency_state.lock().await;
+                    guard.pending = Some(PendingLatencyDetails {
+                        submit_height: block_height.as_u32(),
+                        target_value,
+                    });
+                    guard.pending_started = Some(Instant::now());
+                }
+            },
+            Err(e) => {
+                last_error = Some(handle_increment_failure(&mut details, &e));
+            },
+        }
+
+        {
+            let guard = latency_state.lock().await;
+            details.last_latency_blocks = guard.last_latency_blocks;
+        }
 
         let status = build_increment_status(&details, last_error);
         send_status(&tx, status)?;
     }
 }
 
 /// Handle the success path for increment operations.
+///
+/// Returns the next expected counter value after a successful increment.
 fn handle_increment_success(
     wallet_account: &mut Account,
     final_account: &AccountHeader,
@@ -268,7 +296,7 @@
     details: &mut IncrementDetails,
     tx_id: String,
     expected_counter_value: &Arc<AtomicU64>,
-) -> Result<Option<String>> {
+) -> Result<u64> {
     let updated_wallet = Account::new(
         wallet_account.id(),
         wallet_account.vault().clone(),
@@ -284,9 +312,9 @@
     details.last_tx_id = Some(tx_id);
 
     // Increment the expected counter value
-    expected_counter_value.fetch_add(1, Ordering::Relaxed);
+    let new_expected = expected_counter_value.fetch_add(1, Ordering::Relaxed) + 1;
 
-    Ok(None)
+    Ok(new_expected)
 }
 
 /// Handle the failure path when creating/submitting the network note fails.
@@ -298,7 +326,11 @@ fn handle_increment_failure(details: &mut IncrementDetails, error: &anyhow::Erro
 /// Build a `ServiceStatus` snapshot from the current increment details and last error.
 fn build_increment_status(details: &IncrementDetails, last_error: Option<String>) -> ServiceStatus {
-    let status = if details.failure_count == 0 {
+    let status = if last_error.is_some() {
+        // If the most recent attempt failed, surface the service as unhealthy so the
+        // dashboard reflects that the increment pipeline is not currently working.
+        Status::Unhealthy
+    } else if details.failure_count == 0 {
         Status::Healthy
     } else if details.success_count == 0 {
         Status::Unhealthy
@@ -307,7 +339,7 @@
     };
 
     ServiceStatus {
-        name: "Counter Increment".to_string(),
+        name: "Local Transactions".to_string(),
         status,
         last_checked: crate::monitor::tasks::current_unix_timestamp_secs(),
         error: last_error,
@@ -339,14 +371,24 @@
 /// # Returns
 ///
 /// This function runs indefinitely, only returning on error.
-#[instrument(target = COMPONENT, name = "run-counter-tracking-task", skip_all, ret(level = "debug"))]
+#[instrument(
+    parent = None,
+    target = COMPONENT,
+    name = "network_monitor.counter.run_counter_tracking_task",
+    skip_all,
+    level = "info",
+    ret(level = "debug"),
+    err
+)]
 pub async fn run_counter_tracking_task(
     config: MonitorConfig,
     tx: watch::Sender<ServiceStatus>,
     expected_counter_value: Arc<AtomicU64>,
+    latency_state: Arc<Mutex<LatencyState>>,
 ) -> Result<()> {
     // Create RPC client
-    let mut rpc_client = create_rpc_client(&config).await?;
+    let mut rpc_client =
+        create_genesis_aware_rpc_client(&config.rpc_url, config.request_timeout).await?;
 
     // Load counter account to get the account ID
     let counter_account = match load_counter_account(&config.counter_filepath) {
@@ -358,11 +400,45 @@
     };
 
     let mut details = CounterTrackingDetails::default();
+    initialize_counter_tracking_state(
+        &mut rpc_client,
+        &counter_account,
+        &expected_counter_value,
+        &mut details,
+    )
+    .await;
+
+    let mut poll_interval = tokio::time::interval(config.counter_increment_interval / 2);
+
+    loop {
+        poll_interval.tick().await;
 
-    // Initialize the expected counter value by fetching the current value from the node
-    match fetch_counter_value(&mut rpc_client, counter_account.id()).await {
+        let last_error = poll_counter_once(
+            &mut rpc_client,
+            &counter_account,
+            &expected_counter_value,
+            &latency_state,
+            &mut details,
+            &config,
+        )
+        .await;
+        let status = build_tracking_status(&details, last_error);
+        send_status(&tx, status)?;
+    }
+}
+
+/// Initialize tracking state by fetching the current counter value from the node.
+///
+/// Populates `expected_counter_value` and seeds `details` with the latest observed
+/// values so the first poll iteration starts from a consistent snapshot.
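+///
+/// A sketch of the three initialization outcomes (mirroring the `match` below):
+///
+/// ```text
+/// Ok(Some(v)) => expected = v, details seeded with v
+/// Ok(None)    => expected = 0   (counter account not deployed yet)
+/// Err(_)      => expected = 0, error logged and polling proceeds
+/// ```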
+async fn initialize_counter_tracking_state(
+    rpc_client: &mut RpcClient,
+    counter_account: &Account,
+    expected_counter_value: &Arc<AtomicU64>,
+    details: &mut CounterTrackingDetails,
+) {
+    match fetch_counter_value(rpc_client, counter_account.id()).await {
         Ok(Some(initial_value)) => {
-            // Set the expected value to the current value from the node
             expected_counter_value.store(initial_value, Ordering::Relaxed);
             details.current_value = Some(initial_value);
             details.expected_value = Some(initial_value);
@@ -370,61 +446,121 @@
             info!("Initialized counter tracking with value: {}", initial_value);
         },
         Ok(None) => {
-            // Counter doesn't exist yet, initialize to 0
             expected_counter_value.store(0, Ordering::Relaxed);
             warn!("Counter account not found, initializing expected value to 0");
         },
         Err(e) => {
-            // Failed to fetch, initialize to 0 but log the error
            expected_counter_value.store(0, Ordering::Relaxed);
            error!("Failed to fetch initial counter value, initializing to 0: {:?}", e);
         },
     }
+}
 
-    let mut poll_interval = tokio::time::interval(config.counter_increment_interval / 2);
 
+/// Poll the counter once, updating details and latency tracking state.
+///
+/// Returns a human-readable error string when the poll fails or latency tracking
+/// cannot complete; otherwise returns `None`.
+async fn poll_counter_once(
+    rpc_client: &mut RpcClient,
+    counter_account: &Account,
+    expected_counter_value: &Arc<AtomicU64>,
+    latency_state: &Arc<Mutex<LatencyState>>,
+    details: &mut CounterTrackingDetails,
+    config: &MonitorConfig,
+) -> Option<String> {
+    let mut last_error = None;
+    let current_time = crate::monitor::tasks::current_unix_timestamp_secs();
+
+    match fetch_counter_value(rpc_client, counter_account.id()).await {
+        Ok(Some(value)) => {
+            details.current_value = Some(value);
+            details.last_updated = Some(current_time);
+
+            update_expected_and_pending(details, expected_counter_value, value);
+            handle_latency_tracking(rpc_client, latency_state, config, value, &mut last_error)
+                .await;
+        },
+        Ok(None) => {
+            // Counter value not available, but not an error
+        },
+        Err(e) => {
+            error!("Failed to fetch counter value: {:?}", e);
+            last_error = Some(format!("fetch counter value failed: {e}"));
+        },
+    }
 
-    loop {
-        poll_interval.tick().await;
+    last_error
+}
 
-        let current_time = crate::monitor::tasks::current_unix_timestamp_secs();
-        let last_error = match fetch_counter_value(&mut rpc_client, counter_account.id()).await {
-            Ok(Some(value)) => {
-                // Update current value and timestamp
-                details.current_value = Some(value);
-                details.last_updated = Some(current_time);
-
-                // Get expected value and calculate pending increments
-                let expected = expected_counter_value.load(Ordering::Relaxed);
-                details.expected_value = Some(expected);
-
-                // Calculate how many increments are pending (expected - current)
-                // Use saturating_sub to avoid negative values if current > expected (shouldn't
-                // happen normally, but could due to race conditions)
-                if expected >= value {
-                    details.pending_increments = Some(expected - value);
-                } else {
-                    // This shouldn't happen, but log it if it does
-                    warn!(
-                        "Expected counter value ({}) is less than current value ({}), setting pending to 0",
-                        expected, value
-                    );
-                    details.pending_increments = Some(0);
-                }
 
+/// Update expected and pending counters based on the latest observed value.
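+///
+/// For example (hypothetical numbers): with an expected value of 12 and an
+/// observed on-chain value of 9, `pending_increments` becomes `Some(3)`; if the
+/// observed value ever runs ahead of the expected one, pending is clamped to
+/// `Some(0)` and a warning is logged.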
+fn update_expected_and_pending(
+    details: &mut CounterTrackingDetails,
+    expected_counter_value: &Arc<AtomicU64>,
+    observed_value: u64,
+) {
+    let expected = expected_counter_value.load(Ordering::Relaxed);
+    details.expected_value = Some(expected);
 
-            None
-        },
-        Ok(None) => {
-            // Counter value not available, but not an error
-            None
-        },
-        Err(e) => {
-            error!("Failed to fetch counter value: {:?}", e);
-            Some(format!("fetch counter value failed: {e}"))
-        },
-    };
+    if expected >= observed_value {
+        details.pending_increments = Some(expected - observed_value);
+    } else {
+        warn!(
+            "Expected counter value ({}) is less than current value ({}), setting pending to 0",
+            expected, observed_value
+        );
+        details.pending_increments = Some(0);
+    }
+}
 
-        let status = build_tracking_status(&details, last_error);
-        send_status(&tx, status)?;
+/// Update latency tracking state, performing RPC as needed while minimizing lock hold time.
+///
+/// Populates `last_error` when latency bookkeeping fails or times out.
+async fn handle_latency_tracking(
+    rpc_client: &mut RpcClient,
+    latency_state: &Arc<Mutex<LatencyState>>,
+    config: &MonitorConfig,
+    observed_value: u64,
+    last_error: &mut Option<String>,
+) {
+    let (pending, pending_started) = {
+        let guard = latency_state.lock().await;
+        (guard.pending.clone(), guard.pending_started)
+    };
+
+    if let Some(pending) = pending {
+        if observed_value >= pending.target_value {
+            match fetch_chain_tip(rpc_client).await {
+                Ok(observed_height) => {
+                    let latency_blocks = observed_height.saturating_sub(pending.submit_height);
+                    let mut guard = latency_state.lock().await;
+                    if guard.pending.as_ref().map(|p| p.target_value) == Some(pending.target_value)
+                    {
+                        guard.last_latency_blocks = Some(latency_blocks);
+                        guard.pending = None;
+                        guard.pending_started = None;
+                    }
+                },
+                Err(e) => {
+                    *last_error = Some(format!("Failed to fetch chain tip for latency calc: {e}"));
+                },
+            }
+        } else if let Some(started) = pending_started {
+            if Instant::now().saturating_duration_since(started) >= config.counter_latency_timeout {
+                warn!(
+                    "Latency measurement timed out after {:?} for target value {}",
+                    config.counter_latency_timeout, pending.target_value
+                );
+                let mut guard = latency_state.lock().await;
+                if guard.pending.as_ref().map(|p| p.target_value) == Some(pending.target_value) {
+                    guard.pending = None;
+                    guard.pending_started = None;
+                }
+                *last_error = Some(format!(
+                    "Timed out after {:?} waiting for counter to reach {}",
+                    config.counter_latency_timeout, pending.target_value
+                ));
+            }
+        }
     }
 }
 
@@ -433,14 +569,18 @@
 fn build_tracking_status(
     details: &CounterTrackingDetails,
     last_error: Option<String>,
 ) -> ServiceStatus {
-    let status = if details.current_value.is_some() {
+    let status = if last_error.is_some() {
+        // If the latest poll failed, surface the service as unhealthy even if we have
+        // a previously cached value, so the dashboard shows that tracking is degraded.
+        Status::Unhealthy
+    } else if details.current_value.is_some() {
         Status::Healthy
     } else {
         Status::Unknown
    };
 
     ServiceStatus {
-        name: "Counter Tracking".to_string(),
+        name: "Network Transactions".to_string(),
         status,
         last_checked: crate::monitor::tasks::current_unix_timestamp_secs(),
         error: last_error,
@@ -458,7 +598,15 @@ fn load_counter_account(file_path: &Path) -> Result<Account> {
 }
 
 /// Create and submit a network note that targets the counter account.
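+///
+/// The flow, roughly (a sketch; helper names as used in the body below):
+///
+/// ```text
+/// create_network_note(..)          -> (note, recipient)
+/// build_send_notes_script(&[note]) -> transaction script for the wallet
+/// execute, prove (LocalTransactionProver), SubmitProvenTransaction via RPC
+/// returns (tx_id, final wallet header, submission block height)
+/// ```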
#[allow(clippy::too_many_arguments)] -#[instrument(target = COMPONENT, name = "create-and-submit-network-note", skip_all, ret)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.counter.create_and_submit_network_note", + skip_all, + level = "info", + ret(level = "debug"), + err +)] async fn create_and_submit_network_note( wallet_account: &Account, counter_account: &Account, @@ -472,15 +620,11 @@ async fn create_and_submit_network_note( // Create authenticator for transaction signing let authenticator = BasicAuthenticator::new(&[AuthSecretKey::RpoFalcon512(secret_key.clone())]); - let account_interface = AccountInterface::new( - wallet_account.id(), - vec![AuthScheme::RpoFalcon512 { pub_key: secret_key.public_key().into() }], - wallet_account.code(), - ); + let account_interface = AccountInterface::from_account(wallet_account); let (network_note, note_recipient) = create_network_note(wallet_account, counter_account, increment_script.clone(), rng)?; - let script = account_interface.build_send_notes_script(&[network_note.into()], None, false)?; + let script = account_interface.build_send_notes_script(&[network_note.into()], None)?; // Create transaction executor let executor = TransactionExecutor::new(data_store).with_authenticator(&authenticator); @@ -515,7 +659,7 @@ async fn create_and_submit_network_note( .await .context("Failed to submit proven transaction to RPC")? .into_inner() - .block_height + .block_num .into(); info!("Submitted proven transaction to RPC"); @@ -530,7 +674,7 @@ async fn create_and_submit_network_note( fn create_increment_script() -> Result<(NoteScript, Library)> { let library = get_counter_library()?; - let script_builder = ScriptBuilder::new(true) + let script_builder = CodeBuilder::new() .with_dynamically_linked_library(&library) .context("Failed to create script builder with library")?; @@ -572,3 +716,16 @@ fn create_network_note( let network_note = Note::new(NoteAssets::new(vec![])?, metadata, recipient.clone()); Ok((network_note, recipient)) } + +/// Fetch the current chain tip height from RPC status. 
+async fn fetch_chain_tip(rpc_client: &mut RpcClient) -> Result<u32> {
+    let status = rpc_client.status(()).await?.into_inner();
+
+    if let Some(block_producer_status) = status.block_producer {
+        Ok(block_producer_status.chain_tip)
+    } else if let Some(store_status) = status.store {
+        Ok(store_status.chain_tip)
+    } else {
+        anyhow::bail!("RPC status response did not include a chain tip")
+    }
+}
diff --git a/bin/network-monitor/src/deploy/counter.rs b/bin/network-monitor/src/deploy/counter.rs
index fa62b1575..fc10a43d1 100644
--- a/bin/network-monitor/src/deploy/counter.rs
+++ b/bin/network-monitor/src/deploy/counter.rs
@@ -3,9 +3,7 @@ use std::path::Path;
 
 use anyhow::Result;
-use miden_lib::testing::account_component::IncrNonceAuthComponent;
-use miden_lib::transaction::TransactionKernel;
-use miden_objects::account::{
+use miden_protocol::account::{
     Account,
     AccountBuilder,
     AccountComponent,
@@ -14,12 +12,26 @@ use miden_objects::account::{
     AccountStorageMode,
     AccountType,
     StorageSlot,
+    StorageSlotName,
 };
-use miden_objects::{Felt, FieldElement, Word};
+use miden_protocol::utils::sync::LazyLock;
+use miden_protocol::{Felt, FieldElement, Word};
+use miden_standards::code_builder::CodeBuilder;
+use miden_standards::testing::account_component::IncrNonceAuthComponent;
 use tracing::instrument;
 
 use crate::COMPONENT;
 
+static OWNER_SLOT_NAME: LazyLock<StorageSlotName> = LazyLock::new(|| {
+    StorageSlotName::new("miden::monitor::counter_contract::owner")
+        .expect("storage slot name should be valid")
+});
+
+static COUNTER_SLOT_NAME: LazyLock<StorageSlotName> = LazyLock::new(|| {
+    StorageSlotName::new("miden::monitor::counter_contract::counter")
+        .expect("storage slot name should be valid")
+});
+
 /// Create a counter program account with custom MASM script.
 #[instrument(target = COMPONENT, name = "create-counter-account", skip_all, ret(level = "debug"))]
 pub fn create_counter_account(owner_account_id: AccountId) -> Result<Account> {
@@ -31,21 +43,18 @@ pub fn create_counter_account(owner_account_id: AccountId) -> Result<Account> {
     let owner_account_id_prefix = owner_account_id.prefix().as_felt();
     let owner_account_id_suffix = owner_account_id.suffix();
 
-    let owner_id_slot = StorageSlot::Value(Word::from([
-        Felt::ZERO,
-        Felt::ZERO,
-        owner_account_id_suffix,
-        owner_account_id_prefix,
-    ]));
+    let owner_id_slot = StorageSlot::with_value(
+        OWNER_SLOT_NAME.clone(),
+        Word::from([Felt::ZERO, Felt::ZERO, owner_account_id_suffix, owner_account_id_prefix]),
+    );
+
+    let counter_slot = StorageSlot::with_value(COUNTER_SLOT_NAME.clone(), Word::empty());
 
-    let counter_slot = StorageSlot::Value(Word::empty());
+    let component_code =
+        CodeBuilder::default().compile_component_code("counter::program", script)?;
 
-    let account_code = AccountComponent::compile(
-        script,
-        TransactionKernel::assembler(),
-        vec![counter_slot, owner_id_slot],
-    )?
-    .with_supports_all_types();
+    let account_code = AccountComponent::new(component_code, vec![counter_slot, owner_id_slot])?
+ .with_supports_all_types(); let incr_nonce_auth: AccountComponent = IncrNonceAuthComponent.into(); diff --git a/bin/network-monitor/src/deploy/mod.rs b/bin/network-monitor/src/deploy/mod.rs index d9be433a1..0a6c4ebe5 100644 --- a/bin/network-monitor/src/deploy/mod.rs +++ b/bin/network-monitor/src/deploy/mod.rs @@ -8,18 +8,29 @@ use std::sync::Arc; use std::time::Duration; use anyhow::{Context, Result}; -use miden_lib::transaction::TransactionKernel; -use miden_node_proto::clients::{Builder, Rpc, RpcClient}; -use miden_node_proto::generated::shared::BlockHeaderByNumberRequest; +use miden_node_proto::clients::{Builder, RpcClient}; +use miden_node_proto::generated::rpc::BlockHeaderByNumberRequest; use miden_node_proto::generated::transaction::ProvenTransaction; -use miden_objects::account::{Account, AccountId, PartialAccount, PartialStorage}; -use miden_objects::assembly::{DefaultSourceManager, Library, LibraryPath, Module, ModuleKind}; -use miden_objects::asset::{AssetVaultKey, AssetWitness, PartialVault}; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::crypto::merkle::{MmrPeaks, PartialMmr}; -use miden_objects::note::NoteScript; -use miden_objects::transaction::{AccountInputs, InputNotes, PartialBlockchain, TransactionArgs}; -use miden_objects::{MastForest, Word}; +use miden_protocol::account::{Account, AccountId, PartialAccount, PartialStorage}; +use miden_protocol::assembly::{ + DefaultSourceManager, + Library, + Module, + ModuleKind, + Path as MidenPath, +}; +use miden_protocol::asset::{AssetVaultKey, AssetWitness, PartialVault}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::crypto::merkle::mmr::{MmrPeaks, PartialMmr}; +use miden_protocol::note::NoteScript; +use miden_protocol::transaction::{ + AccountInputs, + InputNotes, + PartialBlockchain, + TransactionArgs, + TransactionKernel, +}; +use miden_protocol::{MastForest, Word}; use miden_tx::auth::BasicAuthenticator; use miden_tx::utils::Serializable; use miden_tx::{ @@ -40,6 +51,62 @@ use crate::deploy::wallet::{create_wallet_account, save_wallet_account}; pub mod counter; pub mod wallet; +/// Create an RPC client configured with the correct genesis metadata in the +/// `Accept` header so that write RPCs such as `SubmitProvenTransaction` are +/// accepted by the node. +pub async fn create_genesis_aware_rpc_client( + rpc_url: &Url, + timeout: Duration, +) -> Result { + // First, create a temporary client without genesis metadata to discover the + // genesis block header and its commitment. + let mut rpc: RpcClient = Builder::new(rpc_url.clone()) + .with_tls() + .context("Failed to configure TLS for RPC client")? + .with_timeout(timeout) + .without_metadata_version() + .without_metadata_genesis() + .without_otel_context_injection() + .connect() + .await + .context("Failed to create RPC client for genesis discovery")?; + + let block_header_request = BlockHeaderByNumberRequest { + block_num: Some(BlockNumber::GENESIS.as_u32()), + include_mmr_proof: None, + }; + + let response = rpc + .get_block_header_by_number(block_header_request) + .await + .context("Failed to get genesis block header from RPC")? 
+ .into_inner(); + + let genesis_block_header = response + .block_header + .ok_or_else(|| anyhow::anyhow!("No block header in response"))?; + + let genesis_header: BlockHeader = + genesis_block_header.try_into().context("Failed to convert block header")?; + let genesis_commitment = genesis_header.commitment(); + let genesis = genesis_commitment.to_hex(); + + // Rebuild the client, this time including the required genesis metadata so that + // write RPCs like SubmitProvenTransaction are accepted by the node. + let rpc_client = Builder::new(rpc_url.clone()) + .with_tls() + .context("Failed to configure TLS for RPC client")? + .with_timeout(timeout) + .without_metadata_version() + .with_metadata_genesis(genesis) + .without_otel_context_injection() + .connect() + .await + .context("Failed to connect to RPC server with genesis metadata")?; + + Ok(rpc_client) +} + /// Ensure accounts exist, creating them if they don't. /// /// This function checks if the wallet and counter account files exist. @@ -89,16 +156,8 @@ pub async fn ensure_accounts_exist( /// then saves it to the specified file. #[instrument(target = COMPONENT, name = "deploy-counter-account", skip_all, ret(level = "debug"))] pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> Result<()> { - // Deploy counter account to the network - let mut rpc_client: RpcClient = Builder::new(rpc_url.clone()) - .with_tls() - .context("Failed to configure TLS for RPC client")? - .with_timeout(Duration::from_secs(5)) - .without_metadata_version() - .without_metadata_genesis() - .connect::() - .await - .context("Failed to connect to RPC server")?; + // Deploy counter account to the network using a genesis-aware RPC client. + let mut rpc_client = create_genesis_aware_rpc_client(rpc_url, Duration::from_secs(10)).await?; let block_header_request = BlockHeaderByNumberRequest { block_num: Some(BlockNumber::GENESIS.as_u32()), @@ -115,7 +174,8 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> .block_header .ok_or_else(|| anyhow::anyhow!("No block header in response"))?; - let genesis_header = root_block_header.try_into().context("Failed to convert block header")?; + let genesis_header: BlockHeader = + root_block_header.try_into().context("Failed to convert block header")?; let genesis_chain_mmr = PartialBlockchain::new(PartialMmr::from_peaks(MmrPeaks::default()), Vec::new()) @@ -125,7 +185,7 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> data_store.add_account(counter_account.clone()); let executor: TransactionExecutor<'_, '_, _, BasicAuthenticator> = - TransactionExecutor::new(&data_store); + TransactionExecutor::new(&data_store).with_debug_mode(); let tx_args = TransactionArgs::default(); @@ -157,16 +217,15 @@ pub async fn deploy_counter_account(counter_account: &Account, rpc_url: &Url) -> } pub(crate) fn get_counter_library() -> Result { - let assembler = TransactionKernel::assembler().with_debug_mode(true); + let assembler = TransactionKernel::assembler(); let source_manager = Arc::new(DefaultSourceManager::default()); let script = include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/src/assets/counter_program.masm")); - let library_path = LibraryPath::new("external_contract::counter_contract") - .context("Failed to create library path")?; + let library_path = MidenPath::new("external_contract::counter_contract"); let module = Module::parser(ModuleKind::Library) - .parse_str(library_path, script, &source_manager) + .parse_str(library_path, script, 
source_manager)
        .map_err(|e| anyhow::anyhow!("Failed to parse module: {e}"))?;
 
     assembler
@@ -248,7 +307,7 @@ impl DataStore for MonitorDataStore {
         _account_id: AccountId,
         _map_root: Word,
         _map_key: Word,
-    ) -> Result {
+    ) -> Result {
         unimplemented!("Not needed")
     }
 
@@ -260,12 +319,12 @@ impl DataStore for MonitorDataStore {
         unimplemented!("Not needed")
     }
 
-    async fn get_vault_asset_witness(
+    async fn get_vault_asset_witnesses(
         &self,
         account_id: AccountId,
         vault_root: Word,
-        vault_key: AssetVaultKey,
-    ) -> Result<AssetWitness, DataStoreError> {
+        vault_keys: BTreeSet<AssetVaultKey>,
+    ) -> Result<Vec<AssetWitness>, DataStoreError> {
         let account = self.get_account(account_id)?;
 
         if account.vault().root() != vault_root {
@@ -275,16 +334,21 @@ impl DataStore for MonitorDataStore {
             });
         }
 
-        AssetWitness::new(account.vault().open(vault_key).into()).map_err(|err| {
-            DataStoreError::Other {
-                error_msg: "failed to open vault asset tree".into(),
-                source: Some(Box::new(err)),
-            }
-        })
+        Result::<Vec<AssetWitness>, _>::from_iter(vault_keys.into_iter().map(|vault_key| {
+            AssetWitness::new(account.vault().open(vault_key).into()).map_err(|err| {
+                DataStoreError::Other {
+                    error_msg: "failed to open vault asset tree".into(),
+                    source: Some(Box::new(err)),
+                }
+            })
+        }))
     }
 
-    async fn get_note_script(&self, script_root: Word) -> Result<NoteScript, DataStoreError> {
-        Err(DataStoreError::NoteScriptNotFound(script_root))
+    async fn get_note_script(
+        &self,
+        _script_root: Word,
+    ) -> Result<Option<NoteScript>, DataStoreError> {
+        Ok(None)
     }
 }
diff --git a/bin/network-monitor/src/deploy/wallet.rs b/bin/network-monitor/src/deploy/wallet.rs
index 704ced5e4..89c616c17 100644
--- a/bin/network-monitor/src/deploy/wallet.rs
+++ b/bin/network-monitor/src/deploy/wallet.rs
@@ -3,12 +3,12 @@ use std::path::Path;
 
 use anyhow::Result;
-use miden_lib::AuthScheme;
-use miden_lib::account::wallets::create_basic_wallet;
 use miden_node_utils::crypto::get_rpo_random_coin;
-use miden_objects::account::auth::AuthSecretKey;
-use miden_objects::account::{Account, AccountFile, AccountStorageMode, AccountType};
-use miden_objects::crypto::dsa::rpo_falcon512::SecretKey;
+use miden_protocol::account::auth::AuthSecretKey;
+use miden_protocol::account::{Account, AccountFile, AccountStorageMode, AccountType};
+use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey;
+use miden_standards::AuthScheme;
+use miden_standards::account::wallets::create_basic_wallet;
 use rand::{Rng, SeedableRng};
 use rand_chacha::ChaCha20Rng;
 use tracing::instrument;
diff --git a/bin/network-monitor/src/explorer.rs b/bin/network-monitor/src/explorer.rs
new file mode 100644
index 000000000..2053ce22c
--- /dev/null
+++ b/bin/network-monitor/src/explorer.rs
@@ -0,0 +1,257 @@
+// EXPLORER STATUS CHECKER
+// ================================================================================================
+
+use std::fmt::{self, Display};
+use std::time::Duration;
+
+use reqwest::Client;
+use serde::Serialize;
+use tokio::sync::watch;
+use tokio::time::MissedTickBehavior;
+use tracing::{info, instrument};
+use url::Url;
+
+use crate::status::{ExplorerStatusDetails, ServiceDetails, ServiceStatus, Status};
+use crate::{COMPONENT, current_unix_timestamp_secs};
+
+const LATEST_BLOCK_QUERY: &str = "
+query LatestBlock {
+    blocks(input: { sort_by: timestamp, order_by: desc }, first: 1) {
+        edges {
+            node {
+                block_number
+                timestamp
+                number_of_transactions
+                number_of_nullifiers
+                number_of_notes
+                block_commitment
+                chain_commitment
+                proof_commitment
+                number_of_account_updates
+            }
+        }
+    }
+}
+";
+
+#[derive(Serialize, Copy, Clone)]
+struct EmptyVariables;
+
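For context on the wire format: the `GraphqlRequest` wrapper defined next serializes to a standard GraphQL-over-HTTP body, and serde_json renders the unit struct `EmptyVariables` as `null`. A standalone sketch of what `.json(&LATEST_BLOCK_REQUEST)` ends up sending (query text abbreviated):

```rust
use serde::Serialize;

#[derive(Serialize, Copy, Clone)]
struct EmptyVariables;

#[derive(Serialize, Copy, Clone)]
struct GraphqlRequest<V> {
    query: &'static str,
    variables: V,
}

fn main() {
    let request = GraphqlRequest {
        query: "query LatestBlock { ... }",
        variables: EmptyVariables,
    };
    // Prints: {"query":"query LatestBlock { ... }","variables":null}
    println!("{}", serde_json::to_string(&request).unwrap());
}
```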
+#[derive(Serialize, Copy, Clone)]
+struct GraphqlRequest<V> {
+    query: &'static str,
+    variables: V,
+}
+
+const LATEST_BLOCK_REQUEST: GraphqlRequest<EmptyVariables> = GraphqlRequest {
+    query: LATEST_BLOCK_QUERY,
+    variables: EmptyVariables,
+};
+
+/// Runs a task that continuously checks explorer status and updates a watch channel.
+///
+/// This function runs a loop that periodically queries the explorer's GraphQL endpoint
+/// and sends the resulting status through a watch channel.
+///
+/// # Arguments
+///
+/// * `explorer_url` - The URL of the explorer service.
+/// * `name` - The name of the explorer.
+/// * `status_sender` - The sender for the watch channel.
+/// * `status_check_interval` - The interval at which to check the status of the services.
+/// * `request_timeout` - The timeout applied to each status request.
+///
+/// # Returns
+///
+/// Runs until every receiver of `status_sender` has been dropped, which is treated as the
+/// shutdown signal.
+#[instrument(target = COMPONENT, name = "explorer-status-task", skip_all)]
+pub async fn run_explorer_status_task(
+    explorer_url: Url,
+    name: String,
+    status_sender: watch::Sender<ServiceStatus>,
+    status_check_interval: Duration,
+    request_timeout: Duration,
+) {
+    let mut explorer_client = reqwest::Client::new();
+
+    let mut interval = tokio::time::interval(status_check_interval);
+    interval.set_missed_tick_behavior(MissedTickBehavior::Skip);
+
+    loop {
+        interval.tick().await;
+
+        let current_time = current_unix_timestamp_secs();
+
+        let status = check_explorer_status(
+            &mut explorer_client,
+            explorer_url.clone(),
+            name.clone(),
+            current_time,
+            request_timeout,
+        )
+        .await;
+
+        // Send the status update; exit if no receivers (shutdown signal)
+        if status_sender.send(status).is_err() {
+            info!("No receivers for explorer status updates, shutting down");
+            return;
+        }
+    }
+}
+
+/// Checks the status of the explorer service.
+///
+/// Sends the latest-block query to the explorer's GraphQL endpoint and converts the
+/// response into explorer status details.
+///
+/// # GraphQL Query
+///
+/// See [`LATEST_BLOCK_QUERY`] for the exact query string used.
+///
+/// # Arguments
+///
+/// * `explorer_client` - The HTTP client used to reach the explorer.
+/// * `explorer_url` - The URL of the explorer.
+/// * `name` - The name of the explorer.
+/// * `current_time` - The current time.
+/// * `request_timeout` - The timeout applied to the request.
+///
+/// # Returns
+///
+/// A `ServiceStatus` containing the status of the explorer service.
+#[instrument(target = COMPONENT, name = "check-status.explorer", skip_all, ret(level = "info"))]
+pub(crate) async fn check_explorer_status(
+    explorer_client: &mut Client,
+    explorer_url: Url,
+    name: String,
+    current_time: u64,
+    request_timeout: Duration,
+) -> ServiceStatus {
+    let resp = explorer_client
+        .post(explorer_url.clone())
+        .json(&LATEST_BLOCK_REQUEST)
+        .timeout(request_timeout)
+        .send()
+        .await;
+
+    let value = match resp {
+        Ok(resp) => resp.json::<serde_json::Value>().await,
+        Err(e) => return unhealthy(&name, current_time, &e),
+    };
+
+    let details = match value {
+        Ok(value) => ExplorerStatusDetails::try_from(value),
+        Err(e) => return unhealthy(&name, current_time, &e),
+    };
+
+    match details {
+        Ok(details) => ServiceStatus {
+            name: name.clone(),
+            status: Status::Healthy,
+            last_checked: current_time,
+            error: None,
+            details: ServiceDetails::ExplorerStatus(details),
+        },
+        Err(e) => unhealthy(&name, current_time, &e),
+    }
+}
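This loop, like the other status tasks in this PR, sets `MissedTickBehavior::Skip`. A quick standalone illustration of why: tokio's default is `Burst`, which would fire every missed tick back-to-back after a slow check, while `Skip` drops them and keeps checks evenly spaced.

```rust
use std::time::Duration;

use tokio::time::MissedTickBehavior;

#[tokio::main]
async fn main() {
    let mut interval = tokio::time::interval(Duration::from_millis(100));
    interval.set_missed_tick_behavior(MissedTickBehavior::Skip);

    interval.tick().await; // the first tick resolves immediately
    tokio::time::sleep(Duration::from_millis(350)).await; // a slow status check

    // With Skip, the three missed ticks are dropped and the next tick
    // realigns to the 100ms cadence instead of firing three times in a row.
    interval.tick().await;
}
```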
+/// Returns an unhealthy service status.
+fn unhealthy(name: &str, current_time: u64, err: &impl ToString) -> ServiceStatus {
+    ServiceStatus {
+        name: name.to_owned(),
+        status: Status::Unhealthy,
+        last_checked: current_time,
+        error: Some(err.to_string()),
+        details: ServiceDetails::Error,
+    }
+}
+
+#[derive(Debug)]
+pub enum ExplorerStatusError {
+    MissingField(String),
+}
+
+impl Display for ExplorerStatusError {
+    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
+        match self {
+            ExplorerStatusError::MissingField(field) => write!(f, "missing field: {field}"),
+        }
+    }
+}
+
+impl TryFrom<serde_json::Value> for ExplorerStatusDetails {
+    type Error = ExplorerStatusError;
+
+    fn try_from(value: serde_json::Value) -> Result<Self, Self::Error> {
+        let node = value.pointer("/data/blocks/edges/0/node").ok_or_else(|| {
+            ExplorerStatusError::MissingField("data.blocks.edges[0].node".to_string())
+        })?;
+
+        let block_number = node
+            .get("block_number")
+            .and_then(serde_json::Value::as_u64)
+            .ok_or_else(|| ExplorerStatusError::MissingField("block_number".to_string()))?;
+        let timestamp = node
+            .get("timestamp")
+            .and_then(serde_json::Value::as_u64)
+            .ok_or_else(|| ExplorerStatusError::MissingField("timestamp".to_string()))?;
+
+        let number_of_transactions = node
+            .get("number_of_transactions")
+            .and_then(serde_json::Value::as_u64)
+            .ok_or_else(|| {
+                ExplorerStatusError::MissingField("number_of_transactions".to_string())
+            })?;
+        let number_of_nullifiers = node
+            .get("number_of_nullifiers")
+            .and_then(serde_json::Value::as_u64)
+            .ok_or_else(|| ExplorerStatusError::MissingField("number_of_nullifiers".to_string()))?;
+        let number_of_notes = node
+            .get("number_of_notes")
+            .and_then(serde_json::Value::as_u64)
+            .ok_or_else(|| ExplorerStatusError::MissingField("number_of_notes".to_string()))?;
+        let number_of_account_updates = node
+            .get("number_of_account_updates")
+            .and_then(serde_json::Value::as_u64)
+            .ok_or_else(|| {
+                ExplorerStatusError::MissingField("number_of_account_updates".to_string())
+            })?;
+
+        let block_commitment = node
+            .get("block_commitment")
+            .and_then(|v| v.as_str())
+            .ok_or_else(|| ExplorerStatusError::MissingField("block_commitment".to_string()))?
+            .to_string();
+        let chain_commitment = node
+            .get("chain_commitment")
+            .and_then(|v| v.as_str())
+            .ok_or_else(|| ExplorerStatusError::MissingField("chain_commitment".to_string()))?
+            .to_string();
+        let proof_commitment = node
+            .get("proof_commitment")
+            .and_then(|v| v.as_str())
+            .ok_or_else(|| ExplorerStatusError::MissingField("proof_commitment".to_string()))?
+ .to_string(); + + Ok(Self { + block_number, + timestamp, + number_of_transactions, + number_of_nullifiers, + number_of_notes, + number_of_account_updates, + block_commitment, + chain_commitment, + proof_commitment, + }) + } +} + +pub(crate) fn initial_explorer_status() -> ServiceStatus { + ServiceStatus { + name: "Explorer".to_string(), + status: Status::Unknown, + last_checked: current_unix_timestamp_secs(), + error: None, + details: ServiceDetails::ExplorerStatus(ExplorerStatusDetails::default()), + } +} diff --git a/bin/network-monitor/src/faucet.rs b/bin/network-monitor/src/faucet.rs index 5cc0944b2..6569a22fa 100644 --- a/bin/network-monitor/src/faucet.rs +++ b/bin/network-monitor/src/faucet.rs @@ -7,8 +7,8 @@ use std::time::Duration; use anyhow::Context; use hex; -use miden_objects::account::AccountId; -use miden_objects::testing::account_id::ACCOUNT_ID_SENDER; +use miden_protocol::account::AccountId; +use miden_protocol::testing::account_id::ACCOUNT_ID_SENDER; use reqwest::Client; use serde::{Deserialize, Serialize}; use sha2::{Digest, Sha256}; @@ -88,7 +88,14 @@ pub struct GetMetadataResponse { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. -#[instrument(target = COMPONENT, name = "faucet-test-task", skip_all)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.faucet.run_faucet_test_task", + skip_all, + level = "info", + ret(level = "debug") +)] pub async fn run_faucet_test_task( faucet_url: Url, status_sender: watch::Sender, @@ -167,6 +174,15 @@ pub async fn run_faucet_test_task( /// # Returns /// /// The response from the faucet if successful, or an error if the test fails. +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.faucet.perform_faucet_test", + skip_all, + level = "info", + ret(level = "debug"), + err +)] async fn perform_faucet_test( client: &Client, faucet_url: &Url, @@ -248,7 +264,15 @@ async fn perform_faucet_test( /// /// The nonce that solves the challenge, or an error if no solution is found within reasonable /// bounds. -#[instrument(target = COMPONENT, name = "solve-pow-challenge", skip_all, ret(level = "debug"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.faucet.solve_pow_challenge", + skip_all, + level = "info", + ret(level = "debug"), + err +)] fn solve_pow_challenge(challenge: &str, target: u64) -> anyhow::Result { let challenge_bytes = hex::decode(challenge).context("Failed to decode challenge from hex")?; diff --git a/bin/network-monitor/src/frontend.rs b/bin/network-monitor/src/frontend.rs index ba7838e27..dd6a8fc5c 100644 --- a/bin/network-monitor/src/frontend.rs +++ b/bin/network-monitor/src/frontend.rs @@ -25,6 +25,7 @@ pub struct ServerState { pub faucet: Option>, pub ntx_increment: Option>, pub ntx_tracking: Option>, + pub explorer: Option>, } /// Runs the frontend server. 
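The `ServerState` fields above are the receiver halves of `tokio::sync::watch` channels: each checker task keeps the sender, and the dashboard handler snapshots whatever value is current via `borrow()`. A standalone sketch of that contract, including the shutdown signal the tasks rely on (`send` fails once every receiver is gone):

```rust
use tokio::sync::watch;

fn main() {
    let (tx, rx) = watch::channel("unknown".to_string());

    // Checker-task side: publish the latest status.
    tx.send("healthy".to_string()).unwrap();

    // Frontend side, as in `get_status` below: snapshot the latest value.
    let latest = rx.borrow().clone();
    assert_eq!(latest, "healthy");

    // Once all receivers are dropped, `send` errors; the tasks treat this
    // as the shutdown signal and return.
    drop(rx);
    assert!(tx.send("ignored".to_string()).is_err());
}
```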
@@ -41,6 +42,7 @@ pub async fn serve(server_state: ServerState, config: MonitorConfig) { let app = Router::new() // Serve embedded assets .route("/assets/index.css", get(serve_css)) + .route("/assets/index.js", get(serve_js)) .route("/assets/favicon.ico", get(serve_favicon)) // Main dashboard route .route("/", get(get_dashboard)) @@ -76,6 +78,11 @@ async fn get_status( // Collect RPC status services.push(server_state.rpc.borrow().clone()); + // Collect explorer status if available + if let Some(explorer_rx) = &server_state.explorer { + services.push(explorer_rx.borrow().clone()); + } + // Collect all remote prover statuses for (prover_status_rx, prover_test_rx) in &server_state.provers { services.push(prover_status_rx.borrow().clone()); @@ -110,6 +117,14 @@ async fn serve_css() -> Response { .into_response() } +async fn serve_js() -> Response { + ( + [(header::CONTENT_TYPE, header::HeaderValue::from_static("text/javascript"))], + include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/assets/index.js")), + ) + .into_response() +} + async fn serve_favicon() -> Response { ( [(header::CONTENT_TYPE, header::HeaderValue::from_static("image/x-icon"))], diff --git a/bin/network-monitor/src/main.rs b/bin/network-monitor/src/main.rs index 2a288f530..ed0f08cba 100644 --- a/bin/network-monitor/src/main.rs +++ b/bin/network-monitor/src/main.rs @@ -12,6 +12,7 @@ pub mod commands; pub mod config; pub mod counter; mod deploy; +pub mod explorer; pub mod faucet; pub mod frontend; mod monitor; diff --git a/bin/network-monitor/src/monitor/tasks.rs b/bin/network-monitor/src/monitor/tasks.rs index 38a306880..be3be5f3c 100644 --- a/bin/network-monitor/src/monitor/tasks.rs +++ b/bin/network-monitor/src/monitor/tasks.rs @@ -6,16 +6,21 @@ use std::sync::atomic::AtomicU64; use std::time::{Duration, SystemTime, UNIX_EPOCH}; use anyhow::Result; -use miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverProxy, Rpc}; -use tokio::sync::watch; +use miden_node_proto::clients::{ + Builder as ClientBuilder, + RemoteProverProxyStatusClient, + RpcClient, +}; use tokio::sync::watch::Receiver; +use tokio::sync::{Mutex, watch}; use tokio::task::{Id, JoinSet}; use tracing::{debug, instrument}; use crate::COMPONENT; use crate::config::MonitorConfig; -use crate::counter::{run_counter_tracking_task, run_increment_task}; +use crate::counter::{LatencyState, run_counter_tracking_task, run_increment_task}; use crate::deploy::ensure_accounts_exist; +use crate::explorer::{initial_explorer_status, run_explorer_status_task}; use crate::faucet::run_faucet_test_task; use crate::frontend::{ServerState, serve}; use crate::remote_prover::{ProofType, generate_prover_test_payload, run_remote_prover_test_task}; @@ -44,11 +49,21 @@ impl Tasks { } /// Spawn the RPC status checker task. 
- #[instrument(target = COMPONENT, name = "tasks.spawn-rpc-checker", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_rpc_checker", + skip_all, + level = "info", + ret(level = "debug"), + err + )] pub async fn spawn_rpc_checker( &mut self, config: &MonitorConfig, ) -> Result> { + debug!(target: COMPONENT, rpc_url = %config.rpc_url, "Spawning RPC status checker task"); + // Create initial status for RPC service let mut rpc = ClientBuilder::new(config.rpc_url.clone()) .with_tls() @@ -56,10 +71,12 @@ impl Tasks { .with_timeout(config.request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let current_time = current_unix_timestamp_secs(); - let initial_rpc_status = check_rpc_status(&mut rpc, current_time).await; + let initial_rpc_status = + check_rpc_status(&mut rpc, config.rpc_url.to_string(), current_time).await; // Spawn the RPC checker let (rpc_tx, rpc_rx) = watch::channel(initial_rpc_status); @@ -74,19 +91,61 @@ impl Tasks { .id(); self.names.insert(id, "rpc-checker".to_string()); + debug!(target: COMPONENT, "RPC status checker task spawned successfully"); Ok(rpc_rx) } + /// Spawn the explorer status checker task. + #[instrument(target = COMPONENT, name = "tasks.spawn-explorer-checker", skip_all)] + pub async fn spawn_explorer_checker( + &mut self, + config: &MonitorConfig, + ) -> Result> { + let explorer_url = config.explorer_url.clone().expect("Explorer URL exists"); + let name = "Explorer".to_string(); + let status_check_interval = config.status_check_interval; + let request_timeout = config.request_timeout; + let (explorer_status_tx, explorer_status_rx) = watch::channel(initial_explorer_status()); + + let id = self + .handles + .spawn(async move { + run_explorer_status_task( + explorer_url, + name, + explorer_status_tx, + status_check_interval, + request_timeout, + ) + .await; + }) + .id(); + self.names.insert(id, "explorer-checker".to_string()); + + println!("Spawned explorer status checker task"); + + Ok(explorer_status_rx) + } + /// Spawn prover status and test tasks for all configured provers. - #[instrument(target = COMPONENT, name = "tasks.spawn-prover-tasks", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_prover_tasks", + skip_all, + level = "info", + ret(level = "debug"), + err + )] pub async fn spawn_prover_tasks( &mut self, config: &MonitorConfig, ) -> Result, watch::Receiver)>> { + debug!(target: COMPONENT, prover_count = config.remote_prover_urls.len(), "Spawning prover tasks"); let mut prover_rxs = Vec::new(); for (i, prover_url) in config.remote_prover_urls.iter().enumerate() { - let name = format!("Prover-{}", i + 1); + let name = format!("Remote Prover ({})", i + 1); let mut remote_prover = ClientBuilder::new(prover_url.clone()) .with_tls() @@ -94,7 +153,8 @@ impl Tasks { .with_timeout(config.request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let current_time = current_unix_timestamp_secs(); @@ -189,11 +249,19 @@ impl Tasks { prover_rxs.push((prover_status_rx, prover_test_rx)); } + debug!(target: COMPONENT, spawned_provers = prover_rxs.len(), "All prover tasks spawned successfully"); Ok(prover_rxs) } /// Spawn the faucet testing task. 
- #[instrument(target = COMPONENT, name = "tasks.spawn-faucet", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_faucet", + skip_all, + level = "info", + ret(level = "debug") + )] pub fn spawn_faucet(&mut self, config: &MonitorConfig) -> Receiver { let current_time = current_unix_timestamp_secs(); @@ -230,8 +298,16 @@ impl Tasks { faucet_rx } - /// Spawn the network transaction service checker tasks (increment and tracking). - #[instrument(target = COMPONENT, name = "tasks.spawn-ntx-service", skip_all)] + /// Spawn the network transaction service checker task. + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_ntx_service", + skip_all, + level = "info", + ret(level = "debug"), + err + )] pub async fn spawn_ntx_service( &mut self, config: &MonitorConfig, @@ -244,10 +320,13 @@ impl Tasks { // Create shared atomic counter for tracking expected counter value let expected_counter_value = Arc::new(AtomicU64::new(0)); + let latency_state = Arc::new(Mutex::new(LatencyState::default())); + let latency_state_for_increment = latency_state.clone(); + let latency_state_for_tracking = latency_state.clone(); // Create initial increment status let initial_increment_status = ServiceStatus { - name: "Counter Increment".to_string(), + name: "Local Transactions".to_string(), status: crate::status::Status::Unknown, last_checked: current_time, error: None, @@ -255,12 +334,13 @@ impl Tasks { success_count: 0, failure_count: 0, last_tx_id: None, + last_latency_blocks: None, }), }; // Create initial tracking status let initial_tracking_status = ServiceStatus { - name: "Counter Tracking".to_string(), + name: "Network Transactions".to_string(), status: crate::status::Status::Unknown, last_checked: current_time, error: None, @@ -281,9 +361,14 @@ impl Tasks { let increment_id = self .handles .spawn(async move { - Box::pin(run_increment_task(config_clone, increment_tx, counter_clone)) - .await - .expect("Counter increment task runs indefinitely"); + Box::pin(run_increment_task( + config_clone, + increment_tx, + counter_clone, + latency_state_for_increment, + )) + .await + .expect("Counter increment task runs indefinitely"); }) .id(); self.names.insert(increment_id, "counter-increment".to_string()); @@ -295,9 +380,14 @@ impl Tasks { let tracking_id = self .handles .spawn(async move { - Box::pin(run_counter_tracking_task(config_clone, tracking_tx, counter_clone)) - .await - .expect("Counter tracking task runs indefinitely"); + Box::pin(run_counter_tracking_task( + config_clone, + tracking_tx, + counter_clone, + latency_state_for_tracking, + )) + .await + .expect("Counter tracking task runs indefinitely"); }) .id(); self.names.insert(tracking_id, "counter-tracking".to_string()); @@ -306,7 +396,14 @@ impl Tasks { } /// Spawn the HTTP frontend server. 
- #[instrument(target = COMPONENT, name = "tasks.spawn-frontend", skip_all)] + #[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.tasks.spawn_http_server", + skip_all, + level = "info", + ret(level = "debug") + )] pub fn spawn_http_server(&mut self, server_state: ServerState, config: &MonitorConfig) { let config = config.clone(); let id = self.handles.spawn(async move { serve(server_state, config).await }).id(); diff --git a/bin/network-monitor/src/remote_prover.rs b/bin/network-monitor/src/remote_prover.rs index c58b41811..4331d8033 100644 --- a/bin/network-monitor/src/remote_prover.rs +++ b/bin/network-monitor/src/remote_prover.rs @@ -8,10 +8,10 @@ use std::time::Duration; use anyhow::Context; use miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverClient}; use miden_node_proto::generated as proto; -use miden_objects::asset::{Asset, FungibleAsset}; -use miden_objects::note::NoteType; -use miden_objects::testing::account_id::{ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER}; -use miden_objects::transaction::TransactionInputs; +use miden_protocol::asset::{Asset, FungibleAsset}; +use miden_protocol::note::NoteType; +use miden_protocol::testing::account_id::{ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER}; +use miden_protocol::transaction::TransactionInputs; use miden_testing::{Auth, MockChainBuilder}; use miden_tx::utils::Serializable; use serde::{Deserialize, Serialize}; @@ -87,7 +87,14 @@ pub struct ProverTestDetails { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. -#[instrument(target = COMPONENT, name = "remote-prover-test-task", skip_all)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.run_remote_prover_test_task", + skip_all, + level = "info", + ret(level = "debug") +)] pub async fn run_remote_prover_test_task( prover_url: Url, name: &str, @@ -103,6 +110,7 @@ pub async fn run_remote_prover_test_task( .with_timeout(request_timeout) .without_metadata_version() .without_metadata_genesis() + .without_otel_context_injection() .connect_lazy::(); let mut interval = tokio::time::interval(test_interval); @@ -153,7 +161,14 @@ pub async fn run_remote_prover_test_task( /// # Returns /// /// A `ServiceStatus` containing the results of the proof test. -#[instrument(target = COMPONENT, name = "test-remote-prover", skip_all, ret(level = "info"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.test_remote_prover", + skip_all, + level = "info", + ret(level = "debug") +)] async fn test_remote_prover( client: &mut miden_node_proto::clients::RemoteProverClient, name: &str, @@ -256,6 +271,15 @@ fn tonic_status_to_json(status: &tonic::Status) -> String { /// This function creates a mock transaction using `MockChainBuilder` similar to what's done /// in the remote prover tests. The transaction is generated once and can be reused for /// multiple proof test calls. 
+#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.generate_mock_transaction", + skip_all, + level = "info", + ret(level = "debug"), + err +)] pub async fn generate_mock_transaction() -> anyhow::Result { let mut mock_chain_builder = MockChainBuilder::new(); @@ -303,6 +327,14 @@ pub async fn generate_mock_transaction() -> anyhow::Result { // GENERATE TEST REQUEST PAYLOAD // ================================================================================================ +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.remote_prover.generate_prover_test_payload", + skip_all, + level = "info", + ret(level = "debug") +)] pub(crate) async fn generate_prover_test_payload() -> proto::remote_prover::ProofRequest { proto::remote_prover::ProofRequest { proof_type: proto::remote_prover::ProofType::Transaction.into(), diff --git a/bin/network-monitor/src/status.rs b/bin/network-monitor/src/status.rs index c70ee024d..11c77593e 100644 --- a/bin/network-monitor/src/status.rs +++ b/bin/network-monitor/src/status.rs @@ -5,15 +5,17 @@ use std::time::Duration; -use miden_node_proto::clients::{Builder as ClientBuilder, RemoteProverProxy, Rpc}; +use miden_node_proto::clients::{ + Builder as ClientBuilder, + RemoteProverProxyStatusClient, + RpcClient, +}; use miden_node_proto::generated as proto; -use miden_node_proto::generated::block_producer::BlockProducerStatus; -use miden_node_proto::generated::rpc::RpcStatus; -use miden_node_proto::generated::rpc_store::StoreStatus; +use miden_node_proto::generated::rpc::{BlockProducerStatus, RpcStatus, StoreStatus}; use serde::{Deserialize, Serialize}; use tokio::sync::watch; use tokio::time::MissedTickBehavior; -use tracing::{info, instrument}; +use tracing::{debug, info, instrument}; use url::Url; use crate::faucet::FaucetTestDetails; @@ -76,6 +78,17 @@ pub struct IncrementDetails { pub failure_count: u64, /// Last transaction ID (if available). pub last_tx_id: Option, + /// Last measured latency in blocks from submission to state update. + pub last_latency_blocks: Option, +} + +/// Details about an in-flight latency measurement. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct PendingLatencyDetails { + /// Block height returned when the transaction was submitted. + pub submit_height: u32, + /// Counter value we expect to see once the transaction is applied. + pub target_value: u64, } /// Details of the counter tracking service. @@ -91,6 +104,20 @@ pub struct CounterTrackingDetails { pub pending_increments: Option, } +/// Details of the explorer service. +#[derive(Debug, Clone, Serialize, Deserialize, Default)] +pub struct ExplorerStatusDetails { + pub block_number: u64, + pub timestamp: u64, + pub number_of_transactions: u64, + pub number_of_nullifiers: u64, + pub number_of_notes: u64, + pub number_of_account_updates: u64, + pub block_commitment: String, + pub chain_commitment: String, + pub proof_commitment: String, +} + /// Details of a service. #[derive(Debug, Clone, Serialize, Deserialize)] pub enum ServiceDetails { @@ -100,6 +127,7 @@ pub enum ServiceDetails { FaucetTest(FaucetTestDetails), NtxIncrement(IncrementDetails), NtxTracking(CounterTrackingDetails), + ExplorerStatus(ExplorerStatusDetails), Error, } @@ -109,6 +137,8 @@ pub enum ServiceDetails { /// service. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct RpcStatusDetails { + /// The URL of the RPC service (used by the frontend for gRPC-Web probing). 
+ pub url: String, pub version: String, pub genesis_commitment: Option, pub store_status: Option, @@ -134,6 +164,21 @@ pub struct StoreStatusDetails { pub struct BlockProducerStatusDetails { pub version: String, pub status: Status, + /// The block producer's current view of the chain tip height. + pub chain_tip: u32, + /// Mempool statistics for this block producer. + pub mempool: MempoolStatusDetails, +} + +/// Details about the block producer's mempool. +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct MempoolStatusDetails { + /// Number of transactions currently in the mempool waiting to be batched. + pub unbatched_transactions: u64, + /// Number of batches currently being proven. + pub proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + pub proven_batches: u64, } /// Details of a remote prover service. @@ -154,7 +199,7 @@ pub struct RemoteProverStatusDetails { /// worker service. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct WorkerStatusDetails { - pub address: String, + pub name: String, pub version: String, pub status: Status, } @@ -186,9 +231,20 @@ impl From for StoreStatusDetails { impl From for BlockProducerStatusDetails { fn from(value: BlockProducerStatus) -> Self { + // We assume all supported nodes expose mempool statistics. + let mempool_stats = value + .mempool_stats + .expect("block producer status must include mempool statistics"); + Self { version: value.version, status: value.status.into(), + chain_tip: value.chain_tip, + mempool: MempoolStatusDetails { + unbatched_transactions: mempool_stats.unbatched_transactions, + proposed_batches: mempool_stats.proposed_batches, + proven_batches: mempool_stats.proven_batches, + }, } } } @@ -199,7 +255,7 @@ impl From for WorkerStatusDetails { proto::remote_prover::WorkerHealthStatus::try_from(value.status).unwrap().into(); Self { - address: value.address, + name: value.name, version: value.version, status, } @@ -224,9 +280,11 @@ impl RemoteProverStatusDetails { } } -impl From for RpcStatusDetails { - fn from(status: RpcStatus) -> Self { +impl RpcStatusDetails { + /// Creates `RpcStatusDetails` from a gRPC `RpcStatus` response and the configured URL. + pub fn from_rpc_status(status: RpcStatus, url: String) -> Self { Self { + url, version: status.version, genesis_commitment: status.genesis_commitment.as_ref().map(|gc| format!("{gc:?}")), store_status: status.store.map(StoreStatusDetails::from), @@ -252,20 +310,29 @@ impl From for RpcStatusDetails { /// # Returns /// /// `Ok(())` if the task completes successfully, or an error if the task fails. 
-#[instrument(target = COMPONENT, name = "rpc-status-task", skip_all)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.run_rpc_status_task", + skip_all, + level = "info", + ret(level = "debug") +)] pub async fn run_rpc_status_task( rpc_url: Url, status_sender: watch::Sender, status_check_interval: Duration, request_timeout: Duration, ) { + let url_str = rpc_url.to_string(); let mut rpc = ClientBuilder::new(rpc_url) .with_tls() .expect("TLS is enabled") .with_timeout(request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let mut interval = tokio::time::interval(status_check_interval); interval.set_missed_tick_behavior(MissedTickBehavior::Skip); @@ -275,7 +342,7 @@ pub async fn run_rpc_status_task( let current_time = current_unix_timestamp_secs(); - let status = check_rpc_status(&mut rpc, current_time).await; + let status = check_rpc_status(&mut rpc, url_str.clone(), current_time).await; // Send the status update; exit if no receivers (shutdown signal) if status_sender.send(status).is_err() { @@ -292,14 +359,23 @@ pub async fn run_rpc_status_task( /// # Arguments /// /// * `rpc` - The RPC client. +/// * `url` - The URL of the RPC service. /// * `current_time` - The current time. /// /// # Returns /// /// A `ServiceStatus` containing the status of the RPC service. -#[instrument(target = COMPONENT, name = "check-status.rpc", skip_all, ret(level = "info"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.check_rpc_status", + skip_all, + level = "info", + ret(level = "debug") +)] pub(crate) async fn check_rpc_status( rpc: &mut miden_node_proto::clients::RpcClient, + url: String, current_time: u64, ) -> ServiceStatus { match rpc.status(()).await { @@ -311,15 +387,18 @@ pub(crate) async fn check_rpc_status( status: Status::Healthy, last_checked: current_time, error: None, - details: ServiceDetails::RpcStatus(status.into()), + details: ServiceDetails::RpcStatus(RpcStatusDetails::from_rpc_status(status, url)), } }, - Err(e) => ServiceStatus { - name: "RPC".to_string(), - status: Status::Unhealthy, - last_checked: current_time, - error: Some(e.to_string()), - details: ServiceDetails::Error, + Err(e) => { + debug!(target: COMPONENT, error = %e, "RPC status check failed"); + ServiceStatus { + name: "RPC".to_string(), + status: Status::Unhealthy, + last_checked: current_time, + error: Some(e.to_string()), + details: ServiceDetails::Error, + } }, } } @@ -343,7 +422,14 @@ pub(crate) async fn check_rpc_status( /// /// `Ok(())` if the monitoring task runs and completes successfully, or an error if there are /// connection issues or failures while checking the remote prover status. 
-#[instrument(target = COMPONENT, name = "remote-prover-status-task", skip_all)] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.run_remote_prover_status_task", + skip_all, + level = "info", + ret(level = "debug") +)] pub async fn run_remote_prover_status_task( prover_url: Url, name: String, @@ -358,7 +444,8 @@ pub async fn run_remote_prover_status_task( .with_timeout(request_timeout) .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .without_otel_context_injection() + .connect_lazy::(); let mut interval = tokio::time::interval(status_check_interval); interval.set_missed_tick_behavior(MissedTickBehavior::Skip); @@ -398,10 +485,17 @@ pub async fn run_remote_prover_status_task( /// # Returns /// /// A `ServiceStatus` containing the status of the remote prover service. -#[instrument(target = COMPONENT, name = "check-status.remote-prover", skip_all, ret(level = "info"))] +#[instrument( + parent = None, + target = COMPONENT, + name = "network_monitor.status.check_remote_prover_status", + skip_all, + level = "info", + ret(level = "debug") +)] pub(crate) async fn check_remote_prover_status( remote_prover: &mut miden_node_proto::clients::RemoteProverProxyStatusClient, - name: String, + display_name: String, url: String, current_time: u64, ) -> ServiceStatus { @@ -422,19 +516,22 @@ pub(crate) async fn check_remote_prover_status( }; ServiceStatus { - name: format!("Remote Prover ({name})"), + name: display_name.clone(), status: overall_health, last_checked: current_time, error: None, details: ServiceDetails::RemoteProverStatus(remote_prover_details), } }, - Err(e) => ServiceStatus { - name: format!("Remote Prover ({name})"), - status: Status::Unhealthy, - last_checked: current_time, - error: Some(e.to_string()), - details: ServiceDetails::Error, + Err(e) => { + debug!(target: COMPONENT, prover_name = %display_name, error = %e, "Remote prover status check failed"); + ServiceStatus { + name: display_name, + status: Status::Unhealthy, + last_checked: current_time, + error: Some(e.to_string()), + details: ServiceDetails::Error, + } }, } } diff --git a/bin/node/.env b/bin/node/.env index 75d0bbbbd..01e699aff 100644 --- a/bin/node/.env +++ b/bin/node/.env @@ -9,6 +9,8 @@ MIDEN_NODE_NTX_PROVER_URL= MIDEN_NODE_STORE_RPC_URL= MIDEN_NODE_STORE_NTX_BUILDER_URL= MIDEN_NODE_STORE_BLOCK_PRODUCER_URL= +MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL= +MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY= MIDEN_NODE_RPC_URL=http://0.0.0.0:57291 MIDEN_NODE_DATA_DIRECTORY=./ MIDEN_NODE_ENABLE_OTEL=true diff --git a/bin/node/Cargo.toml b/bin/node/Cargo.toml index 38db5e109..c7a126c97 100644 --- a/bin/node/Cargo.toml +++ b/bin/node/Cargo.toml @@ -21,6 +21,7 @@ tracing-forest = ["miden-node-block-producer/tracing-forest"] anyhow = { workspace = true } clap = { features = ["env", "string"], workspace = true } fs-err = { workspace = true } +hex = { workspace = true } humantime = { workspace = true } miden-node-block-producer = { workspace = true } miden-node-ntx-builder = { workspace = true } @@ -28,7 +29,7 @@ miden-node-rpc = { workspace = true } miden-node-store = { workspace = true } miden-node-utils = { workspace = true } miden-node-validator = { workspace = true } -miden-objects = { workspace = true } +miden-protocol = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } url = { workspace = true } diff --git a/bin/node/Dockerfile b/bin/node/Dockerfile index a6b5cae55..3becd3ded 100644 --- a/bin/node/Dockerfile +++ 
b/bin/node/Dockerfile @@ -13,8 +13,6 @@ COPY ./crates ./crates COPY ./proto ./proto RUN cargo install --path bin/node --locked -RUN rm -rf data accounts && mkdir data accounts -RUN miden-node bundled bootstrap --data-directory ./data --accounts-directory ./accounts FROM debian:bullseye-slim @@ -26,8 +24,6 @@ RUN apt-get update && \ sqlite3 \ && rm -rf /var/lib/apt/lists/* -COPY --from=builder /app/accounts accounts -COPY --from=builder /app/data data COPY --from=builder /usr/local/cargo/bin/miden-node /usr/local/bin/miden-node LABEL org.opencontainers.image.authors=devops@miden.team \ @@ -47,6 +43,5 @@ LABEL org.opencontainers.image.created=$CREATED \ # Expose RPC port EXPOSE 57291 -# Start the Miden node # Miden node does not spawn sub-processes, so it can be used as the PID1 -CMD miden-node bundled start --rpc.url http://0.0.0.0:57291 --data-directory ./data +CMD miden-node diff --git a/bin/node/src/commands/block_producer.rs b/bin/node/src/commands/block_producer.rs index 6820a3851..d50182d87 100644 --- a/bin/node/src/commands/block_producer.rs +++ b/bin/node/src/commands/block_producer.rs @@ -12,6 +12,7 @@ use crate::commands::{ BlockProducerConfig, DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, + ENV_VALIDATOR_BLOCK_PRODUCER_URL, duration_to_human_readable_string, }; @@ -27,6 +28,10 @@ pub enum BlockProducerCommand { #[arg(long = "store.url", env = ENV_STORE_BLOCK_PRODUCER_URL)] store_url: Url, + /// The validator's service gRPC url. + #[arg(long = "validator.url", env = ENV_VALIDATOR_BLOCK_PRODUCER_URL)] + validator_url: Url, + #[command(flatten)] block_producer: BlockProducerConfig, @@ -55,6 +60,7 @@ impl BlockProducerCommand { let Self::Start { url, store_url, + validator_url, block_producer, enable_otel: _, grpc_timeout, @@ -64,22 +70,23 @@ impl BlockProducerCommand { url.to_socket().context("Failed to extract socket address from store URL")?; // Runtime validation for protocol constraints - if block_producer.max_batches_per_block > miden_objects::MAX_BATCHES_PER_BLOCK { + if block_producer.max_batches_per_block > miden_protocol::MAX_BATCHES_PER_BLOCK { anyhow::bail!( "max-batches-per-block cannot exceed protocol limit of {}", - miden_objects::MAX_BATCHES_PER_BLOCK + miden_protocol::MAX_BATCHES_PER_BLOCK ); } - if block_producer.max_txs_per_batch > miden_objects::MAX_ACCOUNTS_PER_BATCH { + if block_producer.max_txs_per_batch > miden_protocol::MAX_ACCOUNTS_PER_BATCH { anyhow::bail!( "max-txs-per-batch cannot exceed protocol limit of {}", - miden_objects::MAX_ACCOUNTS_PER_BATCH + miden_protocol::MAX_ACCOUNTS_PER_BATCH ); } BlockProducer { block_producer_address, store_url, + validator_url, batch_prover_url: block_producer.batch_prover_url, block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, @@ -88,6 +95,7 @@ impl BlockProducerCommand { max_batches_per_block: block_producer.max_batches_per_block, production_checkpoint: Arc::new(Barrier::new(1)), grpc_timeout, + mempool_tx_capacity: block_producer.mempool_tx_capacity, } .serve() .await @@ -102,6 +110,8 @@ impl BlockProducerCommand { #[cfg(test)] mod tests { + use std::num::NonZeroUsize; + use url::Url; use super::*; @@ -115,13 +125,15 @@ mod tests { let cmd = BlockProducerCommand::Start { url: dummy_url(), store_url: dummy_url(), + validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), max_txs_per_batch: 8, - max_batches_per_block: 
miden_objects::MAX_BATCHES_PER_BLOCK + 1, // Invalid value + max_batches_per_block: miden_protocol::MAX_BATCHES_PER_BLOCK + 1, // Invalid value + mempool_tx_capacity: NonZeroUsize::new(1000).unwrap(), }, enable_otel: false, grpc_timeout: Duration::from_secs(10), @@ -137,15 +149,17 @@ mod tests { let cmd = BlockProducerCommand::Start { url: dummy_url(), store_url: dummy_url(), + validator_url: dummy_url(), block_producer: BlockProducerConfig { batch_prover_url: None, block_prover_url: None, block_interval: std::time::Duration::from_secs(1), batch_interval: std::time::Duration::from_secs(1), - max_txs_per_batch: miden_objects::MAX_ACCOUNTS_PER_BATCH + 1, /* Use protocol - * limit - * (should fail) */ + max_txs_per_batch: miden_protocol::MAX_ACCOUNTS_PER_BATCH + 1, /* Use protocol + * limit + * (should fail) */ max_batches_per_block: 8, + mempool_tx_capacity: NonZeroUsize::new(1000).unwrap(), }, enable_otel: false, grpc_timeout: Duration::from_secs(10), diff --git a/bin/node/src/commands/bundled.rs b/bin/node/src/commands/bundled.rs index 71c6f5ac1..a51c191eb 100644 --- a/bin/node/src/commands/bundled.rs +++ b/bin/node/src/commands/bundled.rs @@ -10,6 +10,9 @@ use miden_node_rpc::Rpc; use miden_node_store::Store; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; +use miden_protocol::block::BlockSigner; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Deserializable; use tokio::net::TcpListener; use tokio::sync::Barrier; use tokio::task::JoinSet; @@ -21,6 +24,8 @@ use crate::commands::{ DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, + ENV_VALIDATOR_INSECURE_SECRET_KEY, + INSECURE_VALIDATOR_KEY_HEX, NtxBuilderConfig, duration_to_human_readable_string, }; @@ -43,7 +48,17 @@ pub enum BundledCommand { accounts_directory: PathBuf, /// Constructs the genesis block from the given toml file. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "FILE")] - genesis_config_file: Option, + genesis_config_file: PathBuf, + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + #[arg( + long = "validator.insecure.secret-key", + env = ENV_VALIDATOR_INSECURE_SECRET_KEY, + value_name = "VALIDATOR_INSECURE_SECRET_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_insecure_secret_key: String, }, /// Runs all three node components in the same process. @@ -82,6 +97,15 @@ pub enum BundledCommand { value_name = "DURATION" )] grpc_timeout: Duration, + + /// Insecure, hex-encoded validator secret key for development and testing purposes. + #[arg( + long = "validator.insecure.secret-key", + env = ENV_VALIDATOR_INSECURE_SECRET_KEY, + value_name = "VALIDATOR_INSECURE_SECRET_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_insecure_secret_key: String, }, } @@ -92,12 +116,14 @@ impl BundledCommand { data_directory, accounts_directory, genesis_config_file, + validator_insecure_secret_key, } => { // Currently the bundled bootstrap is identical to the store's bootstrap. 
crate::commands::store::StoreCommand::Bootstrap { data_directory, accounts_directory, genesis_config_file, + validator_insecure_secret_key, } .handle() .await @@ -110,9 +136,19 @@ impl BundledCommand { ntx_builder, enable_otel: _, grpc_timeout, + validator_insecure_secret_key, } => { - Self::start(rpc_url, data_directory, ntx_builder, block_producer, grpc_timeout) - .await + let secret_key_bytes = hex::decode(validator_insecure_secret_key)?; + let signer = SecretKey::read_from_bytes(&secret_key_bytes)?; + Self::start( + rpc_url, + data_directory, + ntx_builder, + block_producer, + grpc_timeout, + signer, + ) + .await }, } } @@ -124,8 +160,8 @@ impl BundledCommand { ntx_builder: NtxBuilderConfig, block_producer: BlockProducerConfig, grpc_timeout: Duration, + signer: impl BlockSigner + Send + Sync + 'static, ) -> anyhow::Result<()> { - let should_start_ntb = !ntx_builder.disabled; // Start listening on all gRPC urls so that inter-component connections can be created // before each component is fully started up. // @@ -186,8 +222,9 @@ impl BundledCommand { }) .id(); - // A sync point between the ntb and block-producer components. - let checkpoint = if should_start_ntb { + // A sync point between the ntx-builder and block-producer components. + let should_start_ntx_builder = !ntx_builder.disabled; + let checkpoint = if should_start_ntx_builder { Barrier::new(2) } else { Barrier::new(1) @@ -200,10 +237,13 @@ impl BundledCommand { let checkpoint = Arc::clone(&checkpoint); let store_url = Url::parse(&format!("http://{store_block_producer_address}")) .context("Failed to parse URL")?; + let validator_url = Url::parse(&format!("http://{validator_address}")) + .context("Failed to parse URL")?; async move { BlockProducer { block_producer_address, store_url, + validator_url, batch_prover_url: block_producer.batch_prover_url, block_prover_url: block_producer.block_prover_url, batch_interval: block_producer.batch_interval, @@ -212,6 +252,7 @@ impl BundledCommand { max_txs_per_batch: block_producer.max_txs_per_batch, production_checkpoint: checkpoint, grpc_timeout, + mempool_tx_capacity: block_producer.mempool_tx_capacity, } .serve() .await @@ -223,10 +264,14 @@ impl BundledCommand { let validator_id = join_set .spawn({ async move { - Validator { address: validator_address, grpc_timeout } - .serve() - .await - .context("failed while serving validator component") + Validator { + address: validator_address, + grpc_timeout, + signer, + } + .serve() + .await + .context("failed while serving validator component") } }) .id(); @@ -238,10 +283,13 @@ impl BundledCommand { .context("Failed to parse URL")?; let block_producer_url = Url::parse(&format!("http://{block_producer_address}")) .context("Failed to parse URL")?; + let validator_url = Url::parse(&format!("http://{validator_address}")) + .context("Failed to parse URL")?; Rpc { listener: grpc_rpc, store_url, block_producer_url: Some(block_producer_url), + validator_url, grpc_timeout, } .serve() @@ -262,7 +310,7 @@ impl BundledCommand { let store_ntx_builder_url = Url::parse(&format!("http://{store_ntx_builder_address}")) .context("Failed to parse URL")?; - if should_start_ntb { + if should_start_ntx_builder { let id = join_set .spawn(async move { let block_producer_url = @@ -275,7 +323,7 @@ impl BundledCommand { ntx_builder.ticker_interval, checkpoint, ) - .serve_new() + .run() .await .context("failed while serving ntx builder component") }) diff --git a/bin/node/src/commands/mod.rs b/bin/node/src/commands/mod.rs index 90c91ccfc..ecfee995f 100644 --- 
a/bin/node/src/commands/mod.rs +++ b/bin/node/src/commands/mod.rs @@ -1,6 +1,12 @@ +use std::num::NonZeroUsize; use std::time::Duration; -use miden_node_block_producer::{DEFAULT_MAX_BATCHES_PER_BLOCK, DEFAULT_MAX_TXS_PER_BATCH}; +use miden_node_block_producer::{ + DEFAULT_BATCH_INTERVAL, + DEFAULT_BLOCK_INTERVAL, + DEFAULT_MAX_BATCHES_PER_BLOCK, + DEFAULT_MAX_TXS_PER_BATCH, +}; use url::Url; pub mod block_producer; @@ -9,6 +15,10 @@ pub mod rpc; pub mod store; pub mod validator; +/// A predefined, insecure validator key for development purposes. +const INSECURE_VALIDATOR_KEY_HEX: &str = + "0101010101010101010101010101010101010101010101010101010101010101"; + const ENV_BLOCK_PRODUCER_URL: &str = "MIDEN_NODE_BLOCK_PRODUCER_URL"; const ENV_VALIDATOR_URL: &str = "MIDEN_NODE_VALIDATOR_URL"; const ENV_BATCH_PROVER_URL: &str = "MIDEN_NODE_BATCH_PROVER_URL"; @@ -18,14 +28,15 @@ const ENV_RPC_URL: &str = "MIDEN_NODE_RPC_URL"; const ENV_STORE_RPC_URL: &str = "MIDEN_NODE_STORE_RPC_URL"; const ENV_STORE_NTX_BUILDER_URL: &str = "MIDEN_NODE_STORE_NTX_BUILDER_URL"; const ENV_STORE_BLOCK_PRODUCER_URL: &str = "MIDEN_NODE_STORE_BLOCK_PRODUCER_URL"; +const ENV_VALIDATOR_BLOCK_PRODUCER_URL: &str = "MIDEN_NODE_VALIDATOR_BLOCK_PRODUCER_URL"; const ENV_DATA_DIRECTORY: &str = "MIDEN_NODE_DATA_DIRECTORY"; const ENV_ENABLE_OTEL: &str = "MIDEN_NODE_ENABLE_OTEL"; const ENV_GENESIS_CONFIG_FILE: &str = "MIDEN_GENESIS_CONFIG_FILE"; const ENV_MAX_TXS_PER_BATCH: &str = "MIDEN_MAX_TXS_PER_BATCH"; const ENV_MAX_BATCHES_PER_BLOCK: &str = "MIDEN_MAX_BATCHES_PER_BLOCK"; +const ENV_MEMPOOL_TX_CAPACITY: &str = "MIDEN_NODE_MEMPOOL_TX_CAPACITY"; +const ENV_VALIDATOR_INSECURE_SECRET_KEY: &str = "MIDEN_NODE_VALIDATOR_INSECURE_SECRET_KEY"; -const DEFAULT_BLOCK_INTERVAL: Duration = Duration::from_secs(5); -const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(2); const DEFAULT_NTX_TICKER_INTERVAL: Duration = Duration::from_millis(200); const DEFAULT_TIMEOUT: Duration = Duration::from_secs(10); @@ -38,7 +49,7 @@ fn duration_to_human_readable_string(duration: Duration) -> String { #[derive(clap::Args)] pub struct NtxBuilderConfig { /// Disable spawning the network transaction builder. - #[arg(long = "no-ntb", default_value_t = false)] + #[arg(long = "no-ntx-builder", default_value_t = false)] pub disabled: bool, /// The remote transaction prover's gRPC url, used for the ntx builder. If unset, @@ -48,7 +59,7 @@ pub struct NtxBuilderConfig { /// Interval at which to run the network transaction builder's ticker. #[arg( - long = "ntb.interval", + long = "ntx-builder.interval", default_value = &duration_to_human_readable_string(DEFAULT_NTX_TICKER_INTERVAL), value_parser = humantime::parse_duration, value_name = "DURATION" )] @@ -88,10 +99,29 @@ pub struct BlockProducerConfig { pub block_prover_url: Option<Url>, /// The number of transactions per batch. - #[arg(long = "max-txs-per-batch", env = ENV_MAX_TXS_PER_BATCH, value_name = "NUM", default_value_t = DEFAULT_MAX_TXS_PER_BATCH)] + #[arg( + long = "max-txs-per-batch", + env = ENV_MAX_TXS_PER_BATCH, + value_name = "NUM", + default_value_t = DEFAULT_MAX_TXS_PER_BATCH + )] pub max_txs_per_batch: usize, /// Maximum number of batches per block.
- #[arg(long = "max-batches-per-block", env = ENV_MAX_BATCHES_PER_BLOCK, value_name = "NUM", default_value_t = DEFAULT_MAX_BATCHES_PER_BLOCK)] + #[arg( + long = "max-batches-per-block", + env = ENV_MAX_BATCHES_PER_BLOCK, + value_name = "NUM", + default_value_t = DEFAULT_MAX_BATCHES_PER_BLOCK + )] pub max_batches_per_block: usize, + + /// Maximum number of uncommitted transactions allowed in the mempool. + #[arg( + long = "mempool.tx-capacity", + default_value_t = miden_node_block_producer::DEFAULT_MEMPOOL_TX_CAPACITY, + env = ENV_MEMPOOL_TX_CAPACITY, + value_name = "NUM" + )] + mempool_tx_capacity: NonZeroUsize, } diff --git a/bin/node/src/commands/rpc.rs b/bin/node/src/commands/rpc.rs index ed05546b3..643734f37 100644 --- a/bin/node/src/commands/rpc.rs +++ b/bin/node/src/commands/rpc.rs @@ -5,7 +5,7 @@ use miden_node_rpc::Rpc; use miden_node_utils::grpc::UrlExt; use url::Url; -use super::{ENV_BLOCK_PRODUCER_URL, ENV_RPC_URL, ENV_STORE_RPC_URL}; +use super::{ENV_BLOCK_PRODUCER_URL, ENV_RPC_URL, ENV_STORE_RPC_URL, ENV_VALIDATOR_URL}; use crate::commands::{DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, duration_to_human_readable_string}; #[derive(clap::Subcommand)] @@ -25,6 +25,10 @@ pub enum RpcCommand { #[arg(long = "block-producer.url", env = ENV_BLOCK_PRODUCER_URL, value_name = "URL")] block_producer_url: Option<Url>, + /// The validator's gRPC url. + #[arg(long = "validator.url", env = ENV_VALIDATOR_URL, value_name = "URL")] + validator_url: Url, + /// Enables the exporting of traces for OpenTelemetry. /// /// This can be further configured using environment variables as defined in the official @@ -51,6 +55,7 @@ impl RpcCommand { url, store_url, block_producer_url, + validator_url, enable_otel: _, grpc_timeout, } = self; @@ -64,6 +69,7 @@ impl RpcCommand { listener, store_url, block_producer_url, + validator_url, grpc_timeout, } .serve() diff --git a/bin/node/src/commands/store.rs b/bin/node/src/commands/store.rs index 7cc0fb040..2105b1453 100644 --- a/bin/node/src/commands/store.rs +++ b/bin/node/src/commands/store.rs @@ -5,6 +5,8 @@ use anyhow::Context; use miden_node_store::Store; use miden_node_store::genesis::config::{AccountFileWithName, GenesisConfig}; use miden_node_utils::grpc::UrlExt; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Deserializable; use url::Url; use super::{ @@ -17,6 +19,8 @@ use crate::commands::{ DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, ENV_GENESIS_CONFIG_FILE, + ENV_VALIDATOR_INSECURE_SECRET_KEY, + INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, }; @@ -38,7 +42,17 @@ pub enum StoreCommand { accounts_directory: PathBuf, /// Use the given configuration file to construct the genesis state from. #[arg(long, env = ENV_GENESIS_CONFIG_FILE, value_name = "GENESIS_CONFIG")] - genesis_config_file: Option<PathBuf>, + genesis_config_file: PathBuf, + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + #[arg( + long = "validator.insecure.secret-key", + env = ENV_VALIDATOR_INSECURE_SECRET_KEY, + value_name = "VALIDATOR_INSECURE_SECRET_KEY", + default_value = INSECURE_VALIDATOR_KEY_HEX + )] + validator_insecure_secret_key: String, }, /// Starts the store component.
@@ -90,9 +104,13 @@ impl StoreCommand { data_directory, accounts_directory, genesis_config_file, - } => { - Self::bootstrap(&data_directory, &accounts_directory, genesis_config_file.as_ref()) - }, + validator_insecure_secret_key, + } => Self::bootstrap( + &data_directory, + &accounts_directory, + &genesis_config_file, + validator_insecure_secret_key, + ), StoreCommand::Start { rpc_url, ntx_builder_url, @@ -164,24 +182,23 @@ impl StoreCommand { fn bootstrap( data_directory: &Path, accounts_directory: &Path, - maybe_genesis_config: Option<&PathBuf>, + genesis_config: &PathBuf, + validator_insecure_secret_key: String, ) -> anyhow::Result<()> { - let config = maybe_genesis_config - .map(|genesis_config| { - let toml_str = fs_err::read_to_string(genesis_config)?; - let config = GenesisConfig::read_toml(toml_str.as_str()) - .context(format!("Read from file: {}", genesis_config.display()))?; - Ok::<_, anyhow::Error>(config) - }) - .transpose()? - .unwrap_or_default(); - - let (genesis_state, secrets) = config.into_state()?; + // Decode the validator key. + let signer = SecretKey::read_from_bytes(&hex::decode(validator_insecure_secret_key)?)?; + + // Read the toml. + let toml_str = fs_err::read_to_string(genesis_config)?; + let config = GenesisConfig::read_toml(toml_str.as_str()) + .context(format!("Read from file: {}", genesis_config.display()))?; + + let (genesis_state, secrets) = config.into_state(signer)?; // Create directories if they do not already exist. for directory in &[accounts_directory, data_directory] { - if directory.exists() { - let is_empty = directory.read_dir()?.next().is_none(); + if fs_err::exists(directory)? { + let is_empty = fs_err::read_dir(directory)?.next().is_none(); // If the directory exists and is empty, we store the files there if !is_empty { anyhow::bail!(format!("{} exists but it is not empty.", directory.display())); diff --git a/bin/node/src/commands/validator.rs b/bin/node/src/commands/validator.rs index 248331781..f543be301 100644 --- a/bin/node/src/commands/validator.rs +++ b/bin/node/src/commands/validator.rs @@ -3,12 +3,16 @@ use std::time::Duration; use anyhow::Context; use miden_node_utils::grpc::UrlExt; use miden_node_validator::Validator; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Deserializable; use url::Url; use crate::commands::{ DEFAULT_TIMEOUT, ENV_ENABLE_OTEL, + ENV_VALIDATOR_INSECURE_SECRET_KEY, ENV_VALIDATOR_URL, + INSECURE_VALIDATOR_KEY_HEX, duration_to_human_readable_string, }; @@ -35,17 +39,27 @@ pub enum ValidatorCommand { value_name = "DURATION" )] grpc_timeout: Duration, + + /// Insecure, hex-encoded validator secret key for development and testing purposes. + /// + /// If not provided, a predefined key is used. + #[arg(long = "insecure.secret-key", env = ENV_VALIDATOR_INSECURE_SECRET_KEY, value_name = "INSECURE_SECRET_KEY", default_value = INSECURE_VALIDATOR_KEY_HEX)] + insecure_secret_key: String, }, } impl ValidatorCommand { pub async fn handle(self) -> anyhow::Result<()> { - let Self::Start { url, grpc_timeout, .. } = self; + let Self::Start { + url, grpc_timeout, insecure_secret_key, .. 
+ } = self; let address = url.to_socket().context("Failed to extract socket address from validator URL")?; - Validator { address, grpc_timeout } + let signer = SecretKey::read_from_bytes(hex::decode(insecure_secret_key)?.as_ref())?; + + Validator { address, grpc_timeout, signer } .serve() .await .context("failed while serving validator component") diff --git a/bin/remote-prover/Cargo.toml b/bin/remote-prover/Cargo.toml index 0bcc98a2e..85bc355f7 100644 --- a/bin/remote-prover/Cargo.toml +++ b/bin/remote-prover/Cargo.toml @@ -31,8 +31,9 @@ clap = { features = ["env"], workspace = true } http = { workspace = true } humantime = { workspace = true } miden-block-prover = { workspace = true } +miden-node-proto = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { features = ["std"], workspace = true } +miden-protocol = { features = ["std"], workspace = true } miden-tx = { features = ["std"], workspace = true } miden-tx-batch-prover = { features = ["std"], workspace = true } opentelemetry = { version = "0.31" } @@ -41,7 +42,7 @@ pingora-core = { version = "0.6" } pingora-limits = { version = "0.6" } pingora-proxy = { version = "0.6" } prometheus = { version = "0.14" } -prost = { default-features = false, features = ["derive"], version = "0.14" } +prost = { default-features = false, features = ["derive"], workspace = true } reqwest = { version = "0.12" } semver = { version = "1.0" } serde = { features = ["derive"], version = "1.0" } @@ -59,10 +60,10 @@ tracing-opentelemetry = { version = "0.32" } uuid = { features = ["v4"], version = "1.16" } [dev-dependencies] -miden-lib = { features = ["testing"], workspace = true } -miden-objects = { features = ["testing"], workspace = true } -miden-testing = { workspace = true } -miden-tx = { features = ["testing"], workspace = true } +miden-protocol = { features = ["testing"], workspace = true } +miden-standards = { features = ["testing"], workspace = true } +miden-testing = { workspace = true } +miden-tx = { features = ["testing"], workspace = true } [build-dependencies] miden-node-proto-build = { features = ["internal"], workspace = true } diff --git a/bin/remote-prover/src/api/prover.rs b/bin/remote-prover/src/api/prover.rs index 2f4cbf6a7..24a70f731 100644 --- a/bin/remote-prover/src/api/prover.rs +++ b/bin/remote-prover/src/api/prover.rs @@ -1,10 +1,10 @@ use miden_block_prover::LocalBlockProver; +use miden_node_proto::BlockProofRequest; use miden_node_utils::ErrorReport; -use miden_objects::MIN_PROOF_SECURITY_LEVEL; -use miden_objects::batch::ProposedBatch; -use miden_objects::block::ProposedBlock; -use miden_objects::transaction::TransactionInputs; -use miden_objects::utils::Serializable; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::batch::ProposedBatch; +use miden_protocol::transaction::TransactionInputs; +use miden_protocol::utils::Serializable; use miden_tx::LocalTransactionProver; use miden_tx_batch_prover::LocalBatchProver; use serde::{Deserialize, Serialize}; @@ -165,24 +165,25 @@ impl ProverRpcApi { )] pub fn prove_block( &self, - proposed_block: ProposedBlock, + proof_request: BlockProofRequest, request_id: &str, ) -> Result<Response<proto::remote_prover::Proof>, tonic::Status> { let Prover::Block(prover) = &self.prover else { return Err(Status::unimplemented("Block prover is not enabled")); }; + let BlockProofRequest { tx_batches, block_header, block_inputs } = proof_request; - let proven_block = prover + // Record the commitment of the block in the current tracing span.
+ let block_id = block_header.commitment(); + tracing::Span::current().record("block_id", tracing::field::display(&block_id)); + + let block_proof = prover .try_lock() .map_err(|_| Status::resource_exhausted("Server is busy handling another request"))? - .prove(proposed_block) + .prove(tx_batches, block_header, block_inputs) .map_err(internal_error)?; - // Record the commitment of the block in the current tracing span - let block_id = proven_block.commitment(); - tracing::Span::current().record("block_id", tracing::field::display(&block_id)); - - Ok(Response::new(proto::remote_prover::Proof { payload: proven_block.to_bytes() })) + Ok(Response::new(proto::remote_prover::Proof { payload: block_proof.to_bytes() })) } } @@ -225,8 +226,8 @@ impl ProverApi for ProverRpcApi { self.prove_batch(proposed_batch, &request_id) }, proto::remote_prover::ProofType::Block => { - let proposed_block = proof_request.try_into().map_err(invalid_argument)?; - self.prove_block(proposed_block, &request_id) + let proof_request = proof_request.try_into().map_err(invalid_argument)?; + self.prove_block(proof_request, &request_id) }, } } @@ -251,13 +252,13 @@ mod test { use std::time::Duration; use miden_node_utils::cors::cors_for_grpc_web_layer; - use miden_objects::asset::{Asset, FungibleAsset}; - use miden_objects::note::NoteType; - use miden_objects::testing::account_id::{ + use miden_protocol::asset::{Asset, FungibleAsset}; + use miden_protocol::note::NoteType; + use miden_protocol::testing::account_id::{ ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_SENDER, }; - use miden_objects::transaction::ProvenTransaction; + use miden_protocol::transaction::ProvenTransaction; use miden_testing::{Auth, MockChainBuilder}; use miden_tx::utils::Serializable; use tokio::net::TcpListener; diff --git a/bin/remote-prover/src/generated/conversions.rs b/bin/remote-prover/src/generated/conversions.rs index 885d1e06b..e1bdc6406 100644 --- a/bin/remote-prover/src/generated/conversions.rs +++ b/bin/remote-prover/src/generated/conversions.rs @@ -1,9 +1,9 @@ // CONVERSIONS // ================================================================================================ -use miden_objects::batch::ProposedBatch; -use miden_objects::block::ProposedBlock; -use miden_objects::transaction::{ProvenTransaction, TransactionInputs}; +use miden_node_proto::BlockProofRequest; +use miden_protocol::batch::ProposedBatch; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; use miden_tx::utils::{Deserializable, DeserializationError, Serializable}; use crate::api::ProofType; @@ -39,11 +39,11 @@ impl TryFrom<proto::ProofRequest> for ProposedBatch { } } -impl TryFrom<proto::ProofRequest> for ProposedBlock { +impl TryFrom<proto::ProofRequest> for BlockProofRequest { type Error = DeserializationError; fn try_from(request: proto::ProofRequest) -> Result<Self, Self::Error> { - ProposedBlock::read_from_bytes(&request.payload) + BlockProofRequest::read_from_bytes(&request.payload) } } diff --git a/bin/remote-prover/src/generated/remote_prover.rs b/bin/remote-prover/src/generated/remote_prover.rs index 210b69153..b504804c3 100644 --- a/bin/remote-prover/src/generated/remote_prover.rs +++ b/bin/remote-prover/src/generated/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec<u8>, } @@ -21,16 +21,16 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction.
/// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. #[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec<u8>, } /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. #[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. #[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/bin/remote-prover/src/proxy/health_check.rs b/bin/remote-prover/src/proxy/health_check.rs index 51192b774..b583c0982 100644 --- a/bin/remote-prover/src/proxy/health_check.rs +++ b/bin/remote-prover/src/proxy/health_check.rs @@ -49,7 +49,7 @@ impl BackgroundService for LoadBalancerState { if let Err(ref reason) = status_result { error!( err = %reason, - worker.address = worker.address(), + worker.name = worker.name(), "Worker failed health check" ); } diff --git a/bin/remote-prover/src/proxy/mod.rs b/bin/remote-prover/src/proxy/mod.rs index 2e3d53284..81290d73a 100644 --- a/bin/remote-prover/src/proxy/mod.rs +++ b/bin/remote-prover/src/proxy/mod.rs @@ -352,7 +352,7 @@ impl RequestContext { /// Set the worker that will process the request fn set_worker(&mut self, worker: Worker) { - WORKER_REQUEST_COUNT.with_label_values(&[&worker.address()]).inc(); + WORKER_REQUEST_COUNT.with_label_values(&[&worker.name()]).inc(); self.worker = Some(worker); } } @@ -495,7 +495,7 @@ impl ProxyHttp for LoadBalancer { // Check if there is an available worker if let Some(worker) = self.0.pop_available_worker().await { - debug!("Worker {} picked up the request with ID: {}", worker.address(), request_id); + debug!("Worker {} picked up the request with ID: {}", worker.name(), request_id); ctx.set_worker(worker); break; } @@ -508,7 +508,7 @@ impl ProxyHttp for LoadBalancer { // Set SNI let mut http_peer = HttpPeer::new( - ctx.worker.clone().expect("Failed to get worker").address(), + ctx.worker.clone().expect("Failed to get worker").name(), false, String::new(), ); diff --git a/bin/remote-prover/src/proxy/worker.rs b/bin/remote-prover/src/proxy/worker.rs index bf181c613..aa418e8cb 100644 --- a/bin/remote-prover/src/proxy/worker.rs +++ b/bin/remote-prover/src/proxy/worker.rs @@ -140,14 +140,14 @@ impl Worker { /// - `Ok(())` if the client was successfully created /// - `Err(RemoteProverError)` if the client creation failed async fn recreate_status_client(&mut self) -> Result<(), RemoteProverError> { - let address = self.address(); - match create_status_client(&address, self.connection_timeout, self.total_timeout).await { + let name = self.name(); + match create_status_client(&name, self.connection_timeout, self.total_timeout).await { Ok(client) => { self.status_client = Some(client); Ok(()) }, Err(err) => { - error!("Failed to recreate status client for worker {}: {}", address, err); + error!("Failed to recreate status client for worker {}: {}", name, err); Err(err) }, } @@ -170,7 +170,7 @@ impl Worker { if self.status_client.is_none() { match self.recreate_status_client().await { Ok(()) => { - info!("Successfully recreated status client for worker {}", self.address()); + info!("Successfully recreated status client for worker {}", self.name()); }, Err(err) => { return Err(err.as_report_context("failed to recreate status client")); @@ -181,7 +181,7 @@ let
worker_status = match self.status_client.as_mut().unwrap().status(()).await { Ok(response) => response.into_inner(), Err(e) => { - error!("Failed to check worker status ({}): {}", self.address(), e); + error!("Failed to check worker status ({}): {}", self.name(), e); return Err(e.message().to_string()); }, }; @@ -198,7 +198,7 @@ impl Worker { let worker_supported_proof_type = ProofType::try_from(worker_status.supported_proof_type) .inspect_err(|err| { - error!(%err, address=%self.address(), "Failed to convert worker supported proof type"); + error!(%err, name=%self.name(), "Failed to convert worker supported proof type"); })?; if supported_proof_type != worker_supported_proof_type { @@ -271,8 +271,8 @@ impl Worker { self.is_available } - /// Returns the worker address. - pub fn address(&self) -> String { + /// Returns the worker name. + pub fn name(&self) -> String { self.backend.addr.to_string() } @@ -325,7 +325,7 @@ impl Worker { } }, WorkerHealthStatus::Unhealthy { .. } => { - WORKER_UNHEALTHY.with_label_values(&[&self.address()]).inc(); + WORKER_UNHEALTHY.with_label_values(&[&self.name()]).inc(); self.is_available = false; }, } @@ -349,7 +349,7 @@ impl From<&Worker> for ProxyWorkerStatus { fn from(worker: &Worker) -> Self { use miden_remote_prover::generated::remote_prover::WorkerHealthStatus as ProtoWorkerHealthStatus; Self { - address: worker.address(), + name: worker.name(), version: worker.version().to_string(), status: match worker.health_status() { WorkerHealthStatus::Healthy => ProtoWorkerHealthStatus::Healthy, diff --git a/bin/stress-test/Cargo.toml b/bin/stress-test/Cargo.toml index 9c3029a82..b9df84d41 100644 --- a/bin/stress-test/Cargo.toml +++ b/bin/stress-test/Cargo.toml @@ -22,14 +22,14 @@ fs-err = { workspace = true } futures = { workspace = true } miden-air = { features = ["testing"], workspace = true } miden-block-prover = { features = ["testing"], workspace = true } -miden-lib = { workspace = true } miden-node-block-producer = { workspace = true } miden-node-proto = { workspace = true } miden-node-store = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { workspace = true } +miden-protocol = { workspace = true } +miden-standards = { workspace = true } rand = { workspace = true } rayon = { version = "1.10" } tokio = { workspace = true } -tonic = { workspace = true } +tonic = { default-features = true, workspace = true } url = { workspace = true } diff --git a/bin/stress-test/README.md b/bin/stress-test/README.md index dee33fa9d..4d8c283c6 100644 --- a/bin/stress-test/README.md +++ b/bin/stress-test/README.md @@ -23,6 +23,7 @@ The endpoints that you can test are: - `sync_state` - `sync_notes` - `sync_nullifiers` +- `sync_transactions` Most benchmarks accept options to control the number of iterations and concurrency level. The `load_state` endpoint is different - it simply measures the one-time startup cost of loading the state from disk. 
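As a usage sketch to complement the description above, the one-shot `load_state` benchmark can be invoked through the same `benchmark-store` entry point as the other endpoints; the subcommand and flag spellings below are taken from the clap definitions further down in `main.rs`, and the iteration/concurrency options are simply irrelevant for it:

``` bash
# Hypothetical invocation: measures only the one-time cost of loading state from disk.
miden-node-stress-test benchmark-store --data-directory ./data load-state
```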
@@ -153,5 +154,22 @@ P99.9 request latency: 2.289709ms Average nullifiers per response: 21.0348 ``` +- sync-transactions +``` bash +$ miden-node-stress-test benchmark-store --data-directory ./data --iterations 10000 --concurrency 16 sync-transactions --accounts 5 --block-range 100 + +Average request latency: 1.61454ms +P50 request latency: 1.439584ms +P95 request latency: 3.195001ms +P99 request latency: 4.068709ms +P99.9 request latency: 6.888542ms +Average transactions per response: 1.547 +Pagination statistics: + Total runs: 10000 + Runs triggering pagination: 9971 + Pagination rate: 99.71% + Average pages per run: 2.00 +``` + ## License This project is [MIT licensed](../../LICENSE). diff --git a/bin/stress-test/src/main.rs b/bin/stress-test/src/main.rs index 62b5ddc6d..095b04caf 100644 --- a/bin/stress-test/src/main.rs +++ b/bin/stress-test/src/main.rs @@ -3,7 +3,13 @@ use std::path::PathBuf; use clap::{Parser, Subcommand}; use miden_node_utils::logging::OpenTelemetry; use seeding::seed_store; -use store::{bench_sync_notes, bench_sync_nullifiers, bench_sync_state, load_state}; +use store::{ + bench_sync_notes, + bench_sync_nullifiers, + bench_sync_state, + bench_sync_transactions, + load_state, +}; mod seeding; mod store; @@ -58,13 +64,26 @@ pub enum Command { #[derive(Subcommand, Clone, Copy)] pub enum Endpoint { + #[command(name = "sync-nullifiers")] SyncNullifiers { /// Number of prefixes to send in each request. #[arg(short, long, value_name = "PREFIXES", default_value = "10")] prefixes: usize, }, + #[command(name = "sync-state")] SyncState, + #[command(name = "sync-notes")] SyncNotes, + #[command(name = "sync-transactions")] + SyncTransactions { + /// Number of accounts to sync transactions for in each request. + #[arg(short, long, value_name = "ACCOUNTS", default_value = "5")] + accounts: usize, + /// Block range size for each request (number of blocks to query). 
+ #[arg(short, long, value_name = "BLOCK_RANGE", default_value = "100")] + block_range: u32, + }, + #[command(name = "load-state")] LoadState, } @@ -98,6 +117,16 @@ async fn main() { Endpoint::SyncNotes => { bench_sync_notes(data_directory, iterations, concurrency).await; }, + Endpoint::SyncTransactions { accounts, block_range } => { + bench_sync_transactions( + data_directory, + iterations, + concurrency, + accounts, + block_range, + ) + .await; + }, Endpoint::LoadState => { load_state(&data_directory).await; }, diff --git a/bin/stress-test/src/seeding/mod.rs b/bin/stress-test/src/seeding/mod.rs index c35a6dd91..a3a258892 100644 --- a/bin/stress-test/src/seeding/mod.rs +++ b/bin/stress-test/src/seeding/mod.rs @@ -6,28 +6,24 @@ use std::time::{Duration, Instant}; use metrics::SeedingMetrics; use miden_air::ExecutionProof; use miden_block_prover::LocalBlockProver; -use miden_lib::account::auth::AuthRpoFalcon512; -use miden_lib::account::faucets::BasicFungibleFaucet; -use miden_lib::account::wallets::BasicWallet; -use miden_lib::note::create_p2id_note; -use miden_lib::utils::Serializable; use miden_node_block_producer::store::StoreClient; use miden_node_proto::domain::batch::BatchInputs; -use miden_node_proto::generated::rpc_store::rpc_client::RpcClient; +use miden_node_proto::generated::store::rpc_client::RpcClient; use miden_node_store::{DataDirectory, GenesisState, Store}; use miden_node_utils::tracing::grpc::OtelInterceptor; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{ +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ Account, AccountBuilder, AccountDelta, AccountId, + AccountStorage, AccountStorageMode, AccountType, }; -use miden_objects::asset::{Asset, FungibleAsset, TokenSymbol}; -use miden_objects::batch::{BatchAccountUpdate, BatchId, ProvenBatch}; -use miden_objects::block::{ +use miden_protocol::asset::{Asset, FungibleAsset, TokenSymbol}; +use miden_protocol::batch::{BatchAccountUpdate, BatchId, ProvenBatch}; +use miden_protocol::block::{ BlockHeader, BlockInputs, BlockNumber, @@ -35,10 +31,11 @@ use miden_objects::block::{ ProposedBlock, ProvenBlock, }; -use miden_objects::crypto::dsa::rpo_falcon512::{PublicKey, SecretKey}; -use miden_objects::crypto::rand::RpoRandomCoin; -use miden_objects::note::{Note, NoteHeader, NoteId, NoteInclusionProof}; -use miden_objects::transaction::{ +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey as EcdsaSecretKey; +use miden_protocol::crypto::dsa::falcon512_rpo::{PublicKey, SecretKey}; +use miden_protocol::crypto::rand::RpoRandomCoin; +use miden_protocol::note::{Note, NoteHeader, NoteId, NoteInclusionProof}; +use miden_protocol::transaction::{ InputNote, InputNotes, OrderedTransactionHeaders, @@ -47,7 +44,12 @@ use miden_objects::transaction::{ ProvenTransactionBuilder, TransactionHeader, }; -use miden_objects::{AssetError, Felt, ONE, Word}; +use miden_protocol::utils::Serializable; +use miden_protocol::{AssetError, Felt, ONE, Word}; +use miden_standards::account::auth::AuthRpoFalcon512; +use miden_standards::account::faucets::BasicFungibleFaucet; +use miden_standards::account::wallets::BasicWallet; +use miden_standards::note::create_p2id_note; use rand::Rng; use rayon::iter::{IntoParallelIterator, ParallelIterator}; use rayon::prelude::ParallelSlice; @@ -88,7 +90,8 @@ pub async fn seed_store( // generate the faucet account and the genesis state let faucet = create_faucet(); let fee_params = FeeParameters::new(faucet.id(), 0).unwrap(); - let 
genesis_state = GenesisState::new(vec![faucet.clone()], fee_params, 1, 1); + let signer = EcdsaSecretKey::new(); + let genesis_state = GenesisState::new(vec![faucet.clone()], fee_params, 1, 1, signer); Store::bootstrap(genesis_state.clone(), &data_directory).expect("store should bootstrap"); // start the store @@ -245,8 +248,13 @@ async fn apply_block( store_client: &StoreClient, metrics: &mut SeedingMetrics, ) -> ProvenBlock { - let proposed_block = ProposedBlock::new(block_inputs, batches).unwrap(); - let proven_block = LocalBlockProver::new(0).prove_dummy(proposed_block).unwrap(); + let proposed_block = ProposedBlock::new(block_inputs.clone(), batches).unwrap(); + let (header, body) = proposed_block.clone().into_header_and_body().unwrap(); + let block_proof = LocalBlockProver::new(0) + .prove_dummy(proposed_block.batches().clone(), header.clone(), block_inputs) + .unwrap(); + let signature = EcdsaSecretKey::new().sign(header.commitment()); + let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); let block_size: usize = proven_block.to_bytes().len(); let start = Instant::now(); @@ -305,7 +313,7 @@ fn create_note(faucet_id: AccountId, target_id: AccountId, rng: &mut RpoRandomCo faucet_id, target_id, vec![asset], - miden_objects::note::NoteType::Public, + miden_protocol::note::NoteType::Public, Felt::default(), rng, ) @@ -434,10 +442,11 @@ fn create_emit_note_tx( ) -> ProvenTransaction { let initial_account_hash = faucet.commitment(); - let slot = faucet.storage().get_item(2).unwrap(); + let metadata_slot_name = AccountStorage::faucet_sysdata_slot(); + let slot = faucet.storage().get_item(metadata_slot_name).unwrap(); faucet .storage_mut() - .set_item(0, [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into()) + .set_item(metadata_slot_name, [slot[0], slot[1], slot[2], slot[3] + Felt::new(10)].into()) .unwrap(); faucet.increment_nonce(ONE).unwrap(); diff --git a/bin/stress-test/src/store/mod.rs b/bin/stress-test/src/store/mod.rs index d21826521..59e094ba5 100644 --- a/bin/stress-test/src/store/mod.rs +++ b/bin/stress-test/src/store/mod.rs @@ -2,13 +2,15 @@ use std::path::{Path, PathBuf}; use std::time::{Duration, Instant}; use futures::{StreamExt, stream}; -use miden_node_proto::generated::rpc_store::rpc_client::RpcClient; +use miden_node_proto::generated::store::rpc_client::RpcClient; use miden_node_proto::generated::{self as proto}; use miden_node_store::state::State; use miden_node_utils::tracing::grpc::OtelInterceptor; -use miden_objects::account::AccountId; -use miden_objects::note::{NoteDetails, NoteTag}; -use miden_objects::utils::{Deserializable, Serializable}; +use miden_protocol::account::AccountId; +use miden_protocol::note::{NoteDetails, NoteTag}; +use miden_protocol::utils::{Deserializable, Serializable}; +use rand::Rng; +use rand::seq::SliceRandom; use tokio::fs; use tokio::time::sleep; use tonic::service::interceptor::InterceptedService; @@ -87,7 +89,7 @@ pub async fn sync_state( api_client: &mut RpcClient<InterceptedService<Channel, OtelInterceptor>>, account_ids: Vec<AccountId>, block_num: u32, -) -> (Duration, proto::rpc_store::SyncStateResponse) { +) -> (Duration, proto::rpc::SyncStateResponse) { let note_tags = account_ids .iter() .map(|id| u32::from(NoteTag::from_account_id(*id))) .collect::<Vec<_>>(); let account_ids = account_ids .iter() .map(|id| proto::account::AccountId { id: id.to_bytes() }) .collect::<Vec<_>>(); - let sync_request = proto::rpc_store::SyncStateRequest { block_num, note_tags, account_ids }; + let sync_request = proto::rpc::SyncStateRequest { block_num, note_tags, account_ids }; let start =
Instant::now(); let response = api_client.sync_state(sync_request).await.unwrap(); @@ -158,8 +160,8 @@ pub async fn sync_notes( .iter() .map(|id| u32::from(NoteTag::from_account_id(*id))) .collect::<Vec<_>>(); - let sync_request = proto::rpc_store::SyncNotesRequest { - block_range: Some(proto::rpc_store::BlockRange { block_from: 0, block_to: None }), + let sync_request = proto::rpc::SyncNotesRequest { + block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), note_tags, }; @@ -280,9 +282,9 @@ pub async fn bench_sync_nullifiers( async fn sync_nullifiers( api_client: &mut RpcClient<InterceptedService<Channel, OtelInterceptor>>, nullifiers_prefixes: Vec<u32>, -) -> (Duration, proto::rpc_store::SyncNullifiersResponse) { - let sync_request = proto::rpc_store::SyncNullifiersRequest { - block_range: Some(proto::rpc_store::BlockRange { block_from: 0, block_to: None }), +) -> (Duration, proto::rpc::SyncNullifiersResponse) { + let sync_request = proto::rpc::SyncNullifiersRequest { + block_range: Some(proto::rpc::BlockRange { block_from: 0, block_to: None }), nullifiers: nullifiers_prefixes, prefix_len: 16, }; @@ -292,6 +294,193 @@ async fn sync_nullifiers( (start.elapsed(), response.into_inner()) } +// SYNC TRANSACTIONS +// ================================================================================================ + +/// Sends multiple `sync_transactions` requests to the store and prints the performance. +/// +/// Arguments: +/// - `data_directory`: directory that contains the database dump file and the accounts ids dump +/// file. +/// - `iterations`: number of requests to send. +/// - `concurrency`: number of requests to send in parallel. +/// - `accounts_per_request`: number of accounts to sync transactions for in each request. +pub async fn bench_sync_transactions( + data_directory: PathBuf, + iterations: usize, + concurrency: usize, + accounts_per_request: usize, + block_range_size: u32, +) { + // load accounts from the dump file + let accounts_file = data_directory.join(ACCOUNTS_FILENAME); + let accounts = fs::read_to_string(&accounts_file) + .await + .unwrap_or_else(|e| panic!("missing file {}: {e:?}", accounts_file.display())); + let mut account_ids: Vec<AccountId> = accounts + .lines() + .map(|a| AccountId::from_hex(a).expect("invalid account id")) + .collect(); + // Shuffle once so the cycling iterator starts in a random order. + let mut rng = rand::rng(); + account_ids.shuffle(&mut rng); + let mut account_ids = account_ids.into_iter().cycle(); + + let (store_client, _) = start_store(data_directory).await; + + wait_for_store(&store_client).await.unwrap(); + + // Get the latest block number to determine the range + let status = store_client.clone().status(()).await.unwrap().into_inner(); + let chain_tip = status.chain_tip; + + // each request will have `accounts_per_request` account ids and will query a range of blocks + let request = |_| { + let mut client = store_client.clone(); + let account_batch: Vec<AccountId> = + account_ids.by_ref().take(accounts_per_request).collect(); + + // Pick a random window of size `block_range_size` that fits before `chain_tip`.
+ let max_start = chain_tip.saturating_sub(block_range_size); + let start_block = rand::rng().random_range(0..=max_start); + let end_block = start_block.saturating_add(block_range_size).min(chain_tip); + + tokio::spawn(async move { + sync_transactions_paginated(&mut client, account_batch, start_block, end_block).await + }) + }; + + // create a stream of tasks to send sync_transactions requests + let results = stream::iter(0..iterations) + .map(request) + .buffer_unordered(concurrency) + .map(|res| res.unwrap()) + .collect::<Vec<_>>() + .await; + + let timers_accumulator: Vec<Duration> = results.iter().map(|r| r.duration).collect(); + let responses: Vec<proto::rpc::SyncTransactionsResponse> = + results.iter().map(|r| r.response.clone()).collect(); + + print_summary(&timers_accumulator); + + #[allow(clippy::cast_precision_loss)] + let average_transactions_per_response = if responses.is_empty() { + 0.0 + } else { + responses.iter().map(|r| r.transactions.len()).sum::<usize>() as f64 + / responses.len() as f64 + }; + println!("Average transactions per response: {average_transactions_per_response}"); + + // Calculate pagination statistics + let total_runs = results.len(); + let paginated_runs = results.iter().filter(|r| r.pages > 1).count(); + #[allow(clippy::cast_precision_loss)] + let pagination_rate = if total_runs > 0 { + (paginated_runs as f64 / total_runs as f64) * 100.0 + } else { + 0.0 + }; + #[allow(clippy::cast_precision_loss)] + let avg_pages = if total_runs > 0 { + results.iter().map(|r| r.pages as f64).sum::<f64>() / total_runs as f64 + } else { + 0.0 + }; + + println!("Pagination statistics:"); + println!(" Total runs: {total_runs}"); + println!(" Runs triggering pagination: {paginated_runs}"); + println!(" Pagination rate: {pagination_rate:.2}%"); + println!(" Average pages per run: {avg_pages:.2}"); +} + +/// Sends a single `sync_transactions` request to the store and returns a tuple with: +/// - the elapsed time. +/// - the response.
+pub async fn sync_transactions( + api_client: &mut RpcClient<InterceptedService<Channel, OtelInterceptor>>, + account_ids: Vec<AccountId>, + block_from: u32, + block_to: u32, +) -> (Duration, proto::rpc::SyncTransactionsResponse) { + let account_ids = account_ids + .iter() + .map(|id| proto::account::AccountId { id: id.to_bytes() }) + .collect::<Vec<_>>(); + + let sync_request = proto::rpc::SyncTransactionsRequest { + block_range: Some(proto::rpc::BlockRange { block_from, block_to: Some(block_to) }), + account_ids, + }; + + let start = Instant::now(); + let response = api_client.sync_transactions(sync_request).await.unwrap(); + (start.elapsed(), response.into_inner()) +} + +#[derive(Clone)] +struct SyncTransactionsRun { + duration: Duration, + response: proto::rpc::SyncTransactionsResponse, + pages: usize, +} + +async fn sync_transactions_paginated( + api_client: &mut RpcClient<InterceptedService<Channel, OtelInterceptor>>, + account_ids: Vec<AccountId>, + block_from: u32, + block_to: u32, +) -> SyncTransactionsRun { + let mut total_duration = Duration::default(); + let mut aggregated_records = Vec::new(); + let mut next_block_from = block_from; + let mut target_block_to = block_to; + let mut pages = 0usize; + let mut final_pagination_info = None; + + loop { + if next_block_from > target_block_to { + break; + } + + let (elapsed, response) = + sync_transactions(api_client, account_ids.clone(), next_block_from, target_block_to) + .await; + total_duration += elapsed; + pages += 1; + + let info = response.pagination_info.unwrap_or(proto::rpc::PaginationInfo { + chain_tip: target_block_to, + block_num: target_block_to, + }); + + aggregated_records.extend(response.transactions.into_iter()); + let reached_block = info.block_num; + let chain_tip = info.chain_tip; + final_pagination_info = + Some(proto::rpc::PaginationInfo { chain_tip, block_num: reached_block }); + + if reached_block >= chain_tip { + break; + } + + // Request the remaining range up to the reported chain tip + next_block_from = reached_block; + target_block_to = chain_tip; + } + + SyncTransactionsRun { + duration: total_duration, + response: proto::rpc::SyncTransactionsResponse { + pagination_info: final_pagination_info, + transactions: aggregated_records, + }, + pages, + } +} + // LOAD STATE // ================================================================================================ diff --git a/clippy.toml b/clippy.toml index 3523592a2..2a5815cec 100644 --- a/clippy.toml +++ b/clippy.toml @@ -1 +1,32 @@ doc-valid-idents = ["..", "SQLite"] + +disallowed-methods = [ + # Use fs_err functions, so the filename is available in the error message + { path = "std::fs::canonicalize", replacement = "fs_err::canonicalize" }, + { path = "std::fs::copy", replacement = "fs_err::copy" }, + { path = "std::fs::create_dir", replacement = "fs_err::create_dir" }, + { path = "std::fs::create_dir_all", replacement = "fs_err::create_dir_all" }, + { path = "std::fs::exists", replacement = "fs_err::exists" }, + { path = "std::fs::hard_link", replacement = "fs_err::hard_link" }, + { path = "std::fs::metadata", replacement = "fs_err::metadata" }, + { path = "std::fs::read", replacement = "fs_err::read" }, + { path = "std::fs::read_dir", replacement = "fs_err::read_dir" }, + { path = "std::fs::read_link", replacement = "fs_err::read_link" }, + { path = "std::fs::read_to_string", replacement = "fs_err::read_to_string" }, + { path = "std::fs::remove_dir", replacement = "fs_err::remove_dir" }, + { path = "std::fs::remove_dir_all", replacement = "fs_err::remove_dir_all" }, + { path = "std::fs::remove_file", replacement = "fs_err::remove_file" }, + { path =
"std::fs::rename", replacement = "fs_err::rename" }, + { path = "std::fs::set_permissions", replacement = "fs_err::set_permissions" }, + { path = "std::fs::soft_link", replacement = "fs_err::soft_link" }, + { path = "std::fs::symlink_metadata", replacement = "fs_err::symlink_metadata" }, + { path = "std::fs::write", replacement = "fs_err::write" }, + + # Use fs_err::path::PathExt methods, so the filename is available in the error message + { path = "std::path::Path::canonicalize", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::metadata", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::read_dir", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::read_link", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::symlink_metadata", reason = "Use fs_err::path::PathExt methods" }, + { path = "std::path::Path::try_exists", reason = "Use fs_err::path::PathExt methods" }, +] diff --git a/crates/block-producer/Cargo.toml b/crates/block-producer/Cargo.toml index eba7c6a13..e5e5511ad 100644 --- a/crates/block-producer/Cargo.toml +++ b/crates/block-producer/Cargo.toml @@ -23,19 +23,19 @@ anyhow = { workspace = true } futures = { workspace = true } itertools = { workspace = true } miden-block-prover = { workspace = true } -miden-lib = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } -miden-objects = { default-features = true, workspace = true } +miden-protocol = { default-features = true, workspace = true } miden-remote-prover-client = { features = ["batch-prover", "block-prover"], workspace = true } +miden-standards = { workspace = true } miden-tx = { default-features = true, workspace = true } miden-tx-batch-prover = { workspace = true } rand = { version = "0.9" } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } -tonic = { features = ["transport"], workspace = true } +tonic = { default-features = true, features = ["transport"], workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } @@ -43,11 +43,11 @@ url = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } -miden-lib = { features = ["testing"], workspace = true } miden-node-store = { workspace = true } miden-node-test-macro = { workspace = true } miden-node-utils = { features = ["testing"], workspace = true } -miden-objects = { default-features = true, features = ["testing"], workspace = true } +miden-protocol = { default-features = true, features = ["testing"], workspace = true } +miden-standards = { features = ["testing"], workspace = true } miden-tx = { features = ["testing"], workspace = true } pretty_assertions = "1.4" rand_chacha = { default-features = false, version = "0.9" } diff --git a/crates/block-producer/src/batch_builder/mod.rs b/crates/block-producer/src/batch_builder/mod.rs index fb84fd28b..e3cc714c2 100644 --- a/crates/block-producer/src/batch_builder/mod.rs +++ b/crates/block-producer/src/batch_builder/mod.rs @@ -7,8 +7,8 @@ use futures::never::Never; use futures::{FutureExt, TryFutureExt}; use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::MIN_PROOF_SECURITY_LEVEL; -use 
miden_objects::batch::{BatchId, ProposedBatch, ProvenBatch}; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::batch::{BatchId, ProposedBatch, ProvenBatch}; use miden_remote_prover_client::remote_prover::batch_prover::RemoteBatchProver; use miden_tx_batch_prover::LocalBatchProver; use rand::Rng; diff --git a/crates/block-producer/src/block_builder/mod.rs b/crates/block-producer/src/block_builder/mod.rs index 6a5cf53ef..e63bc8184 100644 --- a/crates/block-producer/src/block_builder/mod.rs +++ b/crates/block-producer/src/block_builder/mod.rs @@ -5,11 +5,20 @@ use futures::FutureExt; use futures::never::Never; use miden_block_prover::LocalBlockProver; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::MIN_PROOF_SECURITY_LEVEL; -use miden_objects::batch::ProvenBatch; -use miden_objects::block::{BlockInputs, BlockNumber, ProposedBlock, ProvenBlock}; -use miden_objects::note::NoteHeader; -use miden_objects::transaction::TransactionHeader; +use miden_protocol::MIN_PROOF_SECURITY_LEVEL; +use miden_protocol::batch::{OrderedBatches, ProvenBatch}; +use miden_protocol::block::{ + BlockBody, + BlockHeader, + BlockInputs, + BlockNumber, + BlockProof, + ProposedBlock, + ProvenBlock, +}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::note::NoteHeader; +use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionHeader}; use miden_remote_prover_client::remote_prover::block_prover::RemoteBlockProver; use rand::Rng; use tokio::time::Duration; @@ -19,6 +28,7 @@ use url::Url; use crate::errors::BuildBlockError; use crate::mempool::SharedMempool; use crate::store::StoreClient; +use crate::validator::BlockProducerValidatorClient; use crate::{COMPONENT, TelemetryInjectorExt}; // BLOCK BUILDER @@ -36,6 +46,8 @@ pub struct BlockBuilder { pub store: StoreClient, + pub validator: BlockProducerValidatorClient, + /// The prover used to prove a proposed block into a proven block. pub block_prover: BlockProver, } @@ -46,6 +58,7 @@ impl BlockBuilder { /// If the block prover URL is not set, the block builder will use the local block prover. pub fn new( store: StoreClient, + validator: BlockProducerValidatorClient, block_prover_url: Option<Url>, block_interval: Duration, ) -> Self { @@ -61,6 +74,7 @@ impl BlockBuilder { failure_rate: 0.0, block_prover, store, + validator, } } /// Starts the [`BlockBuilder`], infinitely producing blocks at the configured interval. @@ -112,8 +126,11 @@ impl BlockBuilder { self.get_block_inputs(selected) .inspect_ok(BlockBatchesAndInputs::inject_telemetry) .and_then(|inputs| self.propose_block(inputs)) - .inspect_ok(ProposedBlock::inject_telemetry) - .and_then(|inputs| self.prove_block(inputs)) + .inspect_ok(|(proposed_block, _)| { + ProposedBlock::inject_telemetry(proposed_block); + }) + .and_then(|(proposed_block, inputs)| self.validate_block(proposed_block, inputs)) + .and_then(|(proposed_block, inputs, header, signature, body)| self.prove_block(proposed_block, inputs, header, signature, body)) .inspect_ok(ProvenBlock::inject_telemetry) // Failure must be injected before the final pipeline stage i.e. before commit is called. The system cannot // handle errors after it considers the process complete (which makes sense). @@ -163,9 +180,9 @@ impl BlockBuilder { // Note: .cloned() shouldn't be necessary but not having it produces an odd lifetime // error in BlockProducer::serve. Not sure if there's a better fix.
Error: // implementation of `FnOnce` is not general enough - // closure with signature `fn(&InputNoteCommitment) -> miden_objects::note::NoteId` must - // implement `FnOnce<(&InputNoteCommitment,)>` ...but it actually implements - // `FnOnce<(&InputNoteCommitment,)>` + // closure with signature `fn(&InputNoteCommitment) -> miden_protocol::note::NoteId` + // must implement `FnOnce<(&InputNoteCommitment,)>` ...but it actually + // implements `FnOnce<(&InputNoteCommitment,)>` batch .input_notes() .iter() @@ -197,31 +214,76 @@ impl BlockBuilder { async fn propose_block( &self, batches_inputs: BlockBatchesAndInputs, - ) -> Result<ProposedBlock, BuildBlockError> { + ) -> Result<(ProposedBlock, BlockInputs), BuildBlockError> { let BlockBatchesAndInputs { batches, inputs } = batches_inputs; let batches = batches.into_iter().map(Arc::unwrap_or_clone).collect(); - let proposed_block = - ProposedBlock::new(inputs, batches).map_err(BuildBlockError::ProposeBlockFailed)?; + let proposed_block = ProposedBlock::new(inputs.clone(), batches) + .map_err(BuildBlockError::ProposeBlockFailed)?; + + Ok((proposed_block, inputs)) + } + + #[instrument(target = COMPONENT, name = "block_builder.validate_block", skip_all, err)] + async fn validate_block( + &self, + proposed_block: ProposedBlock, + block_inputs: BlockInputs, + ) -> Result<(OrderedBatches, BlockInputs, BlockHeader, Signature, BlockBody), BuildBlockError> + { + // Concurrently build the block and validate it via the validator. + let build_result = tokio::task::spawn_blocking({ + let proposed_block = proposed_block.clone(); + move || proposed_block.into_header_and_body() + }); + let signature = self + .validator + .sign_block(proposed_block.clone()) + .await + .map_err(|err| BuildBlockError::ValidateBlockFailed(err.into()))?; + let (header, body) = build_result + .await + .map_err(|err| BuildBlockError::other(format!("task join error: {err}")))? + .map_err(BuildBlockError::ProposeBlockFailed)?; + + // Verify the signature against the built block to ensure that + // the validator has provided a valid signature for the relevant block. + if !signature.verify(header.commitment(), header.validator_key()) { + return Err(BuildBlockError::InvalidSignature); + } - Ok(proposed_block) + let (ordered_batches, ..) = proposed_block.into_parts(); + Ok((ordered_batches, block_inputs, header, signature, body)) } #[instrument(target = COMPONENT, name = "block_builder.prove_block", skip_all, err)] async fn prove_block( &self, - proposed_block: ProposedBlock, + ordered_batches: OrderedBatches, + block_inputs: BlockInputs, + header: BlockHeader, + signature: Signature, + body: BlockBody, ) -> Result<ProvenBlock, BuildBlockError> { - let proven_block = self.block_prover.prove(proposed_block).await?; + // Prove block using header and body from validator. + let block_proof = self + .block_prover + .prove(ordered_batches.clone(), header.clone(), block_inputs) + .await?; + self.simulate_proving().await; + // SAFETY: The header and body are assumed valid and consistent with the proof. + let proven_block = ProvenBlock::new_unchecked(header, body, signature, block_proof); if proven_block.proof_security_level() < MIN_PROOF_SECURITY_LEVEL { return Err(BuildBlockError::SecurityLevelTooLow( proven_block.proof_security_level(), MIN_PROOF_SECURITY_LEVEL, )); } - - self.simulate_proving().await; + // TODO(sergerad): Consider removing this validation. Once block proving is implemented, + // this would be replaced with verifying the proof returned from the prover against + // the block header.
+ validate_tx_headers(&proven_block, &ordered_batches.to_transactions())?; Ok(proven_block) } @@ -388,15 +450,55 @@ impl BlockProver { } #[instrument(target = COMPONENT, skip_all, err)] - async fn prove(&self, proposed_block: ProposedBlock) -> Result<ProvenBlock, BuildBlockError> { + async fn prove( + &self, + tx_batches: OrderedBatches, + block_header: BlockHeader, + block_inputs: BlockInputs, + ) -> Result<BlockProof, BuildBlockError> { match self { - Self::Local(prover) => { - prover.prove(proposed_block).map_err(BuildBlockError::ProveBlockFailed) - }, + Self::Local(prover) => prover + .prove(tx_batches, block_header, block_inputs) + .map_err(BuildBlockError::ProveBlockFailed), Self::Remote(prover) => prover - .prove(proposed_block) + .prove(tx_batches, block_header, block_inputs) .await .map_err(BuildBlockError::RemoteProverClientError), } } } + +/// Validates that the proven block's transaction headers are consistent with the transactions +/// passed in the proposed block. +/// +/// This expects that transactions from the proposed block and proven block are in the same +/// order, as defined by [`OrderedTransactionHeaders`]. +fn validate_tx_headers( + proven_block: &ProvenBlock, + proposed_txs: &OrderedTransactionHeaders, +) -> Result<(), BuildBlockError> { + if proposed_txs.as_slice().len() != proven_block.body().transactions().as_slice().len() { + return Err(BuildBlockError::other(format!( + "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", + proven_block.body().transactions().as_slice().len(), + proposed_txs.as_slice().len() + ))); + } + + // Because we checked the length matches we can zip the iterators up. + // We expect the transaction headers to be in the same order. + for (proposed_header, proven_header) in proposed_txs + .as_slice() + .iter() + .zip(proven_block.body().transactions().as_slice()) + { + if proposed_header != proven_header { + return Err(BuildBlockError::other(format!( + "transaction header with id {} does not match header of the transaction in the proposed block", + proposed_header.id() + ))); + } + } + + Ok(()) } diff --git a/crates/block-producer/src/domain/batch.rs b/crates/block-producer/src/domain/batch.rs index 4a36798c7..592a34043 100644 --- a/crates/block-producer/src/domain/batch.rs +++ b/crates/block-producer/src/domain/batch.rs @@ -1,10 +1,10 @@ use std::collections::HashMap; use std::sync::Arc; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::batch::BatchId; -use miden_objects::transaction::TransactionId; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::batch::BatchId; +use miden_protocol::transaction::TransactionId; use crate::domain::transaction::AuthenticatedTransaction; @@ -16,8 +16,8 @@ use crate::domain::transaction::AuthenticatedTransaction; /// /// [Mempool]: crate::mempool::Mempool /// [BatchBuilder]: crate::batch_builder::BatchBuilder -/// [ProposedBatch]: miden_objects::batch::ProposedBatch -/// [ProvenBatch]: miden_objects::batch::ProvenBatch +/// [ProposedBatch]: miden_protocol::batch::ProposedBatch +/// [ProvenBatch]: miden_protocol::batch::ProvenBatch #[derive(Clone, Debug, PartialEq)] pub(crate) struct SelectedBatch { txs: Vec<Arc<AuthenticatedTransaction>>, diff --git a/crates/block-producer/src/domain/transaction.rs b/crates/block-producer/src/domain/transaction.rs index 2b1ef3ecd..0c819d06f 100644 --- a/crates/block-producer/src/domain/transaction.rs +++ b/crates/block-producer/src/domain/transaction.rs @@ -3,11 +3,11 @@ use std::collections::HashSet; use std::sync::Arc; -use
miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::note::Nullifier; -use miden_objects::transaction::{OutputNote, ProvenTransaction, TransactionId, TxAccountUpdate}; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::{OutputNote, ProvenTransaction, TransactionId, TxAccountUpdate}; use crate::errors::VerifyTxError; use crate::store::TransactionInputs; @@ -95,7 +95,7 @@ impl AuthenticatedTransaction { self.inner .output_notes() .iter() - .map(miden_objects::transaction::OutputNote::commitment) + .map(miden_protocol::transaction::OutputNote::commitment) } pub fn output_notes(&self) -> impl Iterator + '_ { @@ -144,7 +144,7 @@ impl AuthenticatedTransaction { /// Short-hand for `Self::new` where the input's are setup to match the transaction's initial /// account state. This covers the account's initial state and nullifiers being set to unspent. pub fn from_inner(inner: ProvenTransaction) -> Self { - use miden_objects::Word; + use miden_protocol::Word; let store_account_state = match inner.account_update().initial_state_commitment() { zero if zero == Word::empty() => None, diff --git a/crates/block-producer/src/errors.rs b/crates/block-producer/src/errors.rs index 9d55617a2..8c0dc5866 100644 --- a/crates/block-producer/src/errors.rs +++ b/crates/block-producer/src/errors.rs @@ -1,14 +1,18 @@ -use miden_block_prover::ProvenBlockError; +use core::error::Error as CoreError; + +use miden_block_prover::BlockProverError; use miden_node_proto::errors::{ConversionError, GrpcError}; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::note::Nullifier; -use miden_objects::transaction::TransactionId; -use miden_objects::{ProposedBatchError, ProposedBlockError, ProvenBatchError, Word}; +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::TransactionId; +use miden_protocol::{ProposedBatchError, ProposedBlockError, ProvenBatchError, Word}; use miden_remote_prover_client::RemoteProverClientError; use thiserror::Error; use tokio::task::JoinError; +use crate::validator::ValidatorError; + // Block-producer errors // ================================================================================================= @@ -115,7 +119,7 @@ pub enum AddTransactionError { }, #[error("transaction deserialization failed")] - TransactionDeserializationFailed(#[source] miden_objects::utils::DeserializationError), + TransactionDeserializationFailed(#[source] miden_protocol::utils::DeserializationError), #[error( "transaction expired at block height {expired_at} but the block height limit was {limit}" @@ -124,6 +128,9 @@ pub enum AddTransactionError { expired_at: BlockNumber, limit: BlockNumber, }, + + #[error("the mempool is at capacity")] + CapacityExceeded, } impl From for AddTransactionError { @@ -160,7 +167,7 @@ impl From for AddTransactionError { #[grpc(internal)] pub enum SubmitProvenBatchError { #[error("batch deserialization failed")] - Deserialization(#[source] miden_objects::utils::DeserializationError), + Deserialization(#[source] miden_protocol::utils::DeserializationError), } // Batch building errors @@ -204,8 +211,12 @@ pub enum BuildBlockError { GetBlockInputsFailed(#[source] StoreError), #[error("failed to propose block")] ProposeBlockFailed(#[source] 
ProposedBlockError),
+    #[error("failed to validate block")]
+    ValidateBlockFailed(#[source] Box<ValidatorError>),
+    #[error("block signature is invalid")]
+    InvalidSignature,
     #[error("failed to prove block")]
-    ProveBlockFailed(#[source] ProvenBlockError),
+    ProveBlockFailed(#[source] BlockProverError),
     /// We sometimes randomly inject errors into the batch building process to test our failure
     /// responses.
     #[error("nothing actually went wrong, failure was injected on purpose")]
@@ -214,6 +225,21 @@
     RemoteProverClientError(#[source] RemoteProverClientError),
     #[error("block proof security level is too low: {0} < {1}")]
     SecurityLevelTooLow(u32, u32),
+    /// Custom error variant for errors not covered by the other variants.
+    #[error("{error_msg}")]
+    Other {
+        error_msg: Box<str>,
+        source: Option<Box<dyn CoreError + Send + Sync>>,
+    },
+}
+
+impl BuildBlockError {
+    /// Creates a custom error using the [`BuildBlockError::Other`] variant from an
+    /// error message.
+    pub fn other(message: impl Into<String>) -> Self {
+        let message: String = message.into();
+        Self::Other { error_msg: message.into(), source: None }
+    }
 }

 // Store errors
diff --git a/crates/block-producer/src/lib.rs b/crates/block-producer/src/lib.rs
index 503e29cc1..36ab9b53d 100644
--- a/crates/block-producer/src/lib.rs
+++ b/crates/block-producer/src/lib.rs
@@ -1,5 +1,6 @@
 #![recursion_limit = "256"]
 use std::num::NonZeroUsize;
+use std::time::Duration;

 #[cfg(test)]
 pub mod test_utils;
@@ -9,6 +10,7 @@ mod block_builder;
 mod domain;
 mod mempool;
 pub mod store;
+mod validator;

 #[cfg(feature = "testing")]
 pub mod errors;
@@ -45,13 +47,34 @@ const SERVER_MEMPOOL_STATE_RETENTION: NonZeroUsize = NonZeroUsize::new(5).unwrap
 /// This rejects transactions which would likely expire before making it into a block.
 const SERVER_MEMPOOL_EXPIRATION_SLACK: u32 = 2;

+/// The interval at which to update the cached mempool statistics.
+const CACHED_MEMPOOL_STATS_UPDATE_INTERVAL: Duration = Duration::from_secs(5);
+
+/// How often a block is created.
+pub const DEFAULT_BLOCK_INTERVAL: Duration = Duration::from_secs(3);
+
+/// How often a batch is created.
+pub const DEFAULT_BATCH_INTERVAL: Duration = Duration::from_secs(1);
+
+/// The default transaction capacity of the mempool.
+///
+/// The value is selected such that all transactions should be processed within approximately one
+/// minute at the default block interval.
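+///
+/// A rough sketch of the arithmetic (the numbers are illustrative placeholders, not the real
+/// values of the `DEFAULT_MAX_*` constants defined elsewhere in this crate):
+///
+/// ```text
+/// capacity = max_batches_per_block * max_txs_per_batch * (60s / block_interval)
+///          = e.g. 16 * 16 * (60 / 3) = 5120 transactions
+/// ```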
+#[allow(clippy::cast_sign_loss, reason = "Both durations are positive")] +pub const DEFAULT_MEMPOOL_TX_CAPACITY: NonZeroUsize = NonZeroUsize::new( + DEFAULT_MAX_BATCHES_PER_BLOCK + * DEFAULT_MAX_TXS_PER_BATCH + * (Duration::from_secs(60).div_duration_f32(DEFAULT_BLOCK_INTERVAL)) as usize, +) +.unwrap(); + const _: () = assert!( - DEFAULT_MAX_BATCHES_PER_BLOCK <= miden_objects::MAX_BATCHES_PER_BLOCK, + DEFAULT_MAX_BATCHES_PER_BLOCK <= miden_protocol::MAX_BATCHES_PER_BLOCK, "Server constraint cannot exceed the protocol's constraint" ); const _: () = assert!( - DEFAULT_MAX_TXS_PER_BATCH <= miden_objects::MAX_ACCOUNTS_PER_BATCH, + DEFAULT_MAX_TXS_PER_BATCH <= miden_protocol::MAX_ACCOUNTS_PER_BATCH, "Server constraint cannot exceed the protocol's constraint" ); diff --git a/crates/block-producer/src/mempool/budget.rs b/crates/block-producer/src/mempool/budget.rs index 05a743284..0a3669ae1 100644 --- a/crates/block-producer/src/mempool/budget.rs +++ b/crates/block-producer/src/mempool/budget.rs @@ -1,5 +1,5 @@ -use miden_objects::batch::ProvenBatch; -use miden_objects::{ +use miden_protocol::batch::ProvenBatch; +use miden_protocol::{ MAX_ACCOUNTS_PER_BATCH, MAX_INPUT_NOTES_PER_BATCH, MAX_OUTPUT_NOTES_PER_BATCH, @@ -63,7 +63,7 @@ impl BatchBudget { // This type assertion reminds us to update the account check if we ever support // multiple account updates per tx. pub(crate) const ACCOUNT_UPDATES_PER_TX: usize = 1; - let _: miden_objects::account::AccountId = tx.account_update().account_id(); + let _: miden_protocol::account::AccountId = tx.account_update().account_id(); let output_notes = tx.output_note_count(); let input_notes = tx.input_note_count(); diff --git a/crates/block-producer/src/mempool/inflight_state/tests.rs b/crates/block-producer/src/mempool/inflight_state/tests.rs index 895f32ce4..e4c72f657 100644 --- a/crates/block-producer/src/mempool/inflight_state/tests.rs +++ b/crates/block-producer/src/mempool/inflight_state/tests.rs @@ -1,6 +1,6 @@ use assert_matches::assert_matches; use miden_node_utils::ErrorReport; -use miden_objects::Word; +use miden_protocol::Word; use super::*; use crate::test_utils::note::{mock_note, mock_output_note}; diff --git a/crates/block-producer/src/mempool/mod.rs b/crates/block-producer/src/mempool/mod.rs index 9e3bcad16..227b7f51b 100644 --- a/crates/block-producer/src/mempool/mod.rs +++ b/crates/block-producer/src/mempool/mod.rs @@ -46,9 +46,9 @@ use std::num::NonZeroUsize; use std::sync::Arc; use miden_node_proto::domain::mempool::MempoolEvent; -use miden_objects::batch::{BatchId, ProvenBatch}; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::transaction::TransactionId; +use miden_protocol::batch::{BatchId, ProvenBatch}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::transaction::TransactionId; use subscription::SubscriptionProvider; use tokio::sync::{Mutex, MutexGuard, mpsc}; use tracing::{instrument, warn}; @@ -58,7 +58,12 @@ use crate::domain::transaction::AuthenticatedTransaction; use crate::errors::{AddTransactionError, VerifyTxError}; use crate::mempool::budget::BudgetStatus; use crate::mempool::nodes::{BlockNode, Node, NodeId, ProposedBatchNode, TransactionNode}; -use crate::{COMPONENT, SERVER_MEMPOOL_EXPIRATION_SLACK, SERVER_MEMPOOL_STATE_RETENTION}; +use crate::{ + COMPONENT, + DEFAULT_MEMPOOL_TX_CAPACITY, + SERVER_MEMPOOL_EXPIRATION_SLACK, + SERVER_MEMPOOL_STATE_RETENTION, +}; mod budget; pub use budget::{BatchBudget, BlockBudget}; @@ -70,6 +75,9 @@ mod subscription; #[cfg(test)] mod 
tests;

+// MEMPOOL CONFIGURATION
+// ================================================================================================
+
 #[derive(Clone)]
 pub struct SharedMempool(Arc<Mutex<Mempool>>);

@@ -100,6 +108,13 @@ pub struct MempoolConfig {
     /// guarantees that the mempool can verify the data against the additional changes so long as
     /// the data was authenticated against one of the retained blocks.
     pub state_retention: NonZeroUsize,
+
+    /// The maximum number of uncommitted transactions allowed in the mempool at once.
+    ///
+    /// The mempool will reject transactions once it is at capacity.
+    ///
+    /// Transactions in batches and uncommitted blocks _do count_ towards this.
+    pub tx_capacity: NonZeroUsize,
 }

 impl Default for MempoolConfig {
@@ -109,10 +124,14 @@
             batch_budget: BatchBudget::default(),
             expiration_slack: SERVER_MEMPOOL_EXPIRATION_SLACK,
             state_retention: SERVER_MEMPOOL_STATE_RETENTION,
+            tx_capacity: DEFAULT_MEMPOOL_TX_CAPACITY,
         }
     }
 }

+// SHARED MEMPOOL
+// ================================================================================================
+
 impl SharedMempool {
     #[instrument(target = COMPONENT, name = "mempool.lock", skip_all)]
     pub async fn lock(&self) -> MutexGuard<'_, Mempool> {
@@ -120,6 +139,9 @@ impl SharedMempool {
     }
 }

+// MEMPOOL
+// ================================================================================================
+
 #[derive(Clone, Debug)]
 pub struct Mempool {
     /// Contains the aggregated state of all transactions, batches and blocks currently inflight in
@@ -143,6 +165,9 @@ impl PartialEq for Mempool {
 }

 impl Mempool {
+    // CONSTRUCTORS
+    // --------------------------------------------------------------------------------------------
+
     /// Creates a new [`SharedMempool`] with the provided configuration.
     pub fn shared(chain_tip: BlockNumber, config: MempoolConfig) -> SharedMempool {
         SharedMempool(Arc::new(Mutex::new(Self::new(chain_tip, config))))
@@ -158,6 +183,16 @@ impl Mempool {
         }
     }

+    /// Returns the current chain tip height as seen by the mempool.
+    ///
+    /// This reflects the latest committed block that the block producer is aware of.
+    pub fn chain_tip(&self) -> BlockNumber {
+        self.chain_tip
+    }
+
+    // TRANSACTION & BATCH LIFECYCLE
+    // --------------------------------------------------------------------------------------------
+
     /// Adds a transaction to the mempool.
     ///
     /// Sends a [`MempoolEvent::TransactionAdded`] event to subscribers.
@@ -174,6 +209,10 @@
         &mut self,
         tx: Arc<AuthenticatedTransaction>,
     ) -> Result<BlockNumber, AddTransactionError> {
+        if self.nodes.uncommitted_tx_count() >= self.config.tx_capacity.get() {
+            return Err(AddTransactionError::CapacityExceeded);
+        }
+
         self.authentication_staleness_check(tx.authentication_height())?;
         self.expiration_check(tx.expires_at())?;
@@ -541,6 +580,9 @@ impl Mempool {
         self.inject_telemetry();
     }

+    // EVENTS & SUBSCRIPTIONS
+    // --------------------------------------------------------------------------------------------
+
     /// Creates a subscription to [`MempoolEvent`]s which will be emitted in the order they occur.
     ///
     /// Only emits events which occurred after the current committed block.
@@ -557,6 +599,27 @@
         self.subscription.subscribe(chain_tip)
     }

+    // STATS & INSPECTION
+    // --------------------------------------------------------------------------------------------
+
+    /// Returns the number of transactions currently waiting to be batched.
+    pub fn unbatched_transactions_count(&self) -> usize {
+        self.nodes.txs.len()
+    }
+
+    /// Returns the number of batches currently being proven.
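+    ///
+    /// Together with [`Self::unbatched_transactions_count`] and [`Self::proven_batches_count`]
+    /// this can be polled for lightweight monitoring. A minimal sketch, assuming a
+    /// [`SharedMempool`] handle:
+    ///
+    /// ```ignore
+    /// let mempool = shared_mempool.lock().await;
+    /// let proposed = mempool.proposed_batches_count();
+    /// let proven = mempool.proven_batches_count();
+    /// ```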
+ pub fn proposed_batches_count(&self) -> usize { + self.nodes.proposed_batches.len() + } + + /// Returns the number of proven batches waiting for block inclusion. + pub fn proven_batches_count(&self) -> usize { + self.nodes.proven_batches.len() + } + + // INTERNAL HELPERS + // -------------------------------------------------------------------------------------------- + /// Adds mempool stats to the current tracing span. /// /// Note that these are only visible in the OpenTelemetry context, as conventional tracing diff --git a/crates/block-producer/src/mempool/nodes.rs b/crates/block-producer/src/mempool/nodes.rs index ff2751ef2..461a836c2 100644 --- a/crates/block-producer/src/mempool/nodes.rs +++ b/crates/block-producer/src/mempool/nodes.rs @@ -1,12 +1,12 @@ use std::collections::{HashMap, VecDeque}; use std::sync::Arc; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::batch::{BatchId, ProvenBatch}; -use miden_objects::block::BlockNumber; -use miden_objects::note::{NoteHeader, Nullifier}; -use miden_objects::transaction::{InputNoteCommitment, TransactionHeader, TransactionId}; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::batch::{BatchId, ProvenBatch}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::{NoteHeader, Nullifier}; +use miden_protocol::transaction::{InputNoteCommitment, TransactionHeader, TransactionId}; use crate::domain::batch::SelectedBatch; use crate::domain::transaction::AuthenticatedTransaction; @@ -345,18 +345,26 @@ impl Nodes { pub(super) fn inject_telemetry(&self, span: &tracing::Span) { use miden_node_utils::tracing::OpenTelemetrySpanExt; + span.set_attribute("mempool.transactions.uncommitted", self.uncommitted_tx_count()); span.set_attribute("mempool.transactions.unbatched", self.txs.len()); span.set_attribute("mempool.batches.proposed", self.proposed_batches.len()); span.set_attribute("mempool.batches.proven", self.proven_batches.len()); } + + pub(super) fn uncommitted_tx_count(&self) -> usize { + self.txs.len() + + self.proposed_batches.values().map(|b| b.0.txs().len()).sum::() + + self.proven_batches.values().map(|b| b.txs.len()).sum::() + + self.proposed_block.as_ref().map(|b| b.1.txs.len()).unwrap_or_default() + } } #[cfg(test)] mod tests { use std::collections::BTreeMap; - use miden_objects::batch::BatchAccountUpdate; - use miden_objects::transaction::{InputNotes, OrderedTransactionHeaders}; + use miden_protocol::batch::BatchAccountUpdate; + use miden_protocol::transaction::{InputNotes, OrderedTransactionHeaders}; use super::*; use crate::test_utils::MockProvenTxBuilder; diff --git a/crates/block-producer/src/mempool/state.rs b/crates/block-producer/src/mempool/state.rs index b4db41657..93c16f6b6 100644 --- a/crates/block-producer/src/mempool/state.rs +++ b/crates/block-producer/src/mempool/state.rs @@ -1,9 +1,9 @@ use std::collections::hash_map::Entry; use std::collections::{HashMap, HashSet}; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::note::Nullifier; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::note::Nullifier; use crate::mempool::nodes::{Node, NodeId}; diff --git a/crates/block-producer/src/mempool/subscription.rs b/crates/block-producer/src/mempool/subscription.rs index 70789bdd8..6bfbf7eaa 100644 --- a/crates/block-producer/src/mempool/subscription.rs +++ b/crates/block-producer/src/mempool/subscription.rs @@ -3,8 +3,8 @@ use std::ops::Mul; use 
miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::domain::note::NetworkNote; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::transaction::{OutputNote, TransactionId}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::transaction::{OutputNote, TransactionId}; use tokio::sync::mpsc; use crate::domain::transaction::AuthenticatedTransaction; @@ -106,7 +106,10 @@ impl SubscriptionProvider { self.inflight_txs.remove(tx); } - Self::send_event(&mut self.subscription, MempoolEvent::BlockCommitted { header, txs }); + Self::send_event( + &mut self.subscription, + MempoolEvent::BlockCommitted { header: Box::new(header), txs }, + ); } pub(super) fn txs_reverted(&mut self, txs: HashSet) { diff --git a/crates/block-producer/src/mempool/tests.rs b/crates/block-producer/src/mempool/tests.rs index 0f41e9660..5cafd0137 100644 --- a/crates/block-producer/src/mempool/tests.rs +++ b/crates/block-producer/src/mempool/tests.rs @@ -1,7 +1,7 @@ use std::sync::Arc; -use miden_objects::Word; -use miden_objects::block::{BlockHeader, BlockNumber}; +use miden_protocol::Word; +use miden_protocol::block::{BlockHeader, BlockNumber}; use pretty_assertions::assert_eq; use serial_test::serial; diff --git a/crates/block-producer/src/mempool/tests/add_transaction.rs b/crates/block-producer/src/mempool/tests/add_transaction.rs index d4ea2d458..1fc611e4e 100644 --- a/crates/block-producer/src/mempool/tests/add_transaction.rs +++ b/crates/block-producer/src/mempool/tests/add_transaction.rs @@ -1,8 +1,8 @@ use std::sync::Arc; use assert_matches::assert_matches; -use miden_objects::Word; -use miden_objects::block::BlockHeader; +use miden_protocol::Word; +use miden_protocol::block::BlockHeader; use crate::domain::transaction::AuthenticatedTransaction; use crate::errors::AddTransactionError; diff --git a/crates/block-producer/src/server/mod.rs b/crates/block-producer/src/server/mod.rs index 39753fe83..901bf32e8 100644 --- a/crates/block-producer/src/server/mod.rs +++ b/crates/block-producer/src/server/mod.rs @@ -1,5 +1,6 @@ use std::collections::HashMap; use std::net::SocketAddr; +use std::num::NonZeroUsize; use std::sync::Arc; use std::time::Duration; @@ -12,12 +13,12 @@ use miden_node_proto_build::block_producer_api_descriptor; use miden_node_utils::formatting::{format_input_notes, format_output_notes}; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; -use miden_objects::batch::ProvenBatch; -use miden_objects::block::BlockNumber; -use miden_objects::transaction::ProvenTransaction; -use miden_objects::utils::serde::Deserializable; +use miden_protocol::batch::ProvenBatch; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::ProvenTransaction; +use miden_protocol::utils::serde::Deserializable; use tokio::net::TcpListener; -use tokio::sync::{Barrier, Mutex}; +use tokio::sync::{Barrier, Mutex, RwLock}; use tokio_stream::wrappers::{ReceiverStream, TcpListenerStream}; use tonic::Status; use tower_http::trace::TraceLayer; @@ -36,7 +37,8 @@ use crate::errors::{ }; use crate::mempool::{BatchBudget, BlockBudget, Mempool, MempoolConfig, SharedMempool}; use crate::store::StoreClient; -use crate::{COMPONENT, SERVER_NUM_BATCH_BUILDERS}; +use crate::validator::BlockProducerValidatorClient; +use crate::{CACHED_MEMPOOL_STATS_UPDATE_INTERVAL, COMPONENT, SERVER_NUM_BATCH_BUILDERS}; /// The block producer server. 
/// @@ -49,6 +51,8 @@ pub struct BlockProducer { pub block_producer_address: SocketAddr, /// The address of the store component. pub store_url: Url, + /// The address of the validator component. + pub validator_url: Url, /// The address of the batch prover component. pub batch_prover_url: Option, /// The address of the block prover component. @@ -70,6 +74,9 @@ pub struct BlockProducer { /// /// If the handler takes longer than this duration, the server cancels the call. pub grpc_timeout: Duration, + + /// The maximum number of inflight transactions allowed in the mempool at once. + pub mempool_tx_capacity: NonZeroUsize, } impl BlockProducer { @@ -81,6 +88,7 @@ impl BlockProducer { pub async fn serve(self) -> anyhow::Result<()> { info!(target: COMPONENT, endpoint=?self.block_producer_address, store=%self.store_url, "Initializing server"); let store = StoreClient::new(self.store_url.clone()); + let validator = BlockProducerValidatorClient::new(self.validator_url.clone()); // Retry fetching the chain tip from the store until it succeeds. let mut retries_counter = 0; @@ -118,7 +126,7 @@ impl BlockProducer { info!(target: COMPONENT, "Server initialized"); let block_builder = - BlockBuilder::new(store.clone(), self.block_prover_url, self.block_interval); + BlockBuilder::new(store.clone(), validator, self.block_prover_url, self.block_interval); let batch_builder = BatchBuilder::new( store.clone(), SERVER_NUM_BATCH_BUILDERS, @@ -131,6 +139,7 @@ impl BlockProducer { ..BatchBudget::default() }, block_budget: BlockBudget { batches: self.max_batches_per_block }, + tx_capacity: self.mempool_tx_capacity, ..Default::default() }; let mempool = Mempool::shared(chain_tip, mempool); @@ -145,10 +154,10 @@ impl BlockProducer { // Launch the gRPC server and wait at the checkpoint for any other components to be in sync. // - // This is used to ensure the ntb can subscribe to the mempool events without playing catch - // up caused by block-production. + // This is used to ensure the ntx-builder can subscribe to the mempool events without + // playing catch up caused by block-production. // - // This is a temporary work-around until the ntb can resync on the fly. + // This is a temporary work-around until the ntx-builder can resync on the fly. let rpc_id = tasks .spawn({ let mempool = mempool.clone(); @@ -209,6 +218,29 @@ impl BlockProducer { } } +/// Mempool statistics that are updated periodically to avoid locking the mempool. +#[derive(Clone, Copy, Default)] +struct MempoolStats { + /// The mempool's current view of the chain tip height. + chain_tip: BlockNumber, + /// Number of transactions currently in the mempool waiting to be batched. + unbatched_transactions: u64, + /// Number of batches currently being proven. + proposed_batches: u64, + /// Number of proven batches waiting for block inclusion. + proven_batches: u64, +} + +impl From for proto::rpc::MempoolStats { + fn from(stats: MempoolStats) -> Self { + proto::rpc::MempoolStats { + unbatched_transactions: stats.unbatched_transactions, + proposed_batches: stats.proposed_batches, + proven_batches: stats.proven_batches, + } + } +} + /// Serves the block producer's RPC [api](api_server::Api). struct BlockProducerRpcServer { /// The mutex effectively rate limits incoming transactions into the mempool by forcing them @@ -220,6 +252,10 @@ struct BlockProducerRpcServer { mempool: Mutex, store: StoreClient, + + /// Cached mempool statistics that are updated periodically to avoid locking the mempool + /// for each status request. 
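+    ///
+    /// Readers take a cheap copy of the cached value instead of locking the mempool itself; a
+    /// minimal sketch of the read path (mirroring the `status` handler below):
+    ///
+    /// ```ignore
+    /// let stats: MempoolStats = *self.cached_mempool_stats.read().await;
+    /// ```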
+ cached_mempool_stats: Arc>, } #[tonic::async_trait] @@ -227,8 +263,7 @@ impl api_server::Api for BlockProducerRpcServer { async fn submit_proven_transaction( &self, request: tonic::Request, - ) -> Result, Status> - { + ) -> Result, Status> { self.submit_proven_transaction(request.into_inner()) .await .map(tonic::Response::new) @@ -239,7 +274,7 @@ impl api_server::Api for BlockProducerRpcServer { async fn submit_proven_batch( &self, request: tonic::Request, - ) -> Result, Status> { + ) -> Result, Status> { self.submit_proven_batch(request.into_inner()) .await .map(tonic::Response::new) @@ -256,10 +291,14 @@ impl api_server::Api for BlockProducerRpcServer { async fn status( &self, _request: tonic::Request<()>, - ) -> Result, Status> { - Ok(tonic::Response::new(proto::block_producer::BlockProducerStatus { + ) -> Result, Status> { + let mempool_stats = *self.cached_mempool_stats.read().await; + + Ok(tonic::Response::new(proto::rpc::BlockProducerStatus { version: env!("CARGO_PKG_VERSION").to_string(), status: "connected".to_string(), + chain_tip: mempool_stats.chain_tip.as_u32(), + mempool_stats: Some(mempool_stats.into()), })) } @@ -308,10 +347,51 @@ impl tokio_stream::Stream for MempoolEventSubscription { impl BlockProducerRpcServer { pub fn new(mempool: SharedMempool, store: StoreClient) -> Self { - Self { mempool: Mutex::new(mempool), store } + Self { + mempool: Mutex::new(mempool), + store, + cached_mempool_stats: Arc::new(RwLock::new(MempoolStats::default())), + } + } + + /// Starts a background task that periodically updates the cached mempool statistics. + /// + /// This prevents the need to lock the mempool for each status request. + async fn spawn_mempool_stats_updater(&self) { + let cached_mempool_stats = Arc::clone(&self.cached_mempool_stats); + let mempool = self.mempool.lock().await.clone(); + + tokio::spawn(async move { + let mut interval = tokio::time::interval(CACHED_MEMPOOL_STATS_UPDATE_INTERVAL); + + loop { + interval.tick().await; + + let (chain_tip, unbatched_transactions, proposed_batches, proven_batches) = { + let mempool = mempool.lock().await; + ( + mempool.chain_tip(), + mempool.unbatched_transactions_count() as u64, + mempool.proposed_batches_count() as u64, + mempool.proven_batches_count() as u64, + ) + }; + + let mut cache = cached_mempool_stats.write().await; + *cache = MempoolStats { + chain_tip, + unbatched_transactions, + proposed_batches, + proven_batches, + }; + } + }); } async fn serve(self, listener: TcpListener, timeout: Duration) -> anyhow::Result<()> { + // Start background task to periodically update cached mempool stats + self.spawn_mempool_stats_updater().await; + let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(block_producer_api_descriptor()) .build_v1() @@ -348,7 +428,7 @@ impl BlockProducerRpcServer { async fn submit_proven_transaction( &self, request: proto::transaction::ProvenTransaction, - ) -> Result { + ) -> Result { debug!(target: COMPONENT, ?request); let tx = ProvenTransaction::read_from_bytes(&request.transaction) @@ -374,11 +454,13 @@ impl BlockProducerRpcServer { // SAFETY: we assume that the rpc component has verified the transaction proof already. 
let tx = AuthenticatedTransaction::new_unchecked(tx, inputs).map(Arc::new)?; - self.mempool.lock().await.lock().await.add_transaction(tx).map(|block_height| { - proto::block_producer::SubmitProvenTransactionResponse { - block_height: block_height.as_u32(), - } - }) + self.mempool + .lock() + .await + .lock() + .await + .add_transaction(tx) + .map(|block_height| proto::blockchain::BlockNumber { block_num: block_height.as_u32() }) } #[instrument( @@ -390,7 +472,7 @@ impl BlockProducerRpcServer { async fn submit_proven_batch( &self, request: proto::transaction::ProvenTransactionBatch, - ) -> Result { + ) -> Result { let _batch = ProvenBatch::read_from_bytes(&request.encoded) .map_err(SubmitProvenBatchError::Deserialization)?; diff --git a/crates/block-producer/src/server/tests.rs b/crates/block-producer/src/server/tests.rs index ad2376613..1e5415925 100644 --- a/crates/block-producer/src/server/tests.rs +++ b/crates/block-producer/src/server/tests.rs @@ -5,7 +5,7 @@ use miden_node_proto::generated::{ self as proto, block_producer::api_client as block_producer_client, }; use miden_node_store::{GenesisState, Store}; -use miden_objects::{ +use miden_protocol::{ Digest, account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}, transaction::ProvenTransactionBuilder, @@ -148,7 +148,7 @@ async fn block_producer_startup_is_robust_to_network_failures() { async fn send_request( mut client: block_producer_client::ApiClient, i: u8, -) -> Result, tonic::Status> +) -> Result, tonic::Status> { let tx = ProvenTransactionBuilder::new( AccountId::dummy( diff --git a/crates/block-producer/src/store/mod.rs b/crates/block-producer/src/store/mod.rs index df2972a8e..a82a60582 100644 --- a/crates/block-producer/src/store/mod.rs +++ b/crates/block-producer/src/store/mod.rs @@ -3,17 +3,17 @@ use std::fmt::{Display, Formatter}; use std::num::NonZeroU32; use itertools::Itertools; -use miden_node_proto::clients::{Builder, StoreBlockProducer, StoreBlockProducerClient}; +use miden_node_proto::clients::{Builder, StoreBlockProducerClient}; use miden_node_proto::domain::batch::BatchInputs; use miden_node_proto::errors::{ConversionError, MissingFieldHelper}; use miden_node_proto::{AccountState, generated as proto}; use miden_node_utils::formatting::format_opt; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::block::{BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; -use miden_objects::note::Nullifier; -use miden_objects::transaction::ProvenTransaction; -use miden_objects::utils::Serializable; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, ProvenBlock}; +use miden_protocol::note::Nullifier; +use miden_protocol::transaction::ProvenTransaction; +use miden_protocol::utils::Serializable; use tracing::{debug, info, instrument}; use url::Url; @@ -65,17 +65,13 @@ impl Display for TransactionInputs { } } -impl TryFrom for TransactionInputs { +impl TryFrom for TransactionInputs { type Error = ConversionError; - fn try_from( - response: proto::block_producer_store::TransactionInputs, - ) -> Result { + fn try_from(response: proto::store::TransactionInputs) -> Result { let AccountState { account_id, account_commitment } = response .account_state - .ok_or(proto::block_producer_store::TransactionInputs::missing_field(stringify!( - account_state - )))? + .ok_or(proto::store::TransactionInputs::missing_field(stringify!(account_state)))? 
.try_into()?; let mut nullifiers = HashMap::new(); @@ -83,7 +79,7 @@ impl TryFrom for TransactionInpu let nullifier = nullifier_record .nullifier .ok_or( - proto::block_producer_store::transaction_inputs::NullifierTransactionInputRecord::missing_field( + proto::store::transaction_inputs::NullifierTransactionInputRecord::missing_field( stringify!(nullifier), ), )? @@ -133,7 +129,8 @@ impl StoreClient { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .with_otel_context_injection() + .connect_lazy::(); Self { client: store } } @@ -145,7 +142,7 @@ impl StoreClient { .client .clone() .get_block_header_by_number(tonic::Request::new( - proto::shared::BlockHeaderByNumberRequest::default(), + proto::rpc::BlockHeaderByNumberRequest::default(), )) .await? .into_inner() @@ -162,7 +159,7 @@ impl StoreClient { &self, proven_tx: &ProvenTransaction, ) -> Result { - let message = proto::block_producer_store::TransactionInputsRequest { + let message = proto::store::TransactionInputsRequest { account_id: Some(proven_tx.account_id().into()), nullifiers: proven_tx.nullifiers().map(Into::into).collect(), unauthenticated_notes: proven_tx @@ -210,7 +207,7 @@ impl StoreClient { unauthenticated_notes: impl Iterator + Send, reference_blocks: impl Iterator + Send, ) -> Result { - let request = tonic::Request::new(proto::block_producer_store::BlockInputsRequest { + let request = tonic::Request::new(proto::store::BlockInputsRequest { account_ids: updated_accounts.map(Into::into).collect(), nullifiers: created_nullifiers.map(proto::primitives::Digest::from).collect(), unauthenticated_notes: unauthenticated_notes @@ -230,7 +227,7 @@ impl StoreClient { block_references: impl Iterator + Send, note_commitments: impl Iterator + Send, ) -> Result { - let request = tonic::Request::new(proto::block_producer_store::BatchInputsRequest { + let request = tonic::Request::new(proto::store::BatchInputsRequest { reference_blocks: block_references.map(|(block_num, _)| block_num.as_u32()).collect(), note_commitments: note_commitments.map(proto::primitives::Digest::from).collect(), }); diff --git a/crates/block-producer/src/test_utils/account.rs b/crates/block-producer/src/test_utils/account.rs index 638fcf9be..0d1e9100b 100644 --- a/crates/block-producer/src/test_utils/account.rs +++ b/crates/block-producer/src/test_utils/account.rs @@ -1,8 +1,8 @@ use std::collections::HashMap; use std::sync::LazyLock; -use miden_objects::account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}; -use miden_objects::{Hasher, Word}; +use miden_protocol::account::{AccountId, AccountIdVersion, AccountStorageMode, AccountType}; +use miden_protocol::{Hasher, Word}; pub static MOCK_ACCOUNTS: LazyLock>> = LazyLock::new(Default::default); diff --git a/crates/block-producer/src/test_utils/batch.rs b/crates/block-producer/src/test_utils/batch.rs index 878b155db..ecbd21586 100644 --- a/crates/block-producer/src/test_utils/batch.rs +++ b/crates/block-producer/src/test_utils/batch.rs @@ -1,9 +1,9 @@ use std::collections::BTreeMap; -use miden_objects::Word; -use miden_objects::batch::{BatchAccountUpdate, BatchId, ProvenBatch}; -use miden_objects::block::BlockNumber; -use miden_objects::transaction::{ +use miden_protocol::Word; +use miden_protocol::batch::{BatchAccountUpdate, BatchId, ProvenBatch}; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::{ InputNotes, OrderedTransactionHeaders, ProvenTransaction, @@ -19,7 +19,7 @@ pub trait TransactionBatchConstructor { /// This builds a 
mocked version of a proven batch for testing purposes which can be useful if /// the batch's details don't need to be correct (e.g. if something else is under test but /// requires a transaction batch). If you need an actual valid [`ProvenBatch`], build a - /// [`ProposedBatch`](miden_objects::batch::ProposedBatch) first and convert (without proving) + /// [`ProposedBatch`](miden_protocol::batch::ProposedBatch) first and convert (without proving) /// or prove it into a [`ProvenBatch`]. fn mocked_from_transactions<'tx>(txs: impl IntoIterator) -> Self; diff --git a/crates/block-producer/src/test_utils/mod.rs b/crates/block-producer/src/test_utils/mod.rs index 0695ceadf..007fb60cb 100644 --- a/crates/block-producer/src/test_utils/mod.rs +++ b/crates/block-producer/src/test_utils/mod.rs @@ -1,8 +1,8 @@ -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::crypto::rand::{FeltRng, RpoRandomCoin}; -use miden_objects::testing::account_id::AccountIdBuilder; -use miden_objects::transaction::TransactionId; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::crypto::rand::{FeltRng, RpoRandomCoin}; +use miden_protocol::testing::account_id::AccountIdBuilder; +use miden_protocol::transaction::TransactionId; mod proven_tx; @@ -34,7 +34,7 @@ impl Random { } pub fn draw_tx_id(&mut self) -> TransactionId { - self.0.draw_word().into() + TransactionId::from_raw(self.0.draw_word()) } pub fn draw_account_id(&mut self) -> AccountId { diff --git a/crates/block-producer/src/test_utils/note.rs b/crates/block-producer/src/test_utils/note.rs index f632453f1..6defeac83 100644 --- a/crates/block-producer/src/test_utils/note.rs +++ b/crates/block-producer/src/test_utils/note.rs @@ -1,6 +1,6 @@ -use miden_lib::testing::note::NoteBuilder; -use miden_objects::note::Note; -use miden_objects::transaction::OutputNote; +use miden_protocol::note::Note; +use miden_protocol::transaction::OutputNote; +use miden_standards::testing::note::NoteBuilder; use rand_chacha::ChaCha20Rng; use rand_chacha::rand_core::SeedableRng; diff --git a/crates/block-producer/src/test_utils/proven_tx.rs b/crates/block-producer/src/test_utils/proven_tx.rs index 3a52fa565..aa6ec310e 100644 --- a/crates/block-producer/src/test_utils/proven_tx.rs +++ b/crates/block-producer/src/test_utils/proven_tx.rs @@ -3,18 +3,18 @@ use std::sync::Arc; use itertools::Itertools; use miden_node_utils::fee::test_fee; -use miden_objects::account::AccountId; -use miden_objects::asset::FungibleAsset; -use miden_objects::block::BlockNumber; -use miden_objects::note::{Note, Nullifier}; -use miden_objects::transaction::{ +use miden_protocol::account::AccountId; +use miden_protocol::asset::FungibleAsset; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::{Note, Nullifier}; +use miden_protocol::transaction::{ InputNote, OutputNote, ProvenTransaction, ProvenTransactionBuilder, }; -use miden_objects::vm::ExecutionProof; -use miden_objects::{Felt, ONE, Word}; +use miden_protocol::vm::ExecutionProof; +use miden_protocol::{Felt, ONE, Word}; use rand::Rng; use super::MockPrivateAccount; @@ -109,7 +109,7 @@ impl MockProvenTxBuilder { .map(|index| { let nullifier = Word::from([ONE, ONE, ONE, Felt::new(index)]); - Nullifier::from(nullifier) + Nullifier::from_raw(nullifier) }) .collect(); diff --git a/crates/block-producer/src/validator/mod.rs b/crates/block-producer/src/validator/mod.rs new file mode 100644 index 000000000..9844e2d9b --- /dev/null +++ b/crates/block-producer/src/validator/mod.rs @@ -0,0 
+1,66 @@
+use miden_node_proto::clients::{Builder, ValidatorClient};
+use miden_node_proto::generated as proto;
+use miden_protocol::block::ProposedBlock;
+use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature;
+use miden_protocol::utils::{Deserializable, DeserializationError, Serializable};
+use thiserror::Error;
+use tracing::{info, instrument};
+use url::Url;
+
+use crate::COMPONENT;
+
+// VALIDATOR ERROR
+// ================================================================================================
+
+#[derive(Debug, Error)]
+pub enum ValidatorError {
+    #[error("gRPC transport error: {0}")]
+    Transport(#[from] tonic::Status),
+    #[error("signature deserialization failed: {0}")]
+    Deserialization(#[from] DeserializationError),
+}
+
+// VALIDATOR CLIENT
+// ================================================================================================
+
+/// Interface to the validator's gRPC API.
+///
+/// Essentially just a thin wrapper around the generated gRPC client which improves type safety.
+#[derive(Clone, Debug)]
+pub struct BlockProducerValidatorClient {
+    client: ValidatorClient,
+}
+
+impl BlockProducerValidatorClient {
+    /// Creates a new validator client with a lazy connection.
+    pub fn new(validator_url: Url) -> Self {
+        info!(target: COMPONENT, validator_endpoint = %validator_url, "Initializing validator client");
+
+        let validator = Builder::new(validator_url)
+            .without_tls()
+            .without_timeout()
+            .without_metadata_version()
+            .without_metadata_genesis()
+            .with_otel_context_injection()
+            .connect_lazy::<ValidatorClient>();
+
+        Self { client: validator }
+    }
+
+    #[instrument(target = COMPONENT, name = "validator.client.validate_block", skip_all, err)]
+    pub async fn sign_block(
+        &self,
+        proposed_block: ProposedBlock,
+    ) -> Result<Signature, ValidatorError> {
+        // Send request and receive response.
+        let message = proto::blockchain::ProposedBlock {
+            proposed_block: proposed_block.to_bytes(),
+        };
+        let request = tonic::Request::new(message);
+        let response = self.client.clone().sign_block(request).await?;
+
+        // Deserialize the signature.
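+        // The validator returns the signature as opaque bytes; a truncated or malformed
+        // payload surfaces as `ValidatorError::Deserialization` below.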
+        let signature = response.into_inner();
+        Signature::read_from_bytes(&signature.signature).map_err(ValidatorError::Deserialization)
+    }
+}
diff --git a/crates/ntx-builder/Cargo.toml b/crates/ntx-builder/Cargo.toml
index 7eefab8e4..06ed8eb3b 100644
--- a/crates/ntx-builder/Cargo.toml
+++ b/crates/ntx-builder/Cargo.toml
@@ -16,19 +16,23 @@ workspace = true
 [dependencies]
 anyhow = { workspace = true }
 futures = { workspace = true }
-lru = { workspace = true }
+indexmap = { workspace = true }
 miden-node-proto = { workspace = true }
 miden-node-utils = { workspace = true }
-miden-objects = { default-features = true, workspace = true }
+miden-protocol = { default-features = true, workspace = true }
 miden-remote-prover-client = { features = ["tx-prover"], workspace = true }
 miden-tx = { default-features = true, workspace = true }
 thiserror = { workspace = true }
 tokio = { features = ["rt-multi-thread"], workspace = true }
 tokio-stream = { workspace = true }
+tokio-util = { version = "0.7" }
 tonic = { workspace = true }
 tracing = { workspace = true }
 url = { workspace = true }

 [dev-dependencies]
 miden-node-test-macro = { path = "../test-macro" }
+miden-node-utils = { features = ["testing"], workspace = true }
+miden-protocol = { default-features = true, features = ["testing"], workspace = true }
+miden-standards = { workspace = true }
 rstest = { workspace = true }
diff --git a/crates/ntx-builder/src/actor/account_state.rs b/crates/ntx-builder/src/actor/account_state.rs
new file mode 100644
index 000000000..cff9334db
--- /dev/null
+++ b/crates/ntx-builder/src/actor/account_state.rs
@@ -0,0 +1,350 @@
+use std::collections::{BTreeMap, BTreeSet, HashSet};
+use std::num::NonZeroUsize;
+
+use miden_node_proto::domain::account::NetworkAccountPrefix;
+use miden_node_proto::domain::mempool::MempoolEvent;
+use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote};
+use miden_node_utils::tracing::OpenTelemetrySpanExt;
+use miden_protocol::account::Account;
+use miden_protocol::account::delta::AccountUpdateDetails;
+use miden_protocol::block::{BlockHeader, BlockNumber};
+use miden_protocol::note::{Note, Nullifier};
+use miden_protocol::transaction::{PartialBlockchain, TransactionId};
+use tracing::instrument;
+
+use super::ActorShutdownReason;
+use super::note_state::{NetworkAccountEffect, NetworkAccountNoteState};
+use crate::COMPONENT;
+use crate::actor::inflight_note::InflightNetworkNote;
+use crate::builder::ChainState;
+use crate::store::{StoreClient, StoreError};
+
+// TRANSACTION CANDIDATE
+// ================================================================================================
+
+/// A candidate network transaction.
+///
+/// Contains the data pertaining to a specific network account which can be used to build a network
+/// transaction.
+#[derive(Clone, Debug)]
+pub struct TransactionCandidate {
+    /// The current inflight state of the account.
+    pub account: Account,
+
+    /// A set of notes addressed to this network account.
+    pub notes: Vec<InflightNetworkNote>,
+
+    /// The latest locally committed block header.
+    ///
+    /// This should be used as the reference block during transaction execution.
+    pub chain_tip_header: BlockHeader,
+
+    /// The chain MMR, which lags behind the tip by one block.
+    pub chain_mmr: PartialBlockchain,
+}
+
+// NETWORK ACCOUNT STATE
+// ================================================================================================
+
+/// The current state of a network account.
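+///
+/// A rough sketch of how an actor drives this state (the method names are the ones defined
+/// below; the surrounding actor loop and event channel are simplified assumptions):
+///
+/// ```ignore
+/// let mut state = NetworkAccountState::load(account, prefix, &store, block_num).await?;
+/// while let Some(event) = events.recv().await {
+///     if let Some(reason) = state.mempool_update(&event) {
+///         break; // e.g. the account's creating transaction was reverted
+///     }
+///     if let Some(candidate) = state.select_candidate(limit, chain_state.clone()) {
+///         // execute `candidate` as a network transaction...
+///     }
+/// }
+/// ```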
+#[derive(Clone)]
+pub struct NetworkAccountState {
+    /// The network account prefix corresponding to the network account this state represents.
+    account_prefix: NetworkAccountPrefix,
+
+    /// Component of this state which contains the committed and inflight account updates as well
+    /// as available and nullified notes.
+    account: NetworkAccountNoteState,
+
+    /// Uncommitted transactions which have some impact on the network state.
+    ///
+    /// This is tracked so we can commit or revert such transaction effects. Transactions _without_
+    /// an impact are ignored.
+    inflight_txs: BTreeMap<TransactionId, TransactionImpact>,
+
+    /// A set of nullifiers which have been registered for the network account.
+    nullifier_idx: HashSet<Nullifier>,
+}
+
+impl NetworkAccountState {
+    /// Maximum number of attempts to execute a network note.
+    const MAX_NOTE_ATTEMPTS: usize = 30;
+
+    /// Loads all available network notes from the store, along with the required account states.
+    #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)]
+    pub async fn load(
+        account: Account,
+        account_prefix: NetworkAccountPrefix,
+        store: &StoreClient,
+        block_num: BlockNumber,
+    ) -> Result<Self, StoreError> {
+        let notes = store.get_unconsumed_network_notes(account_prefix, block_num.as_u32()).await?;
+        let notes = notes
+            .into_iter()
+            .filter_map(|note| {
+                if let NetworkNote::SingleTarget(note) = note {
+                    Some(note)
+                } else {
+                    None
+                }
+            })
+            .collect::<Vec<_>>();
+        let account = NetworkAccountNoteState::new(account, notes);
+
+        let state = Self {
+            account,
+            account_prefix,
+            inflight_txs: BTreeMap::default(),
+            nullifier_idx: HashSet::default(),
+        };
+
+        state.inject_telemetry();
+
+        Ok(state)
+    }
+
+    /// Selects the next candidate network transaction.
+    #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)]
+    pub fn select_candidate(
+        &mut self,
+        limit: NonZeroUsize,
+        chain_state: ChainState,
+    ) -> Option<TransactionCandidate> {
+        // Remove notes that have failed too many times.
+        self.account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS);
+
+        // Skip empty accounts, and prune them.
+        // This is how we keep the number of accounts bounded.
+        if self.account.is_empty() {
+            return None;
+        }
+
+        // Select notes from the account that can be consumed or are ready for a retry.
+        let notes = self
+            .account
+            .available_notes(&chain_state.chain_tip_header.block_num())
+            .take(limit.get())
+            .cloned()
+            .collect::<Vec<_>>();
+
+        // Skip accounts with no available notes.
+        if notes.is_empty() {
+            return None;
+        }
+
+        let (chain_tip_header, chain_mmr) = chain_state.into_parts();
+        TransactionCandidate {
+            account: self.account.latest_account(),
+            notes,
+            chain_tip_header,
+            chain_mmr,
+        }
+        .into()
+    }
+
+    /// Marks notes of a previously selected candidate as failed.
+    ///
+    /// Does not remove the candidate from the in-progress pool.
+    #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)]
+    pub fn notes_failed(&mut self, notes: &[Note], block_num: BlockNumber) {
+        let nullifiers = notes.iter().map(Note::nullifier).collect::<Vec<_>>();
+        self.account.fail_notes(nullifiers.as_slice(), block_num);
+    }
+
+    /// Updates state with the mempool event.
+    #[instrument(target = COMPONENT, name = "ntx.state.mempool_update", skip_all)]
+    pub fn mempool_update(&mut self, update: &MempoolEvent) -> Option<ActorShutdownReason> {
+        let span = tracing::Span::current();
+        span.set_attribute("mempool_event.kind", update.kind());
+
+        match update {
+            MempoolEvent::TransactionAdded {
+                id,
+                nullifiers,
+                network_notes,
+                account_delta,
+            } => {
+                // Filter network notes relevant to this account.
+                let network_notes = filter_by_prefix_and_map_to_single_target(
+                    self.account_prefix,
+                    network_notes.clone(),
+                );
+                self.add_transaction(*id, nullifiers, &network_notes, account_delta.as_ref());
+            },
+            MempoolEvent::TransactionsReverted(txs) => {
+                for tx in txs {
+                    let shutdown_reason = self.revert_transaction(*tx);
+                    if shutdown_reason.is_some() {
+                        return shutdown_reason;
+                    }
+                }
+            },
+            MempoolEvent::BlockCommitted { txs, .. } => {
+                for tx in txs {
+                    self.commit_transaction(*tx);
+                }
+            },
+        }
+        self.inject_telemetry();
+
+        // No shutdown, continue running actor.
+        None
+    }
+
+    /// Handles a [`MempoolEvent::TransactionAdded`] event.
+    fn add_transaction(
+        &mut self,
+        id: TransactionId,
+        nullifiers: &[Nullifier],
+        network_notes: &[SingleTargetNetworkNote],
+        account_delta: Option<&AccountUpdateDetails>,
+    ) {
+        // Skip transactions we already know about.
+        //
+        // This can occur since both the ntx builder and the mempool might inform us of the same
+        // transaction: once when it was submitted to the mempool, and once via the mempool event.
+        if self.inflight_txs.contains_key(&id) {
+            return;
+        }
+
+        let mut tx_impact = TransactionImpact::default();
+        if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) {
+            let account_prefix = update.prefix();
+            if account_prefix == self.account_prefix {
+                match update {
+                    NetworkAccountEffect::Updated(account_delta) => {
+                        self.account.add_delta(&account_delta);
+                    },
+                    NetworkAccountEffect::Created(_) => {},
+                }
+                tx_impact.account_delta = Some(account_prefix);
+            }
+        }
+        for note in network_notes {
+            assert_eq!(
+                note.account_prefix(),
+                self.account_prefix,
+                "transaction note prefix does not match network account actor's prefix"
+            );
+            tx_impact.notes.insert(note.nullifier());
+            self.nullifier_idx.insert(note.nullifier());
+            self.account.add_note(note.clone());
+        }
+        for nullifier in nullifiers {
+            // Ignore nullifiers that aren't network note nullifiers.
+            if !self.nullifier_idx.contains(nullifier) {
+                continue;
+            }
+            tx_impact.nullifiers.insert(*nullifier);
+            // We don't use the entry wrapper here because the account must already exist.
+            let _ = self.account.add_nullifier(*nullifier);
+        }
+
+        if !tx_impact.is_empty() {
+            self.inflight_txs.insert(id, tx_impact);
+        }
+    }
+
+    /// Handles [`MempoolEvent::BlockCommitted`] events.
+    fn commit_transaction(&mut self, tx: TransactionId) {
+        // We only track transactions which have an impact on the network state.
+        let Some(impact) = self.inflight_txs.remove(&tx) else {
+            return;
+        };
+
+        if let Some(prefix) = impact.account_delta {
+            if prefix == self.account_prefix {
+                self.account.commit_delta();
+            }
+        }
+
+        for nullifier in impact.nullifiers {
+            if self.nullifier_idx.remove(&nullifier) {
+                // It's possible for the account to no longer exist if the transaction creating it
+                // was reverted.
+                self.account.commit_nullifier(nullifier);
+            }
+        }
+    }
+
+    /// Handles [`MempoolEvent::TransactionsReverted`] events.
+    fn revert_transaction(&mut self, tx: TransactionId) -> Option<ActorShutdownReason> {
+        // We only track transactions which have an impact on the network state.
+        let Some(impact) = self.inflight_txs.remove(&tx) else {
+            tracing::debug!("transaction {tx} not found in inflight transactions");
+            return None;
+        };
+
+        // Revert account creation.
+        if let Some(account_prefix) = impact.account_delta {
+            // Account creation reverted, actor must stop.
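+            // (Assumption: `revert_delta` returns `true` when the reverted delta is the one
+            // that created the account, matching `ActorShutdownReason::AccountReverted`.)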
+ if account_prefix == self.account_prefix && self.account.revert_delta() { + return Some(ActorShutdownReason::AccountReverted(account_prefix)); + } + } + + // Revert notes. + for note_nullifier in impact.notes { + if self.nullifier_idx.contains(¬e_nullifier) { + self.account.revert_note(note_nullifier); + self.nullifier_idx.remove(¬e_nullifier); + } + } + + // Revert nullifiers. + for nullifier in impact.nullifiers { + if self.nullifier_idx.contains(&nullifier) { + self.account.revert_nullifier(nullifier); + self.nullifier_idx.remove(&nullifier); + } + } + + None + } + + /// Adds stats to the current tracing span. + /// + /// Note that these are only visible in the OpenTelemetry context, as conventional tracing + /// does not track fields added dynamically. + fn inject_telemetry(&self) { + let span = tracing::Span::current(); + + span.set_attribute("ntx.state.transactions", self.inflight_txs.len()); + span.set_attribute("ntx.state.notes.total", self.nullifier_idx.len()); + } +} + +/// The impact a transaction has on the state. +#[derive(Clone, Default)] +struct TransactionImpact { + /// The network account this transaction added an account delta to. + account_delta: Option, + + /// Network notes this transaction created. + notes: BTreeSet, + + /// Network notes this transaction consumed. + nullifiers: BTreeSet, +} + +impl TransactionImpact { + fn is_empty(&self) -> bool { + self.account_delta.is_none() && self.notes.is_empty() && self.nullifiers.is_empty() + } +} + +/// Filters network notes by prefix and maps them to single target network notes. +fn filter_by_prefix_and_map_to_single_target( + account_prefix: NetworkAccountPrefix, + notes: Vec, +) -> Vec { + notes + .into_iter() + .filter_map(|note| match note { + NetworkNote::SingleTarget(note) if note.account_prefix() == account_prefix => { + Some(note) + }, + _ => None, + }) + .collect::>() +} diff --git a/crates/ntx-builder/src/transaction.rs b/crates/ntx-builder/src/actor/execute.rs similarity index 78% rename from crates/ntx-builder/src/transaction.rs rename to crates/ntx-builder/src/actor/execute.rs index 8d5b1909f..83c1d09c9 100644 --- a/crates/ntx-builder/src/transaction.rs +++ b/crates/ntx-builder/src/actor/execute.rs @@ -1,13 +1,18 @@ use std::collections::BTreeSet; -use std::sync::Arc; -use lru::LruCache; +use miden_node_utils::lru_cache::LruCache; use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::account::{Account, AccountId, PartialAccount, StorageMapWitness, StorageSlot}; -use miden_objects::asset::{AssetVaultKey, AssetWitness}; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::note::{Note, NoteScript}; -use miden_objects::transaction::{ +use miden_protocol::account::{ + Account, + AccountId, + PartialAccount, + StorageMapWitness, + StorageSlotContent, +}; +use miden_protocol::asset::{AssetVaultKey, AssetWitness}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::{Note, NoteScript}; +use miden_protocol::transaction::{ AccountInputs, ExecutedTransaction, InputNote, @@ -15,10 +20,11 @@ use miden_objects::transaction::{ PartialBlockchain, ProvenTransaction, TransactionArgs, + TransactionId, TransactionInputs, }; -use miden_objects::vm::FutureMaybeSend; -use miden_objects::{TransactionInputError, Word}; +use miden_protocol::vm::FutureMaybeSend; +use miden_protocol::{TransactionInputError, Word}; use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; use miden_tx::auth::UnreachableAuth; use miden_tx::{ @@ -35,13 +41,12 
@@ use miden_tx::{ TransactionMastStore, TransactionProverError, }; -use tokio::sync::Mutex; use tokio::task::JoinError; use tracing::{Instrument, instrument}; use crate::COMPONENT; +use crate::actor::account_state::TransactionCandidate; use crate::block_producer::BlockProducerClient; -use crate::state::TransactionCandidate; use crate::store::StoreClient; #[derive(Debug, thiserror::Error)] @@ -64,25 +69,43 @@ pub enum NtxError { type NtxResult = Result; -// Context and execution of network transactions +// NETWORK TRANSACTION CONTEXT // ================================================================================================ /// Provides the context for execution [network transaction candidates](TransactionCandidate). #[derive(Clone)] pub struct NtxContext { - pub block_producer: BlockProducerClient, + block_producer: BlockProducerClient, /// The prover to delegate proofs to. /// /// Defaults to local proving if unset. This should be avoided in production as this is /// computationally intensive. - pub prover: Option, + prover: Option, /// The store client for retrieving note scripts. - pub store: StoreClient, + store: StoreClient, + + /// LRU cache for storing retrieved note scripts to avoid repeated store calls. + script_cache: LruCache, } impl NtxContext { + /// Creates a new [`NtxContext`] instance. + pub fn new( + block_producer: BlockProducerClient, + prover: Option, + store: StoreClient, + script_cache: LruCache, + ) -> Self { + Self { + block_producer, + prover, + store, + script_cache, + } + } + /// Executes a transaction end-to-end: filtering, executing, proving, and submitted to the block /// producer. /// @@ -95,8 +118,8 @@ impl NtxContext { /// /// # Returns /// - /// On success, returns the list of [`FailedNote`]s representing notes that were - /// filtered out before execution. + /// On success, returns the [`TransactionId`] of the executed transaction and a list of + /// [`FailedNote`]s representing notes that were filtered out before execution. /// /// # Errors /// @@ -109,7 +132,7 @@ impl NtxContext { pub fn execute_transaction( self, tx: TransactionCandidate, - ) -> impl FutureMaybeSend>> { + ) -> impl FutureMaybeSend)>> { let TransactionCandidate { account, notes, @@ -125,15 +148,21 @@ impl NtxContext { async move { async move { - let data_store = - NtxDataStore::new(account, chain_tip_header, chain_mmr, self.store.clone()); + let data_store = NtxDataStore::new( + account, + chain_tip_header, + chain_mmr, + self.store.clone(), + self.script_cache.clone(), + ); let notes = notes.into_iter().map(Note::from).collect::>(); let (successful, failed) = self.filter_notes(&data_store, notes).await?; let executed = Box::pin(self.execute(&data_store, successful)).await?; let proven = Box::pin(self.prove(executed.into())).await?; + let tx_id = proven.id(); self.submit(proven).await?; - Ok(failed) + Ok((tx_id, failed)) } .in_current_span() .await @@ -237,7 +266,7 @@ impl NtxContext { } } -// Data store implementation for the transaction execution +// NETWORK TRANSACTION DATA STORE // ================================================================================================ /// A [`DataStore`] implementation which provides transaction inputs for a single account and @@ -256,22 +285,17 @@ struct NtxDataStore { /// Store client for retrieving note scripts. store: StoreClient, /// LRU cache for storing retrieved note scripts to avoid repeated store calls. - script_cache: Arc>>, + script_cache: LruCache, } impl NtxDataStore { - /// Default cache size for note scripts. 
- /// - /// Each cached script contains the deserialized `NoteScript` object, so the actual memory usage - /// depends on the complexity of the scripts being cached. - const DEFAULT_SCRIPT_CACHE_SIZE: usize = 1000; - /// Creates a new `NtxDataStore` with default cache size. fn new( account: Account, reference_header: BlockHeader, chain_mmr: PartialBlockchain, store: StoreClient, + script_cache: LruCache, ) -> Self { let mast_store = TransactionMastStore::new(); mast_store.load_account_code(account.code()); @@ -282,10 +306,7 @@ impl NtxDataStore { chain_mmr, mast_store, store, - script_cache: Arc::new(Mutex::new(LruCache::new( - std::num::NonZeroUsize::new(Self::DEFAULT_SCRIPT_CACHE_SIZE) - .expect("default script cache size is non-zero"), - ))), + script_cache, } } } @@ -323,12 +344,12 @@ impl DataStore for NtxDataStore { async move { Err(DataStoreError::AccountNotFound(foreign_account_id)) } } - fn get_vault_asset_witness( + fn get_vault_asset_witnesses( &self, account_id: AccountId, vault_root: Word, - vault_key: AssetVaultKey, - ) -> impl FutureMaybeSend> { + vault_keys: BTreeSet, + ) -> impl FutureMaybeSend, DataStoreError>> { async move { if self.account.id() != account_id { return Err(DataStoreError::AccountNotFound(account_id)); @@ -341,12 +362,14 @@ impl DataStore for NtxDataStore { }); } - AssetWitness::new(self.account.vault().open(vault_key).into()).map_err(|err| { - DataStoreError::Other { - error_msg: "failed to open vault asset tree".into(), - source: Some(Box::new(err)), - } - }) + Result::, _>::from_iter(vault_keys.into_iter().map(|vault_key| { + AssetWitness::new(self.account.vault().open(vault_key).into()).map_err(|err| { + DataStoreError::Other { + error_msg: "failed to open vault asset tree".into(), + source: Some(Box::new(err)), + } + }) + })) } } @@ -363,7 +386,7 @@ impl DataStore for NtxDataStore { let mut map_witness = None; for slot in self.account.storage().slots() { - if let StorageSlot::Map(map) = slot { + if let StorageSlotContent::Map(map) = slot.content() { if map.root() == map_root { map_witness = Some(map.open(&map_key)); } @@ -388,41 +411,27 @@ impl DataStore for NtxDataStore { fn get_note_script( &self, script_root: Word, - ) -> impl FutureMaybeSend> { - let store = self.store.clone(); - let cache = self.script_cache.clone(); - + ) -> impl FutureMaybeSend, DataStoreError>> { async move { // Attempt to retrieve the script from the cache. - if let Some(cached_script) = { - let mut cache_guard = cache.lock().await; - cache_guard.get(&script_root).cloned() - } { - return Ok(cached_script); + if let Some(cached_script) = self.script_cache.get(&script_root).await { + return Ok(Some(cached_script)); } // Retrieve the script from the store. - let maybe_script = store.get_note_script_by_root(script_root).await.map_err(|err| { - DataStoreError::Other { - error_msg: "failed to retrieve note script from store".to_string().into(), - source: Some(err.into()), - } - })?; - // Handle response. - match maybe_script { - Some(script) => { - // Cache the retrieved script. - { - let mut cache_guard = cache.lock().await; - cache_guard.put(script_root, script.clone()); + let maybe_script = + self.store.get_note_script_by_root(script_root).await.map_err(|err| { + DataStoreError::Other { + error_msg: "failed to retrieve note script from store".to_string().into(), + source: Some(err.into()), } - // Return script. - Ok(script) - }, - None => { - // Response did not contain the note script. - Err(DataStoreError::NoteScriptNotFound(script_root)) - }, + })?; + // Handle response. 
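+            // A hit is cached for subsequent lookups; `None` propagates to the caller,
+            // signalling that the store does not know this script root.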
+ if let Some(script) = maybe_script { + self.script_cache.put(script_root, script.clone()).await; + Ok(Some(script)) + } else { + Ok(None) } } } @@ -431,8 +440,8 @@ impl DataStore for NtxDataStore { impl MastForestStore for NtxDataStore { fn get( &self, - procedure_hash: &miden_objects::Word, - ) -> Option> { + procedure_hash: &miden_protocol::Word, + ) -> Option> { self.mast_store.get(procedure_hash) } } diff --git a/crates/ntx-builder/src/actor/inflight_note.rs b/crates/ntx-builder/src/actor/inflight_note.rs new file mode 100644 index 000000000..626b474ac --- /dev/null +++ b/crates/ntx-builder/src/actor/inflight_note.rs @@ -0,0 +1,71 @@ +use miden_node_proto::domain::note::SingleTargetNetworkNote; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Note; + +use crate::actor::has_backoff_passed; + +// INFLIGHT NETWORK NOTE +// ================================================================================================ + +/// An unconsumed network note that may have failed to execute. +/// +/// The block number at which the network note was attempted is approximate and may not +/// reflect the exact block number for which the execution attempt failed. The actual block +/// will likely be soon after the number that is recorded here. +#[derive(Debug, Clone)] +pub struct InflightNetworkNote { + note: SingleTargetNetworkNote, + attempt_count: usize, + last_attempt: Option, +} + +impl InflightNetworkNote { + /// Creates a new inflight network note. + pub fn new(note: SingleTargetNetworkNote) -> Self { + Self { + note, + attempt_count: 0, + last_attempt: None, + } + } + + /// Consumes the inflight network note and returns the inner network note. + pub fn into_inner(self) -> SingleTargetNetworkNote { + self.note + } + + /// Returns a reference to the inner network note. + pub fn to_inner(&self) -> &SingleTargetNetworkNote { + &self.note + } + + /// Returns the number of attempts made to execute the network note. + pub fn attempt_count(&self) -> usize { + self.attempt_count + } + + /// Checks if the network note is available for execution. + /// + /// The note is available if it can be consumed and the backoff period has passed. + pub fn is_available(&self, block_num: BlockNumber) -> bool { + let can_consume = self + .to_inner() + .metadata() + .execution_hint() + .can_be_consumed(block_num) + .unwrap_or(true); + can_consume && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) + } + + /// Registers a failed attempt to execute the network note at the specified block number.
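The rewritten `get_note_script` above is a read-through cache: consult the shared LRU first, fall back to the store, and populate the cache on the way back. A minimal self-contained sketch of that shape, using a hypothetical `SharedLru` built from `tokio::sync::Mutex` and the `lru` crate; the crate's actual `miden_node_utils::lru_cache::LruCache` API may differ:

```rust
use std::future::Future;
use std::num::NonZeroUsize;
use std::sync::Arc;

use tokio::sync::Mutex;

/// Clone-shareable async LRU; a stand-in for the shared script cache.
#[derive(Clone)]
struct SharedLru<K: std::hash::Hash + Eq, V: Clone> {
    inner: Arc<Mutex<lru::LruCache<K, V>>>,
}

impl<K: std::hash::Hash + Eq, V: Clone> SharedLru<K, V> {
    fn new(capacity: NonZeroUsize) -> Self {
        Self { inner: Arc::new(Mutex::new(lru::LruCache::new(capacity))) }
    }

    /// Returns a clone of the cached value, holding the lock only briefly.
    async fn get(&self, key: &K) -> Option<V> {
        self.inner.lock().await.get(key).cloned()
    }

    async fn put(&self, key: K, value: V) {
        self.inner.lock().await.put(key, value);
    }
}

/// Read-through lookup: a cache hit returns immediately, a store hit is
/// cached before returning, and a store miss stays a miss.
async fn get_cached(
    cache: &SharedLru<u64, String>,
    root: u64,
    fetch_from_store: impl Future<Output = Option<String>>,
) -> Option<String> {
    if let Some(hit) = cache.get(&root).await {
        return Some(hit);
    }
    let value = fetch_from_store.await?;
    cache.put(root, value.clone()).await;
    Some(value)
}
```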
+ pub fn fail(&mut self, block_num: BlockNumber) { + self.last_attempt = Some(block_num); + self.attempt_count += 1; + } +} + +impl From for Note { + fn from(value: InflightNetworkNote) -> Self { + value.into_inner().into() + } +} diff --git a/crates/ntx-builder/src/actor/mod.rs b/crates/ntx-builder/src/actor/mod.rs new file mode 100644 index 000000000..60e7df54c --- /dev/null +++ b/crates/ntx-builder/src/actor/mod.rs @@ -0,0 +1,352 @@ +pub mod account_state; +mod execute; +mod inflight_note; +mod note_state; + +use std::sync::Arc; + +use account_state::{NetworkAccountState, TransactionCandidate}; +use execute::NtxError; +use futures::FutureExt; +use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_utils::ErrorReport; +use miden_node_utils::lru_cache::LruCache; +use miden_protocol::Word; +use miden_protocol::account::{Account, AccountDelta}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::NoteScript; +use miden_protocol::transaction::TransactionId; +use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; +use tokio::sync::{AcquireError, RwLock, Semaphore, mpsc}; +use tokio_util::sync::CancellationToken; +use url::Url; + +use crate::block_producer::BlockProducerClient; +use crate::builder::ChainState; +use crate::store::StoreClient; + +// ACTOR SHUTDOWN REASON +// ================================================================================================ + +/// The reason an actor has shut down. +pub enum ActorShutdownReason { + /// Occurs when the transaction that created the actor is reverted. + AccountReverted(NetworkAccountPrefix), + /// Occurs when an account actor detects failure in the messaging channel used by the + /// coordinator. + EventChannelClosed, + /// Occurs when an account actor detects failure in acquiring the rate-limiting semaphore. + SemaphoreFailed(AcquireError), + /// Occurs when an account actor detects its corresponding cancellation token has been triggered + /// by the coordinator. Cancellation tokens are triggered by the coordinator to initiate + /// graceful shutdown of actors. + Cancelled(NetworkAccountPrefix), +} + +// ACCOUNT ACTOR CONFIG +// ================================================================================================ + +/// Contains miscellaneous resources that are required by all account actors. +#[derive(Clone)] +pub struct AccountActorContext { + /// Client for interacting with the store in order to load account state. + pub store: StoreClient, + /// Address of the block producer gRPC server. + pub block_producer_url: Url, + /// Address of the remote prover. If `None`, transactions will be proven locally, which is + /// undesirable due to the performance impact. + pub tx_prover_url: Option, + /// The latest chain state that all account actors can rely on. A single chain state is shared + /// among all actors. + pub chain_state: Arc>, + /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. + /// This cache is shared across all account actors to maximize cache efficiency. + pub script_cache: LruCache, } + +// ACCOUNT ORIGIN +// ================================================================================================ + +/// The origin of the account which the actor will use to initialize the account state. +#[derive(Debug)] +pub enum AccountOrigin { + /// Accounts that have just been created by a transaction but have not been committed to the + /// store yet.
+ Transaction(Box), + /// Accounts that already exist in the store. + Store(NetworkAccountPrefix), +} + +impl AccountOrigin { + /// Returns an [`AccountOrigin::Transaction`] if the account is a network account. + pub fn transaction(delta: &AccountDelta) -> Option { + let account = Account::try_from(delta).ok()?; + if account.is_network() { + Some(AccountOrigin::Transaction(account.clone().into())) + } else { + None + } + } + + /// Returns an [`AccountOrigin::Store`]. + pub fn store(prefix: NetworkAccountPrefix) -> Self { + AccountOrigin::Store(prefix) + } + + /// Returns the [`NetworkAccountPrefix`] of the account. + pub fn prefix(&self) -> NetworkAccountPrefix { + match self { + AccountOrigin::Transaction(account) => NetworkAccountPrefix::try_from(account.id()) + .expect("actor accounts are always network accounts"), + AccountOrigin::Store(prefix) => *prefix, + } + } +} + +// ACTOR MODE +// ================================================================================================ + +/// The mode of operation that the account actor is currently performing. +#[derive(Debug)] +enum ActorMode { + NoViableNotes, + NotesAvailable, + TransactionInflight(TransactionId), +} + +// ACCOUNT ACTOR +// ================================================================================================ + +/// A long-running asynchronous task that handles the complete lifecycle of network transaction +/// processing. Each actor operates independently and is managed by a single coordinator that +/// spawns, monitors, and messages all actors. +/// +/// ## Core Responsibilities +/// +/// - **State Management**: Loads and maintains the current state of network accounts, including +/// available notes, pending transactions, and account commitments. +/// - **Transaction Selection**: Selects viable notes and constructs a [`TransactionCandidate`] +/// based on current chain state. +/// - **Transaction Execution**: Executes selected transactions using either local or remote +/// proving. +/// - **Mempool Integration**: Listens for mempool events to stay synchronized with the network +/// state and adjust behavior based on transaction confirmations. +/// +/// ## Lifecycle +/// +/// 1. **Initialization**: Loads account state from the store or uses provided account data. +/// 2. **Event Loop**: Continuously processes mempool events and executes transactions. +/// 3. **Transaction Processing**: Selects, executes, and proves transactions, and submits them to +/// block producer. +/// 4. **State Updates**: Updates internal state based on mempool events and execution results. +/// 5. **Shutdown**: Terminates gracefully when cancelled or encounters unrecoverable errors. +/// +/// ## Concurrency +/// +/// Each actor runs in its own async task and communicates with other system components through +/// channels and shared state. The actor uses a cancellation token for graceful shutdown +/// coordination. +pub struct AccountActor { + origin: AccountOrigin, + store: StoreClient, + mode: ActorMode, + event_rx: mpsc::Receiver>, + cancel_token: CancellationToken, + block_producer: BlockProducerClient, + prover: Option, + chain_state: Arc>, + script_cache: LruCache, +} + +impl AccountActor { + /// Constructs a new account actor and corresponding messaging channel with the given + /// configuration. 
+ pub fn new( + origin: AccountOrigin, + actor_context: &AccountActorContext, + event_rx: mpsc::Receiver>, + cancel_token: CancellationToken, + ) -> Self { + let block_producer = BlockProducerClient::new(actor_context.block_producer_url.clone()); + let prover = actor_context.tx_prover_url.clone().map(RemoteTransactionProver::new); + Self { + origin, + store: actor_context.store.clone(), + mode: ActorMode::NoViableNotes, + event_rx, + cancel_token, + block_producer, + prover, + chain_state: actor_context.chain_state.clone(), + script_cache: actor_context.script_cache.clone(), + } + } + + /// Runs the account actor, processing events and managing state until a reason to shutdown is + /// encountered. + pub async fn run(mut self, semaphore: Arc) -> ActorShutdownReason { + // Load the account state from the store and set up the account actor state. + let account = { + match self.origin { + AccountOrigin::Store(account_prefix) => self + .store + .get_network_account(account_prefix) + .await + .expect("actor should be able to load account") + .expect("actor account should exist"), + AccountOrigin::Transaction(ref account) => *(account.clone()), + } + }; + let block_num = self.chain_state.read().await.chain_tip_header.block_num(); + let mut state = + NetworkAccountState::load(account, self.origin.prefix(), &self.store, block_num) + .await + .expect("actor should be able to load account state"); + + loop { + // Enable or disable transaction execution based on actor mode. + let tx_permit_acquisition = match self.mode { + // Disable transaction execution. + ActorMode::NoViableNotes | ActorMode::TransactionInflight(_) => { + std::future::pending().boxed() + }, + // Enable transaction execution. + ActorMode::NotesAvailable => semaphore.acquire().boxed(), + }; + tokio::select! { + _ = self.cancel_token.cancelled() => { + return ActorShutdownReason::Cancelled(self.origin.prefix()); + } + // Handle mempool events. + event = self.event_rx.recv() => { + let Some(event) = event else { + return ActorShutdownReason::EventChannelClosed; + }; + // Re-enable transaction execution if the transaction being waited on has been + // added to the mempool. + if let ActorMode::TransactionInflight(awaited_id) = self.mode { + if let MempoolEvent::TransactionAdded { id, .. } = *event { + if id == awaited_id { + self.mode = ActorMode::NotesAvailable; + } + } + } else { + self.mode = ActorMode::NotesAvailable; + } + // Update state. + if let Some(shutdown_reason) = state.mempool_update(event.as_ref()) { + return shutdown_reason; + } + }, + // Execute transactions. + permit = tx_permit_acquisition => { + match permit { + Ok(_permit) => { + // Read the chain state. + let chain_state = self.chain_state.read().await.clone(); + // Find a candidate transaction and execute it. + if let Some(tx_candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX, chain_state) { + self.execute_transactions(&mut state, tx_candidate).await; + } else { + // No transactions to execute, wait for events. + self.mode = ActorMode::NoViableNotes; + } + } + Err(err) => { + return ActorShutdownReason::SemaphoreFailed(err); + } + } + } + } + } + } + + /// Execute a transaction candidate and mark notes as failed as required. + /// + /// Updates the state of the actor based on the execution result. 
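The `run` loop above gates its transaction branch by swapping the semaphore acquisition for a future that never resolves, so `select!` can only wake on events while the actor has nothing to execute or is waiting on an inflight transaction. A stripped-down illustration of that gating technique, with illustrative names rather than the crate's own:

```rust
use std::sync::Arc;

use futures::FutureExt;
use tokio::sync::{Semaphore, mpsc};

enum Mode {
    Idle,
    Ready,
}

async fn event_loop(mut events: mpsc::Receiver<u32>, semaphore: Arc<Semaphore>) {
    let mut mode = Mode::Idle;
    loop {
        // While idle, the permit branch is a never-resolving future, so only
        // the event branch of the select! below can fire.
        let permit_acquisition = match mode {
            Mode::Idle => std::future::pending().boxed(),
            Mode::Ready => semaphore.acquire().boxed(),
        };
        tokio::select! {
            event = events.recv() => {
                let Some(_event) = event else { return };
                mode = Mode::Ready;
            }
            permit = permit_acquisition => {
                let _permit = permit.expect("semaphore should not be closed");
                // ... rate-limited work goes here ...
                mode = Mode::Idle;
            }
        }
    }
}
```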
+ #[tracing::instrument(name = "ntx.actor.execute_transactions", skip(self, state, tx_candidate))] + async fn execute_transactions( + &mut self, + state: &mut NetworkAccountState, + tx_candidate: TransactionCandidate, + ) { + let block_num = tx_candidate.chain_tip_header.block_num(); + + // Execute the selected transaction. + let context = execute::NtxContext::new( + self.block_producer.clone(), + self.prover.clone(), + self.store.clone(), + self.script_cache.clone(), + ); + + let execution_result = context.execute_transaction(tx_candidate).await; + match execution_result { + // Execution completed without failed notes. + Ok((tx_id, failed)) if failed.is_empty() => { + self.mode = ActorMode::TransactionInflight(tx_id); + }, + // Execution completed with some failed notes. + Ok((tx_id, failed)) => { + let notes = failed.into_iter().map(|note| note.note).collect::>(); + state.notes_failed(notes.as_slice(), block_num); + self.mode = ActorMode::TransactionInflight(tx_id); + }, + // Transaction execution failed. + Err(err) => { + tracing::error!(err = err.as_report(), "network transaction failed"); + match err { + NtxError::AllNotesFailed(failed) => { + let notes = failed.into_iter().map(|note| note.note).collect::>(); + state.notes_failed(notes.as_slice(), block_num); + self.mode = ActorMode::NoViableNotes; + }, + NtxError::InputNotes(_) + | NtxError::NoteFilter(_) + | NtxError::Execution(_) + | NtxError::Proving(_) + | NtxError::Submission(_) + | NtxError::Panic(_) => { + self.mode = ActorMode::NoViableNotes; + }, + } + }, + } + } +} + +// HELPERS +// ================================================================================================ + +/// Checks if the backoff block period has passed. +/// +/// The number of blocks passed since the last attempt must be strictly greater than +/// e^(0.25 * `attempt_count`), rounded to the nearest integer. +/// +/// This evaluates to the following: +/// - After 1 attempt, the backoff period is 1 block. +/// - After 3 attempts, the backoff period is 2 blocks. +/// - After 10 attempts, the backoff period is 12 blocks. +/// - After 20 attempts, the backoff period is 148 blocks. +/// - etc... +#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)] +fn has_backoff_passed( + chain_tip: BlockNumber, + last_attempt: Option, + attempts: usize, +) -> bool { + if attempts == 0 { + return true; + } + // Compute the number of blocks passed since the last attempt. + let blocks_passed = last_attempt + .and_then(|last| chain_tip.checked_sub(last.as_u32())) + .unwrap_or_default(); + + // Compute the exponential backoff threshold: Δ = e^(0.25 * n). + let backoff_threshold = (0.25 * attempts as f64).exp().round() as usize; + + // Check if the backoff period has passed.
+ blocks_passed.as_usize() > backoff_threshold +} diff --git a/crates/ntx-builder/src/state/account.rs b/crates/ntx-builder/src/actor/note_state.rs similarity index 65% rename from crates/ntx-builder/src/state/account.rs rename to crates/ntx-builder/src/actor/note_state.rs index 56af83b42..9de85dd6a 100644 --- a/crates/ntx-builder/src/state/account.rs +++ b/crates/ntx-builder/src/actor/note_state.rs @@ -2,82 +2,19 @@ use std::collections::{HashMap, VecDeque}; use miden_node_proto::domain::account::NetworkAccountPrefix; use miden_node_proto::domain::note::SingleTargetNetworkNote; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{Account, AccountDelta, AccountId}; -use miden_objects::block::BlockNumber; -use miden_objects::note::{Note, Nullifier}; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{Account, AccountDelta, AccountId}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Nullifier; -// INFLIGHT NETWORK NOTE -// ================================================================================================ - -/// An unconsumed network note that may have failed to execute. -/// -/// The block number at which the network note was attempted are approximate and may not -/// reflect the exact block number for which the execution attempt failed. The actual block -/// will likely be soon after the number that is recorded here. -#[derive(Debug, Clone)] -pub struct InflightNetworkNote { - note: SingleTargetNetworkNote, - attempt_count: usize, - last_attempt: Option, -} - -impl InflightNetworkNote { - /// Creates a new inflight network note. - pub fn new(note: SingleTargetNetworkNote) -> Self { - Self { - note, - attempt_count: 0, - last_attempt: None, - } - } - - /// Consumes the inflight network note and returns the inner network note. - pub fn into_inner(self) -> SingleTargetNetworkNote { - self.note - } - - /// Returns a reference to the inner network note. - pub fn to_inner(&self) -> &SingleTargetNetworkNote { - &self.note - } - - /// Returns the number of attempts made to execute the network note. - pub fn attempt_count(&self) -> usize { - self.attempt_count - } - - /// Checks if the network note is available for execution. - /// - /// The note is available if it can be consumed and the backoff period has passed. - pub fn is_available(&self, block_num: BlockNumber) -> bool { - let can_consume = self - .to_inner() - .metadata() - .execution_hint() - .can_be_consumed(block_num) - .unwrap_or(true); - can_consume && has_backoff_passed(block_num, self.last_attempt, self.attempt_count) - } - - /// Registers a failed attempt to execute the network note at the specified block number. - pub fn fail(&mut self, block_num: BlockNumber) { - self.last_attempt = Some(block_num); - self.attempt_count += 1; - } -} - -impl From for Note { - fn from(value: InflightNetworkNote) -> Self { - value.into_inner().into() - } -} +use crate::actor::inflight_note::InflightNetworkNote; // ACCOUNT STATE // ================================================================================================ /// Tracks the state of a network account and its notes. -pub struct AccountState { +#[derive(Clone)] +pub struct NetworkAccountNoteState { /// The committed account state, if any. /// /// It's possible this is `None` if the account creation transaction is still inflight.
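The thresholds quoted in the backoff helper's doc comment can be checked directly; a tiny standalone verification of Δ = e^(0.25 · n) at the documented attempt counts:

```rust
fn backoff_threshold(attempts: usize) -> usize {
    // Δ = e^(0.25 · n), rounded to the nearest integer.
    (0.25 * attempts as f64).exp().round() as usize
}

fn main() {
    // Prints 1, 2, 12 and 148, matching the doc comment's examples.
    for n in [1, 3, 10, 20] {
        println!("after {n:>2} attempts: wait more than {} blocks", backoff_threshold(n));
    }
}
```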
@@ -93,25 +30,29 @@ pub struct AccountState { nullified_notes: HashMap, } -impl AccountState { - /// Creates a new account state using the given value as the committed state. - pub fn from_committed_account(account: Account) -> Self { - Self { +impl NetworkAccountNoteState { + /// Creates a new account state from the supplied account and notes. + pub fn new(account: Account, notes: Vec) -> Self { + let account_prefix = NetworkAccountPrefix::try_from(account.id()) + .expect("only network accounts are used for account state"); + + let mut state = Self { committed: Some(account), inflight: VecDeque::default(), available_notes: HashMap::default(), nullified_notes: HashMap::default(), - } - } + }; - /// Creates a new account state where the creating transaction is still inflight. - pub fn from_uncommitted_account(account: Account) -> Self { - Self { - inflight: VecDeque::from([account]), - committed: None, - available_notes: HashMap::default(), - nullified_notes: HashMap::default(), + for note in notes { + // Currently only single-target network notes are supported in the NTB. + assert!( + note.account_prefix() == account_prefix, + "Notes supplied into account state must match expected account prefix" + ); + state.add_note(note); } + + state } /// Returns an iterator over inflight notes that are not currently within their respective @@ -197,6 +138,7 @@ impl AccountState { // in case its transaction wasn't available in the first place. // It shouldn't happen practically, since we skip them if the // relevant account cannot be retrieved via `fetch`. + let _ = self.nullified_notes.remove(&nullifier); } @@ -256,16 +198,16 @@ pub enum NetworkAccountEffect { } impl NetworkAccountEffect { - pub fn from_protocol(update: AccountUpdateDetails) -> Option { + pub fn from_protocol(update: &AccountUpdateDetails) -> Option { let update = match update { AccountUpdateDetails::Private => return None, AccountUpdateDetails::Delta(update) if update.is_full_state() => { NetworkAccountEffect::Created( - Account::try_from(&update) + Account::try_from(update) .expect("Account should be derivable by full state AccountDelta"), ) }, - AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update), + AccountUpdateDetails::Delta(update) => NetworkAccountEffect::Updated(update.clone()), }; update.account_id().is_network().then_some(update) @@ -284,44 +226,9 @@ impl NetworkAccountEffect { } } -// HELPERS -// ================================================================================================ - -/// Checks if the backoff block period has passed. -/// -/// The number of blocks passed since the last attempt must be greater than or equal to -/// e^(0.25 * `attempt_count`) rounded to the nearest integer. -/// -/// This evaluates to the following: -/// - After 1 attempt, the backoff period is 1 block. -/// - After 3 attempts, the backoff period is 2 blocks. -/// - After 10 attempts, the backoff period is 12 blocks. -/// - After 20 attempts, the backoff period is 148 blocks. -/// - etc... -#[allow(clippy::cast_precision_loss, clippy::cast_sign_loss)] -fn has_backoff_passed( - chain_tip: BlockNumber, - last_attempt: Option, - attempts: usize, -) -> bool { - if attempts == 0 { - return true; - } - // Compute the number of blocks passed since the last attempt. - let blocks_passed = last_attempt - .and_then(|last| chain_tip.checked_sub(last.as_u32())) - .unwrap_or_default(); - - // Compute the exponential backoff threshold: Δ = e^(0.25 * n).
- let backoff_threshold = (0.25 * attempts as f64).exp().round() as usize; - - // Check if the backoff period has passed. - blocks_passed.as_usize() > backoff_threshold -} - #[cfg(test)] mod tests { - use miden_objects::block::BlockNumber; + use miden_protocol::block::BlockNumber; #[rstest::rstest] #[test] @@ -341,9 +248,11 @@ mod tests { #[case] attempt_count: usize, #[case] backoff_should_have_passed: bool, ) { + use crate::actor::has_backoff_passed; + assert_eq!( backoff_should_have_passed, - super::has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) + has_backoff_passed(current_block_num, last_attempt_block_num, attempt_count) ); } } diff --git a/crates/ntx-builder/src/block_producer.rs b/crates/ntx-builder/src/block_producer.rs index a29b61295..7c1af9d8f 100644 --- a/crates/ntx-builder/src/block_producer.rs +++ b/crates/ntx-builder/src/block_producer.rs @@ -1,16 +1,12 @@ use std::time::Duration; use futures::{TryStream, TryStreamExt}; -use miden_node_proto::clients::{ - BlockProducer, - BlockProducerClient as InnerBlockProducerClient, - Builder, -}; +use miden_node_proto::clients::{BlockProducerClient as InnerBlockProducerClient, Builder}; use miden_node_proto::domain::mempool::MempoolEvent; use miden_node_proto::generated::{self as proto}; use miden_node_utils::FlattenResult; -use miden_objects::block::BlockNumber; -use miden_objects::transaction::ProvenTransaction; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::ProvenTransaction; use miden_tx::utils::Serializable; use tokio_stream::StreamExt; use tonic::Status; @@ -40,11 +36,12 @@ impl BlockProducerClient { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .with_otel_context_injection() + .connect_lazy::(); Self { client: block_producer } } - #[instrument(target = COMPONENT, name = "block_producer.client.submit_proven_transaction", skip_all, err)] + #[instrument(target = COMPONENT, name = "ntx.block_producer.client.submit_proven_transaction", skip_all, err)] pub async fn submit_proven_transaction( &self, proven_tx: ProvenTransaction, @@ -59,7 +56,7 @@ impl BlockProducerClient { Ok(()) } - #[instrument(target = COMPONENT, name = "block_producer.client.subscribe_to_mempool", skip_all, err)] + #[instrument(target = COMPONENT, name = "ntx.block_producer.client.subscribe_to_mempool", skip_all, err)] pub async fn subscribe_to_mempool_with_retry( &self, chain_tip: BlockNumber, diff --git a/crates/ntx-builder/src/builder.rs b/crates/ntx-builder/src/builder.rs new file mode 100644 index 000000000..34ebdc06f --- /dev/null +++ b/crates/ntx-builder/src/builder.rs @@ -0,0 +1,261 @@ +use std::num::NonZeroUsize; +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Context; +use futures::TryStreamExt; +use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_utils::lru_cache::LruCache; +use miden_protocol::Word; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::block::BlockHeader; +use miden_protocol::crypto::merkle::mmr::PartialMmr; +use miden_protocol::note::NoteScript; +use miden_protocol::transaction::PartialBlockchain; +use tokio::sync::{Barrier, RwLock}; +use tokio::time; +use url::Url; + +use crate::MAX_IN_PROGRESS_TXS; +use crate::actor::{AccountActorContext, AccountOrigin}; +use crate::block_producer::BlockProducerClient; +use crate::coordinator::Coordinator; +use crate::store::StoreClient; + +// CONSTANTS +// 
================================================================================================= + +/// The maximum number of blocks to keep in memory while tracking the chain tip. +const MAX_BLOCK_COUNT: usize = 4; + +// CHAIN STATE +// ================================================================================================ + +/// Contains information about the chain that is relevant to the [`NetworkTransactionBuilder`] and +/// all account actors managed by the [`Coordinator`]. +#[derive(Debug, Clone)] +pub struct ChainState { + /// The current tip of the chain. + pub chain_tip_header: BlockHeader, + /// A partial representation of the latest state of the chain. + pub chain_mmr: PartialBlockchain, +} + +impl ChainState { + /// Constructs a new instance of [`ChainState`]. + fn new(chain_tip_header: BlockHeader, chain_mmr: PartialMmr) -> Self { + let chain_mmr = PartialBlockchain::new(chain_mmr, []) + .expect("partial blockchain should build from partial mmr"); + Self { chain_tip_header, chain_mmr } + } + + /// Consumes the chain state and returns the chain tip header and the partial blockchain as a + /// tuple. + pub fn into_parts(self) -> (BlockHeader, PartialBlockchain) { + (self.chain_tip_header, self.chain_mmr) + } +} + +// NETWORK TRANSACTION BUILDER +// ================================================================================================ + +/// Network transaction builder component. +/// +/// The network transaction builder is in charge of building transactions that consume notes +/// against network accounts. These notes are identified and communicated by the block producer. +/// The service maintains a list of unconsumed notes and periodically executes and proves +/// transactions that consume them (reaching out to the store to retrieve state as necessary). +/// +/// The builder manages the tasks for every network account on the chain through the coordinator. +pub struct NetworkTransactionBuilder { + /// Address of the store gRPC server. + store_url: Url, + /// Address of the block producer gRPC server. + block_producer_url: Url, + /// Address of the remote prover. If `None`, transactions will be proven locally, which is + /// undesirable due to the performance impact. + tx_prover_url: Option, + /// Interval for checking pending notes and executing network transactions. + ticker_interval: Duration, + /// A checkpoint used to sync the start-up process with the block-producer. + /// + /// This informs the block-producer when we have subscribed to mempool events and that it is + /// safe to begin block-production. + bp_checkpoint: Arc, + /// Shared LRU cache for storing retrieved note scripts to avoid repeated store calls. + /// This cache is shared across all account actors. + script_cache: LruCache, + /// Coordinator for managing actor tasks. + coordinator: Coordinator, +} + +impl NetworkTransactionBuilder { + /// Default cache size for note scripts. + /// + /// Each cached script contains the deserialized `NoteScript` object, so the actual memory usage + /// depends on the complexity of the scripts being cached. + const DEFAULT_SCRIPT_CACHE_SIZE: NonZeroUsize = NonZeroUsize::new(1000).unwrap(); + + /// Creates a new instance of the network transaction builder.
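`ChainState` is shared as an `Arc<RwLock<ChainState>>`: the builder is the single writer, advancing the tip on each `BlockCommitted` event, while every actor takes a brief read lock and clones a snapshot before executing. A minimal sketch of that single-writer, many-reader arrangement with simplified stand-in types:

```rust
use std::sync::Arc;

use tokio::sync::RwLock;

#[derive(Clone, Debug)]
struct ChainState {
    tip: u32,
}

#[tokio::main]
async fn main() {
    let shared = Arc::new(RwLock::new(ChainState { tip: 0 }));

    // Reader side (an actor): clone a snapshot and release the lock at once,
    // so executions never hold the lock across an await point.
    let actor = {
        let shared = shared.clone();
        tokio::spawn(async move {
            let snapshot = shared.read().await.clone();
            println!("actor sees tip {}", snapshot.tip);
        })
    };

    // Writer side (the builder): hold the write lock only while advancing.
    shared.write().await.tip += 1;

    actor.await.unwrap();
}
```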
+ pub fn new( + store_url: Url, + block_producer_url: Url, + tx_prover_url: Option, + ticker_interval: Duration, + bp_checkpoint: Arc, + ) -> Self { + let script_cache = LruCache::new(Self::DEFAULT_SCRIPT_CACHE_SIZE); + let coordinator = Coordinator::new(MAX_IN_PROGRESS_TXS); + Self { + store_url, + block_producer_url, + tx_prover_url, + ticker_interval, + bp_checkpoint, + script_cache, + coordinator, + } + } + + /// Runs the network transaction builder until a fatal error occurs. + pub async fn run(mut self) -> anyhow::Result<()> { + let store = StoreClient::new(self.store_url.clone()); + let block_producer = BlockProducerClient::new(self.block_producer_url.clone()); + + let (chain_tip_header, chain_mmr) = store + .get_latest_blockchain_data_with_retry() + .await? + .expect("store should contain a latest block"); + let mut mempool_events = block_producer + .subscribe_to_mempool_with_retry(chain_tip_header.block_num()) + .await + .context("failed to subscribe to mempool events")?; + + // Unlock the block-producer's block production. The block-producer is prevented from + // producing blocks until we have subscribed to mempool events. + // + // This is a temporary work-around until the ntx-builder can resync on the fly. + self.bp_checkpoint.wait().await; + + let mut interval = tokio::time::interval(self.ticker_interval); + interval.set_missed_tick_behavior(time::MissedTickBehavior::Skip); + + // Create chain state that will be updated by the coordinator and read by actors. + let chain_state = Arc::new(RwLock::new(ChainState::new(chain_tip_header, chain_mmr))); + + let actor_context = AccountActorContext { + block_producer_url: self.block_producer_url.clone(), + tx_prover_url: self.tx_prover_url.clone(), + chain_state: chain_state.clone(), + store: store.clone(), + script_cache: self.script_cache.clone(), + }; + + // Create initial set of actors based on all known network accounts. + let account_ids = store.get_network_account_ids().await?; + for account_id in account_ids { + if let Ok(account_prefix) = NetworkAccountPrefix::try_from(account_id) { + self.coordinator + .spawn_actor(AccountOrigin::store(account_prefix), &actor_context) + .await?; + } + } + + // Main loop which manages actors and passes mempool events to them. + loop { + tokio::select! { + // Handle actor result. + result = self.coordinator.next() => { + result?; + }, + // Handle mempool events. + event = mempool_events.try_next() => { + let event = event + .context("mempool event stream ended")? + .context("mempool event stream failed")?; + + self.handle_mempool_event( + event.into(), + &actor_context, + chain_state.clone(), + ).await?; + }, + } + } + } + + /// Handles mempool events by sending them to actors via the coordinator and/or spawning new + /// actors as required. + #[tracing::instrument( + name = "ntx.builder.handle_mempool_event", + skip(self, event, actor_context, chain_state) + )] + async fn handle_mempool_event( + &mut self, + event: Arc, + actor_context: &AccountActorContext, + chain_state: Arc>, + ) -> Result<(), anyhow::Error> { + match event.as_ref() { + MempoolEvent::TransactionAdded { account_delta, .. } => { + // Handle account deltas in case an account is being created. + if let Some(AccountUpdateDetails::Delta(delta)) = account_delta { + // Handle account deltas for network accounts only. 
+ if let Some(network_account) = AccountOrigin::transaction(delta) { + // Spawn new actors if a transaction creates a new network account + let is_creating_account = delta.is_full_state(); + if is_creating_account { + self.coordinator.spawn_actor(network_account, actor_context).await?; + } + } + } + self.coordinator.send_targeted(&event).await?; + Ok(()) + }, + // Update chain state and broadcast. + MempoolEvent::BlockCommitted { header, txs } => { + self.update_chain_tip(header.as_ref().clone(), chain_state).await; + self.coordinator.broadcast(event.clone()).await; + + // All transactions pertaining to predating events should now be available through + // the store. So we can now drain them. + for tx_id in txs { + self.coordinator.drain_predating_events(tx_id); + } + Ok(()) + }, + // Broadcast to all actors. + MempoolEvent::TransactionsReverted(txs) => { + self.coordinator.broadcast(event.clone()).await; + + // Reverted predating transactions need not be processed. + for tx_id in txs { + self.coordinator.drain_predating_events(tx_id); + } + Ok(()) + }, + } + } + + /// Updates the chain tip and MMR block count. + /// + /// Blocks in the MMR are pruned if the block count exceeds the maximum. + async fn update_chain_tip(&mut self, tip: BlockHeader, chain_state: Arc>) { + // Lock the chain state. + let mut chain_state = chain_state.write().await; + + // Update MMR which lags by one block. + let mmr_tip = chain_state.chain_tip_header.clone(); + chain_state.chain_mmr.add_block(&mmr_tip, true); + + // Set the new tip. + chain_state.chain_tip_header = tip; + + // Keep MMR pruned. + let pruned_block_height = + (chain_state.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) + as u32; + chain_state.chain_mmr.prune_to(..pruned_block_height.into()); + } +} diff --git a/crates/ntx-builder/src/builder/mod.rs b/crates/ntx-builder/src/builder/mod.rs deleted file mode 100644 index 2932240b7..000000000 --- a/crates/ntx-builder/src/builder/mod.rs +++ /dev/null @@ -1,209 +0,0 @@ -use std::collections::HashMap; -use std::sync::Arc; -use std::time::Duration; - -use anyhow::Context; -use futures::TryStreamExt; -use miden_node_proto::domain::account::NetworkAccountPrefix; -use miden_node_utils::ErrorReport; -use miden_remote_prover_client::remote_prover::tx_prover::RemoteTransactionProver; -use tokio::sync::Barrier; -use tokio::time; -use url::Url; - -use crate::MAX_IN_PROGRESS_TXS; -use crate::block_producer::BlockProducerClient; -use crate::store::StoreClient; -use crate::transaction::NtxError; - -// NETWORK TRANSACTION BUILDER -// ================================================================================================ - -/// Network transaction builder component. -/// -/// The network transaction builder is in in charge of building transactions that consume notes -/// against network accounts. These notes are identified and communicated by the block producer. -/// The service maintains a list of unconsumed notes and periodically executes and proves -/// transactions that consume them (reaching out to the store to retrieve state as necessary). -pub struct NetworkTransactionBuilder { - /// Address of the store gRPC server. - store_url: Url, - /// Address of the block producer gRPC server. - block_producer_url: Url, - /// Address of the remote prover. If `None`, transactions will be proven locally, which is - /// undesirable due to the perofmrance impact. - tx_prover_url: Option, - /// Interval for checking pending notes and executing network transactions. 
- ticker_interval: Duration, - /// A checkpoint used to sync start-up process with the block-producer. - /// - /// This informs the block-producer when we have subscribed to mempool events and that it is - /// safe to begin block-production. - bp_checkpoint: Arc, -} - -impl NetworkTransactionBuilder { - /// Creates a new instance of the network transaction builder. - pub fn new( - store_url: Url, - block_producer_url: Url, - tx_prover_url: Option, - ticker_interval: Duration, - bp_checkpoint: Arc, - ) -> Self { - Self { - store_url, - block_producer_url, - tx_prover_url, - ticker_interval, - bp_checkpoint, - } - } - - pub async fn serve_new(self) -> anyhow::Result<()> { - let store = StoreClient::new(self.store_url); - let block_producer = BlockProducerClient::new(self.block_producer_url); - - let mut state = crate::state::State::load(store.clone()) - .await - .context("failed to load ntx state")?; - - let mut mempool_events = block_producer - .subscribe_to_mempool_with_retry(state.chain_tip()) - .await - .context("failed to subscribe to mempool events")?; - - // Unlock the block-producer's block production. The block-producer is prevented from - // producing blocks until we have subscribed to mempool events. - // - // This is a temporary work-around until the ntb can resync on the fly. - self.bp_checkpoint.wait().await; - - let prover = self.tx_prover_url.map(RemoteTransactionProver::new); - - let mut interval = tokio::time::interval(self.ticker_interval); - interval.set_missed_tick_behavior(time::MissedTickBehavior::Skip); - - // Tracks network transaction tasks until they are submitted to the mempool. - // - // We also map the task ID to the network account so we can mark it as failed if it doesn't - // get submitted. - let mut inflight = JoinSet::new(); - let mut inflight_idx = HashMap::new(); - - let context = crate::transaction::NtxContext { - block_producer: block_producer.clone(), - prover, - store, - }; - - loop { - tokio::select! { - _next = interval.tick() => { - if inflight.len() > MAX_IN_PROGRESS_TXS { - tracing::info!("At maximum network tx capacity, skipping"); - continue; - } - - let Some(candidate) = state.select_candidate(crate::MAX_NOTES_PER_TX) else { - tracing::debug!("No candidate network transaction available"); - continue; - }; - - let network_account_prefix = NetworkAccountPrefix::try_from(candidate.account.id()) - .expect("all accounts managed by NTB are network accounts"); - let indexed_candidate = (network_account_prefix, candidate.chain_tip_header.block_num()); - let task_id = inflight.spawn({ - let context = context.clone(); - context.execute_transaction(candidate) - }).id(); - - // SAFETY: This is definitely a network account. - inflight_idx.insert(task_id, indexed_candidate); - }, - event = mempool_events.try_next() => { - let event = event - .context("mempool event stream ended")? - .context("mempool event stream failed")?; - state.mempool_update(event).await.context("failed to update state")?; - }, - completed = inflight.join_next_with_id() => { - // Grab the task ID and associated network account reference. - let task_id = match &completed { - Ok((task_id, _)) => *task_id, - Err(join_handle) => join_handle.id(), - }; - // SAFETY: both inflights should have the same set. - let (candidate, block_num) = inflight_idx.remove(&task_id).unwrap(); - - match completed { - // Some notes failed. 
- Ok((_, Ok(failed))) => { - let notes = failed.into_iter().map(|note| note.note).collect::>(); - state.notes_failed(candidate, notes.as_slice(), block_num); - }, - // Transaction execution failed. - Ok((_, Err(err))) => { - tracing::warn!(err=err.as_report(), "network transaction failed"); - match err { - NtxError::AllNotesFailed(failed) => { - let notes = failed.into_iter().map(|note| note.note).collect::>(); - state.notes_failed(candidate, notes.as_slice(), block_num); - }, - NtxError::InputNotes(_) - | NtxError::NoteFilter(_) - | NtxError::Execution(_) - | NtxError::Proving(_) - | NtxError::Submission(_) - | NtxError::Panic(_) => {}, - } - state.candidate_failed(candidate); - }, - // Unexpected error occurred. - Err(err) => { - tracing::warn!(err=err.as_report(), "network transaction panicked"); - state.candidate_failed(candidate); - } - } - } - } - } - } -} - -/// A wrapper arounnd tokio's [`JoinSet`](tokio::task::JoinSet) which returns pending instead of -/// [`None`] if its empty. -/// -/// This makes it much more convenient to use in a `select!`. -struct JoinSet(tokio::task::JoinSet); - -impl JoinSet -where - T: 'static, -{ - fn new() -> Self { - Self(tokio::task::JoinSet::new()) - } - - fn spawn(&mut self, task: F) -> tokio::task::AbortHandle - where - F: Future, - F: Send + 'static, - T: Send, - { - self.0.spawn(task) - } - - async fn join_next_with_id(&mut self) -> Result<(tokio::task::Id, T), tokio::task::JoinError> { - if self.0.is_empty() { - std::future::pending().await - } else { - // Cannot be None as its not empty. - self.0.join_next_with_id().await.unwrap() - } - } - - fn len(&self) -> usize { - self.0.len() - } -} diff --git a/crates/ntx-builder/src/coordinator.rs b/crates/ntx-builder/src/coordinator.rs new file mode 100644 index 000000000..3806b7c8f --- /dev/null +++ b/crates/ntx-builder/src/coordinator.rs @@ -0,0 +1,273 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use anyhow::Context; +use indexmap::IndexMap; +use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_node_proto::domain::mempool::MempoolEvent; +use miden_node_proto::domain::note::NetworkNote; +use miden_protocol::transaction::TransactionId; +use tokio::sync::mpsc::error::SendError; +use tokio::sync::{Semaphore, mpsc}; +use tokio::task::JoinSet; +use tokio_util::sync::CancellationToken; + +use crate::actor::{AccountActor, AccountActorContext, AccountOrigin, ActorShutdownReason}; + +// ACTOR HANDLE +// ================================================================================================ + +/// Handle to account actors that are spawned by the coordinator. +#[derive(Clone)] +struct ActorHandle { + event_tx: mpsc::Sender>, + cancel_token: CancellationToken, +} + +impl ActorHandle { + fn new(event_tx: mpsc::Sender>, cancel_token: CancellationToken) -> Self { + Self { event_tx, cancel_token } + } +} + +// COORDINATOR +// ================================================================================================ + +/// Coordinator for managing [`AccountActor`] instances, tasks, and associated communication. +/// +/// The `Coordinator` is the central orchestrator of the network transaction builder system. +/// It manages the lifecycle of account actors. Each actor is responsible for handling transactions +/// for a specific network account prefix. The coordinator provides the following core +/// functionality: +/// +/// ## Actor Management +/// - Spawns new [`AccountActor`] instances for network accounts as needed. 
+/// - Maintains a registry of active actors with their communication channels. +/// - Gracefully handles actor shutdown and cleanup when actors complete or fail. +/// - Monitors actor tasks through a join set to detect completion or errors. +/// +/// ## Event Broadcasting +/// - Distributes mempool events to all account actors. +/// - Handles communication failures by canceling disconnected actors. +/// - Maintains reliable message delivery through dedicated channels per actor. +/// +/// ## Resource Management +/// - Controls transaction concurrency across all network accounts using a semaphore. +/// - Prevents resource exhaustion by limiting simultaneous transaction processing. +/// +/// The coordinator operates in an event-driven manner: +/// 1. Network accounts are registered and actors spawned as needed. +/// 2. Mempool events are broadcast to all active actors. +/// 3. Actor completion/failure events are monitored and handled. +/// 4. Failed or completed actors are cleaned up from the registry. +pub struct Coordinator { + /// Mapping of network account prefixes to their respective message channels and cancellation + /// tokens. + /// + /// This registry serves as the primary directory for communicating with active account actors. + /// When actors are spawned, they register their communication channel here. When events need + /// to be broadcast, this registry is used to locate the appropriate actors. The registry is + /// automatically cleaned up when actors complete their execution. + actor_registry: HashMap, + + /// Join set for managing actor tasks and monitoring their completion status. + /// + /// This join set allows the coordinator to wait for actor task completion and handle + /// different shutdown scenarios. When an actor task completes (either successfully or + /// due to an error), the corresponding entry is removed from the actor registry. + actor_join_set: JoinSet, + + /// Semaphore for controlling the maximum number of concurrent transactions across all network + /// accounts. + /// + /// This shared semaphore prevents the system from becoming overwhelmed by limiting the total + /// number of transactions that can be processed simultaneously across all account actors. + /// Each actor must acquire a permit from this semaphore before processing a transaction, + /// ensuring fair resource allocation and system stability under load. + semaphore: Arc, + + /// Cache of events received from the mempool that predate corresponding network accounts. + /// Grouped by account prefix to allow targeted event delivery to actors upon creation. + predating_events: HashMap>>, +} + +impl Coordinator { + /// Maximum number of messages of the message channel for each actor. + const ACTOR_CHANNEL_SIZE: usize = 100; + + /// Creates a new coordinator with the specified maximum number of inflight transactions + /// and shared script cache. + pub fn new(max_inflight_transactions: usize) -> Self { + Self { + actor_registry: HashMap::new(), + actor_join_set: JoinSet::new(), + semaphore: Arc::new(Semaphore::new(max_inflight_transactions)), + predating_events: HashMap::new(), + } + } + + /// Spawns a new actor to manage the state of the provided network account. + /// + /// This method creates a new [`AccountActor`] instance for the specified account origin + /// and adds it to the coordinator's management system. The actor will be responsible for + /// processing transactions and managing state for accounts matching the network prefix. 
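Concretely, the registry pairs each actor with a bounded event channel and a cancellation token; a broadcast then fans out over the registry, and any actor whose channel has closed is cancelled and dropped. A condensed sketch of that registry shape, with simplified prefix and event types:

```rust
use std::collections::HashMap;
use std::sync::Arc;

use tokio::sync::mpsc;
use tokio_util::sync::CancellationToken;

type Event = Arc<String>;
type Prefix = u32;

struct Handle {
    event_tx: mpsc::Sender<Event>,
    cancel: CancellationToken,
}

struct Registry {
    actors: HashMap<Prefix, Handle>,
}

impl Registry {
    /// Fans an event out to every actor; cancels and drops any actor whose
    /// channel has closed, i.e. whose task has already shut down.
    async fn broadcast(&mut self, event: Event) {
        let mut dead = Vec::new();
        for (prefix, handle) in &self.actors {
            if handle.event_tx.send(event.clone()).await.is_err() {
                dead.push(*prefix);
            }
        }
        for prefix in dead {
            if let Some(handle) = self.actors.remove(&prefix) {
                handle.cancel.cancel();
            }
        }
    }
}
```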
+ #[tracing::instrument(name = "ntx.builder.spawn_actor", skip(self, origin, actor_context))] + pub async fn spawn_actor( + &mut self, + origin: AccountOrigin, + actor_context: &AccountActorContext, + ) -> Result<(), SendError>> { + let account_prefix = origin.prefix(); + + // If an actor already exists for this account prefix, something has gone wrong. + if let Some(handle) = self.actor_registry.remove(&account_prefix) { + tracing::error!("account actor already exists for prefix: {}", account_prefix); + handle.cancel_token.cancel(); + } + + let (event_tx, event_rx) = mpsc::channel(Self::ACTOR_CHANNEL_SIZE); + let cancel_token = tokio_util::sync::CancellationToken::new(); + let actor = AccountActor::new(origin, actor_context, event_rx, cancel_token.clone()); + let handle = ActorHandle::new(event_tx, cancel_token); + + // Run the actor. + let semaphore = self.semaphore.clone(); + self.actor_join_set.spawn(Box::pin(actor.run(semaphore))); + + // Send the new actor any events that contain notes that predate account creation. + if let Some(prefix_events) = self.predating_events.remove(&account_prefix) { + for event in prefix_events.values() { + Self::send(&handle, event.clone()).await?; + } + } + + self.actor_registry.insert(account_prefix, handle); + tracing::info!("created actor for account prefix: {}", account_prefix); + Ok(()) + } + + /// Broadcasts a mempool event to all active account actors. + /// + /// This method distributes the provided event to every actor currently registered + /// with the coordinator. Each actor will receive the event through its dedicated + /// message channel and can process it accordingly. + /// + /// If an actor fails to receive the event, it will be canceled. + pub async fn broadcast(&mut self, event: Arc) { + tracing::debug!( + actor_count = self.actor_registry.len(), + "broadcasting event to all actors" + ); + + let mut failed_actors = Vec::new(); + + // Send event to all actors. + for (account_prefix, handle) in &self.actor_registry { + if let Err(err) = Self::send(handle, event.clone()).await { + tracing::error!("failed to send event to actor {}: {}", account_prefix, err); + failed_actors.push(*account_prefix); + } + } + // Remove failed actors from registry and cancel them. + for prefix in failed_actors { + let handle = + self.actor_registry.remove(&prefix).expect("actor found in send loop above"); + handle.cancel_token.cancel(); + } + } + + /// Waits for the next actor to complete and processes the shutdown reason. + /// + /// This method monitors the join set for actor task completion and handles + /// different shutdown scenarios appropriately. It's designed to be called + /// in a loop to continuously monitor and manage actor lifecycles. + /// + /// If no actors are currently running, this method will wait indefinitely until + /// new actors are spawned. This prevents busy-waiting when the coordinator is idle. + pub async fn next(&mut self) -> anyhow::Result<()> { + let actor_result = self.actor_join_set.join_next().await; + match actor_result { + Some(Ok(shutdown_reason)) => match shutdown_reason { + ActorShutdownReason::Cancelled(account_prefix) => { + // Do not remove the actor from the registry here: the coordinator always + // removes an actor's registry entry at the moment it cancels it, so by now + // the prefix may already map to a re-spawned actor.
+ tracing::info!("account actor cancelled: {}", account_prefix); + Ok(()) + }, + ActorShutdownReason::AccountReverted(account_prefix) => { + tracing::info!("account reverted: {}", account_prefix); + self.actor_registry.remove(&account_prefix); + Ok(()) + }, + ActorShutdownReason::EventChannelClosed => { + anyhow::bail!("event channel closed"); + }, + ActorShutdownReason::SemaphoreFailed(err) => Err(err).context("semaphore failed"), + }, + Some(Err(err)) => { + tracing::error!(err = %err, "actor task failed"); + Ok(()) + }, + None => { + // There are no actors to wait for. Wait indefinitely until actors are spawned. + std::future::pending().await + }, + } + } + + /// Sends a mempool event to all network account actors that are found in the corresponding + /// transaction's notes. + /// + /// Caches the mempool event for each network account found in the transaction's notes that does + /// not currently have a corresponding actor. If an actor does not exist for the account, it is + /// assumed that the account has not been created on the chain yet. + /// + /// Cached events will be fed to the corresponding actor when the account creation transaction + /// is processed. + pub async fn send_targeted( + &mut self, + event: &Arc, + ) -> Result<(), SendError>> { + let mut target_actors = HashMap::new(); + if let MempoolEvent::TransactionAdded { id, network_notes, .. } = event.as_ref() { + // Determine target actors for each note. + for note in network_notes { + if let NetworkNote::SingleTarget(note) = note { + let prefix = note.account_prefix(); + if let Some(actor) = self.actor_registry.get(&prefix) { + // Register actor as target. + target_actors.insert(prefix, actor); + } else { + // Cache event for every note that doesn't have a corresponding actor. + self.predating_events.entry(prefix).or_default().insert(*id, event.clone()); + } + } + } + } + // Send event to target actors. + for actor in target_actors.values() { + Self::send(actor, event.clone()).await?; + } + Ok(()) + } + + /// Removes any cached events for a given transaction ID from all account prefix caches. + pub fn drain_predating_events(&mut self, tx_id: &TransactionId) { + // Remove the transaction from all prefix caches. + // This iterates over all predating events which is fine because the count is expected to be + // low. + self.predating_events.retain(|_, prefix_event| { + prefix_event.shift_remove(tx_id); + // Remove entries for account prefixes with no more cached events. + !prefix_event.is_empty() + }); + } + + /// Helper function to send an event to a single account actor. 
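`drain_predating_events` above leans on `IndexMap::shift_remove` inside `HashMap::retain` so that emptied per-prefix buckets are dropped in the same pass, keeping the outer map bounded. The shape of that two-level prune, with simplified key types:

```rust
use std::collections::HashMap;

use indexmap::IndexMap;

type Prefix = u32;
type TxId = u64;
type Event = String;

/// Removes a transaction's cached events from every prefix bucket, dropping
/// any bucket that becomes empty along the way.
fn drain(cache: &mut HashMap<Prefix, IndexMap<TxId, Event>>, tx_id: TxId) {
    cache.retain(|_prefix, events| {
        events.shift_remove(&tx_id);
        !events.is_empty()
    });
}
```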
+ async fn send( + handle: &ActorHandle, + event: Arc, + ) -> Result<(), SendError>> { + handle.event_tx.send(event).await + } +} diff --git a/crates/ntx-builder/src/lib.rs b/crates/ntx-builder/src/lib.rs index c3efd1351..b0d89f94c 100644 --- a/crates/ntx-builder/src/lib.rs +++ b/crates/ntx-builder/src/lib.rs @@ -1,10 +1,10 @@ use std::num::NonZeroUsize; +mod actor; mod block_producer; mod builder; -mod state; +mod coordinator; mod store; -mod transaction; pub use builder::NetworkTransactionBuilder; diff --git a/crates/ntx-builder/src/state/mod.rs b/crates/ntx-builder/src/state/mod.rs deleted file mode 100644 index fbdb8ea56..000000000 --- a/crates/ntx-builder/src/state/mod.rs +++ /dev/null @@ -1,487 +0,0 @@ -use std::collections::hash_map::Entry; -use std::collections::{BTreeMap, BTreeSet, HashMap, HashSet, VecDeque}; -use std::num::NonZeroUsize; - -use account::{AccountState, InflightNetworkNote, NetworkAccountEffect}; -use anyhow::Context; -use miden_node_proto::domain::account::NetworkAccountPrefix; -use miden_node_proto::domain::mempool::MempoolEvent; -use miden_node_proto::domain::note::{NetworkNote, SingleTargetNetworkNote}; -use miden_node_utils::tracing::OpenTelemetrySpanExt; -use miden_objects::account::Account; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::note::{Note, Nullifier}; -use miden_objects::transaction::{PartialBlockchain, TransactionId}; -use tracing::instrument; - -use crate::COMPONENT; -use crate::store::{StoreClient, StoreError}; - -mod account; - -// CONSTANTS -// ================================================================================================= - -/// The maximum number of blocks to keep in memory while tracking the chain tip. -const MAX_BLOCK_COUNT: usize = 4; - -/// A candidate network transaction. -/// -/// Contains the data pertaining to a specific network account which can be used to build a network -/// transaction. -#[derive(Clone)] -pub struct TransactionCandidate { - /// The current inflight state of the account. - pub account: Account, - - /// A set of notes addressed to this network account. - pub notes: Vec, - - /// The latest locally committed block header. - /// - /// This should be used as the reference block during transaction execution. - pub chain_tip_header: BlockHeader, - - /// The chain MMR, which lags behind the tip by one block. - pub chain_mmr: PartialBlockchain, -} - -/// Holds the state of the network transaction builder. -/// -/// It tracks inflight transactions, and their impact on network-related state. -pub struct State { - /// The latest committed block header. - chain_tip_header: BlockHeader, - - /// The chain MMR, which lags behind the tip by one block. - chain_mmr: PartialBlockchain, - - /// Tracks all network accounts with inflight state. - /// - /// This is network account deltas, network notes and their nullifiers. - accounts: HashMap, - - /// A rotating queue of all tracked network accounts. - /// - /// This is used to select the next transaction's account. - /// - /// Note that this _always_ includes _all_ network accounts. Filtering out accounts that aren't - /// viable is handled within the select method itself. - queue: VecDeque, - - /// Network accounts which have been selected but whose transaction has not yet completed. - /// - /// This locks these accounts so they cannot be selected. - in_progress: HashSet, - - /// Uncommitted transactions which have a some impact on the network state. 
- /// - /// This is tracked so we can commit or revert such transaction effects. Transactions _without_ - /// an impact are ignored. - inflight_txs: BTreeMap, - - /// A mapping of network note's to their account. - nullifier_idx: BTreeMap, - - /// gRPC client used to retrieve the network account state from the store. - store: StoreClient, -} - -impl State { - /// Maximum number of attempts to execute a network note. - const MAX_NOTE_ATTEMPTS: usize = 1; - - /// Load's all available network notes from the store, along with the required account states. - #[instrument(target = COMPONENT, name = "ntx.state.load", skip_all)] - pub async fn load(store: StoreClient) -> Result { - let (chain_tip_header, chain_mmr) = store - .get_latest_blockchain_data_with_retry() - .await? - .expect("store should contain a latest block"); - - let chain_mmr = PartialBlockchain::new(chain_mmr, []) - .expect("PartialBlockchain should build from latest partial MMR"); - - let mut state = Self { - chain_tip_header, - chain_mmr, - store, - accounts: HashMap::default(), - queue: VecDeque::default(), - in_progress: HashSet::default(), - inflight_txs: BTreeMap::default(), - nullifier_idx: BTreeMap::default(), - }; - - let notes = state.store.get_unconsumed_network_notes().await?; - for note in notes { - // Currently only support single target network notes in NTB. - if let NetworkNote::SingleTarget(note) = note { - let prefix = note.account_prefix(); - // Ignore notes which don't target an existing account. - if let Some(account) = state.fetch_account(prefix).await? { - account.add_note(note); - } - } - } - state.inject_telemetry(); - - Ok(state) - } - - /// Selects the next candidate network transaction. - /// - /// Note that this marks the candidate account as in-progress and that it cannot be selected - /// again until either: - /// - /// - it has been marked as failed if the transaction failed, or - /// - the transaction was submitted successfully, indicated by the associated mempool event - /// being submitted - #[instrument(target = COMPONENT, name = "ntx.state.select_candidate", skip_all)] - pub fn select_candidate(&mut self, limit: NonZeroUsize) -> Option { - // Loop through the account queue until we find one that is selectable. - // - // Since the queue contains _all_ accounts, including unselectable accounts, we limit our - // search to once through the entire queue. - // - // There are smarter ways of doing this, but this should scale more than well enough for a - // long time. - for _ in 0..self.queue.len() { - // This is a rotating queue. - let candidate = self.queue.pop_front().unwrap(); - self.queue.push_back(candidate); - - // Skip accounts which are already in-progress. - if self.in_progress.contains(&candidate) { - continue; - } - - let account = self.accounts.get_mut(&candidate).expect("queue account must be tracked"); - - // Remove notes that have failed too many times. - account.drop_failing_notes(Self::MAX_NOTE_ATTEMPTS); - - // Skip empty accounts, and prune them. - // This is how we keep the number of accounts bounded. - if account.is_empty() { - // We don't need to prune the inflight transactions because if the account is empty, - // then it would have no inflight txs. - self.accounts.remove(&candidate); - // We know this account is the backmost one since we just rotated it there. - self.queue.pop_back(); - continue; - } - - // Select notes from the account that can be consumed or are ready for a retry. 
- let notes = account - .available_notes(&self.chain_tip_header.block_num()) - .take(limit.get()) - .cloned() - .collect::>(); - - // Skip accounts with no available notes. - if notes.is_empty() { - continue; - } - - self.in_progress.insert(candidate); - return TransactionCandidate { - account: account.latest_account(), - notes, - chain_tip_header: self.chain_tip_header.clone(), - chain_mmr: self.chain_mmr.clone(), - } - .into(); - } - self.inject_telemetry(); - - None - } - - /// The latest block number the state knows of. - pub fn chain_tip(&self) -> BlockNumber { - self.chain_tip_header.block_num() - } - - /// Updates the chain tip and MMR block count. - /// - /// Blocks in the MMR are pruned if the block count exceeds the maximum. - fn update_chain_tip(&mut self, tip: BlockHeader) { - // Update MMR which lags by one block. - self.chain_mmr.add_block(self.chain_tip_header.clone(), true); - - // Set the new tip. - self.chain_tip_header = tip; - - // Keep MMR pruned. - let pruned_block_height = - (self.chain_mmr.chain_length().as_usize().saturating_sub(MAX_BLOCK_COUNT)) as u32; - self.chain_mmr.prune_to(..pruned_block_height.into()); - } - - /// Marks notes of a previously selected candidate as failed. - /// - /// Does not remove the candidate from the in-progress pool. - #[instrument(target = COMPONENT, name = "ntx.state.notes_failed", skip_all)] - pub fn notes_failed( - &mut self, - candidate: NetworkAccountPrefix, - notes: &[Note], - block_num: BlockNumber, - ) { - if let Some(account) = self.accounts.get_mut(&candidate) { - let nullifiers = notes.iter().map(Note::nullifier).collect::>(); - account.fail_notes(nullifiers.as_slice(), block_num); - } else { - tracing::error!(account.prefix=%candidate, "failed network notes have no local account state"); - } - } - - /// Marks a previously selected candidate account as failed, allowing it to be available for - /// selection again. - /// - /// All notes in the candidate will be marked as failed. - #[instrument(target = COMPONENT, name = "ntx.state.candidate_failed", skip_all)] - pub fn candidate_failed(&mut self, candidate: NetworkAccountPrefix) { - self.in_progress.remove(&candidate); - - self.inject_telemetry(); - } - - /// Updates state with the mempool event. - #[instrument(target = COMPONENT, name = "ntx.state.mempool_update", skip_all)] - pub async fn mempool_update(&mut self, update: MempoolEvent) -> anyhow::Result<()> { - let span = tracing::Span::current(); - span.set_attribute("mempool_event.kind", update.kind()); - - match update { - // Note: this event will get triggered by normal user transactions, as well as our - // network transactions. The mempool does not distinguish between the two. 
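// The handler that follows dispatches on the three mempool event kinds. As a
// hedged illustration of the bookkeeping involved, here is a self-contained
// sketch: each uncommitted transaction records the keys it touched, so a later
// commit or revert can be applied without re-reading the transaction. `Event`
// and the `u64`/`u32` ids are simplified stand-ins for the real types.
use std::collections::{BTreeMap, BTreeSet};

enum Event {
    TransactionAdded { id: u64, notes: BTreeSet<u32> },
    BlockCommitted { txs: Vec<u64> },
    TransactionsReverted(Vec<u64>),
}

#[derive(Default)]
struct Tracker {
    inflight: BTreeMap<u64, BTreeSet<u32>>,
    committed_notes: BTreeSet<u32>,
}

impl Tracker {
    fn handle(&mut self, event: Event) {
        match event {
            Event::TransactionAdded { id, notes } => {
                // Transactions without impact are not tracked at all.
                if !notes.is_empty() {
                    self.inflight.entry(id).or_insert(notes);
                }
            },
            Event::BlockCommitted { txs } => {
                for tx in txs {
                    // Untracked transactions had no network impact; skip them.
                    if let Some(notes) = self.inflight.remove(&tx) {
                        self.committed_notes.extend(notes);
                    }
                }
            },
            Event::TransactionsReverted(txs) => {
                for tx in txs {
                    // Dropping the recorded impact undoes the transaction.
                    self.inflight.remove(&tx);
                }
            },
        }
    }
}

fn main() {
    let mut tracker = Tracker::default();
    tracker.handle(Event::TransactionAdded { id: 1, notes: BTreeSet::from([9]) });
    tracker.handle(Event::TransactionAdded { id: 2, notes: BTreeSet::from([8]) });
    tracker.handle(Event::TransactionsReverted(vec![2]));
    tracker.handle(Event::BlockCommitted { txs: vec![1] });
    assert_eq!(tracker.committed_notes, BTreeSet::from([9]));
    assert!(tracker.inflight.is_empty());
}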
- MempoolEvent::TransactionAdded { - id, - nullifiers, - network_notes, - account_delta, - } => { - let network_notes = network_notes - .into_iter() - .filter_map(|note| match note { - NetworkNote::SingleTarget(note) => Some(note), - NetworkNote::MultiTarget(_) => None, - }) - .collect::>(); - self.add_transaction(id, nullifiers, network_notes, account_delta).await?; - }, - MempoolEvent::BlockCommitted { header, txs } => { - anyhow::ensure!( - header.prev_block_commitment() == self.chain_tip_header.commitment(), - "New block's parent commitment {} does not match local chain tip {}", - header.prev_block_commitment(), - self.chain_tip_header.commitment() - ); - self.update_chain_tip(header); - for tx in txs { - self.commit_transaction(tx); - } - }, - MempoolEvent::TransactionsReverted(txs) => { - for tx in txs { - self.revert_transaction(tx); - } - }, - } - self.inject_telemetry(); - - Ok(()) - } - - /// Handles a [`MempoolEvent::TransactionAdded`] event. - /// - /// Note that this will include our own network transactions as well as user submitted - /// transactions. - /// - /// This updates the state of network accounts affected by this transaction. Account state - /// may be loaded from the store if it isn't already known locally. This would be the case if - /// the network account has no inflight state changes. - async fn add_transaction( - &mut self, - id: TransactionId, - nullifiers: Vec, - network_notes: Vec, - account_delta: Option, - ) -> anyhow::Result<()> { - // Skip transactions we already know about. - // - // This can occur since both ntx builder and the mempool might inform us of the same - // transaction. Once when it was submitted to the mempool, and once by the mempool event. - if self.inflight_txs.contains_key(&id) { - return Ok(()); - } - - let mut tx_impact = TransactionImpact::default(); - if let Some(update) = account_delta.and_then(NetworkAccountEffect::from_protocol) { - let prefix = update.prefix(); - - match update { - NetworkAccountEffect::Created(account) => { - let account_state = AccountState::from_uncommitted_account(account); - self.accounts.insert(prefix, account_state); - self.queue.push_back(prefix); - }, - NetworkAccountEffect::Updated(account_delta) => { - self.fetch_account(prefix) - .await - .context("failed to load account")? - .context("account with delta not found")? - .add_delta(&account_delta); - }, - } - - // If this account was in-progress, then it should no longer be as this update is the - // result of our own network transaction. - self.in_progress.remove(&prefix); - tx_impact.account_delta = Some(prefix); - } - for note in network_notes { - let prefix = note.account_prefix(); - tx_impact.notes.insert(note.nullifier()); - - // Skip and ignore nullifier if note targets a non-existent network account - let Some(account) = self.fetch_account(prefix).await? else { - tracing::warn!("could not fetch account from network: {:?}", prefix); - continue; - }; - - account.add_note(note.clone()); - self.nullifier_idx.insert(note.nullifier(), prefix); - } - for nullifier in nullifiers { - // Ignore nullifiers that aren't network note nullifiers. - let Some(account) = self.nullifier_idx.get(&nullifier) else { - continue; - }; - tx_impact.nullifiers.insert(nullifier); - // We don't use the entry wrapper here because the account must already exist. 
- let _res = self - .accounts - .get_mut(account) - .expect("nullifier account must exist") - .add_nullifier(nullifier); - } - - if !tx_impact.is_empty() { - self.inflight_txs.insert(id, tx_impact); - } - - Ok(()) - } - - /// Handles [`MempoolEvent::BlockCommitted`] events. - fn commit_transaction(&mut self, tx: TransactionId) { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - return; - }; - - if let Some(prefix) = impact.account_delta { - self.accounts.get_mut(&prefix).unwrap().commit_delta(); - } - - for nullifier in impact.nullifiers { - let prefix = self.nullifier_idx.remove(&nullifier).unwrap(); - // Its possible for the account to no longer exist if the transaction creating it was - // reverted. - if let Some(account) = self.accounts.get_mut(&prefix) { - account.commit_nullifier(nullifier); - } - } - } - - /// Handles [`MempoolEvent::TransactionsReverted`] events. - fn revert_transaction(&mut self, tx: TransactionId) { - // We only track transactions which have an impact on the network state. - let Some(impact) = self.inflight_txs.remove(&tx) else { - return; - }; - - if let Some(prefix) = impact.account_delta { - // We need to remove the account if this transaction created the account. - if self.accounts.get_mut(&prefix).unwrap().revert_delta() { - self.accounts.remove(&prefix); - } - } - - for note in impact.notes { - let prefix = self.nullifier_idx.remove(¬e).unwrap(); - // Its possible for the account to no longer exist if the transaction creating it was - // reverted. - if let Some(account) = self.accounts.get_mut(&prefix) { - account.revert_note(note); - } - } - - for nullifier in impact.nullifiers { - let prefix = self.nullifier_idx.get(&nullifier).unwrap(); - // Its possible for the account to no longer exist if the transaction creating it was - // reverted. - if let Some(account) = self.accounts.get_mut(prefix) { - account.revert_nullifier(nullifier); - } - } - } - - /// Returns the current inflight account, loading it from the store if it isn't present locally. - /// - /// Returns `None` if the account is unknown. - async fn fetch_account( - &mut self, - prefix: NetworkAccountPrefix, - ) -> Result, StoreError> { - match self.accounts.entry(prefix) { - Entry::Occupied(occupied_entry) => Ok(Some(occupied_entry.into_mut())), - Entry::Vacant(vacant_entry) => { - let Some(account) = self.store.get_network_account(prefix).await? else { - return Ok(None); - }; - - self.queue.push_back(prefix); - let entry = vacant_entry.insert(AccountState::from_committed_account(account)); - - Ok(Some(entry)) - }, - } - } - - /// Adds stats to the current tracing span. - /// - /// Note that these are only visible in the OpenTelemetry context, as conventional tracing - /// does not track fields added dynamically. - fn inject_telemetry(&self) { - let span = tracing::Span::current(); - - span.set_attribute("ntx.state.accounts.total", self.accounts.len()); - span.set_attribute("ntx.state.accounts.in_progress", self.in_progress.len()); - span.set_attribute("ntx.state.transactions", self.inflight_txs.len()); - span.set_attribute("ntx.state.notes.total", self.nullifier_idx.len()); - } -} - -/// The impact a transaction has on the state. -#[derive(Default)] -struct TransactionImpact { - /// The network account this transaction added an account delta to. - account_delta: Option, - - /// Network notes this transaction created. - notes: BTreeSet, - - /// Network notes this transaction consumed. 
- nullifiers: BTreeSet, -} - -impl TransactionImpact { - fn is_empty(&self) -> bool { - self.account_delta.is_none() && self.notes.is_empty() && self.nullifiers.is_empty() - } -} diff --git a/crates/ntx-builder/src/store.rs b/crates/ntx-builder/src/store.rs index 922275276..447571a5a 100644 --- a/crates/ntx-builder/src/store.rs +++ b/crates/ntx-builder/src/store.rs @@ -1,16 +1,17 @@ use std::time::Duration; -use miden_node_proto::clients::{Builder, StoreNtxBuilder, StoreNtxBuilderClient}; +use miden_node_proto::clients::{Builder, StoreNtxBuilderClient}; use miden_node_proto::domain::account::NetworkAccountPrefix; use miden_node_proto::domain::note::NetworkNote; use miden_node_proto::errors::ConversionError; +use miden_node_proto::generated::rpc::BlockRange; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; -use miden_objects::Word; -use miden_objects::account::Account; -use miden_objects::block::BlockHeader; -use miden_objects::crypto::merkle::{Forest, MmrPeaks, PartialMmr}; -use miden_objects::note::NoteScript; +use miden_protocol::Word; +use miden_protocol::account::{Account, AccountId}; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks, PartialMmr}; +use miden_protocol::note::NoteScript; use miden_tx::utils::Deserializable; use thiserror::Error; use tracing::{info, instrument}; @@ -39,7 +40,8 @@ impl StoreClient { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::(); + .with_otel_context_injection() + .connect_lazy::(); Self { inner: store } } @@ -104,43 +106,12 @@ impl StoreClient { } } - /// Returns the list of unconsumed network notes. - #[instrument(target = COMPONENT, name = "store.client.get_unconsumed_network_notes", skip_all, err)] - pub async fn get_unconsumed_network_notes(&self) -> Result, StoreError> { - let mut all_notes = Vec::new(); - let mut page_token: Option = None; - - loop { - let req = proto::ntx_builder_store::UnconsumedNetworkNotesRequest { - page_token, - page_size: 128, - }; - let resp = self.inner.clone().get_unconsumed_network_notes(req).await?.into_inner(); - - let page: Vec = resp - .notes - .into_iter() - .map(NetworkNote::try_from) - .collect::, _>>()?; - - all_notes.extend(page); - - match resp.next_token { - Some(tok) => page_token = Some(tok), - None => break, - } - } - - Ok(all_notes) - } - #[instrument(target = COMPONENT, name = "store.client.get_network_account", skip_all, err)] pub async fn get_network_account( &self, prefix: NetworkAccountPrefix, ) -> Result, StoreError> { - let request = - proto::ntx_builder_store::AccountIdPrefix { account_id_prefix: prefix.inner() }; + let request = proto::store::AccountIdPrefix { account_id_prefix: prefix.inner() }; let store_response = self .inner @@ -164,6 +135,102 @@ impl StoreClient { Ok(account) } + /// Returns the list of unconsumed network notes for a specific network account up to a + /// specified block. + #[instrument(target = COMPONENT, name = "store.client.get_unconsumed_network_notes", skip_all, err)] + pub async fn get_unconsumed_network_notes( + &self, + network_account_prefix: NetworkAccountPrefix, + block_num: u32, + ) -> Result, StoreError> { + // Upper bound of each note is ~10KB. Limit page size to ~10MB. 
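// A hedged sketch of the token-pagination pattern implemented by the loop
// that follows: fetch a page, collect its items, and continue while the
// server returns a `next_token`. The iteration cap mirrors the guard used by
// `get_network_account_ids` further below; `fetch_page` is a hypothetical
// stand-in for the gRPC call.
struct Page {
    items: Vec<u32>,
    next_token: Option<u64>,
}

fn fetch_page(token: Option<u64>) -> Page {
    // Stand-in server: two fixed pages.
    match token {
        None => Page { items: vec![1, 2], next_token: Some(1) },
        Some(_) => Page { items: vec![3], next_token: None },
    }
}

fn fetch_all() -> Result<Vec<u32>, &'static str> {
    const MAX_ITERATIONS: u32 = 100_000;

    let mut all = Vec::new();
    let mut token = None;
    for _ in 0..MAX_ITERATIONS {
        let page = fetch_page(token);
        all.extend(page.items);
        match page.next_token {
            Some(next) => token = Some(next),
            None => return Ok(all),
        }
    }
    // Bail out instead of looping forever on a misbehaving server.
    Err("max iterations reached")
}

fn main() {
    assert_eq!(fetch_all(), Ok(vec![1, 2, 3]));
}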
+        const PAGE_SIZE: u64 = 1024;
+
+        let mut all_notes = Vec::new();
+        let mut page_token: Option = None;
+
+        let mut store_client = self.inner.clone();
+        loop {
+            let req = proto::store::UnconsumedNetworkNotesRequest {
+                page_token,
+                page_size: PAGE_SIZE,
+                network_account_id_prefix: network_account_prefix.inner(),
+                block_num,
+            };
+            let resp = store_client.get_unconsumed_network_notes(req).await?.into_inner();
+
+            all_notes.reserve(resp.notes.len());
+            for note in resp.notes {
+                all_notes.push(NetworkNote::try_from(note)?);
+            }
+
+            match resp.next_token {
+                Some(token) => page_token = Some(token),
+                None => break,
+            }
+        }
+
+        Ok(all_notes)
+    }
+
+    /// Get all network account IDs.
+    ///
+    /// Since the `GetNetworkAccountIds` method is paginated, we loop through all pages until we
+    /// reach the end.
+    ///
+    /// Each page can return up to `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE`
+    /// accounts (~289,000). The loop is capped at `100_000` iterations, which is assumed to be
+    /// sufficient for the foreseeable future.
+    #[instrument(target = COMPONENT, name = "store.client.get_network_account_ids", skip_all, err)]
+    pub async fn get_network_account_ids(&self) -> Result, StoreError> {
+        const MAX_ITERATIONS: u32 = 100_000;
+
+        let mut block_range = BlockNumber::from(0)..=BlockNumber::from(u32::MAX);
+
+        let mut ids = Vec::new();
+        let mut iterations_count = 0;
+
+        loop {
+            let response = self
+                .inner
+                .clone()
+                .get_network_account_ids(Into::::into(block_range.clone()))
+                .await?
+                .into_inner();
+
+            let accounts: Result, ConversionError> = response
+                .account_ids
+                .into_iter()
+                .map(|account_id| {
+                    AccountId::read_from_bytes(&account_id.id)
+                        .map_err(|err| ConversionError::deserialization_error("account_id", err))
+                })
+                .collect();
+
+            let pagination_info = response.pagination_info.ok_or(
+                ConversionError::MissingFieldInProtobufRepresentation {
+                    entity: "NetworkAccountIdList",
+                    field_name: "pagination_info",
+                },
+            )?;
+
+            ids.extend(accounts?);
+            iterations_count += 1;
+            block_range =
+                BlockNumber::from(pagination_info.block_num)..=BlockNumber::from(u32::MAX);
+
+            if pagination_info.block_num >= pagination_info.chain_tip {
+                break;
+            }
+
+            if iterations_count >= MAX_ITERATIONS {
+                return Err(StoreError::MaxIterationsReached("GetNetworkAccountIds".to_string()));
+            }
+        }
+
+        Ok(ids)
+    }
+
     #[instrument(target = COMPONENT, name = "store.client.get_note_script_by_root", skip_all, err)]
     pub async fn get_note_script_by_root(
         &self,
@@ -201,4 +268,6 @@ pub enum StoreError {
     MalformedResponse(String),
     #[error("failed to parse response")]
     DeserializationError(#[from] ConversionError),
+    #[error("max iterations reached: {0}")]
+    MaxIterationsReached(String),
 }
diff --git a/crates/proto/Cargo.toml b/crates/proto/Cargo.toml
index 0b0943030..738eade6b 100644
--- a/crates/proto/Cargo.toml
+++ b/crates/proto/Cargo.toml
@@ -20,10 +20,10 @@ hex = { version = "0.4" }
 http = { workspace = true }
 miden-node-grpc-error-macro = { workspace = true }
 miden-node-utils = { workspace = true }
-miden-objects = { workspace = true }
+miden-protocol = { workspace = true }
 prost = { workspace = true }
 thiserror = { workspace = true }
-tonic = { workspace = true }
+tonic = { default-features = true, workspace = true }
 tonic-prost = { workspace = true }
 url = { workspace = true }
diff --git a/crates/proto/build.rs b/crates/proto/build.rs
index 6d71e8400..b0ac773a7 100644
--- a/crates/proto/build.rs
+++ b/crates/proto/build.rs
@@ -9,7 +9,6 @@ use miden_node_proto_build::{
     store_block_producer_api_descriptor,
store_ntx_builder_api_descriptor, store_rpc_api_descriptor, - store_shared_api_descriptor, validator_api_descriptor, }; use miette::{Context, IntoDiagnostic}; @@ -44,7 +43,6 @@ fn main() -> miette::Result<()> { generate_bindings(store_rpc_api_descriptor(), &dst_dir)?; generate_bindings(store_ntx_builder_api_descriptor(), &dst_dir)?; generate_bindings(store_block_producer_api_descriptor(), &dst_dir)?; - generate_bindings(store_shared_api_descriptor(), &dst_dir)?; generate_bindings(block_producer_api_descriptor(), &dst_dir)?; generate_bindings(remote_prover_api_descriptor(), &dst_dir)?; generate_bindings(validator_api_descriptor(), &dst_dir)?; diff --git a/crates/proto/src/clients/mod.rs b/crates/proto/src/clients/mod.rs index 4fb6b622c..3599b472c 100644 --- a/crates/proto/src/clients/mod.rs +++ b/crates/proto/src/clients/mod.rs @@ -5,31 +5,34 @@ //! //! # Examples //! -//! ```rust,no_run -//! use miden_node_proto::clients::{Builder, WantsTls, StoreNtxBuilderClient, StoreNtxBuilder}; +//! ```rust +//! # use miden_node_proto::clients::{Builder, WantsTls, StoreNtxBuilderClient}; +//! # use url::Url; //! //! # async fn example() -> anyhow::Result<()> { //! // Create a store client with OTEL and TLS -//! let client: StoreNtxBuilderClient = Builder::new("https://store.example.com")? -//! .with_tls()? // or `.without_tls()` -//! .without_timeout() // or `.with_timeout(Duration::from_secs(10))` -//! .without_metadata_version() // or `.with_metadata_version("1.0".into())` -//! .without_metadata_genesis() // or `.with_metadata_genesis(genesis)` -//! .connect::() +//! let url = Url::parse("https://example.com:8080")?; +//! let client: StoreNtxBuilderClient = Builder::new(url) +//! .with_tls()? // or `.without_tls()` +//! .without_timeout() // or `.with_timeout(Duration::from_secs(10))` +//! .without_metadata_version() // or `.with_metadata_version("1.0".into())` +//! .without_metadata_genesis() // or `.with_metadata_genesis(genesis)` +//! .with_otel_context_injection() // or `.without_otel_context_injection()` +//! .connect::() //! .await?; //! # Ok(()) //! # } //! ``` -use std::collections::HashMap; -use std::fmt::Write; use std::marker::PhantomData; +use std::ops::{Deref, DerefMut}; +use std::str::FromStr; use std::time::Duration; use anyhow::{Context, Result}; +use http::header::ACCEPT; use miden_node_utils::tracing::grpc::OtelInterceptor; use tonic::metadata::AsciiMetadataValue; -use tonic::service::Interceptor; use tonic::service::interceptor::InterceptedService; use tonic::transport::{Channel, ClientTlsConfig, Endpoint}; use tonic::{Request, Status}; @@ -37,206 +40,278 @@ use url::Url; use crate::generated; -// METADATA INTERCEPTOR -// ================================================================================================ +#[derive(Clone)] +pub struct Interceptor { + otel: Option, + accept: AsciiMetadataValue, +} -/// Interceptor designed to inject required metadata into all RPC requests. -#[derive(Default, Clone)] -pub struct MetadataInterceptor { - metadata: HashMap<&'static str, AsciiMetadataValue>, +impl Default for Interceptor { + fn default() -> Self { + Self { + otel: None, + accept: AsciiMetadataValue::from_static(Self::MEDIA_TYPE), + } + } } -impl MetadataInterceptor { - /// Adds or overwrites HTTP ACCEPT metadata to the interceptor. - /// - /// Provided version string must be ASCII. 
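// For reference, a minimal tonic interceptor that stamps a static ACCEPT
// header onto every request -- a reduced sketch of what the new `Interceptor`
// in this file does, minus the optional OTel propagation. Only the `tonic`
// crate is assumed; the header value is illustrative.
use tonic::metadata::AsciiMetadataValue;
use tonic::service::Interceptor;
use tonic::{Request, Status};

#[derive(Clone)]
struct AcceptInterceptor {
    accept: AsciiMetadataValue,
}

impl Interceptor for AcceptInterceptor {
    fn call(&mut self, mut request: Request<()>) -> Result<Request<()>, Status> {
        // `insert` overwrites any previous value stored under the same key.
        request.metadata_mut().insert("accept", self.accept.clone());
        Ok(request)
    }
}

fn main() {
    let mut interceptor = AcceptInterceptor {
        accept: AsciiMetadataValue::from_static("application/vnd.miden"),
    };
    let request = interceptor.call(Request::new(())).unwrap();
    assert!(request.metadata().contains_key("accept"));
}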
- pub fn with_accept_metadata( - mut self, - version: &str, - genesis: Option<&str>, - ) -> Result { - let mut accept_value = format!("application/vnd.miden; version={version}"); - if let Some(genesis) = genesis { - write!(accept_value, "; genesis={genesis}")?; +impl Interceptor { + const MEDIA_TYPE: &str = "application/vnd.miden"; + const VERSION: &str = "version"; + const GENESIS: &str = "genesis"; + + fn new(enable_otel: bool, version: Option<&str>, genesis: Option<&str>) -> Self { + if let Some(version) = version + && !version.is_ascii() + { + panic!("version contains non-ascii values: {version}"); + } + + if let Some(genesis) = genesis + && !genesis.is_ascii() + { + panic!("genesis contains non-ascii values: {genesis}"); + } + + let accept = match (version, genesis) { + (None, None) => Self::MEDIA_TYPE.to_string(), + (None, Some(genesis)) => format!("{}; {}={genesis}", Self::MEDIA_TYPE, Self::GENESIS), + (Some(version), None) => format!("{}; {}={version}", Self::MEDIA_TYPE, Self::VERSION), + (Some(version), Some(genesis)) => format!( + "{}; {}={version}, {}={genesis}", + Self::MEDIA_TYPE, + Self::VERSION, + Self::GENESIS + ), + }; + Self { + otel: enable_otel.then_some(OtelInterceptor), + // SAFETY: we checked that all values are ascii at the top of the function. + accept: AsciiMetadataValue::from_str(&accept).unwrap(), } - self.metadata.insert("accept", AsciiMetadataValue::try_from(accept_value)?); - Ok(self) } } -// COMBINED INTERCEPTOR (OTEL + METADATA) + +impl tonic::service::Interceptor for Interceptor { + fn call(&mut self, mut request: tonic::Request<()>) -> Result, Status> { + if let Some(mut otel) = self.otel { + request = otel.call(request)?; + } + + request.metadata_mut().insert(ACCEPT.as_str(), self.accept.clone()); + + Ok(request) + } +} + +// TYPE ALIASES TO AID LEGIBILITY // ================================================================================================ -#[derive(Clone)] -pub struct OtelAndMetadataInterceptor { - otel: OtelInterceptor, - metadata: MetadataInterceptor, +type InterceptedChannel = InterceptedService; +type GeneratedRpcClient = generated::rpc::api_client::ApiClient; +type GeneratedBlockProducerClient = + generated::block_producer::api_client::ApiClient; +type GeneratedStoreClientForNtxBuilder = + generated::store::ntx_builder_client::NtxBuilderClient; +type GeneratedStoreClientForBlockProducer = + generated::store::block_producer_client::BlockProducerClient; +type GeneratedStoreClientForRpc = generated::store::rpc_client::RpcClient; +type GeneratedProxyStatusClient = + generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient; +type GeneratedProverClient = generated::remote_prover::api_client::ApiClient; +type GeneratedValidatorClient = generated::validator::api_client::ApiClient; + +// gRPC CLIENTS +// ================================================================================================ + +#[derive(Debug, Clone)] +pub struct RpcClient(GeneratedRpcClient); +#[derive(Debug, Clone)] +pub struct BlockProducerClient(GeneratedBlockProducerClient); +#[derive(Debug, Clone)] +pub struct StoreNtxBuilderClient(GeneratedStoreClientForNtxBuilder); +#[derive(Debug, Clone)] +pub struct StoreBlockProducerClient(GeneratedStoreClientForBlockProducer); +#[derive(Debug, Clone)] +pub struct StoreRpcClient(GeneratedStoreClientForRpc); +#[derive(Debug, Clone)] +pub struct RemoteProverProxyStatusClient(GeneratedProxyStatusClient); +#[derive(Debug, Clone)] +pub struct RemoteProverClient(GeneratedProverClient); +#[derive(Debug, Clone)] +pub 
struct ValidatorClient(GeneratedValidatorClient); + +impl DerefMut for RpcClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } -impl OtelAndMetadataInterceptor { - pub fn new(otel: OtelInterceptor, metadata: MetadataInterceptor) -> Self { - Self { otel, metadata } +impl Deref for RpcClient { + type Target = GeneratedRpcClient; + + fn deref(&self) -> &Self::Target { + &self.0 } } -impl Interceptor for OtelAndMetadataInterceptor { - fn call(&mut self, request: Request<()>) -> Result, Status> { - // Apply OTEL first so tracing context propagates, then attach metadata headers - let req = self.otel.call(request)?; - self.metadata.call(req) +impl DerefMut for BlockProducerClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } } -impl Interceptor for MetadataInterceptor { - fn call(&mut self, request: Request<()>) -> Result, Status> { - let mut request = request; - for (key, value) in &self.metadata { - request.metadata_mut().insert(*key, value.clone()); - } - Ok(request) +impl Deref for BlockProducerClient { + type Target = GeneratedBlockProducerClient; + + fn deref(&self) -> &Self::Target { + &self.0 } } -// TYPE ALIASES FOR INSTRUMENTED CLIENTS -// ================================================================================================ +impl DerefMut for StoreNtxBuilderClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -pub type RpcClient = - generated::rpc::api_client::ApiClient>; -pub type BlockProducerClient = - generated::block_producer::api_client::ApiClient>; -pub type StoreNtxBuilderClient = generated::ntx_builder_store::ntx_builder_client::NtxBuilderClient< - InterceptedService, ->; -pub type StoreBlockProducerClient = - generated::block_producer_store::block_producer_client::BlockProducerClient< - InterceptedService, - >; -pub type StoreRpcClient = - generated::rpc_store::rpc_client::RpcClient>; - -pub type RemoteProverProxyStatusClient = - generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient< - InterceptedService, - >; - -pub type RemoteProverClient = - generated::remote_prover::api_client::ApiClient>; +impl Deref for StoreNtxBuilderClient { + type Target = GeneratedStoreClientForNtxBuilder; -// GRPC CLIENT BUILDER TRAIT -// ================================================================================================ + fn deref(&self) -> &Self::Target { + &self.0 + } +} -/// Configuration for gRPC clients. -/// -/// This struct contains the configuration for gRPC clients, including the metadata version and -/// genesis commitment. -pub struct ClientConfig { - pub metadata_version: Option, - pub metadata_genesis: Option, +impl DerefMut for StoreBlockProducerClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } } -/// Trait for building gRPC clients from a common [`Builder`] configuration. -/// -/// This trait provides a standardized way to create different gRPC clients with consistent -/// configuration options like TLS, OTEL interceptors, and connection types. 
-pub trait GrpcClientBuilder { - type Service; +impl Deref for StoreBlockProducerClient { + type Target = GeneratedStoreClientForBlockProducer; - fn with_interceptor(channel: Channel, config: &ClientConfig) -> Self::Service; + fn deref(&self) -> &Self::Target { + &self.0 + } } -// CLIENT BUILDER MARKERS -// ================================================================================================ +impl DerefMut for StoreRpcClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct Rpc; +impl Deref for StoreRpcClient { + type Target = GeneratedStoreClientForRpc; -#[derive(Copy, Clone, Debug)] -pub struct BlockProducer; + fn deref(&self) -> &Self::Target { + &self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct StoreNtxBuilder; +impl DerefMut for RemoteProverProxyStatusClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct StoreBlockProducer; +impl Deref for RemoteProverProxyStatusClient { + type Target = GeneratedProxyStatusClient; -#[derive(Copy, Clone, Debug)] -pub struct StoreRpc; + fn deref(&self) -> &Self::Target { + &self.0 + } +} -#[derive(Copy, Clone, Debug)] -pub struct RemoteProverProxy; +impl DerefMut for RemoteProverClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 + } +} -// CLIENT BUILDER IMPLEMENTATIONS -// ================================================================================================ +impl Deref for RemoteProverClient { + type Target = GeneratedProverClient; -impl GrpcClientBuilder for Rpc { - type Service = RpcClient; + fn deref(&self) -> &Self::Target { + &self.0 + } +} - fn with_interceptor(channel: Channel, config: &ClientConfig) -> Self::Service { - // Include Accept header only if version was explicitly provided; still combine with OTEL. - let mut metadata = MetadataInterceptor::default(); - if let Some(version) = config.metadata_version.as_deref() { - metadata = metadata - .with_accept_metadata(version, config.metadata_genesis.as_deref()) - .expect("Failed to create metadata interceptor"); - } - let combined = OtelAndMetadataInterceptor::new(OtelInterceptor, metadata); - generated::rpc::api_client::ApiClient::with_interceptor(channel, combined) +impl DerefMut for ValidatorClient { + fn deref_mut(&mut self) -> &mut Self::Target { + &mut self.0 } } -impl GrpcClientBuilder for BlockProducer { - type Service = BlockProducerClient; +impl Deref for ValidatorClient { + type Target = GeneratedValidatorClient; - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::block_producer::api_client::ApiClient::with_interceptor(channel, OtelInterceptor) + fn deref(&self) -> &Self::Target { + &self.0 } } -impl GrpcClientBuilder for StoreNtxBuilder { - type Service = StoreNtxBuilderClient; +// GRPC CLIENT BUILDER TRAIT +// ================================================================================================ - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::ntx_builder_store::ntx_builder_client::NtxBuilderClient::with_interceptor( - channel, - OtelInterceptor, - ) +/// Trait for building gRPC clients from a common [`Builder`] configuration. 
+pub trait GrpcClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self; +} + +impl GrpcClient for RpcClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedRpcClient::new(InterceptedService::new(channel, interceptor))) } } -impl GrpcClientBuilder for StoreBlockProducer { - type Service = StoreBlockProducerClient; +impl GrpcClient for BlockProducerClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedBlockProducerClient::new(InterceptedService::new(channel, interceptor))) + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::block_producer_store::block_producer_client::BlockProducerClient::with_interceptor( +impl GrpcClient for StoreNtxBuilderClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedStoreClientForNtxBuilder::new(InterceptedService::new( channel, - OtelInterceptor, - ) + interceptor, + ))) } } -impl GrpcClientBuilder for StoreRpc { - type Service = StoreRpcClient; - - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::rpc_store::rpc_client::RpcClient::with_interceptor(channel, OtelInterceptor) +impl GrpcClient for StoreBlockProducerClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedStoreClientForBlockProducer::new(InterceptedService::new( + channel, + interceptor, + ))) } } -impl GrpcClientBuilder for RemoteProverProxy { - type Service = RemoteProverProxyStatusClient; +impl GrpcClient for StoreRpcClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedStoreClientForRpc::new(InterceptedService::new(channel, interceptor))) + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::remote_prover::proxy_status_api_client::ProxyStatusApiClient::with_interceptor( - channel, - OtelInterceptor, - ) +impl GrpcClient for RemoteProverProxyStatusClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedProxyStatusClient::new(InterceptedService::new(channel, interceptor))) } } -impl GrpcClientBuilder for RemoteProverClient { - type Service = RemoteProverClient; +impl GrpcClient for RemoteProverClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedProverClient::new(InterceptedService::new(channel, interceptor))) + } +} - fn with_interceptor(channel: Channel, _config: &ClientConfig) -> Self::Service { - generated::remote_prover::api_client::ApiClient::with_interceptor(channel, OtelInterceptor) +impl GrpcClient for ValidatorClient { + fn with_interceptor(channel: Channel, interceptor: Interceptor) -> Self { + Self(GeneratedValidatorClient::new(InterceptedService::new(channel, interceptor))) } } @@ -251,17 +326,20 @@ impl GrpcClientBuilder for RemoteProverClient { /// /// Usage example: /// -/// ```rust,no_run -/// use miden_node_proto::clients::{Builder, WantsTls, Rpc, RpcClient}; -/// use std::time::Duration; +/// ```rust +/// # use miden_node_proto::clients::{Builder, WantsTls, RpcClient}; +/// # use url::Url; +/// # use std::time::Duration; /// /// # async fn example() -> anyhow::Result<()> { -/// let client: RpcClient = Builder::new("https://rpc.example.com:8080")? -/// .with_tls()? 
// or `.without_tls()`
+/// let url = Url::parse("https://rpc.example.com:8080")?;
+/// let client: RpcClient = Builder::new(url)
+///     .with_tls()? // or `.without_tls()`
///     .with_timeout(Duration::from_secs(5)) // or `.without_timeout()`
-///     .with_metadata_version("1.0".into()) // or `.without_metadata_version()`
+///     .with_metadata_version("1.0".into()) // or `.without_metadata_version()`
///     .without_metadata_genesis() // or `.with_metadata_genesis(genesis)`
-///     .connect::()
+///     .with_otel_context_injection() // or `.without_otel_context_injection()`
+///     .connect::()
///     .await?;
/// # Ok(())
/// # }
@@ -271,6 +349,7 @@ pub struct Builder {
     endpoint: Endpoint,
     metadata_version: Option,
     metadata_genesis: Option,
+    enable_otel: bool,
     _state: PhantomData,
 }

@@ -283,6 +362,8 @@ pub struct WantsVersion;
 #[derive(Copy, Clone, Debug)]
 pub struct WantsGenesis;
 #[derive(Copy, Clone, Debug)]
+pub struct WantsOTel;
+#[derive(Copy, Clone, Debug)]
 pub struct WantsConnection;

 impl Builder {
@@ -292,6 +373,7 @@ impl Builder {
             endpoint: self.endpoint,
             metadata_version: self.metadata_version,
             metadata_genesis: self.metadata_genesis,
+            enable_otel: self.enable_otel,
             _state: PhantomData::,
         }
     }
@@ -308,6 +390,7 @@ impl Builder {
             endpoint,
             metadata_version: None,
             metadata_genesis: None,
+            enable_otel: false,
             _state: PhantomData,
         }
     }
@@ -357,42 +440,64 @@ impl Builder {
 impl Builder {
     /// Do not include genesis commitment in request metadata.
-    pub fn without_metadata_genesis(mut self) -> Builder {
+    pub fn without_metadata_genesis(mut self) -> Builder {
         self.metadata_genesis = None;
         self.next_state()
     }

     /// Include a specific genesis commitment string in request metadata.
-    pub fn with_metadata_genesis(mut self, genesis: String) -> Builder {
+    pub fn with_metadata_genesis(mut self, genesis: String) -> Builder {
         self.metadata_genesis = Some(genesis);
         self.next_state()
     }
 }

+impl Builder {
+    /// Enables OpenTelemetry context propagation via gRPC.
+    ///
+    /// This is used by OpenTelemetry to connect traces across network boundaries. The server on
+    /// the other end must be configured to receive and use the injected trace context.
+    pub fn with_otel_context_injection(mut self) -> Builder {
+        self.enable_otel = true;
+        self.next_state()
+    }
+
+    /// Disables OpenTelemetry context propagation. This should be disabled when interfacing with
+    /// external third-party gRPC servers.
+    pub fn without_otel_context_injection(mut self) -> Builder {
+        self.enable_otel = false;
+        self.next_state()
+    }
+}
+
 impl Builder {
     /// Establish an eager connection and return a fully configured client.
-    pub async fn connect(self) -> Result
+    pub async fn connect(self) -> Result
     where
-        T: GrpcClientBuilder,
+        T: GrpcClient,
     {
         let channel = self.endpoint.connect().await?;
-        let cfg = ClientConfig {
-            metadata_version: self.metadata_version,
-            metadata_genesis: self.metadata_genesis,
-        };
-        Ok(T::with_interceptor(channel, &cfg))
+        Ok(self.connect_with_channel::(channel))
     }

     /// Establish a lazy connection and return a client that will connect on first use.
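// The builder above uses the typestate pattern: each configuration step
// consumes `self` and returns the builder parameterized by the next marker
// state, so `connect` only becomes callable once every decision (TLS,
// timeout, metadata, OTel) has been made explicitly. A compact,
// self-contained sketch with two steps (marker names simplified):
use std::marker::PhantomData;

struct WantsTls;
struct WantsOtel;
struct Ready;

struct Builder<S> {
    tls: bool,
    otel: bool,
    _state: PhantomData<S>,
}

impl Builder<WantsTls> {
    fn new() -> Self {
        Builder { tls: false, otel: false, _state: PhantomData }
    }

    fn with_tls(self) -> Builder<WantsOtel> {
        Builder { tls: true, otel: self.otel, _state: PhantomData }
    }
}

impl Builder<WantsOtel> {
    fn with_otel(self) -> Builder<Ready> {
        Builder { tls: self.tls, otel: true, _state: PhantomData }
    }
}

impl Builder<Ready> {
    // Only reachable after both prior steps; skipping one is a compile error.
    fn connect(self) -> (bool, bool) {
        (self.tls, self.otel)
    }
}

fn main() {
    // `Builder::new().connect()` would not compile: the marker states
    // enforce the call order at the type level.
    assert_eq!(Builder::new().with_tls().with_otel().connect(), (true, true));
}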
- pub fn connect_lazy(self) -> T::Service + pub fn connect_lazy(self) -> T where - T: GrpcClientBuilder, + T: GrpcClient, { let channel = self.endpoint.connect_lazy(); - let cfg = ClientConfig { - metadata_version: self.metadata_version, - metadata_genesis: self.metadata_genesis, - }; - T::with_interceptor(channel, &cfg) + self.connect_with_channel::(channel) + } + + fn connect_with_channel(self, channel: Channel) -> T + where + T: GrpcClient, + { + let interceptor = Interceptor::new( + self.enable_otel, + self.metadata_version.as_deref(), + self.metadata_genesis.as_deref(), + ); + T::with_interceptor(channel, interceptor) } } diff --git a/crates/proto/src/domain/account.rs b/crates/proto/src/domain/account.rs index 8d690803c..4330a82de 100644 --- a/crates/proto/src/domain/account.rs +++ b/crates/proto/src/domain/account.rs @@ -1,20 +1,23 @@ use std::fmt::{Debug, Display, Formatter}; use miden_node_utils::formatting::format_opt; -use miden_objects::Word; -use miden_objects::account::{ +use miden_protocol::Word; +use miden_protocol::account::{ Account, AccountHeader, AccountId, AccountStorageHeader, StorageMap, + StorageSlotHeader, + StorageSlotName, StorageSlotType, }; -use miden_objects::asset::{Asset, AssetVault}; -use miden_objects::block::{AccountWitness, BlockNumber}; -use miden_objects::crypto::merkle::SparseMerklePath; -use miden_objects::note::{NoteExecutionMode, NoteTag}; -use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::asset::{Asset, AssetVault}; +use miden_protocol::block::BlockNumber; +use miden_protocol::block::account_tree::AccountWitness; +use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::note::{NoteExecutionMode, NoteTag}; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; use thiserror::Error; use super::try_convert; @@ -96,11 +99,35 @@ impl From<&AccountInfo> for proto::account::AccountDetails { fn from(AccountInfo { summary, details }: &AccountInfo) -> Self { Self { summary: Some(summary.into()), - details: details.as_ref().map(miden_objects::utils::Serializable::to_bytes), + details: details.as_ref().map(Serializable::to_bytes), } } } +// ACCOUNT STORAGE HEADER +//================================================================================================ + +impl TryFrom for AccountStorageHeader { + type Error = ConversionError; + + fn try_from(value: proto::account::AccountStorageHeader) -> Result { + let proto::account::AccountStorageHeader { slots } = value; + + let slot_headers = slots + .into_iter() + .map(|slot| { + let slot_name = StorageSlotName::new(slot.slot_name)?; + let slot_type = storage_slot_type_from_raw(slot.slot_type)?; + let commitment = + slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?; + Ok(StorageSlotHeader::new(slot_name, slot_type, commitment)) + }) + .collect::, ConversionError>>()?; + + Ok(AccountStorageHeader::new(slot_headers)?) 
+ } +} + // ACCOUNT PROOF REQUEST // ================================================================================================ @@ -112,14 +139,14 @@ pub struct AccountProofRequest { pub details: Option, } -impl TryFrom for AccountProofRequest { +impl TryFrom for AccountProofRequest { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountProofRequest) -> Result { - let proto::rpc_store::AccountProofRequest { account_id, block_num, details } = value; + fn try_from(value: proto::rpc::AccountProofRequest) -> Result { + let proto::rpc::AccountProofRequest { account_id, block_num, details } = value; let account_id = account_id - .ok_or(proto::rpc_store::AccountProofRequest::missing_field(stringify!(account_id)))? + .ok_or(proto::rpc::AccountProofRequest::missing_field(stringify!(account_id)))? .try_into()?; let block_num = block_num.map(Into::into); @@ -136,15 +163,13 @@ pub struct AccountDetailRequest { pub storage_requests: Vec, } -impl TryFrom - for AccountDetailRequest -{ +impl TryFrom for AccountDetailRequest { type Error = ConversionError; fn try_from( - value: proto::rpc_store::account_proof_request::AccountDetailRequest, + value: proto::rpc::account_proof_request::AccountDetailRequest, ) -> Result { - let proto::rpc_store::account_proof_request::AccountDetailRequest { + let proto::rpc::account_proof_request::AccountDetailRequest { code_commitment, asset_vault_commitment, storage_maps, @@ -162,100 +187,29 @@ impl TryFrom } } -impl TryFrom for AccountStorageHeader { - type Error = ConversionError; - - fn try_from(value: proto::account::AccountStorageHeader) -> Result { - let proto::account::AccountStorageHeader { slots } = value; - - let items = slots - .into_iter() - .map(|slot| { - let slot_type = storage_slot_type_from_raw(slot.slot_type)?; - let commitment = - slot.commitment.ok_or(ConversionError::NotAValidFelt)?.try_into()?; - Ok((slot_type, commitment)) - }) - .collect::, ConversionError>>()?; - - Ok(AccountStorageHeader::new(items)) - } -} - -impl TryFrom - for AccountStorageMapDetails -{ - type Error = ConversionError; - - fn try_from( - value: proto::rpc_store::account_storage_details::AccountStorageMapDetails, - ) -> Result { - let proto::rpc_store::account_storage_details::AccountStorageMapDetails { - slot_index, - too_many_entries, - entries, - } = value; - - let slot_index = slot_index.try_into().map_err(ConversionError::TryFromIntError)?; - - // Extract map_entries from the MapEntries message - let map_entries = if let Some(entries) = entries { - entries - .entries - .into_iter() - .map(|entry| { - let key = entry - .key - .ok_or(proto::rpc_store::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry::missing_field( - stringify!(key), - ))? - .try_into()?; - let value = entry - .value - .ok_or(proto::rpc_store::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry::missing_field( - stringify!(value), - ))? - .try_into()?; - Ok((key, value)) - }) - .collect::, ConversionError>>()? 
- } else { - Vec::new() - }; - - Ok(Self { - slot_index, - too_many_entries, - map_entries, - }) - } -} - #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapRequest { - pub slot_index: u8, + pub slot_name: StorageSlotName, pub slot_data: SlotData, } -impl - TryFrom< - proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest, - > for StorageMapRequest +impl TryFrom + for StorageMapRequest { type Error = ConversionError; fn try_from( - value: proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest, + value: proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest, ) -> Result { - let proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest { - slot_index, + let proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest { + slot_name, slot_data, } = value; - let slot_index = slot_index.try_into()?; - let slot_data = slot_data.ok_or(proto::rpc_store::account_proof_request::account_detail_request::StorageMapDetailRequest::missing_field(stringify!(slot_data)))?.try_into()?; + let slot_name = StorageSlotName::new(slot_name)?; + let slot_data = slot_data.ok_or(proto::rpc::account_proof_request::account_detail_request::StorageMapDetailRequest::missing_field(stringify!(slot_data)))?.try_into()?; - Ok(StorageMapRequest { slot_index, slot_data }) + Ok(StorageMapRequest { slot_name, slot_data }) } } @@ -265,13 +219,13 @@ pub enum SlotData { MapKeys(Vec), } -impl TryFrom +impl TryFrom for SlotData { type Error = ConversionError; - fn try_from(value: proto::rpc_store::account_proof_request::account_detail_request::storage_map_detail_request::SlotData) -> Result { - use proto::rpc_store::account_proof_request::account_detail_request::storage_map_detail_request::SlotData as ProtoSlotData; + fn try_from(value: proto::rpc::account_proof_request::account_detail_request::storage_map_detail_request::SlotData) -> Result { + use proto::rpc::account_proof_request::account_detail_request::storage_map_detail_request::SlotData as ProtoSlotData; Ok(match value { ProtoSlotData::AllEntries(true) => SlotData::All, @@ -339,9 +293,10 @@ impl From for proto::account::AccountStorageHeader { fn from(value: AccountStorageHeader) -> Self { let slots = value .slots() - .map(|(slot_type, slot_value)| proto::account::account_storage_header::StorageSlot { - slot_type: storage_slot_type_to_raw(*slot_type), - commitment: Some(proto::primitives::Digest::from(*slot_value)), + .map(|slot_header| proto::account::account_storage_header::StorageSlot { + slot_name: slot_header.name().to_string(), + slot_type: storage_slot_type_to_raw(slot_header.slot_type()), + commitment: Some(proto::primitives::Digest::from(slot_header.value())), }) .collect(); @@ -349,133 +304,212 @@ impl From for proto::account::AccountStorageHeader { } } +// ACCOUNT VAULT DETAILS +//================================================================================================ + +/// Account vault details +/// +/// When an account contains a large number of assets (> +/// [`AccountVaultDetails::MAX_RETURN_ENTRIES`]), including all assets in a single RPC response +/// creates performance issues. In such cases, the `LimitExceeded` variant indicates to the client +/// to use the `SyncAccountVault` endpoint instead. #[derive(Debug, Clone, PartialEq, Eq)] -pub struct AccountVaultDetails { - pub too_many_assets: bool, - pub assets: Vec, +pub enum AccountVaultDetails { + /// The vault has too many assets to return inline. 
+ /// Clients must use `SyncAccountVault` endpoint instead. + LimitExceeded, + + /// The assets in the vault (up to `MAX_RETURN_ENTRIES`). + Assets(Vec), } + impl AccountVaultDetails { - const MAX_RETURN_ENTRIES: usize = 1000; + /// Maximum number of vault entries that can be returned in a single response. + /// Accounts with more assets will have `LimitExceeded` variant. + pub const MAX_RETURN_ENTRIES: usize = 1000; pub fn new(vault: &AssetVault) -> Self { if vault.assets().nth(Self::MAX_RETURN_ENTRIES).is_some() { - Self::too_many() + Self::LimitExceeded } else { - Self { - too_many_assets: false, - assets: Vec::from_iter(vault.assets()), - } + Self::Assets(Vec::from_iter(vault.assets())) } } pub fn empty() -> Self { - Self { - too_many_assets: false, - assets: Vec::new(), - } + Self::Assets(Vec::new()) } - fn too_many() -> Self { - Self { - too_many_assets: true, - assets: Vec::new(), + /// Creates `AccountVaultDetails` from a list of assets. + pub fn from_assets(assets: Vec) -> Self { + if assets.len() > Self::MAX_RETURN_ENTRIES { + Self::LimitExceeded + } else { + Self::Assets(assets) } } } -impl TryFrom for AccountVaultDetails { +impl TryFrom for AccountVaultDetails { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountVaultDetails) -> Result { - let proto::rpc_store::AccountVaultDetails { too_many_assets, assets } = value; + fn try_from(value: proto::rpc::AccountVaultDetails) -> Result { + let proto::rpc::AccountVaultDetails { too_many_assets, assets } = value; - let assets = - Result::, ConversionError>::from_iter(assets.into_iter().map(|asset| { - let asset = asset - .asset - .ok_or(proto::primitives::Asset::missing_field(stringify!(asset)))?; - let asset = Word::try_from(asset)?; - Asset::try_from(asset).map_err(ConversionError::AssetError) - }))?; - Ok(Self { too_many_assets, assets }) + if too_many_assets { + Ok(Self::LimitExceeded) + } else { + let parsed_assets = + Result::, ConversionError>::from_iter(assets.into_iter().map(|asset| { + let asset = asset + .asset + .ok_or(proto::primitives::Asset::missing_field(stringify!(asset)))?; + let asset = Word::try_from(asset)?; + Asset::try_from(asset).map_err(ConversionError::AssetError) + }))?; + Ok(Self::Assets(parsed_assets)) + } } } -impl From for proto::rpc_store::AccountVaultDetails { +impl From for proto::rpc::AccountVaultDetails { fn from(value: AccountVaultDetails) -> Self { - let AccountVaultDetails { too_many_assets, assets } = value; - - Self { - too_many_assets, - assets: Vec::from_iter(assets.into_iter().map(|asset| proto::primitives::Asset { - asset: Some(proto::primitives::Digest::from(Word::from(asset))), - })), + match value { + AccountVaultDetails::LimitExceeded => Self { + too_many_assets: true, + assets: Vec::new(), + }, + AccountVaultDetails::Assets(assets) => Self { + too_many_assets: false, + assets: Vec::from_iter(assets.into_iter().map(|asset| proto::primitives::Asset { + asset: Some(proto::primitives::Digest::from(Word::from(asset))), + })), + }, } } } +// ACCOUNT STORAGE MAP DETAILS +//================================================================================================ + +/// Details about an account storage map slot. #[derive(Debug, Clone, PartialEq, Eq)] pub struct AccountStorageMapDetails { - pub slot_index: u8, - pub too_many_entries: bool, - pub map_entries: Vec<(Word, Word)>, + pub slot_name: StorageSlotName, + pub entries: StorageMapEntries, +} + +/// Storage map entries for an account storage slot. 
+/// +/// When a storage map contains many entries (> [`AccountStorageMapDetails::MAX_RETURN_ENTRIES`]), +/// returning all entries in a single RPC response creates performance issues. In such cases, +/// the `LimitExceeded` variant indicates to the client to use the `SyncStorageMaps` endpoint +/// instead. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum StorageMapEntries { + /// The map has too many entries to return inline. + /// Clients must use `SyncStorageMaps` endpoint instead. + LimitExceeded, + + /// The storage map entries (key-value pairs), up to `MAX_RETURN_ENTRIES`. + /// TODO: For partial responses, also include Merkle proofs and inner SMT nodes. + Entries(Vec<(Word, Word)>), } impl AccountStorageMapDetails { - const MAX_RETURN_ENTRIES: usize = 1000; + /// Maximum number of storage map entries that can be returned in a single response. + pub const MAX_RETURN_ENTRIES: usize = 1000; - pub fn new(slot_index: u8, slot_data: SlotData, storage_map: &StorageMap) -> Self { + pub fn new(slot_name: StorageSlotName, slot_data: SlotData, storage_map: &StorageMap) -> Self { match slot_data { - SlotData::All => Self::from_all_entries(slot_index, storage_map), - SlotData::MapKeys(keys) => Self::from_specific_keys(slot_index, &keys[..], storage_map), + SlotData::All => Self::from_all_entries(slot_name, storage_map), + SlotData::MapKeys(keys) => Self::from_specific_keys(slot_name, &keys[..], storage_map), } } - fn from_all_entries(slot_index: u8, storage_map: &StorageMap) -> Self { + fn from_all_entries(slot_name: StorageSlotName, storage_map: &StorageMap) -> Self { if storage_map.num_entries() > Self::MAX_RETURN_ENTRIES { - Self::too_many_entries(slot_index) + Self { + slot_name, + entries: StorageMapEntries::LimitExceeded, + } } else { let map_entries = Vec::from_iter(storage_map.entries().map(|(k, v)| (*k, *v))); Self { - slot_index, - too_many_entries: false, - map_entries, + slot_name, + entries: StorageMapEntries::Entries(map_entries), } } } - fn from_specific_keys(slot_index: u8, keys: &[Word], storage_map: &StorageMap) -> Self { + fn from_specific_keys( + slot_name: StorageSlotName, + keys: &[Word], + storage_map: &StorageMap, + ) -> Self { if keys.len() > Self::MAX_RETURN_ENTRIES { - Self::too_many_entries(slot_index) + Self { + slot_name, + entries: StorageMapEntries::LimitExceeded, + } } else { // TODO For now, we return all entries instead of specific keys with proofs - Self::from_all_entries(slot_index, storage_map) + Self::from_all_entries(slot_name, storage_map) } } +} - pub fn too_many_entries(slot_index: u8) -> Self { - Self { - slot_index, - too_many_entries: true, - map_entries: Vec::new(), +impl From + for proto::rpc::account_storage_details::AccountStorageMapDetails +{ + fn from(value: AccountStorageMapDetails) -> Self { + use proto::rpc::account_storage_details::account_storage_map_details; + + let AccountStorageMapDetails { slot_name, entries } = value; + + match entries { + StorageMapEntries::LimitExceeded => Self { + slot_name: slot_name.to_string(), + too_many_entries: true, + entries: Some(account_storage_map_details::MapEntries { entries: Vec::new() }), + }, + StorageMapEntries::Entries(map_entries) => { + let entries = Some(account_storage_map_details::MapEntries { + entries: Vec::from_iter(map_entries.into_iter().map(|(key, value)| { + account_storage_map_details::map_entries::StorageMapEntry { + key: Some(key.into()), + value: Some(value.into()), + } + })), + }); + + Self { + slot_name: slot_name.to_string(), + too_many_entries: false, + entries, + } + }, } } } 
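// A self-contained sketch of the "inline vs. overflow" modeling introduced
// above: the domain side is an enum, while the wire side keeps a boolean flag
// plus a possibly-empty list. `WireVault` is a stand-in for the generated
// proto message, with `u64` in place of real assets.
#[derive(Debug, PartialEq)]
enum VaultDetails {
    LimitExceeded,
    Assets(Vec<u64>),
}

struct WireVault {
    too_many_assets: bool,
    assets: Vec<u64>,
}

impl From<VaultDetails> for WireVault {
    fn from(value: VaultDetails) -> Self {
        match value {
            VaultDetails::LimitExceeded => Self { too_many_assets: true, assets: Vec::new() },
            VaultDetails::Assets(assets) => Self { too_many_assets: false, assets },
        }
    }
}

impl From<WireVault> for VaultDetails {
    fn from(value: WireVault) -> Self {
        // The flag wins: when it is set, any asset payload is ignored,
        // matching the conversions above.
        if value.too_many_assets {
            Self::LimitExceeded
        } else {
            Self::Assets(value.assets)
        }
    }
}

fn main() {
    let wire = WireVault::from(VaultDetails::Assets(vec![7]));
    assert_eq!(VaultDetails::from(wire), VaultDetails::Assets(vec![7]));
}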
+// ACCOUNT STORAGE DETAILS
+//================================================================================================
+
 #[derive(Debug, Clone, PartialEq, Eq)]
 pub struct AccountStorageDetails {
     pub header: AccountStorageHeader,
     pub map_details: Vec,
 }

-impl TryFrom for AccountStorageDetails {
+impl TryFrom for AccountStorageDetails {
     type Error = ConversionError;

-    fn try_from(value: proto::rpc_store::AccountStorageDetails) -> Result {
-        let proto::rpc_store::AccountStorageDetails { header, map_details } = value;
+    fn try_from(value: proto::rpc::AccountStorageDetails) -> Result {
+        let proto::rpc::AccountStorageDetails { header, map_details } = value;
         let header = header
-            .ok_or(proto::rpc_store::AccountStorageDetails::missing_field(stringify!(header)))?
+            .ok_or(proto::rpc::AccountStorageDetails::missing_field(stringify!(header)))?
             .try_into()?;
         let map_details = try_convert(map_details).collect::, _>>()?;
@@ -484,7 +518,7 @@ impl TryFrom for AccountStorageDetails
     }
 }

-impl From for proto::rpc_store::AccountStorageDetails {
+impl From for proto::rpc::AccountStorageDetails {
     fn from(value: AccountStorageDetails) -> Self {
         let AccountStorageDetails { header, map_details } = value;
@@ -497,27 +531,68 @@ impl From for proto::rpc_store::AccountStorageDetails {
 const fn storage_slot_type_from_raw(slot_type: u32) -> Result {
     Ok(match slot_type {
-        0 => StorageSlotType::Map,
-        1 => StorageSlotType::Value,
+        0 => StorageSlotType::Value,
+        1 => StorageSlotType::Map,
         _ => return Err(ConversionError::EnumDiscriminantOutOfRange),
     })
 }

 const fn storage_slot_type_to_raw(slot_type: StorageSlotType) -> u32 {
     match slot_type {
-        StorageSlotType::Map => 0,
-        StorageSlotType::Value => 1,
+        StorageSlotType::Value => 0,
+        StorageSlotType::Map => 1,
     }
 }

-/// Represents account details returned in response to an account proof request.
-pub struct AccountDetails {
-    pub account_header: AccountHeader,
-    pub account_code: Option>,
-    pub vault_details: AccountVaultDetails,
-    pub storage_details: AccountStorageDetails,
+impl TryFrom
+    for AccountStorageMapDetails
+{
+    type Error = ConversionError;
+
+    fn try_from(
+        value: proto::rpc::account_storage_details::AccountStorageMapDetails,
+    ) -> Result {
+        use proto::rpc::account_storage_details::account_storage_map_details::map_entries::StorageMapEntry;
+        let proto::rpc::account_storage_details::AccountStorageMapDetails {
+            slot_name,
+            too_many_entries,
+            entries,
+        } = value;
+
+        let slot_name = StorageSlotName::new(slot_name)?;
+
+        let entries = if too_many_entries {
+            StorageMapEntries::LimitExceeded
+        } else {
+            let map_entries = if let Some(entries) = entries {
+                entries
+                    .entries
+                    .into_iter()
+                    .map(|entry| {
+                        let key = entry
+                            .key
+                            .ok_or(StorageMapEntry::missing_field(stringify!(key)))?
+                            .try_into()?;
+                        let value = entry
+                            .value
+                            .ok_or(StorageMapEntry::missing_field(stringify!(value)))?
+                            .try_into()?;
+                        Ok((key, value))
+                    })
+                    .collect::, ConversionError>>()?
+            } else {
+                Vec::new()
+            };
+            StorageMapEntries::Entries(map_entries)
+        };
+
+        Ok(Self { slot_name, entries })
+    }
 }

+// ACCOUNT PROOF RESPONSE
+//================================================================================================
+
 /// Represents the response to an account proof request.
pub struct AccountProofResponse { pub block_num: BlockNumber, @@ -525,18 +600,18 @@ pub struct AccountProofResponse { pub details: Option, } -impl TryFrom for AccountProofResponse { +impl TryFrom for AccountProofResponse { type Error = ConversionError; - fn try_from(value: proto::rpc_store::AccountProofResponse) -> Result { - let proto::rpc_store::AccountProofResponse { block_num, witness, details } = value; + fn try_from(value: proto::rpc::AccountProofResponse) -> Result { + let proto::rpc::AccountProofResponse { block_num, witness, details } = value; let block_num = block_num - .ok_or(proto::rpc_store::AccountProofResponse::missing_field(stringify!(block_num)))? + .ok_or(proto::rpc::AccountProofResponse::missing_field(stringify!(block_num)))? .into(); let witness = witness - .ok_or(proto::rpc_store::AccountProofResponse::missing_field(stringify!(witness)))? + .ok_or(proto::rpc::AccountProofResponse::missing_field(stringify!(witness)))? .try_into()?; let details = details.map(TryFrom::try_from).transpose()?; @@ -545,7 +620,7 @@ impl TryFrom for AccountProofResponse { } } -impl From for proto::rpc_store::AccountProofResponse { +impl From for proto::rpc::AccountProofResponse { fn from(value: AccountProofResponse) -> Self { let AccountProofResponse { block_num, witness, details } = value; @@ -557,13 +632,24 @@ impl From for proto::rpc_store::AccountProofResponse { } } -impl TryFrom for AccountDetails { +// ACCOUNT DETAILS +//================================================================================================ + +/// Represents account details returned in response to an account proof request. +pub struct AccountDetails { + pub account_header: AccountHeader, + pub account_code: Option>, + pub vault_details: AccountVaultDetails, + pub storage_details: AccountStorageDetails, +} + +impl TryFrom for AccountDetails { type Error = ConversionError; fn try_from( - value: proto::rpc_store::account_proof_response::AccountDetails, + value: proto::rpc::account_proof_response::AccountDetails, ) -> Result { - let proto::rpc_store::account_proof_response::AccountDetails { + let proto::rpc::account_proof_response::AccountDetails { header, code, vault_details, @@ -571,21 +657,21 @@ impl TryFrom for Accou } = value; let account_header = header - .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field( - stringify!(header), - ))? + .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!( + header + )))? .try_into()?; let storage_details = storage_details - .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field( - stringify!(storage_details), - ))? + .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!( + storage_details + )))? .try_into()?; let vault_details = vault_details - .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field( - stringify!(vault_details), - ))? + .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!( + vault_details + )))? 
-impl TryFrom<proto::rpc_store::account_proof_response::AccountDetails> for AccountDetails {
+// ACCOUNT DETAILS
+//================================================================================================
+
+/// Represents account details returned in response to an account proof request.
+pub struct AccountDetails {
+    pub account_header: AccountHeader,
+    pub account_code: Option<Vec<u8>>,
+    pub vault_details: AccountVaultDetails,
+    pub storage_details: AccountStorageDetails,
+}
+
+impl TryFrom<proto::rpc::account_proof_response::AccountDetails> for AccountDetails {
     type Error = ConversionError;

     fn try_from(
-        value: proto::rpc_store::account_proof_response::AccountDetails,
+        value: proto::rpc::account_proof_response::AccountDetails,
     ) -> Result<Self, Self::Error> {
-        let proto::rpc_store::account_proof_response::AccountDetails {
+        let proto::rpc::account_proof_response::AccountDetails {
             header,
             code,
             vault_details,
@@ -571,21 +657,21 @@ impl TryFrom<proto::rpc_store::account_proof_response::AccountDetails> for Accou
         } = value;

         let account_header = header
-            .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field(
-                stringify!(header),
-            ))?
+            .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!(
+                header
+            )))?
             .try_into()?;

         let storage_details = storage_details
-            .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field(
-                stringify!(storage_details),
-            ))?
+            .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!(
+                storage_details
+            )))?
             .try_into()?;

         let vault_details = vault_details
-            .ok_or(proto::rpc_store::account_proof_response::AccountDetails::missing_field(
-                stringify!(vault_details),
-            ))?
+            .ok_or(proto::rpc::account_proof_response::AccountDetails::missing_field(stringify!(
+                vault_details
+            )))?
             .try_into()?;

         let account_code = code;
@@ -598,7 +684,7 @@ impl TryFrom<proto::rpc_store::account_proof_response::AccountDetails> for Accou
     }
 }

-impl From<AccountDetails> for proto::rpc_store::account_proof_response::AccountDetails {
+impl From<AccountDetails> for proto::rpc::account_proof_response::AccountDetails {
     fn from(value: AccountDetails) -> Self {
         let AccountDetails {
             account_header,
@@ -621,35 +707,6 @@ impl From<AccountDetails> for proto::rpc_store::account_proof_response::AccountD
     }
 }

-impl From<AccountStorageMapDetails>
-    for proto::rpc_store::account_storage_details::AccountStorageMapDetails
-{
-    fn from(value: AccountStorageMapDetails) -> Self {
-        use proto::rpc_store::account_storage_details::account_storage_map_details;
-
-        let AccountStorageMapDetails {
-            slot_index,
-            too_many_entries,
-            map_entries,
-        } = value;
-
-        let entries = Some(account_storage_map_details::MapEntries {
-            entries: Vec::from_iter(map_entries.into_iter().map(|(key, value)| {
-                account_storage_map_details::map_entries::StorageMapEntry {
-                    key: Some(key.into()),
-                    value: Some(value.into()),
-                }
-            })),
-        });
-
-        Self {
-            slot_index: u32::from(slot_index),
-            too_many_entries,
-            entries,
-        }
-    }
-}
-
 // ACCOUNT WITNESS
 // ================================================================================================

@@ -770,24 +827,22 @@ impl Display for AccountState {
     }
 }

-impl TryFrom<proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord>
-    for AccountState
-{
+impl TryFrom<proto::store::transaction_inputs::AccountTransactionInputRecord> for AccountState {
     type Error = ConversionError;

     fn try_from(
-        from: proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord,
+        from: proto::store::transaction_inputs::AccountTransactionInputRecord,
     ) -> Result<Self, Self::Error> {
         let account_id = from
             .account_id
-            .ok_or(proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord::missing_field(
+            .ok_or(proto::store::transaction_inputs::AccountTransactionInputRecord::missing_field(
                 stringify!(account_id),
             ))?
             .try_into()?;

         let account_commitment = from
             .account_commitment
-            .ok_or(proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord::missing_field(
+            .ok_or(proto::store::transaction_inputs::AccountTransactionInputRecord::missing_field(
                 stringify!(account_commitment),
             ))?
             .try_into()?;

@@ -804,9 +859,7 @@ impl TryFrom<proto::block_producer_store::transaction_inputs::AccountTransaction
-impl From<AccountState>
-    for proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord
-{
+impl From<AccountState> for proto::store::transaction_inputs::AccountTransactionInputRecord {
     fn from(from: AccountState) -> Self {
         Self {
             account_id: Some(from.account_id.into()),
diff --git a/crates/proto/src/domain/batch.rs b/crates/proto/src/domain/batch.rs
index 718e74463..1cccf6ab8 100644
--- a/crates/proto/src/domain/batch.rs
+++ b/crates/proto/src/domain/batch.rs
@@ -1,9 +1,9 @@
 use std::collections::BTreeMap;

-use miden_objects::block::BlockHeader;
-use miden_objects::note::{NoteId, NoteInclusionProof};
-use miden_objects::transaction::PartialBlockchain;
-use miden_objects::utils::{Deserializable, Serializable};
+use miden_protocol::block::BlockHeader;
+use miden_protocol::note::{NoteId, NoteInclusionProof};
+use miden_protocol::transaction::PartialBlockchain;
+use miden_protocol::utils::{Deserializable, Serializable};

 use crate::errors::{ConversionError, MissingFieldHelper};
 use crate::generated as proto;
@@ -16,7 +16,7 @@ pub struct BatchInputs {
     pub partial_block_chain: PartialBlockchain,
 }

-impl From<BatchInputs> for proto::block_producer_store::BatchInputs {
+impl From<BatchInputs> for proto::store::BatchInputs {
     fn from(inputs: BatchInputs) -> Self {
         Self {
             batch_reference_block_header: Some(inputs.batch_reference_block_header.into()),
@@ -26,16 +26,14 @@ impl From<BatchInputs> for proto::block_producer_store::BatchInputs {
     }
 }

-impl TryFrom<proto::block_producer_store::BatchInputs> for BatchInputs {
+impl TryFrom<proto::store::BatchInputs> for BatchInputs {
     type Error = ConversionError;

-    fn try_from(
-        response: proto::block_producer_store::BatchInputs,
-    ) -> Result<Self, Self::Error> {
+    fn try_from(response: proto::store::BatchInputs) -> Result<Self, Self::Error> {
         let result = Self {
             batch_reference_block_header: response
                 .batch_reference_block_header
-                .ok_or(proto::block_producer_store::BatchInputs::missing_field("block_header"))?
+                .ok_or(proto::store::BatchInputs::missing_field("block_header"))?
                 .try_into()?,
             note_proofs: response
                 .note_proofs
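`BatchInputs` carries the partial blockchain as opaque serialized bytes rather than a structured message, so decoding it is a separate step from the prost conversion. A minimal sketch, assuming the field layout above and the `Deserializable` import already present in this file:

fn decode_partial_chain(
    inputs: &proto::store::BatchInputs,
) -> Result<PartialBlockchain, ConversionError> {
    // The bytes were produced by the matching Serializable implementation.
    PartialBlockchain::read_from_bytes(&inputs.partial_block_chain)
        .map_err(|source| ConversionError::deserialization_error("PartialBlockchain", source))
}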
diff --git a/crates/proto/src/domain/block.rs b/crates/proto/src/domain/block.rs
index a64427d1a..aa94f306d 100644
--- a/crates/proto/src/domain/block.rs
+++ b/crates/proto/src/domain/block.rs
@@ -1,17 +1,13 @@
 use std::collections::BTreeMap;
 use std::ops::RangeInclusive;

-use miden_objects::account::AccountId;
-use miden_objects::block::{
-    BlockHeader,
-    BlockInputs,
-    BlockNumber,
-    FeeParameters,
-    NullifierWitness,
-};
-use miden_objects::note::{NoteId, NoteInclusionProof};
-use miden_objects::transaction::PartialBlockchain;
-use miden_objects::utils::{Deserializable, Serializable};
+use miden_protocol::account::AccountId;
+use miden_protocol::block::nullifier_tree::NullifierWitness;
+use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, FeeParameters};
+use miden_protocol::crypto::dsa::ecdsa_k256_keccak::{PublicKey, Signature};
+use miden_protocol::note::{NoteId, NoteInclusionProof};
+use miden_protocol::transaction::PartialBlockchain;
+use miden_protocol::utils::{Deserializable, Serializable};
 use thiserror::Error;

 use crate::errors::{ConversionError, MissingFieldHelper};
@@ -47,7 +43,7 @@ impl From<&BlockHeader> for proto::blockchain::BlockHeader {
             note_root: Some(header.note_root().into()),
             tx_commitment: Some(header.tx_commitment().into()),
             tx_kernel_commitment: Some(header.tx_kernel_commitment().into()),
-            proof_commitment: Some(header.proof_commitment().into()),
+            validator_key: Some(header.validator_key().into()),
             timestamp: header.timestamp(),
             fee_parameters: Some(header.fee_parameters().into()),
         }
@@ -108,8 +104,8 @@ impl TryFrom<proto::blockchain::BlockHeader> for BlockHeader {
                 )))?
                 .try_into()?,
             value
-                .proof_commitment
-                .ok_or(proto::blockchain::BlockHeader::missing_field(stringify!(proof_commitment)))?
+                .validator_key
+                .ok_or(proto::blockchain::BlockHeader::missing_field(stringify!(validator_key)))?
                 .try_into()?,
             FeeParameters::try_from(value.fee_parameters.ok_or(
                 proto::blockchain::FeeParameters::missing_field(stringify!(fee_parameters)),
@@ -122,7 +118,7 @@ impl TryFrom<proto::blockchain::BlockHeader> for BlockHeader {
 // BLOCK INPUTS
 // ================================================================================================

-impl From<BlockInputs> for proto::block_producer_store::BlockInputs {
+impl From<BlockInputs> for proto::store::BlockInputs {
     fn from(inputs: BlockInputs) -> Self {
         let (
             prev_block_header,
@@ -132,7 +128,7 @@ impl From<BlockInputs> for proto::block_producer_store::BlockInputs {
             unauthenticated_note_proofs,
         ) = inputs.into_parts();

-        proto::block_producer_store::BlockInputs {
+        proto::store::BlockInputs {
             latest_block_header: Some(prev_block_header.into()),
             account_witnesses: account_witnesses
                 .into_iter()
@@ -154,10 +150,10 @@ impl From<BlockInputs> for proto::block_producer_store::BlockInputs {
     }
 }

-impl TryFrom<proto::block_producer_store::BlockInputs> for BlockInputs {
+impl TryFrom<proto::store::BlockInputs> for BlockInputs {
     type Error = ConversionError;

-    fn try_from(response: proto::block_producer_store::BlockInputs) -> Result<Self, Self::Error> {
+    fn try_from(response: proto::store::BlockInputs) -> Result<Self, Self::Error> {
         let latest_block_header: BlockHeader = response
             .latest_block_header
             .ok_or(proto::blockchain::BlockHeader::missing_field("block_header"))?
@@ -202,6 +198,52 @@ impl TryFrom<proto::block_producer_store::BlockInputs> for BlockInputs {
     }
 }

+// PUBLIC KEY
+// ================================================================================================
+
+impl TryFrom<proto::blockchain::ValidatorPublicKey> for PublicKey {
+    type Error = ConversionError;
+    fn try_from(public_key: proto::blockchain::ValidatorPublicKey) -> Result<Self, Self::Error> {
+        PublicKey::read_from_bytes(&public_key.validator_key)
+            .map_err(|source| ConversionError::deserialization_error("PublicKey", source))
+    }
+}
+
+impl From<PublicKey> for proto::blockchain::ValidatorPublicKey {
+    fn from(value: PublicKey) -> Self {
+        Self::from(&value)
+    }
+}
+
+impl From<&PublicKey> for proto::blockchain::ValidatorPublicKey {
+    fn from(value: &PublicKey) -> Self {
+        Self { validator_key: value.to_bytes() }
+    }
+}
+
+// SIGNATURE
+// ================================================================================================
+
+impl TryFrom<proto::blockchain::BlockSignature> for Signature {
+    type Error = ConversionError;
+    fn try_from(signature: proto::blockchain::BlockSignature) -> Result<Self, Self::Error> {
+        Signature::read_from_bytes(&signature.signature)
+            .map_err(|source| ConversionError::deserialization_error("Signature", source))
+    }
+}
+
+impl From<Signature> for proto::blockchain::BlockSignature {
+    fn from(value: Signature) -> Self {
+        Self::from(&value)
+    }
+}
+
+impl From<&Signature> for proto::blockchain::BlockSignature {
+    fn from(value: &Signature) -> Self {
+        Self { signature: value.to_bytes() }
+    }
+}
+
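Both the key and the signature cross the wire as `Serializable` byte blobs, so the owned and borrowed `From` impls plus the fallible decode form a closed loop. A minimal roundtrip sketch, assuming a `PublicKey` value is already at hand:

fn roundtrip_validator_key(key: &PublicKey) -> Result<PublicKey, ConversionError> {
    // Encode via the borrowed impl, then decode the bytes back into a key.
    let encoded = proto::blockchain::ValidatorPublicKey::from(key);
    encoded.try_into()
}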
 // FEE PARAMETERS
 // ================================================================================================

@@ -242,7 +284,7 @@ pub enum InvalidBlockRange {
     EmptyRange { start: BlockNumber, end: BlockNumber },
 }

-impl proto::rpc_store::BlockRange {
+impl proto::rpc::BlockRange {
     /// Converts the block range into an inclusive range, using the fallback block number if
     /// `block_to` is not specified.
     pub fn into_inclusive_range<F: Into<BlockNumber>>(
@@ -274,7 +316,7 @@ impl proto::rpc_store::BlockRange {
     }
 }

-impl From<RangeInclusive<BlockNumber>> for proto::rpc_store::BlockRange {
+impl From<RangeInclusive<BlockNumber>> for proto::rpc::BlockRange {
     fn from(range: RangeInclusive<BlockNumber>) -> Self {
         Self {
             block_from: range.start().as_u32(),
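The `From<RangeInclusive<BlockNumber>>` impl gives callers a one-liner for building range queries. A small sketch of constructing a request for blocks 10 through 20 inclusive, assuming `BlockNumber` converts from `u32` as it does elsewhere in this crate:

fn example_block_range() -> proto::rpc::BlockRange {
    // start() / end() of the inclusive range become block_from / block_to.
    proto::rpc::BlockRange::from(BlockNumber::from(10u32)..=BlockNumber::from(20u32))
}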
diff --git a/crates/proto/src/domain/digest.rs b/crates/proto/src/domain/digest.rs
index 68cfbd9b5..7be94e530 100644
--- a/crates/proto/src/domain/digest.rs
+++ b/crates/proto/src/domain/digest.rs
@@ -1,8 +1,8 @@
 use std::fmt::{Debug, Display, Formatter};

 use hex::{FromHex, ToHex};
-use miden_objects::note::NoteId;
-use miden_objects::{Felt, StarkField, Word};
+use miden_protocol::note::NoteId;
+use miden_protocol::{Felt, StarkField, Word};

 use crate::errors::ConversionError;
 use crate::generated as proto;
diff --git a/crates/proto/src/domain/mempool.rs b/crates/proto/src/domain/mempool.rs
index 6e9e56322..332cd6772 100644
--- a/crates/proto/src/domain/mempool.rs
+++ b/crates/proto/src/domain/mempool.rs
@@ -1,10 +1,10 @@
 use std::collections::HashSet;

-use miden_objects::account::delta::AccountUpdateDetails;
-use miden_objects::block::BlockHeader;
-use miden_objects::note::Nullifier;
-use miden_objects::transaction::TransactionId;
-use miden_objects::utils::{Deserializable, Serializable};
+use miden_protocol::account::delta::AccountUpdateDetails;
+use miden_protocol::block::BlockHeader;
+use miden_protocol::note::Nullifier;
+use miden_protocol::transaction::TransactionId;
+use miden_protocol::utils::{Deserializable, Serializable};

 use super::note::NetworkNote;
 use crate::errors::{ConversionError, MissingFieldHelper};
@@ -19,7 +19,8 @@ pub enum MempoolEvent {
         account_delta: Option<AccountUpdateDetails>,
     },
     BlockCommitted {
-        header: BlockHeader,
+        // Box'd as this struct is quite large and triggers clippy.
+        header: Box<BlockHeader>,
         txs: Vec<TransactionId>,
     },
     TransactionsReverted(HashSet<TransactionId>),
@@ -58,7 +59,7 @@ impl From<MempoolEvent> for proto::block_producer::MempoolEvent {
             MempoolEvent::BlockCommitted { header, txs } => {
                 proto::block_producer::mempool_event::Event::BlockCommitted(
                     proto::block_producer::mempool_event::BlockCommitted {
-                        block_header: Some(header.into()),
+                        block_header: Some(header.as_ref().into()),
                         transactions: txs.into_iter().map(Into::into).collect(),
                     },
                 )
@@ -120,6 +121,7 @@ impl TryFrom<proto::block_producer::MempoolEvent> for MempoolEvent {
                     "block_header",
                 ))?
                 .try_into()?;
+            let header = Box::new(header);
             let txs = block_committed
                 .transactions
                 .into_iter()
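Boxing the committed header keeps the `BlockCommitted` variant from inflating every `MempoolEvent`, which is what clippy's `large_enum_variant` lint flags. Consumers simply deref-move the header back out; a minimal sketch:

fn committed_tip(event: MempoolEvent) -> Option<BlockHeader> {
    match event {
        // *header moves the owned BlockHeader out of the Box.
        MempoolEvent::BlockCommitted { header, .. } => Some(*header),
        _ => None,
    }
}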
diff --git a/crates/proto/src/domain/merkle.rs b/crates/proto/src/domain/merkle.rs
index 6d3845625..ed14d523b 100644
--- a/crates/proto/src/domain/merkle.rs
+++ b/crates/proto/src/domain/merkle.rs
@@ -1,13 +1,7 @@
-use miden_objects::Word;
-use miden_objects::crypto::merkle::{
-    Forest,
-    LeafIndex,
-    MerklePath,
-    MmrDelta,
-    SmtLeaf,
-    SmtProof,
-    SparseMerklePath,
-};
+use miden_protocol::Word;
+use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta};
+use miden_protocol::crypto::merkle::smt::{LeafIndex, SmtLeaf, SmtProof};
+use miden_protocol::crypto::merkle::{MerklePath, SparseMerklePath};

 use crate::domain::{convert, try_convert};
 use crate::errors::{ConversionError, MissingFieldHelper};
diff --git a/crates/proto/src/domain/mod.rs b/crates/proto/src/domain/mod.rs
index f70c8f738..b07865553 100644
--- a/crates/proto/src/domain/mod.rs
+++ b/crates/proto/src/domain/mod.rs
@@ -6,6 +6,7 @@ pub mod mempool;
 pub mod merkle;
 pub mod note;
 pub mod nullifier;
+pub mod proof_request;
 pub mod transaction;

 // UTILITIES
diff --git a/crates/proto/src/domain/note.rs b/crates/proto/src/domain/note.rs
index a61884f69..c4065b298 100644
--- a/crates/proto/src/domain/note.rs
+++ b/crates/proto/src/domain/note.rs
@@ -1,5 +1,5 @@
-use miden_objects::crypto::merkle::SparseMerklePath;
-use miden_objects::note::{
+use miden_protocol::crypto::merkle::SparseMerklePath;
+use miden_protocol::note::{
     Note,
     NoteDetails,
     NoteExecutionHint,
@@ -11,8 +11,8 @@ use miden_objects::note::{
     NoteType,
     Nullifier,
 };
-use miden_objects::utils::{Deserializable, Serializable};
-use miden_objects::{Felt, Word};
+use miden_protocol::utils::{Deserializable, Serializable};
+use miden_protocol::{Felt, Word};
 use thiserror::Error;

 use super::account::NetworkAccountPrefix;
@@ -135,19 +135,18 @@ impl TryFrom<&proto::note::NoteInclusionInBlockProof> for (NoteId, NoteInclusion
             .clone(),
         )?;

+        let note_id = Word::try_from(
+            proof
+                .note_id
+                .as_ref()
+                .ok_or(proto::note::NoteInclusionInBlockProof::missing_field(stringify!(note_id)))?
+                .id
+                .as_ref()
+                .ok_or(proto::note::NoteId::missing_field(stringify!(id)))?,
+        )?;
+
         Ok((
-            Word::try_from(
-                proof
-                    .note_id
-                    .as_ref()
-                    .ok_or(proto::note::NoteInclusionInBlockProof::missing_field(stringify!(
-                        note_id
-                    )))?
-                    .id
-                    .as_ref()
-                    .ok_or(proto::note::NoteId::missing_field(stringify!(id)))?,
-            )?
-            .into(),
+            NoteId::from_raw(note_id),
             NoteInclusionProof::new(
                 proof.block_num.into(),
                 proof.note_index_in_block.try_into()?,
diff --git a/crates/proto/src/domain/nullifier.rs b/crates/proto/src/domain/nullifier.rs
index f511731f9..3ccdf88ba 100644
--- a/crates/proto/src/domain/nullifier.rs
+++ b/crates/proto/src/domain/nullifier.rs
@@ -1,6 +1,6 @@
-use miden_objects::Word;
-use miden_objects::crypto::merkle::SmtProof;
-use miden_objects::note::Nullifier;
+use miden_protocol::Word;
+use miden_protocol::crypto::merkle::smt::SmtProof;
+use miden_protocol::note::Nullifier;

 use crate::errors::{ConversionError, MissingFieldHelper};
 use crate::generated as proto;
@@ -28,7 +28,7 @@ impl TryFrom<proto::primitives::Digest> for Nullifier {
     fn try_from(value: proto::primitives::Digest) -> Result<Self, Self::Error> {
         let digest: Word = value.try_into()?;

-        Ok(digest.into())
+        Ok(Nullifier::from_raw(digest))
     }
 }
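`from_raw` replaces the old blanket `Into` conversions, so the unchecked digest-to-identifier cast is now explicit at every call site; `TransactionId` gets the same treatment further below. A minimal sketch of the pattern:

fn nullifier_from_digest(digest: proto::primitives::Digest) -> Result<Nullifier, ConversionError> {
    // Digest -> Word is validated; Word -> Nullifier is a raw reinterpretation.
    let word: Word = digest.try_into()?;
    Ok(Nullifier::from_raw(word))
}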
@@ -41,32 +41,30 @@ pub struct NullifierWitnessRecord {
     pub proof: SmtProof,
 }

-impl TryFrom<proto::block_producer_store::block_inputs::NullifierWitness>
-    for NullifierWitnessRecord
-{
+impl TryFrom<proto::store::block_inputs::NullifierWitness> for NullifierWitnessRecord {
     type Error = ConversionError;

     fn try_from(
-        nullifier_witness_record: proto::block_producer_store::block_inputs::NullifierWitness,
+        nullifier_witness_record: proto::store::block_inputs::NullifierWitness,
     ) -> Result<Self, Self::Error> {
         Ok(Self {
             nullifier: nullifier_witness_record
                 .nullifier
-                .ok_or(proto::block_producer_store::block_inputs::NullifierWitness::missing_field(
-                    stringify!(nullifier),
-                ))?
+                .ok_or(proto::store::block_inputs::NullifierWitness::missing_field(stringify!(
+                    nullifier
+                )))?
                 .try_into()?,
             proof: nullifier_witness_record
                 .opening
-                .ok_or(proto::block_producer_store::block_inputs::NullifierWitness::missing_field(
-                    stringify!(opening),
-                ))?
+                .ok_or(proto::store::block_inputs::NullifierWitness::missing_field(stringify!(
+                    opening
+                )))?
                 .try_into()?,
         })
     }
 }

-impl From<NullifierWitnessRecord> for proto::block_producer_store::block_inputs::NullifierWitness {
+impl From<NullifierWitnessRecord> for proto::store::block_inputs::NullifierWitness {
     fn from(value: NullifierWitnessRecord) -> Self {
         Self {
             nullifier: Some(value.nullifier.into()),
diff --git a/crates/proto/src/domain/proof_request.rs b/crates/proto/src/domain/proof_request.rs
new file mode 100644
index 000000000..f6a40d753
--- /dev/null
+++ b/crates/proto/src/domain/proof_request.rs
@@ -0,0 +1,39 @@
+// PROOF REQUEST
+// ================================================================================================
+
+use miden_protocol::batch::OrderedBatches;
+use miden_protocol::block::{BlockHeader, BlockInputs};
+use miden_protocol::utils::{
+    ByteReader,
+    ByteWriter,
+    Deserializable,
+    DeserializationError,
+    Serializable,
+};
+
+pub struct BlockProofRequest {
+    pub tx_batches: OrderedBatches,
+    pub block_header: BlockHeader,
+    pub block_inputs: BlockInputs,
+}
+
+impl Serializable for BlockProofRequest {
+    fn write_into<W: ByteWriter>(&self, target: &mut W) {
+        let Self { tx_batches, block_header, block_inputs } = self;
+        tx_batches.write_into(target);
+        block_header.write_into(target);
+        block_inputs.write_into(target);
+    }
+}
+
+impl Deserializable for BlockProofRequest {
+    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
+        let block = Self {
+            tx_batches: OrderedBatches::read_from(source)?,
+            block_header: BlockHeader::read_from(source)?,
+            block_inputs: BlockInputs::read_from(source)?,
+        };
+
+        Ok(block)
+    }
+}
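`BlockProofRequest` is pure byte-level framing: three `Serializable` fields written back to back, with no protobuf involvement. A roundtrip sketch using the `to_bytes`/`read_from_bytes` helpers those traits provide:

fn roundtrip(request: &BlockProofRequest) -> Result<BlockProofRequest, DeserializationError> {
    // Fields are read back in the exact order write_into emitted them.
    let bytes = request.to_bytes();
    BlockProofRequest::read_from_bytes(&bytes)
}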
#[error("storage slot name error")] + StorageSlotNameError(#[from] StorageSlotNameError), #[error("integer conversion error: {0}")] TryFromIntError(#[from] TryFromIntError), #[error("too much data, expected {expected}, got {got}")] @@ -38,14 +42,14 @@ pub enum ConversionError { #[error("value is not in the range 0..MODULUS")] NotAValidFelt, #[error("merkle error")] - MerkleError(#[from] miden_objects::crypto::merkle::MerkleError), + MerkleError(#[from] miden_protocol::crypto::merkle::MerkleError), #[error("field `{entity}::{field_name}` is missing")] MissingFieldInProtobufRepresentation { entity: &'static str, field_name: &'static str, }, #[error("MMR error")] - MmrError(#[from] miden_objects::crypto::merkle::MmrError), + MmrError(#[from] miden_protocol::crypto::merkle::mmr::MmrError), #[error("failed to deserialize {entity}")] DeserializationError { entity: &'static str, diff --git a/crates/proto/src/generated/account.rs b/crates/proto/src/generated/account.rs index d30e8c888..f93017b30 100644 --- a/crates/proto/src/generated/account.rs +++ b/crates/proto/src/generated/account.rs @@ -7,7 +7,7 @@ #[prost(skip_debug)] pub struct AccountId { /// 15 bytes (120 bits) encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::account::account_id::AccountId\]. + /// \[miden_protocol::account::account_id::AccountId\]. #[prost(bytes = "vec", tag = "1")] pub id: ::prost::alloc::vec::Vec, } @@ -34,13 +34,16 @@ pub struct AccountStorageHeader { /// Nested message and enum types in `AccountStorageHeader`. pub mod account_storage_header { /// A single storage slot in the account storage header. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] + #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct StorageSlot { + /// The name of the storage slot. + #[prost(string, tag = "1")] + pub slot_name: ::prost::alloc::string::String, /// The type of the storage slot. - #[prost(uint32, tag = "1")] + #[prost(uint32, tag = "2")] pub slot_type: u32, /// The commitment (Word) for this storage slot. - #[prost(message, optional, tag = "2")] + #[prost(message, optional, tag = "3")] pub commitment: ::core::option::Option, } } @@ -51,7 +54,7 @@ pub struct AccountDetails { #[prost(message, optional, tag = "1")] pub summary: ::core::option::Option, /// Account details encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::account::Account\]. + /// \[miden_protocol::account::Account\]. #[prost(bytes = "vec", optional, tag = "2")] pub details: ::core::option::Option<::prost::alloc::vec::Vec>, } diff --git a/crates/proto/src/generated/block_producer.rs b/crates/proto/src/generated/block_producer.rs index 5771d5510..9c95e6a75 100644 --- a/crates/proto/src/generated/block_producer.rs +++ b/crates/proto/src/generated/block_producer.rs @@ -1,27 +1,4 @@ // This file is @generated by prost-build. -/// Represents the status of the block producer. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockProducerStatus { - /// The block producer's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The block producer's status. - #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, -} -/// Represents the result of submitting proven transaction. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SubmitProvenTransactionResponse { - /// The node's current block height. 
- #[prost(fixed32, tag = "1")] - pub block_height: u32, -} -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SubmitProvenBatchResponse { - /// The node's current block height. - #[prost(fixed32, tag = "1")] - pub block_height: u32, -} /// Request to subscribe to mempool events. #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] pub struct MempoolSubscriptionRequest { @@ -68,7 +45,7 @@ pub mod mempool_event { /// Changes to a network account, if any. This includes creation of new network accounts. /// /// The account delta is encoded using \[winter_utils::Serializable\] implementation - /// for \[miden_objects::account::delta::AccountDelta\]. + /// for \[miden_protocol::account::delta::AccountDelta\]. #[prost(bytes = "vec", optional, tag = "4")] pub network_account_delta: ::core::option::Option<::prost::alloc::vec::Vec>, } @@ -186,7 +163,7 @@ pub mod api_client { &mut self, request: impl tonic::IntoRequest<()>, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -205,14 +182,14 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("block_producer.Api", "Status")); self.inner.unary(req, path, codec).await } - /// Submits proven transaction to the Miden network + /// Submits proven transaction to the Miden network. Returns the node's current block height. pub async fn submit_proven_transaction( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransaction, >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -244,13 +221,15 @@ pub mod api_client { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. pub async fn submit_proven_batch( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransactionBatch, >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -324,15 +303,15 @@ pub mod api_server { &self, request: tonic::Request<()>, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; - /// Submits proven transaction to the Miden network + /// Submits proven transaction to the Miden network. Returns the node's current block height. async fn submit_proven_transaction( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Submits a proven batch to the Miden network. @@ -345,11 +324,13 @@ pub mod api_server { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. async fn submit_proven_batch( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Server streaming response type for the MempoolSubscription method. 
@@ -456,7 +437,7 @@ pub mod api_server { #[allow(non_camel_case_types)] struct StatusSvc(pub Arc); impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::BlockProducerStatus; + type Response = super::super::rpc::BlockProducerStatus; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -499,7 +480,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransaction, > for SubmitProvenTransactionSvc { - type Response = super::SubmitProvenTransactionResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -547,7 +528,7 @@ pub mod api_server { > tonic::server::UnaryService< super::super::transaction::ProvenTransactionBatch, > for SubmitProvenBatchSvc { - type Response = super::SubmitProvenBatchResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, diff --git a/crates/proto/src/generated/block_producer_store.rs b/crates/proto/src/generated/block_producer_store.rs deleted file mode 100644 index 3603ca50c..000000000 --- a/crates/proto/src/generated/block_producer_store.rs +++ /dev/null @@ -1,789 +0,0 @@ -// This file is @generated by prost-build. -/// Returns data required to prove the next block. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputsRequest { - /// IDs of all accounts updated in the proposed block for which to retrieve account witnesses. - #[prost(message, repeated, tag = "1")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Nullifiers of all notes consumed by the block for which to retrieve witnesses. - /// - /// Due to note erasure it will generally not be possible to know the exact set of nullifiers - /// a block will create, unless we pre-execute note erasure. So in practice, this set of - /// nullifiers will be the set of nullifiers of all proven batches in the block, which is a - /// superset of the nullifiers the block may create. - /// - /// However, if it is known that a certain note will be erased, it would not be necessary to - /// provide a nullifier witness for it. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, - /// Array of block numbers referenced by all batches in the block. - #[prost(fixed32, repeated, tag = "4")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting block inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BlockInputs { - /// The latest block header. - #[prost(message, optional, tag = "1")] - pub latest_block_header: ::core::option::Option, - /// Proof of each requested unauthenticated note's inclusion in a block, **if it existed in - /// the store**. - #[prost(message, repeated, tag = "2")] - pub unauthenticated_note_proofs: ::prost::alloc::vec::Vec< - super::note::NoteInclusionInBlockProof, - >, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the requested blocks - /// referenced by the batches in the block. - #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, - /// The state commitments of the requested accounts and their authentication paths. 
- #[prost(message, repeated, tag = "4")] - pub account_witnesses: ::prost::alloc::vec::Vec, - /// The requested nullifiers and their authentication paths. - #[prost(message, repeated, tag = "5")] - pub nullifier_witnesses: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `BlockInputs`. -pub mod block_inputs { - /// A nullifier returned as a response to the `GetBlockInputs`. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct NullifierWitness { - /// The nullifier. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The SMT proof to verify the nullifier's inclusion in the nullifier tree. - #[prost(message, optional, tag = "2")] - pub opening: ::core::option::Option, - } -} -/// Returns the inputs for a transaction batch. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputsRequest { - /// List of unauthenticated note commitments to be queried from the database. - #[prost(message, repeated, tag = "1")] - pub note_commitments: ::prost::alloc::vec::Vec, - /// Set of block numbers referenced by transactions. - #[prost(fixed32, repeated, tag = "2")] - pub reference_blocks: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting batch inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct BatchInputs { - /// The block header that the transaction batch should reference. - #[prost(message, optional, tag = "1")] - pub batch_reference_block_header: ::core::option::Option< - super::blockchain::BlockHeader, - >, - /// Proof of each *found* unauthenticated note's inclusion in a block. - #[prost(message, repeated, tag = "2")] - pub note_proofs: ::prost::alloc::vec::Vec, - /// The serialized chain MMR which includes proofs for all blocks referenced by the - /// above note inclusion proofs as well as proofs for inclusion of the blocks referenced - /// by the transactions in the batch. - #[prost(bytes = "vec", tag = "3")] - pub partial_block_chain: ::prost::alloc::vec::Vec, -} -/// Returns data required to validate a new transaction. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputsRequest { - /// ID of the account against which a transaction is executed. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Set of nullifiers consumed by this transaction. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, - /// Set of unauthenticated note commitments to check for existence on-chain. - /// - /// These are notes which were not on-chain at the state the transaction was proven, - /// but could by now be present. - #[prost(message, repeated, tag = "3")] - pub unauthenticated_notes: ::prost::alloc::vec::Vec, -} -/// Represents the result of getting transaction inputs. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionInputs { - /// Account state proof. - #[prost(message, optional, tag = "1")] - pub account_state: ::core::option::Option< - transaction_inputs::AccountTransactionInputRecord, - >, - /// List of nullifiers that have been consumed. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec< - transaction_inputs::NullifierTransactionInputRecord, - >, - /// List of unauthenticated notes that were not found in the database. - #[prost(message, repeated, tag = "3")] - pub found_unauthenticated_notes: ::prost::alloc::vec::Vec, - /// The node's current block height. - #[prost(fixed32, tag = "4")] - pub block_height: u32, - /// Whether the account ID prefix is unique. 
Only relevant for account creation requests. - /// - /// TODO: Replace this with an error. When a general error message exists. - #[prost(bool, optional, tag = "5")] - pub new_account_id_prefix_is_unique: ::core::option::Option, -} -/// Nested message and enum types in `TransactionInputs`. -pub mod transaction_inputs { - /// An account returned as a response to the `GetTransactionInputs`. - #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] - pub struct AccountTransactionInputRecord { - /// The account ID. - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// The latest account commitment, zero commitment if the account doesn't exist. - #[prost(message, optional, tag = "2")] - pub account_commitment: ::core::option::Option, - } - /// A nullifier returned as a response to the `GetTransactionInputs`. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierTransactionInputRecord { - /// The nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// The block at which the nullifier has been consumed, zero if not consumed. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// Generated client implementations. -pub mod block_producer_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the BlockProducer component - #[derive(Debug, Clone)] - pub struct BlockProducerClient { - inner: tonic::client::Grpc, - } - impl BlockProducerClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl BlockProducerClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> BlockProducerClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - BlockProducerClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. 
- /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Applies changes of a new block to the DB and in-memory data structures. - pub async fn apply_block( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/ApplyBlock", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new("block_producer_store.BlockProducer", "ApplyBlock"), - ); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetBlockHeaderByNumber", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns data required to prove the next block. - pub async fn get_block_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetBlockInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetBlockInputs", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the inputs for a transaction batch. - pub async fn get_batch_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetBatchInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetBatchInputs", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns data required to validate a new transaction. 
- pub async fn get_transaction_inputs( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/block_producer_store.BlockProducer/GetTransactionInputs", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "block_producer_store.BlockProducer", - "GetTransactionInputs", - ), - ); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod block_producer_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with BlockProducerServer. - #[async_trait] - pub trait BlockProducer: std::marker::Send + std::marker::Sync + 'static { - /// Applies changes of a new block to the DB and in-memory data structures. - async fn apply_block( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns data required to prove the next block. - async fn get_block_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns the inputs for a transaction batch. - async fn get_batch_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result, tonic::Status>; - /// Returns data required to validate a new transaction. - async fn get_transaction_inputs( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the BlockProducer component - #[derive(Debug)] - pub struct BlockProducerServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl BlockProducerServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. 
- /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for BlockProducerServer - where - T: BlockProducer, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/block_producer_store.BlockProducer/ApplyBlock" => { - #[allow(non_camel_case_types)] - struct ApplyBlockSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for ApplyBlockSvc { - type Response = (); - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::apply_block(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = ApplyBlockSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - 
Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetBlockInputs" => { - #[allow(non_camel_case_types)] - struct GetBlockInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBlockInputsSvc { - type Response = super::BlockInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetBatchInputs" => { - #[allow(non_camel_case_types)] - struct GetBatchInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetBatchInputsSvc { - type Response = super::BatchInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_batch_inputs(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBatchInputsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/block_producer_store.BlockProducer/GetTransactionInputs" => { - #[allow(non_camel_case_types)] - struct GetTransactionInputsSvc(pub Arc); - impl< - T: BlockProducer, - > tonic::server::UnaryService - for GetTransactionInputsSvc { - type Response = super::TransactionInputs; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_transaction_inputs( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetTransactionInputsSvc(inner); - let codec = 
tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for BlockProducerServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "block_producer_store.BlockProducer"; - impl tonic::server::NamedService for BlockProducerServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/blockchain.rs b/crates/proto/src/generated/blockchain.rs index 1f1152896..69bbe2e28 100644 --- a/crates/proto/src/generated/blockchain.rs +++ b/crates/proto/src/generated/blockchain.rs @@ -3,15 +3,23 @@ #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct Block { /// Block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::block::Block\]. + /// \[miden_protocol::block::Block\]. #[prost(bytes = "vec", tag = "1")] pub block: ::prost::alloc::vec::Vec, } +/// Represents a proposed block. +#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] +pub struct ProposedBlock { + /// Block data encoded using \[winter_utils::Serializable\] implementation for + /// \[miden_protocol::block::ProposedBlock\]. + #[prost(bytes = "vec", tag = "1")] + pub proposed_block: ::prost::alloc::vec::Vec, +} /// Represents a block or nothing. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct MaybeBlock { /// The requested block data encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::block::Block\]. + /// \[miden_protocol::block::Block\]. #[prost(bytes = "vec", optional, tag = "1")] pub block: ::core::option::Option<::prost::alloc::vec::Vec>, } @@ -56,9 +64,9 @@ pub struct BlockHeader { /// A commitment to a set of IDs of transactions which affected accounts in this block. #[prost(message, optional, tag = "8")] pub tx_commitment: ::core::option::Option, - /// A commitment to a STARK proof attesting to the correct state transition. + /// The validator's ECDSA public key. #[prost(message, optional, tag = "9")] - pub proof_commitment: ::core::option::Option, + pub validator_key: ::core::option::Option, /// A commitment to all transaction kernels supported by this block. #[prost(message, optional, tag = "10")] pub tx_kernel_commitment: ::core::option::Option, @@ -69,6 +77,22 @@ pub struct BlockHeader { #[prost(fixed32, tag = "12")] pub timestamp: u32, } +/// Validator ECDSA public key. 
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct ValidatorPublicKey {
+    /// Public key encoded using \[winter_utils::Serializable\] implementation for
+    /// \[crypto::dsa::ecdsa_k256_keccak::PublicKey\].
+    #[prost(bytes = "vec", tag = "1")]
+    pub validator_key: ::prost::alloc::vec::Vec<u8>,
+}
+/// Block ECDSA Signature.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct BlockSignature {
+    /// Signature encoded using \[winter_utils::Serializable\] implementation for
+    /// \[crypto::dsa::ecdsa_k256_keccak::Signature\].
+    #[prost(bytes = "vec", tag = "1")]
+    pub signature: ::prost::alloc::vec::Vec<u8>,
+}
 /// Definition of the fee parameters.
 #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
 pub struct FeeParameters {
@@ -79,3 +103,11 @@ pub struct FeeParameters {
     #[prost(fixed32, tag = "2")]
     pub verification_base_fee: u32,
 }
+/// Represents a block body.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct BlockBody {
+    /// Block body data encoded using \[winter_utils::Serializable\] implementation for
+    /// \[miden_protocol::block::BlockBody\].
+    #[prost(bytes = "vec", tag = "1")]
+    pub block_body: ::prost::alloc::vec::Vec<u8>,
+}
diff --git a/crates/proto/src/generated/mod.rs b/crates/proto/src/generated/mod.rs
index ab0567476..61e3a5379 100644
--- a/crates/proto/src/generated/mod.rs
+++ b/crates/proto/src/generated/mod.rs
@@ -3,14 +3,11 @@

 pub mod account;
 pub mod block_producer;
-pub mod block_producer_store;
 pub mod blockchain;
 pub mod note;
-pub mod ntx_builder_store;
 pub mod primitives;
 pub mod remote_prover;
 pub mod rpc;
-pub mod rpc_store;
-pub mod shared;
+pub mod store;
 pub mod transaction;
 pub mod validator;
diff --git a/crates/proto/src/generated/note.rs b/crates/proto/src/generated/note.rs
index 097c5f94d..239d6f6d0 100644
--- a/crates/proto/src/generated/note.rs
+++ b/crates/proto/src/generated/note.rs
@@ -24,12 +24,12 @@ pub struct NoteMetadata {
     pub note_type: u32,
     /// A value which can be used by the recipient(s) to identify notes intended for them.
     ///
-    /// See `miden_objects::note::note_tag` for more info.
+    /// See `miden_protocol::note::note_tag` for more info.
     #[prost(fixed32, tag = "3")]
     pub tag: u32,
     /// Specifies when a note is ready to be consumed.
     ///
-    /// See `miden_objects::note::execution_hint` for more info.
+    /// See `miden_protocol::note::execution_hint` for more info.
     #[prost(fixed64, tag = "4")]
     pub execution_hint: u64,
     /// An arbitrary user-defined value.
diff --git a/crates/proto/src/generated/ntx_builder_store.rs b/crates/proto/src/generated/ntx_builder_store.rs
deleted file mode 100644
index 3beb83076..000000000
--- a/crates/proto/src/generated/ntx_builder_store.rs
+++ /dev/null
@@ -1,843 +0,0 @@
-// This file is @generated by prost-build.
-/// Account ID prefix.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
-pub struct AccountIdPrefix {
-    /// Account ID prefix.
-    #[prost(fixed32, tag = "1")]
-    pub account_id_prefix: u32,
-}
-/// Represents the result of getting network account details by prefix.
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
-pub struct MaybeAccountDetails {
-    /// Account details.
-    #[prost(message, optional, tag = "1")]
-    pub details: ::core::option::Option<super::account::AccountDetails>,
-}
-/// Returns a list of unconsumed network notes using pagination.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
-pub struct UnconsumedNetworkNotesRequest {
-    /// An opaque token used to paginate through the notes.
- /// - /// This should be null on the first call, and set to the response token until the response token - /// is null, at which point all data has been fetched. - #[prost(uint64, optional, tag = "1")] - pub page_token: ::core::option::Option, - /// Number of notes to retrieve per page. - #[prost(uint64, tag = "2")] - pub page_size: u64, -} -/// Returns a paginated list of unconsumed network notes for an account. -/// -/// Notes created or consumed after the specified block are excluded from the result. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct UnconsumedNetworkNotesForAccountRequest { - /// This should be null on the first call, and set to the response token until the response token - /// is null, at which point all data has been fetched. - /// - /// Note that this token is only valid if used with the same parameters. - #[prost(uint64, optional, tag = "1")] - pub page_token: ::core::option::Option, - /// Number of notes to retrieve per page. - #[prost(uint64, tag = "2")] - pub page_size: u64, - /// The network account ID prefix to filter notes by. - #[prost(uint32, tag = "3")] - pub network_account_id_prefix: u32, - /// The block number to filter the returned notes by. - /// - /// Notes that are created or consumed after this block are excluded from the result. - #[prost(fixed32, tag = "4")] - pub block_num: u32, -} -/// Represents the result of getting the unconsumed network notes. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct UnconsumedNetworkNotes { - /// An opaque pagination token. - /// - /// Use this in your next request to get the next - /// set of data. - /// - /// Will be null once there is no more data remaining. - #[prost(uint64, optional, tag = "1")] - pub next_token: ::core::option::Option, - /// The list of unconsumed network notes. - #[prost(message, repeated, tag = "2")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Current blockchain data based on the requested block number. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CurrentBlockchainData { - /// Commitments that represent the current state according to the MMR. - #[prost(message, repeated, tag = "1")] - pub current_peaks: ::prost::alloc::vec::Vec, - /// Current block header. - #[prost(message, optional, tag = "2")] - pub current_block_header: ::core::option::Option, -} -/// Generated client implementations. -pub mod ntx_builder_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the network transaction builder component - #[derive(Debug, Clone)] - pub struct NtxBuilderClient { - inner: tonic::client::Grpc, - } - impl NtxBuilderClient { - /// Attempt to create a new client by connecting to a given endpoint. 
- pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl NtxBuilderClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> NtxBuilderClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - NtxBuilderClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetBlockHeaderByNumber", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a paginated list of unconsumed network notes. 
- pub async fn get_unconsumed_network_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotes", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetUnconsumedNetworkNotes", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. - pub async fn get_unconsumed_network_notes_for_account( - &mut self, - request: impl tonic::IntoRequest< - super::UnconsumedNetworkNotesForAccountRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotesForAccount", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetUnconsumedNetworkNotesForAccount", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. - pub async fn get_current_blockchain_data( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetCurrentBlockchainData", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetCurrentBlockchainData", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state of a network account with the specified account prefix. - pub async fn get_network_account_details_by_prefix( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetNetworkAccountDetailsByPrefix", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetNetworkAccountDetailsByPrefix", - ), - ); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. 
- pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/ntx_builder_store.NtxBuilder/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert( - GrpcMethod::new( - "ntx_builder_store.NtxBuilder", - "GetNoteScriptByRoot", - ), - ); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod ntx_builder_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with NtxBuilderServer. - #[async_trait] - pub trait NtxBuilder: std::marker::Send + std::marker::Sync + 'static { - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a paginated list of unconsumed network notes. - async fn get_unconsumed_network_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a paginated list of a network account's unconsumed notes up to a specified block number. - async fn get_unconsumed_network_notes_for_account( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - /// header for executing network transactions. If the block number is not provided, the latest - /// header and peaks will be retrieved. - async fn get_current_blockchain_data( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state of a network account with the specified account prefix. - async fn get_network_account_details_by_prefix( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. 
- async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the network transaction builder component - #[derive(Debug)] - pub struct NtxBuilderServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl NtxBuilderServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for NtxBuilderServer - where - T: NtxBuilder, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/ntx_builder_store.NtxBuilder/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - 
.apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotes" => { - #[allow(non_camel_case_types)] - struct GetUnconsumedNetworkNotesSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetUnconsumedNetworkNotesSvc { - type Response = super::UnconsumedNetworkNotes; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_unconsumed_network_notes( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetUnconsumedNetworkNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetUnconsumedNetworkNotesForAccount" => { - #[allow(non_camel_case_types)] - struct GetUnconsumedNetworkNotesForAccountSvc( - pub Arc, - ); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::UnconsumedNetworkNotesForAccountRequest, - > for GetUnconsumedNetworkNotesForAccountSvc { - type Response = super::UnconsumedNetworkNotes; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::UnconsumedNetworkNotesForAccountRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_unconsumed_network_notes_for_account( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetUnconsumedNetworkNotesForAccountSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetCurrentBlockchainData" => { - #[allow(non_camel_case_types)] - struct GetCurrentBlockchainDataSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService< - super::super::blockchain::MaybeBlockNumber, - > for GetCurrentBlockchainDataSvc { - type Response = super::CurrentBlockchainData; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: 
tonic::Request< - super::super::blockchain::MaybeBlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_current_blockchain_data( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetCurrentBlockchainDataSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetNetworkAccountDetailsByPrefix" => { - #[allow(non_camel_case_types)] - struct GetNetworkAccountDetailsByPrefixSvc( - pub Arc, - ); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNetworkAccountDetailsByPrefixSvc { - type Response = super::MaybeAccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_network_account_details_by_prefix( - &inner, - request, - ) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNetworkAccountDetailsByPrefixSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/ntx_builder_store.NtxBuilder/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: NtxBuilder, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::super::shared::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - 
max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for NtxBuilderServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "ntx_builder_store.NtxBuilder"; - impl tonic::server::NamedService for NtxBuilderServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/primitives.rs b/crates/proto/src/generated/primitives.rs index 907ef856a..ea7f5a1a1 100644 --- a/crates/proto/src/generated/primitives.rs +++ b/crates/proto/src/generated/primitives.rs @@ -15,10 +15,10 @@ pub struct SmtLeafEntry { #[prost(message, optional, tag = "2")] pub value: ::core::option::Option, } -/// Represents multiple leaf entries in an SMT. +/// Multiple leaf entries when hash collisions occur at the same leaf position. #[derive(Clone, PartialEq, ::prost::Message)] pub struct SmtLeafEntryList { - /// The entries list. + /// The list of entries at this leaf. #[prost(message, repeated, tag = "1")] pub entries: ::prost::alloc::vec::Vec, } diff --git a/crates/proto/src/generated/remote_prover.rs b/crates/proto/src/generated/remote_prover.rs index 210b69153..b504804c3 100644 --- a/crates/proto/src/generated/remote_prover.rs +++ b/crates/proto/src/generated/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,16 +21,16 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. #[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. #[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. #[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/crates/proto/src/generated/rpc.rs b/crates/proto/src/generated/rpc.rs index 5c6a4ce4f..f9a59e39c 100644 --- a/crates/proto/src/generated/rpc.rs +++ b/crates/proto/src/generated/rpc.rs @@ -10,13 +10,560 @@ pub struct RpcStatus { pub genesis_commitment: ::core::option::Option, /// The store status. #[prost(message, optional, tag = "3")] - pub store: ::core::option::Option, + pub store: ::core::option::Option, /// The block producer status. 
     #[prost(message, optional, tag = "4")]
-    pub block_producer: ::core::option::Option<
-        super::block_producer::BlockProducerStatus,
+    pub block_producer: ::core::option::Option<BlockProducerStatus>,
+}
+/// Represents the status of the block producer.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct BlockProducerStatus {
+    /// The block producer's running version.
+    #[prost(string, tag = "1")]
+    pub version: ::prost::alloc::string::String,
+    /// The block producer's status.
+    #[prost(string, tag = "2")]
+    pub status: ::prost::alloc::string::String,
+    /// The block producer's current view of the chain tip height.
+    ///
+    /// This is the height of the latest block that the block producer considers
+    /// to be part of the canonical chain.
+    #[prost(fixed32, tag = "4")]
+    pub chain_tip: u32,
+    /// Statistics about the mempool.
+    #[prost(message, optional, tag = "3")]
+    pub mempool_stats: ::core::option::Option<MempoolStats>,
+}
+/// Statistics about the mempool.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct MempoolStats {
+    /// Number of transactions currently in the mempool waiting to be batched.
+    #[prost(uint64, tag = "1")]
+    pub unbatched_transactions: u64,
+    /// Number of batches currently being proven.
+    #[prost(uint64, tag = "2")]
+    pub proposed_batches: u64,
+    /// Number of proven batches waiting for block inclusion.
+    #[prost(uint64, tag = "3")]
+    pub proven_batches: u64,
+}
+/// Represents the status of the store.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct StoreStatus {
+    /// The store's running version.
+    #[prost(string, tag = "1")]
+    pub version: ::prost::alloc::string::String,
+    /// The store's status.
+    #[prost(string, tag = "2")]
+    pub status: ::prost::alloc::string::String,
+    /// Number of the latest block in the chain.
+    #[prost(fixed32, tag = "3")]
+    pub chain_tip: u32,
+}
+/// Returns the block header corresponding to the requested block number, as well as the Merkle
+/// path and current forest which validate the block's inclusion in the chain.
+///
+/// The Merkle path is an MMR proof for the block's leaf, based on the current chain length.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct BlockHeaderByNumberRequest {
+    /// The target block height; defaults to the latest block if not provided.
+    #[prost(uint32, optional, tag = "1")]
+    pub block_num: ::core::option::Option<u32>,
+    /// Whether or not to return authentication data for the block header.
+    #[prost(bool, optional, tag = "2")]
+    pub include_mmr_proof: ::core::option::Option<bool>,
+}
+/// Represents the result of getting a block header by block number.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlockHeaderByNumberResponse {
+    /// The requested block header.
+    #[prost(message, optional, tag = "1")]
+    pub block_header: ::core::option::Option<super::blockchain::BlockHeader>,
+    /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`.
+    #[prost(message, optional, tag = "2")]
+    pub mmr_path: ::core::option::Option<super::primitives::MerklePath>,
+    /// Current chain length.
+    #[prost(fixed32, optional, tag = "3")]
+    pub chain_length: ::core::option::Option<u32>,
+}
+/// Represents a note script or nothing.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct MaybeNoteScript {
+    /// The script for a note by its root.
+    #[prost(message, optional, tag = "1")]
+    pub script: ::core::option::Option<super::note::NoteScript>,
+}
+/// Returns the latest state proof of the specified account.
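+///
+/// A hedged request-construction sketch (not generated code): `client` is assumed to be
+/// a connected `api_client::ApiClient`, and the field values are placeholders.
+///
+/// ```rust,ignore
+/// let request = AccountProofRequest {
+///     account_id: Some(account_id),
+///     // `None` defaults to the current chain tip.
+///     block_num: None,
+///     // `None` returns only the account witness, without public-account details.
+///     details: None,
+/// };
+/// let proof = client.get_account_proof(request).await?.into_inner();
+/// ```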
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AccountProofRequest {
+    /// ID of the account for which we want to get data.
+    #[prost(message, optional, tag = "1")]
+    pub account_id: ::core::option::Option<super::account::AccountId>,
+    /// Optional block height at which to return the proof.
+    ///
+    /// Defaults to the current chain tip if unspecified.
+    #[prost(message, optional, tag = "2")]
+    pub block_num: ::core::option::Option<super::blockchain::BlockNumber>,
+    /// Request for additional account details; valid only for public accounts.
+    #[prost(message, optional, tag = "3")]
+    pub details: ::core::option::Option<account_proof_request::AccountDetailRequest>,
+}
+/// Nested message and enum types in `AccountProofRequest`.
+pub mod account_proof_request {
+    /// Request the details for a public account.
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct AccountDetailRequest {
+        /// Last known code commitment to the requester. The response will include account code
+        /// only if its commitment is different from this value.
+        ///
+        /// If the field is omitted, the response will not include the account code.
+        #[prost(message, optional, tag = "1")]
+        pub code_commitment: ::core::option::Option<super::super::primitives::Digest>,
+        /// Last known asset vault commitment to the requester. The response will include asset vault data
+        /// only if its commitment is different from this value. If the value is not present in the
+        /// request, the response will not contain one either.
+        /// If the number of to-be-returned asset entries exceeds a threshold, they have to be requested
+        /// separately, which is signaled in the response message with a dedicated flag.
+        #[prost(message, optional, tag = "2")]
+        pub asset_vault_commitment: ::core::option::Option<
+            super::super::primitives::Digest,
+        >,
+        /// Additional request per storage map.
+        #[prost(message, repeated, tag = "3")]
+        pub storage_maps: ::prost::alloc::vec::Vec<
+            account_detail_request::StorageMapDetailRequest,
+        >,
+    }
+    /// Nested message and enum types in `AccountDetailRequest`.
+    pub mod account_detail_request {
+        /// Represents a storage slot index and the associated map keys.
+        #[derive(Clone, PartialEq, ::prost::Message)]
+        pub struct StorageMapDetailRequest {
+            /// Storage slot name.
+            #[prost(string, tag = "1")]
+            pub slot_name: ::prost::alloc::string::String,
+            #[prost(oneof = "storage_map_detail_request::SlotData", tags = "2, 3")]
+            pub slot_data: ::core::option::Option<storage_map_detail_request::SlotData>,
+        }
+        /// Nested message and enum types in `StorageMapDetailRequest`.
+        pub mod storage_map_detail_request {
+            /// Indirection required for use in `oneof {..}` block.
+            #[derive(Clone, PartialEq, ::prost::Message)]
+            pub struct MapKeys {
+                /// A list of map keys associated with this storage slot.
+                #[prost(message, repeated, tag = "1")]
+                pub map_keys: ::prost::alloc::vec::Vec<
+                    super::super::super::super::primitives::Digest,
+                >,
+            }
+            #[derive(Clone, PartialEq, ::prost::Oneof)]
+            pub enum SlotData {
+                /// Request to return all storage map data. If the number of entries exceeds a
+                /// threshold of 1000, the response will not contain them and they must be
+                /// requested separately.
+                #[prost(bool, tag = "2")]
+                AllEntries(bool),
+                /// A list of map keys associated with the given storage slot identified by `slot_name`.
+                #[prost(message, tag = "3")]
+                MapKeys(MapKeys),
+            }
+        }
+    }
+}
+/// Represents the result of getting account proof.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AccountProofResponse {
+    /// The block number at which the account witness was created and the account details were observed.
+    #[prost(message, optional, tag = "1")]
+    pub block_num: ::core::option::Option<super::blockchain::BlockNumber>,
+    /// Account ID, current state commitment, and SMT path.
+    #[prost(message, optional, tag = "2")]
+    pub witness: ::core::option::Option<super::account::AccountWitness>,
+    /// Additional details for public accounts.
+    #[prost(message, optional, tag = "3")]
+    pub details: ::core::option::Option<account_proof_response::AccountDetails>,
+}
+/// Nested message and enum types in `AccountProofResponse`.
+pub mod account_proof_response {
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct AccountDetails {
+        /// Account header.
+        #[prost(message, optional, tag = "1")]
+        pub header: ::core::option::Option<super::super::account::AccountHeader>,
+        /// Account storage data.
+        #[prost(message, optional, tag = "2")]
+        pub storage_details: ::core::option::Option<super::AccountStorageDetails>,
+        /// Account code; empty if code commitments matched or none was requested.
+        #[prost(bytes = "vec", optional, tag = "3")]
+        pub code: ::core::option::Option<::prost::alloc::vec::Vec<u8>>,
+        /// Account asset vault data; empty if vault commitments matched or the requester
+        /// omitted it in the request.
+        #[prost(message, optional, tag = "4")]
+        pub vault_details: ::core::option::Option<super::AccountVaultDetails>,
+    }
+}
+/// Account vault details for `AccountProofResponse`.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AccountVaultDetails {
+    /// A flag that is set to `true` if the account contains too many assets. This indicates
+    /// to the user that the `SyncAccountVault` endpoint should be used to retrieve the
+    /// account's assets.
+    #[prost(bool, tag = "1")]
+    pub too_many_assets: bool,
+    /// When `too_many_assets == false`, this will contain the list of assets in the
+    /// account's vault.
+    #[prost(message, repeated, tag = "2")]
+    pub assets: ::prost::alloc::vec::Vec,
+}
+/// Account storage details for `AccountProofResponse`.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct AccountStorageDetails {
+    /// Account storage header (storage slot info for up to 256 slots).
+    #[prost(message, optional, tag = "1")]
+    pub header: ::core::option::Option<super::account::AccountStorageHeader>,
+    /// Additional data for the requested storage maps.
+    #[prost(message, repeated, tag = "2")]
+    pub map_details: ::prost::alloc::vec::Vec<
+        account_storage_details::AccountStorageMapDetails,
+    >,
+}
+/// Nested message and enum types in `AccountStorageDetails`.
+pub mod account_storage_details {
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct AccountStorageMapDetails {
+        /// Storage slot name.
+        #[prost(string, tag = "1")]
+        pub slot_name: ::prost::alloc::string::String,
+        /// A flag that is set to `true` if the number of to-be-returned entries in the
+        /// storage map would exceed a threshold. This indicates to the user that the
+        /// `SyncStorageMaps` endpoint should be used to get all storage map data.
+        #[prost(bool, tag = "2")]
+        pub too_many_entries: bool,
+        /// By default we provide all storage entries.
+        #[prost(message, optional, tag = "3")]
+        pub entries: ::core::option::Option<account_storage_map_details::MapEntries>,
+    }
+    /// Nested message and enum types in `AccountStorageMapDetails`.
+    pub mod account_storage_map_details {
+        /// Wrapper for repeated storage map entries.
+        #[derive(Clone, PartialEq, ::prost::Message)]
+        pub struct MapEntries {
+            #[prost(message, repeated, tag = "1")]
+            pub entries: ::prost::alloc::vec::Vec<map_entries::StorageMapEntry>,
+        }
+        /// Nested message and enum types in `MapEntries`.
+        pub mod map_entries {
+            /// Definition of individual storage entries.
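+            ///
+            /// A sketch of collecting entries into a lookup map, given a decoded
+            /// `MapEntries` value `map_entries` (assumes `Digest` is usable as a
+            /// hash-map key, which its derived `Eq`/`Hash` suggest):
+            ///
+            /// ```rust,ignore
+            /// use std::collections::HashMap;
+            /// let lookup: HashMap<_, _> = map_entries
+            ///     .entries
+            ///     .into_iter()
+            ///     .map(|e| (e.key.unwrap(), e.value.unwrap()))
+            ///     .collect();
+            /// ```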
+            #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+            pub struct StorageMapEntry {
+                #[prost(message, optional, tag = "1")]
+                pub key: ::core::option::Option<
+                    super::super::super::super::primitives::Digest,
+                >,
+                #[prost(message, optional, tag = "2")]
+                pub value: ::core::option::Option<
+                    super::super::super::super::primitives::Digest,
+                >,
+            }
+        }
+    }
+}
+/// List of nullifiers to return proofs for.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NullifierList {
+    /// List of nullifiers to return proofs for.
+    #[prost(message, repeated, tag = "1")]
+    pub nullifiers: ::prost::alloc::vec::Vec<super::primitives::Digest>,
+}
+/// Represents the result of checking nullifiers.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CheckNullifiersResponse {
+    /// Each requested nullifier has its corresponding nullifier proof at the same position.
+    #[prost(message, repeated, tag = "1")]
+    pub proofs: ::prost::alloc::vec::Vec<super::primitives::SmtOpening>,
+}
+/// Returns a list of nullifiers that match the specified prefixes and are recorded in the node.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct SyncNullifiersRequest {
+    /// Block range from which the nullifiers are requested (bounds inclusive).
+    #[prost(message, optional, tag = "1")]
+    pub block_range: ::core::option::Option<BlockRange>,
+    /// Number of bits used for the nullifier prefix. Currently the only supported value is 16.
+    #[prost(uint32, tag = "2")]
+    pub prefix_len: u32,
+    /// List of nullifiers to check. Each nullifier is specified by its prefix with length equal
+    /// to `prefix_len`.
+    #[prost(uint32, repeated, tag = "3")]
+    pub nullifiers: ::prost::alloc::vec::Vec<u32>,
+}
+/// Represents the result of syncing nullifiers.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SyncNullifiersResponse {
+    /// Pagination information.
+    #[prost(message, optional, tag = "1")]
+    pub pagination_info: ::core::option::Option<PaginationInfo>,
+    /// List of nullifiers matching the prefixes specified in the request.
+    #[prost(message, repeated, tag = "2")]
+    pub nullifiers: ::prost::alloc::vec::Vec<sync_nullifiers_response::NullifierUpdate>,
+}
+/// Nested message and enum types in `SyncNullifiersResponse`.
+pub mod sync_nullifiers_response {
+    /// Represents a single nullifier update.
+    #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+    pub struct NullifierUpdate {
+        /// Nullifier ID.
+        #[prost(message, optional, tag = "1")]
+        pub nullifier: ::core::option::Option<super::super::primitives::Digest>,
+        /// Block number.
+        #[prost(fixed32, tag = "2")]
+        pub block_num: u32,
+    }
+}
+/// Account vault synchronization request.
+///
+/// Allows requesters to sync asset values for specific public accounts within a block range.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct SyncAccountVaultRequest {
+    /// Block range from which to start synchronizing.
+    ///
+    /// If `block_to` is specified, it must be close to the chain tip (i.e., within 30 blocks);
+    /// otherwise an error will be returned.
+    #[prost(message, optional, tag = "1")]
+    pub block_range: ::core::option::Option<BlockRange>,
+    /// Account for which we want to sync the asset vault.
+    #[prost(message, optional, tag = "2")]
+    pub account_id: ::core::option::Option<super::account::AccountId>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SyncAccountVaultResponse {
+    /// Pagination information.
+    #[prost(message, optional, tag = "1")]
+    pub pagination_info: ::core::option::Option<PaginationInfo>,
+    /// List of asset updates for the account.
+    ///
+    /// Multiple updates can be returned for a single asset, and the one with a higher `block_num`
+    /// is expected to be retained by the caller.
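+    ///
+    /// A de-duplication sketch implementing that rule, keeping only the highest
+    /// `block_num` per vault key (assumes `Digest` is usable as a map key):
+    ///
+    /// ```rust,ignore
+    /// use std::collections::HashMap;
+    /// let mut latest: HashMap<_, AccountVaultUpdate> = HashMap::new();
+    /// for update in response.updates {
+    ///     let key = update.vault_key.expect("vault key is always set");
+    ///     let entry = latest.entry(key).or_insert(update);
+    ///     if update.block_num > entry.block_num {
+    ///         *entry = update; // a later update supersedes the earlier one
+    ///     }
+    /// }
+    /// ```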
+    #[prost(message, repeated, tag = "2")]
+    pub updates: ::prost::alloc::vec::Vec<AccountVaultUpdate>,
+}
+#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct AccountVaultUpdate {
+    /// Vault key associated with the asset.
+    #[prost(message, optional, tag = "1")]
+    pub vault_key: ::core::option::Option<super::primitives::Digest>,
+    /// Asset value related to the vault key.
+    ///
+    /// If not present, the asset was removed from the vault.
+    #[prost(message, optional, tag = "2")]
+    pub asset: ::core::option::Option,
+    /// Block number at which the above asset was updated in the account vault.
+    #[prost(fixed32, tag = "3")]
+    pub block_num: u32,
+}
+/// Note synchronization request.
+///
+/// Specifies note tags that the requester is interested in. The server will return the first
+/// block which contains a note matching `note_tags`, or the chain tip.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct SyncNotesRequest {
+    /// Block range from which to start synchronizing.
+    #[prost(message, optional, tag = "1")]
+    pub block_range: ::core::option::Option<BlockRange>,
+    /// Specifies the tags which the requester is interested in.
+    #[prost(fixed32, repeated, tag = "2")]
+    pub note_tags: ::prost::alloc::vec::Vec<u32>,
+}
+/// Represents the result of a sync notes request.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SyncNotesResponse {
+    /// Pagination information.
+    #[prost(message, optional, tag = "1")]
+    pub pagination_info: ::core::option::Option<PaginationInfo>,
+    /// Block header of the block with the first note matching the specified criteria.
+    #[prost(message, optional, tag = "2")]
+    pub block_header: ::core::option::Option<super::blockchain::BlockHeader>,
+    /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`.
+    ///
+    /// An MMR proof can be constructed for the leaf of index `block_header.block_num` of
+    /// an MMR of forest `chain_tip` with this path.
+    #[prost(message, optional, tag = "3")]
+    pub mmr_path: ::core::option::Option<super::primitives::MerklePath>,
+    /// List of all notes together with the Merkle paths from `response.block_header.note_root`.
+    #[prost(message, repeated, tag = "4")]
+    pub notes: ::prost::alloc::vec::Vec<super::note::NoteSyncRecord>,
+}
+/// State synchronization request.
+///
+/// Specifies state updates the requester is interested in. The server will return the first
+/// block which contains a note matching `note_tags`, or the chain tip, along with the
+/// corresponding updates to `account_ids` for that block range.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SyncStateRequest {
+    /// Last block known by the requester. The response will contain data starting from the next
+    /// block, until the first block which contains a note matching the requested tags, or the
+    /// chain tip if there are no such notes.
+    #[prost(fixed32, tag = "1")]
+    pub block_num: u32,
+    /// Account commitments to include in the response.
+    ///
+    /// An account commitment will be included if-and-only-if it is the latest update. This means
+    /// it is possible there was an update to the account for the given range, but if it is not
+    /// the latest, it won't be included in the response.
+    #[prost(message, repeated, tag = "2")]
+    pub account_ids: ::prost::alloc::vec::Vec<super::account::AccountId>,
+    /// Specifies the tags which the requester is interested in.
+    #[prost(fixed32, repeated, tag = "3")]
+    pub note_tags: ::prost::alloc::vec::Vec<u32>,
+}
+/// Represents the result of a sync state request.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SyncStateResponse {
+    /// Number of the latest block in the chain.
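+    ///
+    /// A client has caught up once the returned header reaches this height; a hedged loop
+    /// sketch (`client` and `request` are assumed to exist, and `block_header` together
+    /// with its `block_num` field are assumed to always be set):
+    ///
+    /// ```rust,ignore
+    /// loop {
+    ///     let resp = client.sync_state(request.clone()).await?.into_inner();
+    ///     let header = resp.block_header.expect("header is always set");
+    ///     // ... apply account, note, and transaction updates from `resp` here ...
+    ///     if header.block_num == resp.chain_tip {
+    ///         break; // reached the tip
+    ///     }
+    ///     request.block_num = header.block_num; // continue from the returned block
+    /// }
+    /// ```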
+    #[prost(fixed32, tag = "1")]
+    pub chain_tip: u32,
+    /// Block header of the block with the first note matching the specified criteria.
+    #[prost(message, optional, tag = "2")]
+    pub block_header: ::core::option::Option<super::blockchain::BlockHeader>,
+    /// Data needed to update the partial MMR from `request.block_num + 1` to
+    /// `response.block_header.block_num`.
+    #[prost(message, optional, tag = "3")]
+    pub mmr_delta: ::core::option::Option<super::primitives::MmrDelta>,
+    /// List of account commitments updated after `request.block_num + 1` but not after
+    /// `response.block_header.block_num`.
+    #[prost(message, repeated, tag = "5")]
+    pub accounts: ::prost::alloc::vec::Vec<super::account::AccountSummary>,
+    /// List of transactions executed against requested accounts between `request.block_num + 1` and
+    /// `response.block_header.block_num`.
+    #[prost(message, repeated, tag = "6")]
+    pub transactions: ::prost::alloc::vec::Vec<super::transaction::TransactionSummary>,
+    /// List of all notes together with the Merkle paths from `response.block_header.note_root`.
+    #[prost(message, repeated, tag = "7")]
+    pub notes: ::prost::alloc::vec::Vec<super::note::NoteSyncRecord>,
+}
+/// Storage map synchronization request.
+///
+/// Allows requesters to sync storage map values for specific public accounts within a block range,
+/// with support for cursor-based pagination to handle large storage maps.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct SyncStorageMapsRequest {
+    /// Block range from which to start synchronizing.
+    ///
+    /// If `block_to` is specified, it must be close to the chain tip (i.e., within 30 blocks);
+    /// otherwise an error will be returned.
+    #[prost(message, optional, tag = "1")]
+    pub block_range: ::core::option::Option<BlockRange>,
+    /// Account for which we want to sync storage maps.
+    #[prost(message, optional, tag = "3")]
+    pub account_id: ::core::option::Option<super::account::AccountId>,
+}
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SyncStorageMapsResponse {
+    /// Pagination information.
+    #[prost(message, optional, tag = "1")]
+    pub pagination_info: ::core::option::Option<PaginationInfo>,
+    /// The list of storage map updates.
+    ///
+    /// Multiple updates can be returned for a single slot index and key combination, and the one
+    /// with a higher `block_num` is expected to be retained by the caller.
+    #[prost(message, repeated, tag = "2")]
+    pub updates: ::prost::alloc::vec::Vec<StorageMapUpdate>,
+}
+/// Represents a single storage map update.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct StorageMapUpdate {
+    /// Block number in which the slot was updated.
+    #[prost(fixed32, tag = "1")]
+    pub block_num: u32,
+    /// Storage slot name.
+    #[prost(string, tag = "2")]
+    pub slot_name: ::prost::alloc::string::String,
+    /// The storage map key.
+    #[prost(message, optional, tag = "3")]
+    pub key: ::core::option::Option<super::primitives::Digest>,
+    /// The storage map value.
+    #[prost(message, optional, tag = "4")]
+    pub value: ::core::option::Option<super::primitives::Digest>,
+}
+/// Represents a block range.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct BlockRange {
+    /// Block number from which to start (inclusive).
+    #[prost(fixed32, tag = "1")]
+    pub block_from: u32,
+    /// Block number up to which to check (inclusive). If not specified, checks up to the latest block.
+    #[prost(fixed32, optional, tag = "2")]
+    pub block_to: ::core::option::Option<u32>,
+}
+/// Represents pagination information for chunked responses.
+///
+/// Pagination is done using block numbers as the axis, allowing requesters to request
+/// data in chunks by specifying block ranges and continuing from where the previous
+/// response left off.
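+///
+/// A chunked-fetch sketch using these semantics (`fetch` is a hypothetical helper that
+/// issues one of the sync requests and returns a response carrying this message as
+/// `pagination_info`):
+///
+/// ```rust,ignore
+/// let mut block_from = 0;
+/// loop {
+///     let resp = fetch(block_from).await?;
+///     let info = resp.pagination_info.expect("always set");
+///     // ... consume this chunk of `resp` ...
+///     if info.block_num >= info.chain_tip {
+///         break; // all available blocks covered
+///     }
+///     block_from = info.block_num + 1; // continue from the next block
+/// }
+/// ```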
+///
+/// To request the next chunk, the requester should use `block_num + 1` from the previous response
+/// as the `block_from` for the next request.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct PaginationInfo {
+    /// Current chain tip.
+    #[prost(fixed32, tag = "1")]
+    pub chain_tip: u32,
+    /// The block number of the last block included in this response.
+    ///
+    /// For chunked responses, this may be less than `request.block_range.block_to`. If so, the
+    /// requester is expected to make a subsequent request starting from the block after this one
+    /// (i.e., `request.block_range.block_from = block_num + 1`).
+    #[prost(fixed32, tag = "2")]
+    pub block_num: u32,
+}
+/// Transactions synchronization request.
+///
+/// Allows requesters to sync transactions for specific accounts within a block range.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SyncTransactionsRequest {
+    /// Block range from which to start synchronizing.
+    #[prost(message, optional, tag = "1")]
+    pub block_range: ::core::option::Option<BlockRange>,
+    /// Accounts to sync transactions for.
+    #[prost(message, repeated, tag = "2")]
+    pub account_ids: ::prost::alloc::vec::Vec<super::account::AccountId>,
+}
+/// Represents the result of a sync transactions request.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct SyncTransactionsResponse {
+    /// Pagination information.
+    #[prost(message, optional, tag = "1")]
+    pub pagination_info: ::core::option::Option<PaginationInfo>,
+    /// List of transaction records.
+    #[prost(message, repeated, tag = "2")]
+    pub transactions: ::prost::alloc::vec::Vec<TransactionRecord>,
+}
+/// Represents a transaction record.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TransactionRecord {
+    /// Block number in which the transaction was included.
+    #[prost(fixed32, tag = "1")]
+    pub block_num: u32,
+    /// A transaction header.
+    #[prost(message, optional, tag = "2")]
+    pub header: ::core::option::Option<super::transaction::TransactionHeader>,
+}
+/// Represents the query parameter limits for RPC endpoints.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct RpcLimits {
+    /// Maps RPC endpoint names to their parameter limits.
+    ///
+    /// Key: endpoint name (e.g., "CheckNullifiers", "SyncState").
+    /// Value: map of parameter names to their limit values.
+    #[prost(map = "string, message", tag = "1")]
+    pub endpoints: ::std::collections::HashMap<
+        ::prost::alloc::string::String,
+        EndpointLimits,
     >,
 }
+/// Represents the parameter limits for a single endpoint.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct EndpointLimits {
+    /// Maps parameter names to their limit values.
+    ///
+    /// Key: parameter name (e.g., "nullifier", "account_id").
+    /// Value: limit value.
+    #[prost(map = "string, uint32", tag = "1")]
+    pub parameters: ::std::collections::HashMap<::prost::alloc::string::String, u32>,
+}
 /// Generated client implementations.
 pub mod api_client {
     #![allow(
@@ -128,12 +675,24 @@ pub mod api_client {
             req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "Status"));
             self.inner.unary(req, path, codec).await
         }
-        /// Returns a nullifier proof for each of the requested nullifiers.
+ /// Returns a Sparse Merkle Tree opening proof for each requested nullifier + /// + /// Each proof demonstrates either: + /// + /// * **Inclusion**: Nullifier exists in the tree (note was consumed) + /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) + /// + /// The `leaf` field indicates the status: + /// + /// * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) + /// * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. + /// + /// Verify proofs against the nullifier tree root in the latest block header. pub async fn check_nullifiers( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -177,11 +736,9 @@ pub mod api_client { /// Returns the latest state proof of the specified account. pub async fn get_account_proof( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::AccountProofRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -224,11 +781,9 @@ pub mod api_client { /// and current chain length to authenticate the block's inclusion. pub async fn get_block_header_by_number( &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -275,7 +830,7 @@ pub mod api_client { &mut self, request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -295,16 +850,14 @@ pub mod api_client { .insert(GrpcMethod::new("rpc.Api", "GetNoteScriptByRoot")); self.inner.unary(req, path, codec).await } - /// Submits proven transaction to the Miden network. + /// Submits proven transaction to the Miden network. Returns the node's current block height. pub async fn submit_proven_transaction( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransaction, >, ) -> std::result::Result< - tonic::Response< - super::super::block_producer::SubmitProvenTransactionResponse, - >, + tonic::Response, tonic::Status, > { self.inner @@ -334,13 +887,15 @@ pub mod api_client { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. pub async fn submit_proven_batch( &mut self, request: impl tonic::IntoRequest< super::super::transaction::ProvenTransactionBatch, >, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -364,11 +919,9 @@ pub mod api_client { /// Note that only 16-bit prefixes are supported at this time. pub async fn sync_nullifiers( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncNullifiersRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -388,11 +941,9 @@ pub mod api_client { /// Returns account vault updates for specified account within a block range. 
pub async fn sync_account_vault( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncAccountVaultRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -420,9 +971,9 @@ pub mod api_client { /// tip of the chain. pub async fn sync_notes( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -456,9 +1007,9 @@ pub mod api_client { /// additional filtering of that data on its side. pub async fn sync_state( &mut self, - request: impl tonic::IntoRequest, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -478,11 +1029,9 @@ pub mod api_client { /// Returns storage map updates for specified account and storage slots within a block range. pub async fn sync_storage_maps( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncStorageMapsRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -502,11 +1051,9 @@ pub mod api_client { /// Returns transactions records for specific accounts within a block range. pub async fn sync_transactions( &mut self, - request: impl tonic::IntoRequest< - super::super::rpc_store::SyncTransactionsRequest, - >, + request: impl tonic::IntoRequest, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, > { self.inner @@ -523,6 +1070,29 @@ pub mod api_client { req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "SyncTransactions")); self.inner.unary(req, path, codec).await } + /// Returns the query parameter limits configured for RPC methods. + /// + /// These define the maximum number of each parameter a method will accept. + /// Exceeding the limit will result in the request being rejected and you should instead send + /// multiple smaller requests. + pub async fn get_limits( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/rpc.Api/GetLimits"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("rpc.Api", "GetLimits")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -543,12 +1113,24 @@ pub mod api_server { &self, request: tonic::Request<()>, ) -> std::result::Result, tonic::Status>; - /// Returns a nullifier proof for each of the requested nullifiers. + /// Returns a Sparse Merkle Tree opening proof for each requested nullifier + /// + /// Each proof demonstrates either: + /// + /// * **Inclusion**: Nullifier exists in the tree (note was consumed) + /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) + /// + /// The `leaf` field indicates the status: + /// + /// * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) + /// * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. + /// + /// Verify proofs against the nullifier tree root in the latest block header. 
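+        ///
+        /// A hedged verification sketch: obtain the latest header via
+        /// `get_block_header_by_number`, then check each returned opening against the
+        /// header's nullifier tree root. The `latest_block_header` and
+        /// `verify_smt_opening` helpers are illustrative only, not part of this API.
+        ///
+        /// ```rust,ignore
+        /// let header = latest_block_header(&mut client).await?;
+        /// let resp = client
+        ///     .check_nullifiers(NullifierList { nullifiers })
+        ///     .await?
+        ///     .into_inner();
+        /// for proof in &resp.proofs {
+        ///     assert!(verify_smt_opening(&header.nullifier_root, proof));
+        /// }
+        /// ```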
async fn check_nullifiers( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns the latest state of an account with the specified ID. @@ -562,9 +1144,9 @@ pub mod api_server { /// Returns the latest state proof of the specified account. async fn get_account_proof( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns raw block data for the specified block number. @@ -579,9 +1161,9 @@ pub mod api_server { /// and current chain length to authenticate the block's inclusion. async fn get_block_header_by_number( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns a list of notes matching the provided note IDs. @@ -596,18 +1178,13 @@ pub mod api_server { async fn get_note_script_by_root( &self, request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Submits proven transaction to the Miden network. + ) -> std::result::Result, tonic::Status>; + /// Submits proven transaction to the Miden network. Returns the node's current block height. async fn submit_proven_transaction( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response< - super::super::block_producer::SubmitProvenTransactionResponse, - >, + tonic::Response, tonic::Status, >; /// Submits a proven batch of transactions to the Miden network. @@ -620,11 +1197,13 @@ pub mod api_server { /// /// All transactions in the batch but not in the mempool must build on the current mempool /// state following normal transaction submission rules. + /// + /// Returns the node's current block height. async fn submit_proven_batch( &self, request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. @@ -632,17 +1211,17 @@ pub mod api_server { /// Note that only 16-bit prefixes are supported at this time. async fn sync_nullifiers( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns account vault updates for specified account within a block range. async fn sync_account_vault( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. @@ -656,9 +1235,9 @@ pub mod api_server { /// tip of the chain. async fn sync_notes( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns info which can be used by the client to sync up to the latest state of the chain @@ -678,27 +1257,36 @@ pub mod api_server { /// additional filtering of that data on its side. async fn sync_state( &self, - request: tonic::Request, + request: tonic::Request, ) -> std::result::Result< - tonic::Response, + tonic::Response, tonic::Status, >; /// Returns storage map updates for specified account and storage slots within a block range. 
         async fn sync_storage_maps(
             &self,
-            request: tonic::Request<super::super::rpc_store::SyncStorageMapsRequest>,
+            request: tonic::Request<super::SyncStorageMapsRequest>,
         ) -> std::result::Result<
-            tonic::Response<super::super::rpc_store::SyncStorageMapsResponse>,
+            tonic::Response<super::SyncStorageMapsResponse>,
             tonic::Status,
         >;
         /// Returns transactions records for specific accounts within a block range.
         async fn sync_transactions(
             &self,
-            request: tonic::Request<super::super::rpc_store::SyncTransactionsRequest>,
+            request: tonic::Request<super::SyncTransactionsRequest>,
         ) -> std::result::Result<
-            tonic::Response<super::super::rpc_store::SyncTransactionsResponse>,
+            tonic::Response<super::SyncTransactionsResponse>,
             tonic::Status,
         >;
+        /// Returns the query parameter limits configured for RPC methods.
+        ///
+        /// These define the maximum number of each parameter a method will accept.
+        /// Exceeding a limit will result in the request being rejected; split the work into
+        /// multiple smaller requests instead.
+        async fn get_limits(
+            &self,
+            request: tonic::Request<()>,
+        ) -> std::result::Result<tonic::Response<super::RpcLimits>, tonic::Status>;
     }
     /// RPC API for the RPC component
     #[derive(Debug)]
@@ -819,20 +1407,16 @@ pub mod api_server {
                 "/rpc.Api/CheckNullifiers" => {
                     #[allow(non_camel_case_types)]
                     struct CheckNullifiersSvc<T: Api>(pub Arc<T>);
-                    impl<
-                        T: Api,
-                    > tonic::server::UnaryService<super::super::rpc_store::NullifierList>
+                    impl<T: Api> tonic::server::UnaryService<super::NullifierList>
                     for CheckNullifiersSvc<T> {
-                        type Response = super::super::rpc_store::CheckNullifiersResponse;
+                        type Response = super::CheckNullifiersResponse;
                         type Future = BoxFuture<
                             tonic::Response<Self::Response>,
                             tonic::Status,
                         >;
                         fn call(
                             &mut self,
-                            request: tonic::Request<
-                                super::super::rpc_store::NullifierList,
-                            >,
+                            request: tonic::Request<super::NullifierList>,
                         ) -> Self::Future {
                             let inner = Arc::clone(&self.0);
                             let fut = async move {
@@ -911,21 +1495,16 @@ pub mod api_server {
                 "/rpc.Api/GetAccountProof" => {
                     #[allow(non_camel_case_types)]
                     struct GetAccountProofSvc<T: Api>(pub Arc<T>);
-                    impl<
-                        T: Api,
-                    > tonic::server::UnaryService<
-                        super::super::rpc_store::AccountProofRequest,
-                    > for GetAccountProofSvc<T> {
-                        type Response = super::super::rpc_store::AccountProofResponse;
+                    impl<T: Api> tonic::server::UnaryService<super::AccountProofRequest>
+                    for GetAccountProofSvc<T> {
+                        type Response = super::AccountProofResponse;
                         type Future = BoxFuture<
                             tonic::Response<Self::Response>,
                             tonic::Status,
                         >;
                         fn call(
                             &mut self,
-                            request: tonic::Request<
-                                super::super::rpc_store::AccountProofRequest,
-                            >,
+                            request: tonic::Request<super::AccountProofRequest>,
                         ) -> Self::Future {
                             let inner = Arc::clone(&self.0);
                             let fut = async move {
@@ -1008,19 +1587,16 @@ pub mod api_server {
                     struct GetBlockHeaderByNumberSvc<T: Api>(pub Arc<T>);
                     impl<
                         T: Api,
-                    > tonic::server::UnaryService<
-                        super::super::shared::BlockHeaderByNumberRequest,
-                    > for GetBlockHeaderByNumberSvc<T> {
-                        type Response = super::super::shared::BlockHeaderByNumberResponse;
+                    > tonic::server::UnaryService<super::BlockHeaderByNumberRequest>
+                    for GetBlockHeaderByNumberSvc<T> {
+                        type Response = super::BlockHeaderByNumberResponse;
                         type Future = BoxFuture<
                             tonic::Response<Self::Response>,
                             tonic::Status,
                         >;
                         fn call(
                             &mut self,
-                            request: tonic::Request<
-                                super::super::shared::BlockHeaderByNumberRequest,
-                            >,
+                            request: tonic::Request<super::BlockHeaderByNumberRequest>,
                         ) -> Self::Future {
                             let inner = Arc::clone(&self.0);
                             let fut = async move {
@@ -1104,7 +1680,7 @@ pub mod api_server {
                         T: Api,
                     > tonic::server::UnaryService<super::super::primitives::Digest>
                     for GetNoteScriptByRootSvc<T> {
-                        type Response = super::super::shared::MaybeNoteScript;
+                        type Response = super::MaybeNoteScript;
                         type Future = BoxFuture<
                             tonic::Response<Self::Response>,
                             tonic::Status,
@@ -1150,7 +1726,7 @@ pub mod api_server {
                     > tonic::server::UnaryService<
                         super::super::transaction::ProvenTransaction,
                     > for SubmitProvenTransactionSvc<T> {
-                        type Response = super::super::block_producer::SubmitProvenTransactionResponse;
+                        type Response = super::super::blockchain::BlockNumber;
                         type Future = BoxFuture<
                             tonic::Response<Self::Response>,
                             tonic::Status,
@@ -1198,7 +1774,7 @@ pub mod api_server {
                     >
tonic::server::UnaryService< super::super::transaction::ProvenTransactionBatch, > for SubmitProvenBatchSvc { - type Response = super::super::block_producer::SubmitProvenBatchResponse; + type Response = super::super::blockchain::BlockNumber; type Future = BoxFuture< tonic::Response, tonic::Status, @@ -1243,19 +1819,16 @@ pub mod api_server { struct SyncNullifiersSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncNullifiersRequest, - > for SyncNullifiersSvc { - type Response = super::super::rpc_store::SyncNullifiersResponse; + > tonic::server::UnaryService + for SyncNullifiersSvc { + type Response = super::SyncNullifiersResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncNullifiersRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1291,19 +1864,16 @@ pub mod api_server { struct SyncAccountVaultSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncAccountVaultRequest, - > for SyncAccountVaultSvc { - type Response = super::super::rpc_store::SyncAccountVaultResponse; + > tonic::server::UnaryService + for SyncAccountVaultSvc { + type Response = super::SyncAccountVaultResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncAccountVaultRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1337,21 +1907,16 @@ pub mod api_server { "/rpc.Api/SyncNotes" => { #[allow(non_camel_case_types)] struct SyncNotesSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncNotesRequest, - > for SyncNotesSvc { - type Response = super::super::rpc_store::SyncNotesResponse; + impl tonic::server::UnaryService + for SyncNotesSvc { + type Response = super::SyncNotesResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncNotesRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1385,21 +1950,16 @@ pub mod api_server { "/rpc.Api/SyncState" => { #[allow(non_camel_case_types)] struct SyncStateSvc(pub Arc); - impl< - T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncStateRequest, - > for SyncStateSvc { - type Response = super::super::rpc_store::SyncStateResponse; + impl tonic::server::UnaryService + for SyncStateSvc { + type Response = super::SyncStateResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncStateRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1435,19 +1995,16 @@ pub mod api_server { struct SyncStorageMapsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncStorageMapsRequest, - > for SyncStorageMapsSvc { - type Response = super::super::rpc_store::SyncStorageMapsResponse; + > tonic::server::UnaryService + for SyncStorageMapsSvc { + type Response = super::SyncStorageMapsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncStorageMapsRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = 
Arc::clone(&self.0); let fut = async move { @@ -1483,19 +2040,16 @@ pub mod api_server { struct SyncTransactionsSvc(pub Arc); impl< T: Api, - > tonic::server::UnaryService< - super::super::rpc_store::SyncTransactionsRequest, - > for SyncTransactionsSvc { - type Response = super::super::rpc_store::SyncTransactionsResponse; + > tonic::server::UnaryService + for SyncTransactionsSvc { + type Response = super::SyncTransactionsResponse; type Future = BoxFuture< tonic::Response, tonic::Status, >; fn call( &mut self, - request: tonic::Request< - super::super::rpc_store::SyncTransactionsRequest, - >, + request: tonic::Request, ) -> Self::Future { let inner = Arc::clone(&self.0); let fut = async move { @@ -1526,6 +2080,45 @@ pub mod api_server { }; Box::pin(fut) } + "/rpc.Api/GetLimits" => { + #[allow(non_camel_case_types)] + struct GetLimitsSvc(pub Arc); + impl tonic::server::UnaryService<()> for GetLimitsSvc { + type Response = super::RpcLimits; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_limits(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetLimitsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { let mut response = http::Response::new( diff --git a/crates/proto/src/generated/rpc_store.rs b/crates/proto/src/generated/rpc_store.rs deleted file mode 100644 index 187f559ef..000000000 --- a/crates/proto/src/generated/rpc_store.rs +++ /dev/null @@ -1,1811 +0,0 @@ -// This file is @generated by prost-build. -/// Represents the status of the store. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StoreStatus { - /// The store's running version. - #[prost(string, tag = "1")] - pub version: ::prost::alloc::string::String, - /// The store's status. - #[prost(string, tag = "2")] - pub status: ::prost::alloc::string::String, - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "3")] - pub chain_tip: u32, -} -/// Returns the latest state proof of the specified account. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountProofRequest { - /// ID of the account for which we want to get data - #[prost(message, optional, tag = "1")] - pub account_id: ::core::option::Option, - /// Block at which we'd like to get this data. If present, must be close to the chain tip. - /// If not present, data from the latest block will be returned. - #[prost(message, optional, tag = "2")] - pub block_num: ::core::option::Option, - /// Request for additional account details; valid only for public accounts. - #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountProofRequest`. 
-pub mod account_proof_request { - /// Request the details for a public account. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetailRequest { - /// Last known code commitment to the requester. The response will include account code - /// only if its commitment is different from this value. - /// - /// If the field is ommiteed, the response will not include the account code. - #[prost(message, optional, tag = "1")] - pub code_commitment: ::core::option::Option, - /// Last known asset vault commitment to the requester. The response will include asset vault data - /// only if its commitment is different from this value. If the value is not present in the - /// request, the response will not contain one either. - /// If the number of to-be-returned asset entries exceed a threshold, they have to be requested - /// separately, which is signaled in the response message with dedicated flag. - #[prost(message, optional, tag = "2")] - pub asset_vault_commitment: ::core::option::Option< - super::super::primitives::Digest, - >, - /// Additional request per storage map. - #[prost(message, repeated, tag = "3")] - pub storage_maps: ::prost::alloc::vec::Vec< - account_detail_request::StorageMapDetailRequest, - >, - } - /// Nested message and enum types in `AccountDetailRequest`. - pub mod account_detail_request { - /// Represents a storage slot index and the associated map keys. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct StorageMapDetailRequest { - /// Storage slot index (`\[0..255\]`). - #[prost(uint32, tag = "1")] - pub slot_index: u32, - #[prost(oneof = "storage_map_detail_request::SlotData", tags = "2, 3")] - pub slot_data: ::core::option::Option, - } - /// Nested message and enum types in `StorageMapDetailRequest`. - pub mod storage_map_detail_request { - /// Indirection required for use in `oneof {..}` block. - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapKeys { - /// A list of map keys associated with this storage slot. - #[prost(message, repeated, tag = "1")] - pub map_keys: ::prost::alloc::vec::Vec< - super::super::super::super::primitives::Digest, - >, - } - #[derive(Clone, PartialEq, ::prost::Oneof)] - pub enum SlotData { - /// Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - /// the response will not contain them but must be requested separately. - #[prost(bool, tag = "2")] - AllEntries(bool), - /// A list of map keys associated with the given storage slot identified by `slot_index`. - #[prost(message, tag = "3")] - MapKeys(MapKeys), - } - } - } -} -/// Represents the result of getting account proof. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountProofResponse { - /// The block number at which the account witness was created and the account details were observed. - #[prost(message, optional, tag = "1")] - pub block_num: ::core::option::Option, - /// Account ID, current state commitment, and SMT path. - #[prost(message, optional, tag = "2")] - pub witness: ::core::option::Option, - /// Additional details for public accounts. - #[prost(message, optional, tag = "3")] - pub details: ::core::option::Option, -} -/// Nested message and enum types in `AccountProofResponse`. -pub mod account_proof_response { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountDetails { - /// Account header. 
- #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Account storage data - #[prost(message, optional, tag = "2")] - pub storage_details: ::core::option::Option, - /// Account code; empty if code commitments matched or none was requested. - #[prost(bytes = "vec", optional, tag = "3")] - pub code: ::core::option::Option<::prost::alloc::vec::Vec>, - /// Account asset vault data; empty if vault commitments matched or the requester - /// omitted it in the request. - #[prost(message, optional, tag = "4")] - pub vault_details: ::core::option::Option, - } -} -/// Account vault details for AccountProofResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountVaultDetails { - /// A flag that is set to true if the account contains too many assets. This indicates - /// to the user that `SyncAccountVault` endpoint should be used to retrieve the - /// account's assets - #[prost(bool, tag = "1")] - pub too_many_assets: bool, - /// When too_many_assets == false, this will contain the list of assets in the - /// account's vault - #[prost(message, repeated, tag = "2")] - pub assets: ::prost::alloc::vec::Vec, -} -/// Account storage details for AccountProofResponse -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct AccountStorageDetails { - /// Account storage header (storage slot info for up to 256 slots) - #[prost(message, optional, tag = "1")] - pub header: ::core::option::Option, - /// Additional data for the requested storage maps - #[prost(message, repeated, tag = "2")] - pub map_details: ::prost::alloc::vec::Vec< - account_storage_details::AccountStorageMapDetails, - >, -} -/// Nested message and enum types in `AccountStorageDetails`. -pub mod account_storage_details { - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct AccountStorageMapDetails { - /// slot index of the storage map - #[prost(uint32, tag = "1")] - pub slot_index: u32, - /// A flag that is set to `true` if the number of to-be-returned entries in the - /// storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` - /// endpoint should be used to get all storage map data. - #[prost(bool, tag = "2")] - pub too_many_entries: bool, - /// By default we provide all storage entries. - #[prost(message, optional, tag = "3")] - pub entries: ::core::option::Option, - } - /// Nested message and enum types in `AccountStorageMapDetails`. - pub mod account_storage_map_details { - /// Wrapper for repeated storage map entries - #[derive(Clone, PartialEq, ::prost::Message)] - pub struct MapEntries { - #[prost(message, repeated, tag = "1")] - pub entries: ::prost::alloc::vec::Vec, - } - /// Nested message and enum types in `MapEntries`. - pub mod map_entries { - /// Definition of individual storage entries. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct StorageMapEntry { - #[prost(message, optional, tag = "1")] - pub key: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - #[prost(message, optional, tag = "2")] - pub value: ::core::option::Option< - super::super::super::super::primitives::Digest, - >, - } - } - } -} -/// List of nullifiers to return proofs for. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct NullifierList { - /// List of nullifiers to return proofs for. - #[prost(message, repeated, tag = "1")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of checking nullifiers. 
-#[derive(Clone, PartialEq, ::prost::Message)] -pub struct CheckNullifiersResponse { - /// Each requested nullifier has its corresponding nullifier proof at the same position. - #[prost(message, repeated, tag = "1")] - pub proofs: ::prost::alloc::vec::Vec, -} -/// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNullifiersRequest { - /// Block number from which the nullifiers are requested (inclusive). - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Number of bits used for nullifier prefix. Currently the only supported value is 16. - #[prost(uint32, tag = "2")] - pub prefix_len: u32, - /// List of nullifiers to check. Each nullifier is specified by its prefix with length equal - /// to `prefix_len`. - #[prost(uint32, repeated, tag = "3")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing nullifiers. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNullifiersResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of nullifiers matching the prefixes specified in the request. - #[prost(message, repeated, tag = "2")] - pub nullifiers: ::prost::alloc::vec::Vec, -} -/// Nested message and enum types in `SyncNullifiersResponse`. -pub mod sync_nullifiers_response { - /// Represents a single nullifier update. - #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] - pub struct NullifierUpdate { - /// Nullifier ID. - #[prost(message, optional, tag = "1")] - pub nullifier: ::core::option::Option, - /// Block number. - #[prost(fixed32, tag = "2")] - pub block_num: u32, - } -} -/// State synchronization request. -/// -/// Specifies state updates the requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -/// `account_ids` for that block range. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateRequest { - /// Last block known by the requester. The response will contain data starting from the next block, - /// until the first block which contains a note of matching the requested tag, or the chain tip - /// if there are no notes. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Accounts' commitment to include in the response. - /// - /// An account commitment will be included if-and-only-if it is the latest update. Meaning it is - /// possible there was an update to the account for the given range, but if it is not the latest, - /// it won't be included in the response. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "3")] - pub note_tags: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing state request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStateResponse { - /// Number of the latest block in the chain. - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// Block header of the block with the first note matching the specified criteria. - #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. 
- #[prost(message, optional, tag = "3")] - pub mmr_delta: ::core::option::Option, - /// List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - #[prost(message, repeated, tag = "5")] - pub accounts: ::prost::alloc::vec::Vec, - /// List of transactions executed against requested accounts between `request.block_num + 1` and - /// `response.block_header.block_num`. - #[prost(message, repeated, tag = "6")] - pub transactions: ::prost::alloc::vec::Vec, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "7")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Account vault synchronization request. -/// -/// Allows requesters to sync asset values for specific public accounts within a block range. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncAccountVaultRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync asset vault. - #[prost(message, optional, tag = "2")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncAccountVaultResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of asset updates for the account. - /// - /// Multiple updates can be returned for a single asset, and the one with a higher `block_num` - /// is expected to be retained by the caller. - #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct AccountVaultUpdate { - /// Vault key associated with the asset. - #[prost(message, optional, tag = "1")] - pub vault_key: ::core::option::Option, - /// Asset value related to the vault key. - /// If not present, the asset was removed from the vault. - #[prost(message, optional, tag = "2")] - pub asset: ::core::option::Option, - /// Block number at which the above asset was updated in the account vault. - #[prost(fixed32, tag = "3")] - pub block_num: u32, -} -/// Note synchronization request. -/// -/// Specifies note tags that requester is interested in. The server will return the first block which -/// contains a note matching `note_tags` or the chain tip. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncNotesRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Specifies the tags which the requester is interested in. - #[prost(fixed32, repeated, tag = "2")] - pub note_tags: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing notes request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncNotesResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// Block header of the block with the first note matching the specified criteria. - #[prost(message, optional, tag = "2")] - pub block_header: ::core::option::Option, - /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. 
- /// - /// An MMR proof can be constructed for the leaf of index `block_header.block_num` of - /// an MMR of forest `chain_tip` with this path. - #[prost(message, optional, tag = "3")] - pub mmr_path: ::core::option::Option, - /// List of all notes together with the Merkle paths from `response.block_header.note_root`. - #[prost(message, repeated, tag = "4")] - pub notes: ::prost::alloc::vec::Vec, -} -/// Storage map synchronization request. -/// -/// Allows requesters to sync storage map values for specific public accounts within a block range, -/// with support for cursor-based pagination to handle large storage maps. -#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] -pub struct SyncStorageMapsRequest { - /// Block range from which to start synchronizing. - /// - /// If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - /// otherwise an error will be returned. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Account for which we want to sync storage maps. - #[prost(message, optional, tag = "3")] - pub account_id: ::core::option::Option, -} -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncStorageMapsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// The list of storage map updates. - /// - /// Multiple updates can be returned for a single slot index and key combination, and the one - /// with a higher `block_num` is expected to be retained by the caller. - #[prost(message, repeated, tag = "2")] - pub updates: ::prost::alloc::vec::Vec, -} -/// Represents a single storage map update. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct StorageMapUpdate { - /// Block number in which the slot was updated. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// Slot index (\[0..255\]). - #[prost(uint32, tag = "2")] - pub slot_index: u32, - /// The storage map key. - #[prost(message, optional, tag = "3")] - pub key: ::core::option::Option, - /// The storage map value. - #[prost(message, optional, tag = "4")] - pub value: ::core::option::Option, -} -/// Represents a block range. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct BlockRange { - /// Block number from which to start (inclusive). - #[prost(fixed32, tag = "1")] - pub block_from: u32, - /// Block number up to which to check (inclusive). If not specified, checks up to the latest block. - #[prost(fixed32, optional, tag = "2")] - pub block_to: ::core::option::Option, -} -/// Represents pagination information for chunked responses. -/// -/// Pagination is done using block numbers as the axis, allowing requesters to request -/// data in chunks by specifying block ranges and continuing from where the previous -/// response left off. -/// -/// To request the next chunk, the requester should use `block_num + 1` from the previous response -/// as the `block_from` for the next request. -#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)] -pub struct PaginationInfo { - /// Current chain tip - #[prost(fixed32, tag = "1")] - pub chain_tip: u32, - /// The block number of the last check included in this response. - /// - /// For chunked responses, this may be less than `request.block_range.block_to`. 
- /// If it is less than request.block_range.block_to, the user is expected to make a subsequent request - /// starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). - #[prost(fixed32, tag = "2")] - pub block_num: u32, -} -/// Transactions synchronization request. -/// -/// Allows requesters to sync transactions for specific accounts within a block range. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsRequest { - /// Block range from which to start synchronizing. - #[prost(message, optional, tag = "1")] - pub block_range: ::core::option::Option, - /// Accounts to sync transactions for. - #[prost(message, repeated, tag = "2")] - pub account_ids: ::prost::alloc::vec::Vec, -} -/// Represents the result of syncing transactions request. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct SyncTransactionsResponse { - /// Pagination information. - #[prost(message, optional, tag = "1")] - pub pagination_info: ::core::option::Option, - /// List of transaction records. - #[prost(message, repeated, tag = "2")] - pub transaction_records: ::prost::alloc::vec::Vec, -} -/// Represents a transaction record. -#[derive(Clone, PartialEq, ::prost::Message)] -pub struct TransactionRecord { - /// Block number in which the transaction was included. - #[prost(fixed32, tag = "1")] - pub block_num: u32, - /// A transaction header. - #[prost(message, optional, tag = "2")] - pub transaction_header: ::core::option::Option< - super::transaction::TransactionHeader, - >, -} -/// Generated client implementations. -pub mod rpc_client { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - use tonic::codegen::http::Uri; - /// Store API for the RPC component - #[derive(Debug, Clone)] - pub struct RpcClient { - inner: tonic::client::Grpc, - } - impl RpcClient { - /// Attempt to create a new client by connecting to a given endpoint. - pub async fn connect(dst: D) -> Result - where - D: TryInto, - D::Error: Into, - { - let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; - Ok(Self::new(conn)) - } - } - impl RpcClient - where - T: tonic::client::GrpcService, - T::Error: Into, - T::ResponseBody: Body + std::marker::Send + 'static, - ::Error: Into + std::marker::Send, - { - pub fn new(inner: T) -> Self { - let inner = tonic::client::Grpc::new(inner); - Self { inner } - } - pub fn with_origin(inner: T, origin: Uri) -> Self { - let inner = tonic::client::Grpc::with_origin(inner, origin); - Self { inner } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> RpcClient> - where - F: tonic::service::Interceptor, - T::ResponseBody: Default, - T: tonic::codegen::Service< - http::Request, - Response = http::Response< - >::ResponseBody, - >, - >, - , - >>::Error: Into + std::marker::Send + std::marker::Sync, - { - RpcClient::new(InterceptedService::new(inner, interceptor)) - } - /// Compress requests with the given encoding. - /// - /// This requires the server to support it otherwise it might respond with an - /// error. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.send_compressed(encoding); - self - } - /// Enable decompressing responses. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.inner = self.inner.accept_compressed(encoding); - self - } - /// Limits the maximum size of a decoded message. 
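The `PaginationInfo` contract described above (resume from `block_num + 1` until the response covers the requested range) applies to the chunked `Sync*` endpoints generally. A sketch of the loop for `SyncTransactions` follows; the message fields are taken from the definitions above, while the crate paths, the re-export of these types under the new `rpc` module, and the use of `expect` on server-set fields are assumptions.

```rust
use miden_node_proto::generated::account::AccountId; // assumed path
use miden_node_proto::generated::rpc::api_client::ApiClient; // assumed path
use miden_node_proto::generated::rpc::{BlockRange, SyncTransactionsRequest, TransactionRecord};

/// Pulls all transaction records for `account_ids` from `block_from` up to
/// the chain tip, following the pagination contract described above.
async fn sync_all_transactions(
    client: &mut ApiClient<tonic::transport::Channel>,
    account_ids: Vec<AccountId>,
    mut block_from: u32,
) -> Result<Vec<TransactionRecord>, tonic::Status> {
    let mut records = Vec::new();
    loop {
        let response = client
            .sync_transactions(SyncTransactionsRequest {
                // `block_to: None` asks for everything up to the latest block.
                block_range: Some(BlockRange { block_from, block_to: None }),
                account_ids: account_ids.clone(),
            })
            .await?
            .into_inner();
        records.extend(response.transaction_records);

        // `pagination_info.block_num` is the last block actually covered;
        // stop once it reaches the chain tip, otherwise resume just after it.
        let info = response.pagination_info.expect("server sets pagination info");
        if info.block_num >= info.chain_tip {
            return Ok(records);
        }
        block_from = info.block_num + 1;
    }
}
```

Keying the chunks to block numbers means a caller can persist `block_num` between runs and resume syncing without tracking any opaque cursor.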
- /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_decoding_message_size(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.inner = self.inner.max_encoding_message_size(limit); - self - } - /// Returns the status info. - pub async fn status( - &mut self, - request: impl tonic::IntoRequest<()>, - ) -> std::result::Result, tonic::Status> { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc_store.Rpc/Status"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc_store.Rpc", "Status")); - self.inner.unary(req, path, codec).await - } - /// Returns a nullifier proof for each of the requested nullifiers. - pub async fn check_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/CheckNullifiers", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "CheckNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state of an account with the specified ID. - pub async fn get_account_details( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetAccountDetails", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetAccountDetails")); - self.inner.unary(req, path, codec).await - } - /// Returns the latest state proof of the specified account. - pub async fn get_account_proof( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetAccountProof", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetAccountProof")); - self.inner.unary(req, path, codec).await - } - /// Returns raw block data for the specified block number. 
- pub async fn get_block_by_number( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetBlockByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetBlockByNumber")); - self.inner.unary(req, path, codec).await - } - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. - pub async fn get_block_header_by_number( - &mut self, - request: impl tonic::IntoRequest< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetBlockHeaderByNumber", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetBlockHeaderByNumber")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of committed notes matching the provided note IDs. - pub async fn get_notes_by_id( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetNotesById", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetNotesById")); - self.inner.unary(req, path, codec).await - } - /// Returns the script for a note by its root. - pub async fn get_note_script_by_root( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/GetNoteScriptByRoot", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "GetNoteScriptByRoot")); - self.inner.unary(req, path, codec).await - } - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. 
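Per the comment above, `SyncNullifiers` filters by 16-bit prefixes rather than full nullifiers, which keeps requests small and avoids revealing exactly which nullifiers are being watched. Building such a request might look like the following sketch; the fields come from the `SyncNullifiersRequest` definition earlier in this file, and the crate path plus the assumption that these types carry over to the new `rpc` module are hypothetical.

```rust
use miden_node_proto::generated::rpc::{BlockRange, SyncNullifiersRequest}; // assumed path

/// Builds a request for all nullifier updates since `block_from` whose
/// 16-bit prefixes match the watched set.
fn nullifier_sync_request(block_from: u32, prefixes: Vec<u32>) -> SyncNullifiersRequest {
    SyncNullifiersRequest {
        block_range: Some(BlockRange { block_from, block_to: None }),
        // Only 16-bit prefixes are supported at this time.
        prefix_len: 16,
        // Each entry is a nullifier prefix of `prefix_len` bits; the server
        // returns every recorded nullifier matching any of them, so the
        // caller filters the excess locally.
        nullifiers: prefixes,
    }
}
```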
- pub async fn sync_nullifiers( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncNullifiers", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncNullifiers")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - pub async fn sync_notes( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc_store.Rpc/SyncNotes"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc_store.Rpc", "SyncNotes")); - self.inner.unary(req, path, codec).await - } - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - pub async fn sync_state( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static("/rpc_store.Rpc/SyncState"); - let mut req = request.into_request(); - req.extensions_mut().insert(GrpcMethod::new("rpc_store.Rpc", "SyncState")); - self.inner.unary(req, path, codec).await - } - /// Returns account vault updates for specified account within a block range. 
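The `sync_state` doc comment above prescribes a polling loop that terminates once the returned block header reaches the chain tip. A compact version of that loop follows; the stopping condition is taken verbatim from the comment, while the crate paths, the `BlockHeader::block_num` field name, and the `expect` on the server-set header are assumptions.

```rust
use miden_node_proto::generated::account::AccountId; // assumed path
use miden_node_proto::generated::rpc::api_client::ApiClient; // assumed path
use miden_node_proto::generated::rpc::SyncStateRequest; // assumed path

/// Repeatedly calls `sync_state` until the requester is fully synchronized,
/// i.e. `response.block_header.block_num == response.chain_tip`.
async fn sync_to_tip(
    client: &mut ApiClient<tonic::transport::Channel>,
    account_ids: Vec<AccountId>,
    note_tags: Vec<u32>,
    mut block_num: u32,
) -> Result<(), tonic::Status> {
    loop {
        let response = client
            .sync_state(SyncStateRequest {
                block_num,
                account_ids: account_ids.clone(),
                note_tags: note_tags.clone(),
            })
            .await?
            .into_inner();

        // ... apply response.mmr_delta, response.accounts, response.notes,
        // and response.transactions to local state here ...

        let header = response.block_header.expect("server sets block header");
        if header.block_num == response.chain_tip {
            return Ok(());
        }
        // Continue from the block the server just reported.
        block_num = header.block_num;
    }
}
```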
- pub async fn sync_account_vault( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncAccountVault", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncAccountVault")); - self.inner.unary(req, path, codec).await - } - /// Returns storage map updates for specified account and storage slots within a block range. - pub async fn sync_storage_maps( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncStorageMaps", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncStorageMaps")); - self.inner.unary(req, path, codec).await - } - /// Returns transactions records for specific accounts within a block range. - pub async fn sync_transactions( - &mut self, - request: impl tonic::IntoRequest, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { - self.inner - .ready() - .await - .map_err(|e| { - tonic::Status::unknown( - format!("Service was not ready: {}", e.into()), - ) - })?; - let codec = tonic_prost::ProstCodec::default(); - let path = http::uri::PathAndQuery::from_static( - "/rpc_store.Rpc/SyncTransactions", - ); - let mut req = request.into_request(); - req.extensions_mut() - .insert(GrpcMethod::new("rpc_store.Rpc", "SyncTransactions")); - self.inner.unary(req, path, codec).await - } - } -} -/// Generated server implementations. -pub mod rpc_server { - #![allow( - unused_variables, - dead_code, - missing_docs, - clippy::wildcard_imports, - clippy::let_unit_value, - )] - use tonic::codegen::*; - /// Generated trait containing gRPC methods that should be implemented for use with RpcServer. - #[async_trait] - pub trait Rpc: std::marker::Send + std::marker::Sync + 'static { - /// Returns the status info. - async fn status( - &self, - request: tonic::Request<()>, - ) -> std::result::Result, tonic::Status>; - /// Returns a nullifier proof for each of the requested nullifiers. - async fn check_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state of an account with the specified ID. - async fn get_account_details( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the latest state proof of the specified account. - async fn get_account_proof( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns raw block data for the specified block number. - async fn get_block_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Retrieves block header by given block number. Optionally, it also returns the MMR path - /// and current chain length to authenticate the block's inclusion. 
- async fn get_block_header_by_number( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of committed notes matching the provided note IDs. - async fn get_notes_by_id( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns the script for a note by its root. - async fn get_note_script_by_root( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - /// - /// Note that only 16-bit prefixes are supported at this time. - async fn sync_nullifiers( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - /// - /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - /// matching notes for. The request will then return the next block containing any note matching the provided tags. - /// - /// The response includes each note's metadata and inclusion proof. - /// - /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - /// tip of the chain. - async fn sync_notes( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns info which can be used by the requester to sync up to the latest state of the chain - /// for the objects (accounts, notes, nullifiers) the requester is interested in. - /// - /// This request returns the next block containing requested data. It also returns `chain_tip` - /// which is the latest block number in the chain. requester is expected to repeat these requests - /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point - /// the requester is fully synchronized with the chain. - /// - /// Each request also returns info about new notes, nullifiers etc. created. It also returns - /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - /// MMR peaks and chain MMR nodes. - /// - /// For preserving some degree of privacy, note tags and nullifiers filters contain only high - /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - /// additional filtering of that data on its side. - async fn sync_state( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns account vault updates for specified account within a block range. - async fn sync_account_vault( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns storage map updates for specified account and storage slots within a block range. - async fn sync_storage_maps( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - /// Returns transactions records for specific accounts within a block range. 
- async fn sync_transactions( - &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - >; - } - /// Store API for the RPC component - #[derive(Debug)] - pub struct RpcServer { - inner: Arc, - accept_compression_encodings: EnabledCompressionEncodings, - send_compression_encodings: EnabledCompressionEncodings, - max_decoding_message_size: Option, - max_encoding_message_size: Option, - } - impl RpcServer { - pub fn new(inner: T) -> Self { - Self::from_arc(Arc::new(inner)) - } - pub fn from_arc(inner: Arc) -> Self { - Self { - inner, - accept_compression_encodings: Default::default(), - send_compression_encodings: Default::default(), - max_decoding_message_size: None, - max_encoding_message_size: None, - } - } - pub fn with_interceptor( - inner: T, - interceptor: F, - ) -> InterceptedService - where - F: tonic::service::Interceptor, - { - InterceptedService::new(Self::new(inner), interceptor) - } - /// Enable decompressing requests with the given encoding. - #[must_use] - pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.accept_compression_encodings.enable(encoding); - self - } - /// Compress responses with the given encoding, if the client supports it. - #[must_use] - pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { - self.send_compression_encodings.enable(encoding); - self - } - /// Limits the maximum size of a decoded message. - /// - /// Default: `4MB` - #[must_use] - pub fn max_decoding_message_size(mut self, limit: usize) -> Self { - self.max_decoding_message_size = Some(limit); - self - } - /// Limits the maximum size of an encoded message. - /// - /// Default: `usize::MAX` - #[must_use] - pub fn max_encoding_message_size(mut self, limit: usize) -> Self { - self.max_encoding_message_size = Some(limit); - self - } - } - impl tonic::codegen::Service> for RpcServer - where - T: Rpc, - B: Body + std::marker::Send + 'static, - B::Error: Into + std::marker::Send + 'static, - { - type Response = http::Response; - type Error = std::convert::Infallible; - type Future = BoxFuture; - fn poll_ready( - &mut self, - _cx: &mut Context<'_>, - ) -> Poll> { - Poll::Ready(Ok(())) - } - fn call(&mut self, req: http::Request) -> Self::Future { - match req.uri().path() { - "/rpc_store.Rpc/Status" => { - #[allow(non_camel_case_types)] - struct StatusSvc(pub Arc); - impl tonic::server::UnaryService<()> for StatusSvc { - type Response = super::StoreStatus; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call(&mut self, request: tonic::Request<()>) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::status(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = StatusSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/CheckNullifiers" => { - #[allow(non_camel_case_types)] - struct 
CheckNullifiersSvc(pub Arc); - impl tonic::server::UnaryService - for CheckNullifiersSvc { - type Response = super::CheckNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::check_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = CheckNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetAccountDetails" => { - #[allow(non_camel_case_types)] - struct GetAccountDetailsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetAccountDetailsSvc { - type Response = super::super::account::AccountDetails; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account_details(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountDetailsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetAccountProof" => { - #[allow(non_camel_case_types)] - struct GetAccountProofSvc(pub Arc); - impl tonic::server::UnaryService - for GetAccountProofSvc { - type Response = super::AccountProofResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_account_proof(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetAccountProofSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, 
- max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetBlockByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetBlockByNumberSvc { - type Response = super::super::blockchain::MaybeBlock; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::blockchain::BlockNumber, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_by_number(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetBlockHeaderByNumber" => { - #[allow(non_camel_case_types)] - struct GetBlockHeaderByNumberSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService< - super::super::shared::BlockHeaderByNumberRequest, - > for GetBlockHeaderByNumberSvc { - type Response = super::super::shared::BlockHeaderByNumberResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request< - super::super::shared::BlockHeaderByNumberRequest, - >, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_block_header_by_number(&inner, request) - .await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetBlockHeaderByNumberSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetNotesById" => { - #[allow(non_camel_case_types)] - struct GetNotesByIdSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNotesByIdSvc { - type Response = super::super::note::CommittedNoteList; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_notes_by_id(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = 
self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNotesByIdSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/GetNoteScriptByRoot" => { - #[allow(non_camel_case_types)] - struct GetNoteScriptByRootSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for GetNoteScriptByRootSvc { - type Response = super::super::shared::MaybeNoteScript; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::get_note_script_by_root(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = GetNoteScriptByRootSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncNullifiers" => { - #[allow(non_camel_case_types)] - struct SyncNullifiersSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncNullifiersSvc { - type Response = super::SyncNullifiersResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_nullifiers(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNullifiersSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncNotes" => { - #[allow(non_camel_case_types)] - struct SyncNotesSvc(pub Arc); - impl tonic::server::UnaryService - for SyncNotesSvc { - type Response = super::SyncNotesResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_notes(&inner, request).await - }; - 
Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncNotesSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncState" => { - #[allow(non_camel_case_types)] - struct SyncStateSvc(pub Arc); - impl tonic::server::UnaryService - for SyncStateSvc { - type Response = super::SyncStateResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_state(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncStateSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncAccountVault" => { - #[allow(non_camel_case_types)] - struct SyncAccountVaultSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncAccountVaultSvc { - type Response = super::SyncAccountVaultResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_account_vault(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncAccountVaultSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncStorageMaps" => { - #[allow(non_camel_case_types)] - struct SyncStorageMapsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncStorageMapsSvc { - type Response = super::SyncStorageMapsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - 
request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_storage_maps(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncStorageMapsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - "/rpc_store.Rpc/SyncTransactions" => { - #[allow(non_camel_case_types)] - struct SyncTransactionsSvc(pub Arc); - impl< - T: Rpc, - > tonic::server::UnaryService - for SyncTransactionsSvc { - type Response = super::SyncTransactionsResponse; - type Future = BoxFuture< - tonic::Response, - tonic::Status, - >; - fn call( - &mut self, - request: tonic::Request, - ) -> Self::Future { - let inner = Arc::clone(&self.0); - let fut = async move { - ::sync_transactions(&inner, request).await - }; - Box::pin(fut) - } - } - let accept_compression_encodings = self.accept_compression_encodings; - let send_compression_encodings = self.send_compression_encodings; - let max_decoding_message_size = self.max_decoding_message_size; - let max_encoding_message_size = self.max_encoding_message_size; - let inner = self.inner.clone(); - let fut = async move { - let method = SyncTransactionsSvc(inner); - let codec = tonic_prost::ProstCodec::default(); - let mut grpc = tonic::server::Grpc::new(codec) - .apply_compression_config( - accept_compression_encodings, - send_compression_encodings, - ) - .apply_max_message_size_config( - max_decoding_message_size, - max_encoding_message_size, - ); - let res = grpc.unary(method, req).await; - Ok(res) - }; - Box::pin(fut) - } - _ => { - Box::pin(async move { - let mut response = http::Response::new( - tonic::body::Body::default(), - ); - let headers = response.headers_mut(); - headers - .insert( - tonic::Status::GRPC_STATUS, - (tonic::Code::Unimplemented as i32).into(), - ); - headers - .insert( - http::header::CONTENT_TYPE, - tonic::metadata::GRPC_CONTENT_TYPE, - ); - Ok(response) - }) - } - } - } - } - impl Clone for RpcServer { - fn clone(&self) -> Self { - let inner = self.inner.clone(); - Self { - inner, - accept_compression_encodings: self.accept_compression_encodings, - send_compression_encodings: self.send_compression_encodings, - max_decoding_message_size: self.max_decoding_message_size, - max_encoding_message_size: self.max_encoding_message_size, - } - } - } - /// Generated gRPC service name - pub const SERVICE_NAME: &str = "rpc_store.Rpc"; - impl tonic::server::NamedService for RpcServer { - const NAME: &'static str = SERVICE_NAME; - } -} diff --git a/crates/proto/src/generated/shared.rs b/crates/proto/src/generated/shared.rs deleted file mode 100644 index f79b9117e..000000000 --- a/crates/proto/src/generated/shared.rs +++ /dev/null @@ -1,34 +0,0 @@ -// This file is @generated by prost-build. 
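
> **Editor's note.** The deleted server code above (and its replacement later in this diff) repeats one pattern per RPC: a zero-sized `*Svc(pub Arc<T>)` adapter implementing `tonic::server::UnaryService`, selected by matching on the request URI path. A standalone sketch of that pattern, using plain `std` types and illustrative names (`Rpc`, `CheckNullifiersSvc`, `dispatch` here are simplified stand-ins, not the generated API):

```rust
use std::{future::Future, pin::Pin, sync::Arc};

type BoxFuture<T> = Pin<Box<dyn Future<Output = T> + Send>>;

// Stand-in for the generated `Rpc` trait: one method per endpoint.
trait Rpc: Send + Sync + 'static {
    fn check_nullifiers(&self, request: String) -> BoxFuture<String>;
}

// Stand-in for the generated per-method adapter `CheckNullifiersSvc(pub Arc<T>)`.
struct CheckNullifiersSvc<T>(Arc<T>);

impl<T: Rpc> CheckNullifiersSvc<T> {
    fn call(&mut self, request: String) -> BoxFuture<String> {
        // Clone the Arc so the future owns its handle to the implementation,
        // exactly as the generated `fn call` does.
        let inner = Arc::clone(&self.0);
        Box::pin(async move { inner.check_nullifiers(request).await })
    }
}

// The generated `Service::call` routes on the URI path; unknown paths return
// an `Unimplemented` status (modelled as an error string here).
fn dispatch<T: Rpc>(
    inner: Arc<T>,
    path: &str,
    body: String,
) -> BoxFuture<Result<String, &'static str>> {
    match path {
        "/rpc_store.Rpc/CheckNullifiers" => {
            let mut svc = CheckNullifiersSvc(inner);
            let fut = svc.call(body);
            Box::pin(async move { Ok(fut.await) })
        }
        _ => Box::pin(async move { Err("unimplemented") }),
    }
}
```
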
-/// Returns the block header corresponding to the requested block number, as well as the merkle
-/// path and current forest which validate the block's inclusion in the chain.
-///
-/// The Merkle path is an MMR proof for the block's leaf, based on the current chain length.
-#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
-pub struct BlockHeaderByNumberRequest {
-    /// The target block height, defaults to latest if not provided.
-    #[prost(uint32, optional, tag = "1")]
-    pub block_num: ::core::option::Option<u32>,
-    /// Whether or not to return authentication data for the block header.
-    #[prost(bool, optional, tag = "2")]
-    pub include_mmr_proof: ::core::option::Option<bool>,
-}
-/// Represents the result of getting a block header by block number.
-#[derive(Clone, PartialEq, ::prost::Message)]
-pub struct BlockHeaderByNumberResponse {
-    /// The requested block header.
-    #[prost(message, optional, tag = "1")]
-    pub block_header: ::core::option::Option<super::blockchain::BlockHeader>,
-    /// Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`.
-    #[prost(message, optional, tag = "2")]
-    pub mmr_path: ::core::option::Option,
-    /// Current chain length.
-    #[prost(fixed32, optional, tag = "3")]
-    pub chain_length: ::core::option::Option<u32>,
-}
-/// Represents a note script or nothing.
-#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
-pub struct MaybeNoteScript {
-    /// The script for a note by its root.
-    #[prost(message, optional, tag = "1")]
-    pub script: ::core::option::Option,
-}
diff --git a/crates/proto/src/generated/store.rs b/crates/proto/src/generated/store.rs
new file mode 100644
index 000000000..451922429
--- /dev/null
+++ b/crates/proto/src/generated/store.rs
@@ -0,0 +1,2964 @@
+// This file is @generated by prost-build.
+/// Returns data required to prove the next block.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlockInputsRequest {
+    /// IDs of all accounts updated in the proposed block for which to retrieve account witnesses.
+    #[prost(message, repeated, tag = "1")]
+    pub account_ids: ::prost::alloc::vec::Vec,
+    /// Nullifiers of all notes consumed by the block for which to retrieve witnesses.
+    ///
+    /// Due to note erasure it will generally not be possible to know the exact set of nullifiers
+    /// a block will create, unless we pre-execute note erasure. So in practice, this set of
+    /// nullifiers will be the set of nullifiers of all proven batches in the block, which is a
+    /// superset of the nullifiers the block may create.
+    ///
+    /// However, if it is known that a certain note will be erased, it would not be necessary to
+    /// provide a nullifier witness for it.
+    #[prost(message, repeated, tag = "2")]
+    pub nullifiers: ::prost::alloc::vec::Vec,
+    /// Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**.
+    #[prost(message, repeated, tag = "3")]
+    pub unauthenticated_notes: ::prost::alloc::vec::Vec,
+    /// Array of block numbers referenced by all batches in the block.
+    #[prost(fixed32, repeated, tag = "4")]
+    pub reference_blocks: ::prost::alloc::vec::Vec<u32>,
+}
+/// Represents the result of getting block inputs.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BlockInputs {
+    /// The latest block header.
+    #[prost(message, optional, tag = "1")]
+    pub latest_block_header: ::core::option::Option<super::blockchain::BlockHeader>,
+    /// Proof of each requested unauthenticated note's inclusion in a block, **if it existed in
+    /// the store**.
+    #[prost(message, repeated, tag = "2")]
+    pub unauthenticated_note_proofs: ::prost::alloc::vec::Vec<
+        super::note::NoteInclusionInBlockProof,
+    >,
+    /// The serialized chain MMR which includes proofs for all blocks referenced by the
+    /// above note inclusion proofs as well as proofs for inclusion of the requested blocks
+    /// referenced by the batches in the block.
+    #[prost(bytes = "vec", tag = "3")]
+    pub partial_block_chain: ::prost::alloc::vec::Vec<u8>,
+    /// The state commitments of the requested accounts and their authentication paths.
+    #[prost(message, repeated, tag = "4")]
+    pub account_witnesses: ::prost::alloc::vec::Vec,
+    /// The requested nullifiers and their authentication paths.
+    #[prost(message, repeated, tag = "5")]
+    pub nullifier_witnesses: ::prost::alloc::vec::Vec<block_inputs::NullifierWitness>,
+}
+/// Nested message and enum types in `BlockInputs`.
+pub mod block_inputs {
+    /// A nullifier returned as a response to the `GetBlockInputs`.
+    #[derive(Clone, PartialEq, ::prost::Message)]
+    pub struct NullifierWitness {
+        /// The nullifier.
+        #[prost(message, optional, tag = "1")]
+        pub nullifier: ::core::option::Option,
+        /// The SMT proof to verify the nullifier's inclusion in the nullifier tree.
+        #[prost(message, optional, tag = "2")]
+        pub opening: ::core::option::Option,
+    }
+}
+/// Returns the inputs for a transaction batch.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchInputsRequest {
+    /// List of unauthenticated note commitments to be queried from the database.
+    #[prost(message, repeated, tag = "1")]
+    pub note_commitments: ::prost::alloc::vec::Vec,
+    /// Set of block numbers referenced by transactions.
+    #[prost(fixed32, repeated, tag = "2")]
+    pub reference_blocks: ::prost::alloc::vec::Vec<u32>,
+}
+/// Represents the result of getting batch inputs.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct BatchInputs {
+    /// The block header that the transaction batch should reference.
+    #[prost(message, optional, tag = "1")]
+    pub batch_reference_block_header: ::core::option::Option<
+        super::blockchain::BlockHeader,
+    >,
+    /// Proof of each *found* unauthenticated note's inclusion in a block.
+    #[prost(message, repeated, tag = "2")]
+    pub note_proofs: ::prost::alloc::vec::Vec<super::note::NoteInclusionInBlockProof>,
+    /// The serialized chain MMR which includes proofs for all blocks referenced by the
+    /// above note inclusion proofs as well as proofs for inclusion of the blocks referenced
+    /// by the transactions in the batch.
+    #[prost(bytes = "vec", tag = "3")]
+    pub partial_block_chain: ::prost::alloc::vec::Vec<u8>,
+}
+/// Returns data required to validate a new transaction.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TransactionInputsRequest {
+    /// ID of the account against which a transaction is executed.
+    #[prost(message, optional, tag = "1")]
+    pub account_id: ::core::option::Option,
+    /// Set of nullifiers consumed by this transaction.
+    #[prost(message, repeated, tag = "2")]
+    pub nullifiers: ::prost::alloc::vec::Vec,
+    /// Set of unauthenticated note commitments to check for existence on-chain.
+    ///
+    /// These are notes which were not on-chain at the state the transaction was proven,
+    /// but could by now be present.
+    #[prost(message, repeated, tag = "3")]
+    pub unauthenticated_notes: ::prost::alloc::vec::Vec,
+}
+/// Represents the result of getting transaction inputs.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct TransactionInputs {
+    /// Account state proof.
+    #[prost(message, optional, tag = "1")]
+    pub account_state: ::core::option::Option<
+        transaction_inputs::AccountTransactionInputRecord,
+    >,
+    /// List of nullifiers that have been consumed.
+    #[prost(message, repeated, tag = "2")]
+    pub nullifiers: ::prost::alloc::vec::Vec<
+        transaction_inputs::NullifierTransactionInputRecord,
+    >,
+    /// List of unauthenticated notes that were not found in the database.
+    #[prost(message, repeated, tag = "3")]
+    pub found_unauthenticated_notes: ::prost::alloc::vec::Vec,
+    /// The node's current block height.
+    #[prost(fixed32, tag = "4")]
+    pub block_height: u32,
+    /// Whether the account ID prefix is unique. Only relevant for account creation requests.
+    ///
+    /// TODO: Replace this with an error once a general error message exists.
+    #[prost(bool, optional, tag = "5")]
+    pub new_account_id_prefix_is_unique: ::core::option::Option<bool>,
+}
+/// Nested message and enum types in `TransactionInputs`.
+pub mod transaction_inputs {
+    /// An account returned as a response to the `GetTransactionInputs`.
+    #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+    pub struct AccountTransactionInputRecord {
+        /// The account ID.
+        #[prost(message, optional, tag = "1")]
+        pub account_id: ::core::option::Option,
+        /// The latest account commitment, zero commitment if the account doesn't exist.
+        #[prost(message, optional, tag = "2")]
+        pub account_commitment: ::core::option::Option,
+    }
+    /// A nullifier returned as a response to the `GetTransactionInputs`.
+    #[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+    pub struct NullifierTransactionInputRecord {
+        /// The nullifier ID.
+        #[prost(message, optional, tag = "1")]
+        pub nullifier: ::core::option::Option,
+        /// The block at which the nullifier has been consumed, zero if not consumed.
+        #[prost(fixed32, tag = "2")]
+        pub block_num: u32,
+    }
+}
+/// Account ID prefix.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct AccountIdPrefix {
+    /// Account ID prefix.
+    #[prost(fixed32, tag = "1")]
+    pub account_id_prefix: u32,
+}
+/// Represents the result of getting network account details by prefix.
+#[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct MaybeAccountDetails {
+    /// Account details.
+    #[prost(message, optional, tag = "1")]
+    pub details: ::core::option::Option<super::account::AccountDetails>,
+}
+/// Returns a paginated list of unconsumed network notes for an account.
+///
+/// Notes created or consumed after the specified block are excluded from the result.
+#[derive(Clone, Copy, PartialEq, Eq, Hash, ::prost::Message)]
+pub struct UnconsumedNetworkNotesRequest {
+    /// This should be null on the first call, and set to the response token until the response token
+    /// is null, at which point all data has been fetched.
+    ///
+    /// Note that this token is only valid if used with the same parameters.
+    #[prost(uint64, optional, tag = "1")]
+    pub page_token: ::core::option::Option<u64>,
+    /// Number of notes to retrieve per page.
+    #[prost(uint64, tag = "2")]
+    pub page_size: u64,
+    /// The network account ID prefix to filter notes by.
+    #[prost(uint32, tag = "3")]
+    pub network_account_id_prefix: u32,
+    /// The block number to filter the returned notes by.
+    ///
+    /// Notes that are created or consumed after this block are excluded from the result.
+    #[prost(fixed32, tag = "4")]
+    pub block_num: u32,
+}
+/// Represents the result of getting the unconsumed network notes.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct UnconsumedNetworkNotes {
+    /// An opaque pagination token.
+    ///
+    /// Use this in your next request to get the next
+    /// set of data.
+    ///
+    /// Will be null once there is no more data remaining.
+    #[prost(uint64, optional, tag = "1")]
+    pub next_token: ::core::option::Option<u64>,
+    /// The list of unconsumed network notes.
+    #[prost(message, repeated, tag = "2")]
+    pub notes: ::prost::alloc::vec::Vec,
+}
+/// Represents the result of getting the network account ids.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct NetworkAccountIdList {
+    /// Pagination information.
+    #[prost(message, optional, tag = "1")]
+    pub pagination_info: ::core::option::Option,
+    /// The list of network account ids.
+    #[prost(message, repeated, tag = "2")]
+    pub account_ids: ::prost::alloc::vec::Vec,
+}
+/// Current blockchain data based on the requested block number.
+#[derive(Clone, PartialEq, ::prost::Message)]
+pub struct CurrentBlockchainData {
+    /// Commitments that represent the current state according to the MMR.
+    #[prost(message, repeated, tag = "1")]
+    pub current_peaks: ::prost::alloc::vec::Vec,
+    /// Current block header.
+    #[prost(message, optional, tag = "2")]
+    pub current_block_header: ::core::option::Option<super::blockchain::BlockHeader>,
+}
+/// Generated client implementations.
+pub mod rpc_client {
+    #![allow(
+        unused_variables,
+        dead_code,
+        missing_docs,
+        clippy::wildcard_imports,
+        clippy::let_unit_value,
+    )]
+    use tonic::codegen::*;
+    use tonic::codegen::http::Uri;
+    /// Store API for the RPC component
+    #[derive(Debug, Clone)]
+    pub struct RpcClient<T> {
+        inner: tonic::client::Grpc<T>,
+    }
+    impl RpcClient<tonic::transport::Channel> {
+        /// Attempt to create a new client by connecting to a given endpoint.
+        pub async fn connect<D>(dst: D) -> Result<Self, tonic::transport::Error>
+        where
+            D: TryInto<tonic::transport::Endpoint>,
+            D::Error: Into<StdError>,
+        {
+            let conn = tonic::transport::Endpoint::new(dst)?.connect().await?;
+            Ok(Self::new(conn))
+        }
+    }
+    impl<T> RpcClient<T>
+    where
+        T: tonic::client::GrpcService<tonic::body::Body>,
+        T::Error: Into<StdError>,
+        T::ResponseBody: Body<Data = Bytes> + std::marker::Send + 'static,
+        <T::ResponseBody as Body>::Error: Into<StdError> + std::marker::Send,
+    {
+        pub fn new(inner: T) -> Self {
+            let inner = tonic::client::Grpc::new(inner);
+            Self { inner }
+        }
+        pub fn with_origin(inner: T, origin: Uri) -> Self {
+            let inner = tonic::client::Grpc::with_origin(inner, origin);
+            Self { inner }
+        }
+        pub fn with_interceptor<F>(
+            inner: T,
+            interceptor: F,
+        ) -> RpcClient<InterceptedService<T, F>>
+        where
+            F: tonic::service::Interceptor,
+            T::ResponseBody: Default,
+            T: tonic::codegen::Service<
+                http::Request<tonic::body::Body>,
+                Response = http::Response<
+                    <T as tonic::client::GrpcService<tonic::body::Body>>::ResponseBody,
+                >,
+            >,
+            <T as tonic::codegen::Service<
+                http::Request<tonic::body::Body>,
+            >>::Error: Into<StdError> + std::marker::Send + std::marker::Sync,
+        {
+            RpcClient::new(InterceptedService::new(inner, interceptor))
+        }
+        /// Compress requests with the given encoding.
+        ///
+        /// This requires the server to support it otherwise it might respond with an
+        /// error.
+        #[must_use]
+        pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.send_compressed(encoding);
+            self
+        }
+        /// Enable decompressing responses.
+        #[must_use]
+        pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self {
+            self.inner = self.inner.accept_compressed(encoding);
+            self
+        }
+        /// Limits the maximum size of a decoded message.
+        ///
+        /// Default: `4MB`
+        #[must_use]
+        pub fn max_decoding_message_size(mut self, limit: usize) -> Self {
+            self.inner = self.inner.max_decoding_message_size(limit);
+            self
+        }
+        /// Limits the maximum size of an encoded message.
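
> **Editor's note.** `UnconsumedNetworkNotesRequest` documents a token-based pagination contract: pass `page_token: None` on the first call, feed `next_token` back until it returns null. A minimal loop sketch; the client type `NtxStoreClient` and its `get_unconsumed_network_notes` method are assumptions for illustration (the corresponding client method is not part of this `rpc_client` module), while the request and response shapes come from the generated types above:

```rust
async fn count_unconsumed_notes(
    client: &mut NtxStoreClient, // hypothetical client exposing this RPC
    prefix: u32,
    block_num: u32,
) -> Result<usize, tonic::Status> {
    let mut total = 0;
    let mut page_token = None; // null on the first call, per the field docs
    loop {
        let page: UnconsumedNetworkNotes = client
            .get_unconsumed_network_notes(UnconsumedNetworkNotesRequest {
                page_token,
                page_size: 128,
                network_account_id_prefix: prefix,
                block_num,
            })
            .await? // method name is an assumption; not shown in this diff
            .into_inner();
        total += page.notes.len();
        match page.next_token {
            Some(token) => page_token = Some(token), // more pages remain
            None => return Ok(total),                // token is null: done
        }
    }
}
```

Note the field docs' caveat: the token is only valid when reused with the same `page_size`, prefix, and `block_num`.
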
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Returns the status info. + pub async fn status( + &mut self, + request: impl tonic::IntoRequest<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/Status"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "Status")); + self.inner.unary(req, path, codec).await + } + /// Returns a Sparse Merkle Tree opening proof for each requested nullifier + /// + /// Each proof demonstrates either: + /// + /// * **Inclusion**: Nullifier exists in the tree (note was consumed) + /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) + /// + /// The `leaf` field indicates the status: + /// + /// * `empty_leaf_index`: Non-inclusion proof + /// * `single` or `multiple`: Inclusion proof if the nullifier key is present + /// + /// Verify proofs against the nullifier tree root in the latest block header. + pub async fn check_nullifiers( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/CheckNullifiers", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "CheckNullifiers")); + self.inner.unary(req, path, codec).await + } + /// Returns the latest state of an account with the specified ID. + pub async fn get_account_details( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetAccountDetails", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetAccountDetails")); + self.inner.unary(req, path, codec).await + } + /// Returns the latest state proof of the specified account. + pub async fn get_account_proof( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetAccountProof", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetAccountProof")); + self.inner.unary(req, path, codec).await + } + /// Returns raw block data for the specified block number. 
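
> **Editor's note.** A usage sketch for the generated client, combining the builder methods defined above (`connect`, `send_compressed`, `accept_compressed`, `max_decoding_message_size`) with a unary call. The crate path `miden_node_proto::generated` is an assumption; the method signatures are the ones in this file:

```rust
use tonic::codec::CompressionEncoding;
// Module paths assumed; `CheckNullifiersRequest` lives in the sibling `rpc`
// module per the generated signatures above.
use miden_node_proto::generated::{rpc::CheckNullifiersRequest, store::rpc_client::RpcClient};

async fn query_nullifiers(endpoint: &'static str) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = RpcClient::connect(endpoint)
        .await?
        // Gzip both directions; the server must also enable it.
        .send_compressed(CompressionEncoding::Gzip)
        .accept_compressed(CompressionEncoding::Gzip)
        // Raise the 4MB decode default, e.g. for large proof lists.
        .max_decoding_message_size(16 * 1024 * 1024);
    // A default (empty) request is enough to exercise the round trip.
    let _response = client
        .check_nullifiers(CheckNullifiersRequest::default())
        .await?
        .into_inner();
    Ok(())
}
```
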
+ pub async fn get_block_by_number( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetBlockByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetBlockByNumber")); + self.inner.unary(req, path, codec).await + } + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + pub async fn get_block_header_by_number( + &mut self, + request: impl tonic::IntoRequest< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetBlockHeaderByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetBlockHeaderByNumber")); + self.inner.unary(req, path, codec).await + } + /// Returns a list of committed notes matching the provided note IDs. + pub async fn get_notes_by_id( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/GetNotesById"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "GetNotesById")); + self.inner.unary(req, path, codec).await + } + /// Returns the script for a note by its root. + pub async fn get_note_script_by_root( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/GetNoteScriptByRoot", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "GetNoteScriptByRoot")); + self.inner.unary(req, path, codec).await + } + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. + /// + /// Note that only 16-bit prefixes are supported at this time. 
+ pub async fn sync_nullifiers( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNullifiers"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNullifiers")); + self.inner.unary(req, path, codec).await + } + /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. + /// + /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for + /// matching notes for. The request will then return the next block containing any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the + /// tip of the chain. + pub async fn sync_notes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncNotes"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncNotes")); + self.inner.unary(req, path, codec).await + } + /// Returns info which can be used by the requester to sync up to the latest state of the chain + /// for the objects (accounts, notes, nullifiers) the requester is interested in. + /// + /// This request returns the next block containing requested data. It also returns `chain_tip` + /// which is the latest block number in the chain. requester is expected to repeat these requests + /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point + /// the requester is fully synchronized with the chain. + /// + /// Each request also returns info about new notes, nullifiers etc. created. It also returns + /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain + /// MMR peaks and chain MMR nodes. + /// + /// For preserving some degree of privacy, note tags and nullifiers filters contain only high + /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make + /// additional filtering of that data on its side. + pub async fn sync_state( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/store.Rpc/SyncState"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncState")); + self.inner.unary(req, path, codec).await + } + /// Returns account vault updates for specified account within a block range. 
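
> **Editor's note.** The `sync_state` doc comment above prescribes a client loop: repeat the request until `response.block_header.block_num == response.chain_tip`. A sketch of that loop; the response field names follow the doc comment, but the exact generated layout of `SyncStateRequest`/`SyncStateResponse` (including the `block_num` resume field) is an assumption here:

```rust
async fn sync_to_tip(
    client: &mut RpcClient<tonic::transport::Channel>,
    mut request: SyncStateRequest, // note tags, account IDs, etc. already set
) -> Result<(), tonic::Status> {
    loop {
        let response = client.sync_state(request.clone()).await?.into_inner();
        // ...apply the returned notes, nullifiers, and chain MMR delta here,
        // filtering out the excess entries matched only by the prefix filters...
        let header = response.block_header.unwrap_or_default();
        if header.block_num == response.chain_tip {
            return Ok(()); // fully synchronized with the chain
        }
        // Resume from the block this response covered (field name assumed).
        request.block_num = header.block_num;
    }
}
```
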
+ pub async fn sync_account_vault( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/SyncAccountVault", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "SyncAccountVault")); + self.inner.unary(req, path, codec).await + } + /// Returns storage map updates for specified account and storage slots within a block range. + pub async fn sync_storage_maps( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/SyncStorageMaps", + ); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("store.Rpc", "SyncStorageMaps")); + self.inner.unary(req, path, codec).await + } + /// Returns transactions records for specific accounts within a block range. + pub async fn sync_transactions( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.Rpc/SyncTransactions", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.Rpc", "SyncTransactions")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod rpc_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with RpcServer. + #[async_trait] + pub trait Rpc: std::marker::Send + std::marker::Sync + 'static { + /// Returns the status info. + async fn status( + &self, + request: tonic::Request<()>, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a Sparse Merkle Tree opening proof for each requested nullifier + /// + /// Each proof demonstrates either: + /// + /// * **Inclusion**: Nullifier exists in the tree (note was consumed) + /// * **Non-inclusion**: Nullifier does not exist (note was not consumed) + /// + /// The `leaf` field indicates the status: + /// + /// * `empty_leaf_index`: Non-inclusion proof + /// * `single` or `multiple`: Inclusion proof if the nullifier key is present + /// + /// Verify proofs against the nullifier tree root in the latest block header. + async fn check_nullifiers( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the latest state of an account with the specified ID. + async fn get_account_details( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the latest state proof of the specified account. 
+ async fn get_account_proof( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns raw block data for the specified block number. + async fn get_block_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + async fn get_block_header_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a list of committed notes matching the provided note IDs. + async fn get_notes_by_id( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the script for a note by its root. + async fn get_note_script_by_root( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. + /// + /// Note that only 16-bit prefixes are supported at this time. + async fn sync_nullifiers( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. + /// + /// requester specifies the `note_tags` they are interested in, and the block height from which to search for new for + /// matching notes for. The request will then return the next block containing any note matching the provided tags. + /// + /// The response includes each note's metadata and inclusion proof. + /// + /// A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the + /// tip of the chain. + async fn sync_notes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns info which can be used by the requester to sync up to the latest state of the chain + /// for the objects (accounts, notes, nullifiers) the requester is interested in. + /// + /// This request returns the next block containing requested data. It also returns `chain_tip` + /// which is the latest block number in the chain. requester is expected to repeat these requests + /// in a loop until `response.block_header.block_num == response.chain_tip`, at which point + /// the requester is fully synchronized with the chain. + /// + /// Each request also returns info about new notes, nullifiers etc. created. It also returns + /// Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain + /// MMR peaks and chain MMR nodes. + /// + /// For preserving some degree of privacy, note tags and nullifiers filters contain only high + /// part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make + /// additional filtering of that data on its side. + async fn sync_state( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns account vault updates for specified account within a block range. + async fn sync_account_vault( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns storage map updates for specified account and storage slots within a block range. 
+ async fn sync_storage_maps( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns transactions records for specific accounts within a block range. + async fn sync_transactions( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Store API for the RPC component + #[derive(Debug)] + pub struct RpcServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl RpcServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. 
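
> **Editor's note.** A serving sketch for the `RpcServer` wrapper defined above, assuming some type `MyStore` implements the `Rpc` trait; mounting via `tonic::transport::Server` is the standard route for a generated service (the new module's `NamedService` impl is truncated from this diff, but its deleted counterpart shows the pattern):

```rust
use tonic::{codec::CompressionEncoding, transport::Server};

async fn serve(store: MyStore) -> Result<(), Box<dyn std::error::Error>> {
    let svc = RpcServer::new(store)
        // Mirror the client-side settings: gzip both directions and a larger
        // decode limit than the 4MB default.
        .accept_compressed(CompressionEncoding::Gzip)
        .send_compressed(CompressionEncoding::Gzip)
        .max_decoding_message_size(16 * 1024 * 1024);
    Server::builder()
        .add_service(svc) // routed under the `store.Rpc` service name
        .serve("0.0.0.0:50051".parse()?) // placeholder address
        .await?;
    Ok(())
}
```
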
+ /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for RpcServer + where + T: Rpc, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/store.Rpc/Status" => { + #[allow(non_camel_case_types)] + struct StatusSvc(pub Arc); + impl tonic::server::UnaryService<()> for StatusSvc { + type Response = super::super::rpc::StoreStatus; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call(&mut self, request: tonic::Request<()>) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::status(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = StatusSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/CheckNullifiers" => { + #[allow(non_camel_case_types)] + struct CheckNullifiersSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for CheckNullifiersSvc { + type Response = super::super::rpc::CheckNullifiersResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::check_nullifiers(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = CheckNullifiersSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetAccountDetails" => { + #[allow(non_camel_case_types)] + struct GetAccountDetailsSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetAccountDetailsSvc { + type Response = super::super::account::AccountDetails; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_account_details(&inner, 
request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetAccountDetailsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetAccountProof" => { + #[allow(non_camel_case_types)] + struct GetAccountProofSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetAccountProofSvc { + type Response = super::super::rpc::AccountProofResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::AccountProofRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_account_proof(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetAccountProofSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetBlockByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockByNumberSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetBlockByNumberSvc { + type Response = super::super::blockchain::MaybeBlock; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::blockchain::BlockNumber, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_by_number(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetBlockHeaderByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockHeaderByNumberSvc(pub Arc); + impl< + T: Rpc, + > 
tonic::server::UnaryService< + super::super::rpc::BlockHeaderByNumberRequest, + > for GetBlockHeaderByNumberSvc { + type Response = super::super::rpc::BlockHeaderByNumberResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_header_by_number(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockHeaderByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetNotesById" => { + #[allow(non_camel_case_types)] + struct GetNotesByIdSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetNotesByIdSvc { + type Response = super::super::note::CommittedNoteList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_notes_by_id(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNotesByIdSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/GetNoteScriptByRoot" => { + #[allow(non_camel_case_types)] + struct GetNoteScriptByRootSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for GetNoteScriptByRootSvc { + type Response = super::super::rpc::MaybeNoteScript; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_note_script_by_root(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNoteScriptByRootSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + 
accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncNullifiers" => { + #[allow(non_camel_case_types)] + struct SyncNullifiersSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncNullifiersRequest, + > for SyncNullifiersSvc { + type Response = super::super::rpc::SyncNullifiersResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncNullifiersRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_nullifiers(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncNullifiersSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncNotes" => { + #[allow(non_camel_case_types)] + struct SyncNotesSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for SyncNotesSvc { + type Response = super::super::rpc::SyncNotesResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_notes(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncNotesSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncState" => { + #[allow(non_camel_case_types)] + struct SyncStateSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService + for SyncStateSvc { + type Response = super::super::rpc::SyncStateResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_state(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = 
self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncStateSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncAccountVault" => { + #[allow(non_camel_case_types)] + struct SyncAccountVaultSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncAccountVaultRequest, + > for SyncAccountVaultSvc { + type Response = super::super::rpc::SyncAccountVaultResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncAccountVaultRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_account_vault(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncAccountVaultSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncStorageMaps" => { + #[allow(non_camel_case_types)] + struct SyncStorageMapsSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncStorageMapsRequest, + > for SyncStorageMapsSvc { + type Response = super::super::rpc::SyncStorageMapsResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncStorageMapsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_storage_maps(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncStorageMapsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.Rpc/SyncTransactions" => { + #[allow(non_camel_case_types)] + struct SyncTransactionsSvc(pub Arc); + impl< + T: Rpc, + > tonic::server::UnaryService< + super::super::rpc::SyncTransactionsRequest, + > for SyncTransactionsSvc { + type Response = super::super::rpc::SyncTransactionsResponse; + type Future = BoxFuture< + 
tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::SyncTransactionsRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sync_transactions(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SyncTransactionsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for RpcServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "store.Rpc"; + impl tonic::server::NamedService for RpcServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod block_producer_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Store API for the BlockProducer component + #[derive(Debug, Clone)] + pub struct BlockProducerClient { + inner: tonic::client::Grpc, + } + impl BlockProducerClient { + /// Attempt to create a new client by connecting to a given endpoint. + pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl BlockProducerClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> BlockProducerClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + BlockProducerClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. 
+ /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Applies changes of a new block to the DB and in-memory data structures. + pub async fn apply_block( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/ApplyBlock", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "ApplyBlock")); + self.inner.unary(req, path, codec).await + } + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + pub async fn get_block_header_by_number( + &mut self, + request: impl tonic::IntoRequest< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetBlockHeaderByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("store.BlockProducer", "GetBlockHeaderByNumber"), + ); + self.inner.unary(req, path, codec).await + } + /// Returns data required to prove the next block. + pub async fn get_block_inputs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetBlockInputs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "GetBlockInputs")); + self.inner.unary(req, path, codec).await + } + /// Returns the inputs for a transaction batch. 
+ pub async fn get_batch_inputs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result, tonic::Status> { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetBatchInputs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "GetBatchInputs")); + self.inner.unary(req, path, codec).await + } + /// Returns data required to validate a new transaction. + pub async fn get_transaction_inputs( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.BlockProducer/GetTransactionInputs", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.BlockProducer", "GetTransactionInputs")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod block_producer_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with BlockProducerServer. + #[async_trait] + pub trait BlockProducer: std::marker::Send + std::marker::Sync + 'static { + /// Applies changes of a new block to the DB and in-memory data structures. + async fn apply_block( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + async fn get_block_header_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns data required to prove the next block. + async fn get_block_inputs( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns the inputs for a transaction batch. + async fn get_batch_inputs( + &self, + request: tonic::Request, + ) -> std::result::Result, tonic::Status>; + /// Returns data required to validate a new transaction. 
+ async fn get_transaction_inputs( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Store API for the BlockProducer component + #[derive(Debug)] + pub struct BlockProducerServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl BlockProducerServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for BlockProducerServer + where + T: BlockProducer, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/store.BlockProducer/ApplyBlock" => { + #[allow(non_camel_case_types)] + struct ApplyBlockSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for ApplyBlockSvc { + type Response = (); + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::apply_block(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = ApplyBlockSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + 
Box::pin(fut) + } + "/store.BlockProducer/GetBlockHeaderByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockHeaderByNumberSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService< + super::super::rpc::BlockHeaderByNumberRequest, + > for GetBlockHeaderByNumberSvc { + type Response = super::super::rpc::BlockHeaderByNumberResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_header_by_number( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockHeaderByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetBlockInputs" => { + #[allow(non_camel_case_types)] + struct GetBlockInputsSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for GetBlockInputsSvc { + type Response = super::BlockInputs; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_inputs(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockInputsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetBatchInputs" => { + #[allow(non_camel_case_types)] + struct GetBatchInputsSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for GetBatchInputsSvc { + type Response = super::BatchInputs; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_batch_inputs(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = 
async move { + let method = GetBatchInputsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.BlockProducer/GetTransactionInputs" => { + #[allow(non_camel_case_types)] + struct GetTransactionInputsSvc(pub Arc); + impl< + T: BlockProducer, + > tonic::server::UnaryService + for GetTransactionInputsSvc { + type Response = super::TransactionInputs; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_transaction_inputs( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetTransactionInputsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for BlockProducerServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "store.BlockProducer"; + impl tonic::server::NamedService for BlockProducerServer { + const NAME: &'static str = SERVICE_NAME; + } +} +/// Generated client implementations. +pub mod ntx_builder_client { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + use tonic::codegen::http::Uri; + /// Store API for the network transaction builder component + #[derive(Debug, Clone)] + pub struct NtxBuilderClient { + inner: tonic::client::Grpc, + } + impl NtxBuilderClient { + /// Attempt to create a new client by connecting to a given endpoint. 
+ pub async fn connect(dst: D) -> Result + where + D: TryInto, + D::Error: Into, + { + let conn = tonic::transport::Endpoint::new(dst)?.connect().await?; + Ok(Self::new(conn)) + } + } + impl NtxBuilderClient + where + T: tonic::client::GrpcService, + T::Error: Into, + T::ResponseBody: Body + std::marker::Send + 'static, + ::Error: Into + std::marker::Send, + { + pub fn new(inner: T) -> Self { + let inner = tonic::client::Grpc::new(inner); + Self { inner } + } + pub fn with_origin(inner: T, origin: Uri) -> Self { + let inner = tonic::client::Grpc::with_origin(inner, origin); + Self { inner } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> NtxBuilderClient> + where + F: tonic::service::Interceptor, + T::ResponseBody: Default, + T: tonic::codegen::Service< + http::Request, + Response = http::Response< + >::ResponseBody, + >, + >, + , + >>::Error: Into + std::marker::Send + std::marker::Sync, + { + NtxBuilderClient::new(InterceptedService::new(inner, interceptor)) + } + /// Compress requests with the given encoding. + /// + /// This requires the server to support it otherwise it might respond with an + /// error. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.send_compressed(encoding); + self + } + /// Enable decompressing responses. + #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.inner = self.inner.accept_compressed(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_decoding_message_size(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.inner = self.inner.max_encoding_message_size(limit); + self + } + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + pub async fn get_block_header_by_number( + &mut self, + request: impl tonic::IntoRequest< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetBlockHeaderByNumber", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetBlockHeaderByNumber")); + self.inner.unary(req, path, codec).await + } + /// Returns a paginated list of unconsumed network notes. 
+ pub async fn get_unconsumed_network_notes( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetUnconsumedNetworkNotes", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new("store.NtxBuilder", "GetUnconsumedNetworkNotes"), + ); + self.inner.unary(req, path, codec).await + } + /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this + /// header for executing network transactions. If the block number is not provided, the latest + /// header and peaks will be retrieved. + pub async fn get_current_blockchain_data( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetCurrentBlockchainData", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetCurrentBlockchainData")); + self.inner.unary(req, path, codec).await + } + /// Returns the latest state of a network account with the specified account prefix. + pub async fn get_network_account_details_by_prefix( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert( + GrpcMethod::new( + "store.NtxBuilder", + "GetNetworkAccountDetailsByPrefix", + ), + ); + self.inner.unary(req, path, codec).await + } + /// Returns a list of all network account ids. + pub async fn get_network_account_ids( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetNetworkAccountIds", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetNetworkAccountIds")); + self.inner.unary(req, path, codec).await + } + /// Returns the script for a note by its root. 
+ pub async fn get_note_script_by_root( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static( + "/store.NtxBuilder/GetNoteScriptByRoot", + ); + let mut req = request.into_request(); + req.extensions_mut() + .insert(GrpcMethod::new("store.NtxBuilder", "GetNoteScriptByRoot")); + self.inner.unary(req, path, codec).await + } + } +} +/// Generated server implementations. +pub mod ntx_builder_server { + #![allow( + unused_variables, + dead_code, + missing_docs, + clippy::wildcard_imports, + clippy::let_unit_value, + )] + use tonic::codegen::*; + /// Generated trait containing gRPC methods that should be implemented for use with NtxBuilderServer. + #[async_trait] + pub trait NtxBuilder: std::marker::Send + std::marker::Sync + 'static { + /// Retrieves block header by given block number. Optionally, it also returns the MMR path + /// and current chain length to authenticate the block's inclusion. + async fn get_block_header_by_number( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a paginated list of unconsumed network notes. + async fn get_unconsumed_network_notes( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the block header at the chain tip, as well as the MMR peaks corresponding to this + /// header for executing network transactions. If the block number is not provided, the latest + /// header and peaks will be retrieved. + async fn get_current_blockchain_data( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the latest state of a network account with the specified account prefix. + async fn get_network_account_details_by_prefix( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns a list of all network account ids. + async fn get_network_account_ids( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + /// Returns the script for a note by its root. + async fn get_note_script_by_root( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; + } + /// Store API for the network transaction builder component + #[derive(Debug)] + pub struct NtxBuilderServer { + inner: Arc, + accept_compression_encodings: EnabledCompressionEncodings, + send_compression_encodings: EnabledCompressionEncodings, + max_decoding_message_size: Option, + max_encoding_message_size: Option, + } + impl NtxBuilderServer { + pub fn new(inner: T) -> Self { + Self::from_arc(Arc::new(inner)) + } + pub fn from_arc(inner: Arc) -> Self { + Self { + inner, + accept_compression_encodings: Default::default(), + send_compression_encodings: Default::default(), + max_decoding_message_size: None, + max_encoding_message_size: None, + } + } + pub fn with_interceptor( + inner: T, + interceptor: F, + ) -> InterceptedService + where + F: tonic::service::Interceptor, + { + InterceptedService::new(Self::new(inner), interceptor) + } + /// Enable decompressing requests with the given encoding. 
+ #[must_use] + pub fn accept_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.accept_compression_encodings.enable(encoding); + self + } + /// Compress responses with the given encoding, if the client supports it. + #[must_use] + pub fn send_compressed(mut self, encoding: CompressionEncoding) -> Self { + self.send_compression_encodings.enable(encoding); + self + } + /// Limits the maximum size of a decoded message. + /// + /// Default: `4MB` + #[must_use] + pub fn max_decoding_message_size(mut self, limit: usize) -> Self { + self.max_decoding_message_size = Some(limit); + self + } + /// Limits the maximum size of an encoded message. + /// + /// Default: `usize::MAX` + #[must_use] + pub fn max_encoding_message_size(mut self, limit: usize) -> Self { + self.max_encoding_message_size = Some(limit); + self + } + } + impl tonic::codegen::Service> for NtxBuilderServer + where + T: NtxBuilder, + B: Body + std::marker::Send + 'static, + B::Error: Into + std::marker::Send + 'static, + { + type Response = http::Response; + type Error = std::convert::Infallible; + type Future = BoxFuture; + fn poll_ready( + &mut self, + _cx: &mut Context<'_>, + ) -> Poll> { + Poll::Ready(Ok(())) + } + fn call(&mut self, req: http::Request) -> Self::Future { + match req.uri().path() { + "/store.NtxBuilder/GetBlockHeaderByNumber" => { + #[allow(non_camel_case_types)] + struct GetBlockHeaderByNumberSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService< + super::super::rpc::BlockHeaderByNumberRequest, + > for GetBlockHeaderByNumberSvc { + type Response = super::super::rpc::BlockHeaderByNumberResponse; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::rpc::BlockHeaderByNumberRequest, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_block_header_by_number( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetBlockHeaderByNumberSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetUnconsumedNetworkNotes" => { + #[allow(non_camel_case_types)] + struct GetUnconsumedNetworkNotesSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetUnconsumedNetworkNotesSvc { + type Response = super::UnconsumedNetworkNotes; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_unconsumed_network_notes( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = 
self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetUnconsumedNetworkNotesSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetCurrentBlockchainData" => { + #[allow(non_camel_case_types)] + struct GetCurrentBlockchainDataSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService< + super::super::blockchain::MaybeBlockNumber, + > for GetCurrentBlockchainDataSvc { + type Response = super::CurrentBlockchainData; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::blockchain::MaybeBlockNumber, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_current_blockchain_data( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetCurrentBlockchainDataSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetNetworkAccountDetailsByPrefix" => { + #[allow(non_camel_case_types)] + struct GetNetworkAccountDetailsByPrefixSvc( + pub Arc, + ); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetNetworkAccountDetailsByPrefixSvc { + type Response = super::MaybeAccountDetails; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_network_account_details_by_prefix( + &inner, + request, + ) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNetworkAccountDetailsByPrefixSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetNetworkAccountIds" => { + #[allow(non_camel_case_types)] + struct GetNetworkAccountIdsSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetNetworkAccountIdsSvc { + type Response = 
super::NetworkAccountIdList; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_network_account_ids(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNetworkAccountIdsSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + "/store.NtxBuilder/GetNoteScriptByRoot" => { + #[allow(non_camel_case_types)] + struct GetNoteScriptByRootSvc(pub Arc); + impl< + T: NtxBuilder, + > tonic::server::UnaryService + for GetNoteScriptByRootSvc { + type Response = super::super::rpc::MaybeNoteScript; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::get_note_script_by_root(&inner, request) + .await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = GetNoteScriptByRootSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } + _ => { + Box::pin(async move { + let mut response = http::Response::new( + tonic::body::Body::default(), + ); + let headers = response.headers_mut(); + headers + .insert( + tonic::Status::GRPC_STATUS, + (tonic::Code::Unimplemented as i32).into(), + ); + headers + .insert( + http::header::CONTENT_TYPE, + tonic::metadata::GRPC_CONTENT_TYPE, + ); + Ok(response) + }) + } + } + } + } + impl Clone for NtxBuilderServer { + fn clone(&self) -> Self { + let inner = self.inner.clone(); + Self { + inner, + accept_compression_encodings: self.accept_compression_encodings, + send_compression_encodings: self.send_compression_encodings, + max_decoding_message_size: self.max_decoding_message_size, + max_encoding_message_size: self.max_encoding_message_size, + } + } + } + /// Generated gRPC service name + pub const SERVICE_NAME: &str = "store.NtxBuilder"; + impl tonic::server::NamedService for NtxBuilderServer { + const NAME: &'static str = SERVICE_NAME; + } +} diff --git a/crates/proto/src/generated/transaction.rs b/crates/proto/src/generated/transaction.rs index e02a63636..a9dc784d6 100644 --- a/crates/proto/src/generated/transaction.rs +++ b/crates/proto/src/generated/transaction.rs @@ -3,18 
+3,18 @@ #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProvenTransaction { /// Transaction encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::transaction::proven_tx::ProvenTransaction\]. + /// \[miden_protocol::transaction::proven_tx::ProvenTransaction\]. #[prost(bytes = "vec", tag = "1")] pub transaction: ::prost::alloc::vec::Vec, /// Transaction inputs encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::transaction::TransactionInputs\]. + /// \[miden_protocol::transaction::TransactionInputs\]. #[prost(bytes = "vec", optional, tag = "2")] pub transaction_inputs: ::core::option::Option<::prost::alloc::vec::Vec>, } #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProvenTransactionBatch { /// Encoded using \[winter_utils::Serializable\] implementation for - /// \[miden_objects::transaction::proven_tx::ProvenTransaction\]. + /// \[miden_protocol::transaction::proven_tx::ProvenTransaction\]. #[prost(bytes = "vec", tag = "1")] pub encoded: ::prost::alloc::vec::Vec, } @@ -52,7 +52,7 @@ pub struct TransactionHeader { pub final_state_commitment: ::core::option::Option, /// Nullifiers of the input notes of the transaction. #[prost(message, repeated, tag = "4")] - pub input_notes: ::prost::alloc::vec::Vec, + pub nullifiers: ::prost::alloc::vec::Vec, /// Output notes of the transaction. #[prost(message, repeated, tag = "5")] pub output_notes: ::prost::alloc::vec::Vec, diff --git a/crates/proto/src/generated/validator.rs b/crates/proto/src/generated/validator.rs index a5a31a35f..39869d9fc 100644 --- a/crates/proto/src/generated/validator.rs +++ b/crates/proto/src/generated/validator.rs @@ -147,6 +147,28 @@ pub mod api_client { .insert(GrpcMethod::new("validator.Api", "SubmitProvenTransaction")); self.inner.unary(req, path, codec).await } + /// Validates and signs a proposed block, returning the block signature. + pub async fn sign_block( + &mut self, + request: impl tonic::IntoRequest, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + > { + self.inner + .ready() + .await + .map_err(|e| { + tonic::Status::unknown( + format!("Service was not ready: {}", e.into()), + ) + })?; + let codec = tonic_prost::ProstCodec::default(); + let path = http::uri::PathAndQuery::from_static("/validator.Api/SignBlock"); + let mut req = request.into_request(); + req.extensions_mut().insert(GrpcMethod::new("validator.Api", "SignBlock")); + self.inner.unary(req, path, codec).await + } } } /// Generated server implementations. @@ -172,6 +194,14 @@ pub mod api_server { &self, request: tonic::Request, ) -> std::result::Result, tonic::Status>; + /// Validates and signs a proposed block, returning the block signature. + async fn sign_block( + &self, + request: tonic::Request, + ) -> std::result::Result< + tonic::Response, + tonic::Status, + >; } /// Validator API for the Validator component.
#[derive(Debug)] @@ -337,6 +367,54 @@ pub mod api_server { }; Box::pin(fut) } + "/validator.Api/SignBlock" => { + #[allow(non_camel_case_types)] + struct SignBlockSvc(pub Arc); + impl< + T: Api, + > tonic::server::UnaryService< + super::super::blockchain::ProposedBlock, + > for SignBlockSvc { + type Response = super::super::blockchain::BlockSignature; + type Future = BoxFuture< + tonic::Response, + tonic::Status, + >; + fn call( + &mut self, + request: tonic::Request< + super::super::blockchain::ProposedBlock, + >, + ) -> Self::Future { + let inner = Arc::clone(&self.0); + let fut = async move { + ::sign_block(&inner, request).await + }; + Box::pin(fut) + } + } + let accept_compression_encodings = self.accept_compression_encodings; + let send_compression_encodings = self.send_compression_encodings; + let max_decoding_message_size = self.max_decoding_message_size; + let max_encoding_message_size = self.max_encoding_message_size; + let inner = self.inner.clone(); + let fut = async move { + let method = SignBlockSvc(inner); + let codec = tonic_prost::ProstCodec::default(); + let mut grpc = tonic::server::Grpc::new(codec) + .apply_compression_config( + accept_compression_encodings, + send_compression_encodings, + ) + .apply_max_message_size_config( + max_decoding_message_size, + max_encoding_message_size, + ); + let res = grpc.unary(method, req).await; + Ok(res) + }; + Box::pin(fut) + } _ => { Box::pin(async move { let mut response = http::Response::new( diff --git a/crates/proto/src/lib.rs b/crates/proto/src/lib.rs index 5cd0afe4b..0f5cbb8f5 100644 --- a/crates/proto/src/lib.rs +++ b/crates/proto/src/lib.rs @@ -10,4 +10,5 @@ pub mod generated; pub use domain::account::{AccountState, AccountWitnessRecord}; pub use domain::nullifier::NullifierWitnessRecord; +pub use domain::proof_request::BlockProofRequest; pub use domain::{convert, try_convert}; diff --git a/crates/remote-prover-client/Cargo.toml b/crates/remote-prover-client/Cargo.toml index 049fb1621..f73600f27 100644 --- a/crates/remote-prover-client/Cargo.toml +++ b/crates/remote-prover-client/Cargo.toml @@ -14,38 +14,34 @@ version.workspace = true crate-type = ["lib"] [features] -batch-prover = ["dep:miden-objects", "dep:tokio"] -block-prover = ["dep:miden-objects", "dep:tokio"] +batch-prover = ["dep:miden-protocol", "dep:tokio"] +block-prover = ["dep:miden-protocol", "dep:tokio"] default = ["std"] -std = ["miden-objects/std", "miden-tx/std"] -tx-prover = ["dep:miden-objects", "dep:miden-tx", "dep:tokio"] +std = ["miden-protocol/std", "miden-tx/std"] +tx-prover = ["dep:miden-protocol", "dep:miden-tx", "dep:tokio"] [target.'cfg(all(target_arch = "wasm32", target_os = "unknown"))'.dependencies] getrandom = { features = ["wasm_js"], version = "0.3" } -tonic = { default-features = false, features = ["codegen"], version = "0.14" } +tonic = { features = ["codegen"], workspace = true } tonic-web-wasm-client = { default-features = false, version = "0.8" } [target.'cfg(not(all(target_arch = "wasm32", target_os = "unknown")))'.dependencies] -tonic = { default-features = false, features = [ - "codegen", - "tls-native-roots", - "tls-ring", - "transport", -], version = "0.14" } +tonic = { features = ["codegen", "tls-native-roots", "tls-ring", "transport"], workspace = true } tonic-web = { optional = true, version = "0.14" } [lints] workspace = true [dependencies] -miden-objects = { optional = true, workspace = true } -miden-tx = { optional = true, workspace = true } -prost = { default-features = false, features = ["derive"], version = "0.14" } 
-thiserror = { workspace = true } -tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } -tonic-prost = { workspace = true } +miden-protocol = { optional = true, workspace = true } +miden-tx = { optional = true, workspace = true } +prost = { default-features = false, features = ["derive"], workspace = true } +thiserror = { workspace = true } +tokio = { default-features = false, features = ["sync"], optional = true, version = "1.44" } +tonic-prost = { workspace = true } [build-dependencies] +fs-err = { workspace = true } miden-node-proto-build = { workspace = true } miette = { features = ["fancy"], version = "7.5" } tonic-prost-build = { workspace = true } diff --git a/crates/remote-prover-client/build.rs b/crates/remote-prover-client/build.rs index 4a6c5e254..ffd9b2e71 100644 --- a/crates/remote-prover-client/build.rs +++ b/crates/remote-prover-client/build.rs @@ -56,7 +56,7 @@ fn build_tonic_from_descriptor( /// Replaces std references with core and alloc for nostd compatibility fn convert_to_nostd(file_path: &str) -> miette::Result<()> { - let file_content = fs::read_to_string(file_path).into_diagnostic()?; + let file_content = fs_err::read_to_string(file_path).into_diagnostic()?; let updated_content = file_content .replace("std::result", "core::result") .replace("std::marker", "core::marker") diff --git a/crates/remote-prover-client/src/remote_prover/batch_prover.rs b/crates/remote-prover-client/src/remote_prover/batch_prover.rs index 3c75097c2..b0d472656 100644 --- a/crates/remote-prover-client/src/remote_prover/batch_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/batch_prover.rs @@ -3,9 +3,14 @@ use alloc::sync::Arc; use alloc::vec::Vec; use core::time::Duration; -use miden_objects::batch::{ProposedBatch, ProvenBatch}; -use miden_objects::transaction::{OutputNote, ProvenTransaction, TransactionHeader, TransactionId}; -use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::batch::{ProposedBatch, ProvenBatch}; +use miden_protocol::transaction::{ + OutputNote, + ProvenTransaction, + TransactionHeader, + TransactionId, +}; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; use tokio::sync::Mutex; use super::generated::api_client::ApiClient; @@ -71,7 +76,12 @@ impl RemoteBatchProver { #[cfg(target_arch = "wasm32")] let new_client = { - let web_client = tonic_web_wasm_client::Client::new(self.endpoint.clone()); + let fetch_options = + tonic_web_wasm_client::options::FetchOptions::new().timeout(self.timeout); + let web_client = tonic_web_wasm_client::Client::new_with_options( + self.endpoint.clone(), + fetch_options, + ); ApiClient::new(web_client) }; @@ -100,7 +110,7 @@ impl RemoteBatchProver { &self, proposed_batch: ProposedBatch, ) -> Result { - use miden_objects::utils::Serializable; + use miden_protocol::utils::Serializable; self.connect().await?; let mut client = self diff --git a/crates/remote-prover-client/src/remote_prover/block_prover.rs b/crates/remote-prover-client/src/remote_prover/block_prover.rs index 694f11b37..d1fa43548 100644 --- a/crates/remote-prover-client/src/remote_prover/block_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/block_prover.rs @@ -3,10 +3,10 @@ use alloc::sync::Arc; use alloc::vec::Vec; use core::time::Duration; -use miden_objects::batch::ProvenBatch; -use miden_objects::block::{ProposedBlock, ProvenBlock}; -use miden_objects::transaction::{OrderedTransactionHeaders, TransactionHeader}; -use 
miden_objects::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::batch::{OrderedBatches, ProvenBatch}; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockProof, ProposedBlock, ProvenBlock}; +use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionHeader}; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; use tokio::sync::Mutex; use super::generated::api_client::ApiClient; @@ -72,7 +72,12 @@ impl RemoteBlockProver { #[cfg(target_arch = "wasm32")] let new_client = { - let web_client = tonic_web_wasm_client::Client::new(self.endpoint.clone()); + let fetch_options = + tonic_web_wasm_client::options::FetchOptions::new().timeout(self.timeout); + let web_client = tonic_web_wasm_client::Client::new_with_options( + self.endpoint.clone(), + fetch_options, + ); ApiClient::new(web_client) }; @@ -99,9 +104,11 @@ impl RemoteBlockProver { impl RemoteBlockProver { pub async fn prove( &self, - proposed_block: ProposedBlock, - ) -> Result { - use miden_objects::utils::Serializable; + tx_batches: OrderedBatches, + block_header: BlockHeader, + block_inputs: BlockInputs, + ) -> Result { + use miden_protocol::utils::Serializable; self.connect().await?; let mut client = self @@ -114,70 +121,41 @@ impl RemoteBlockProver { })? .clone(); - // Get the set of expected transaction headers. - let proposed_txs = proposed_block.batches().to_transactions(); + let block_proof_request = + ProposedBlock::new_at(block_inputs, tx_batches.into_vec(), block_header.timestamp()) + .map_err(|err| { + RemoteProverClientError::other_with_source( + "failed to create proposed block", + err, + ) + })?; - let request = tonic::Request::new(proposed_block.into()); + let request = tonic::Request::new(block_proof_request.into()); let response = client.prove(request).await.map_err(|err| { RemoteProverClientError::other_with_source("failed to prove block", err) })?; - // Deserialize the response bytes back into a ProvenBlock. - let proven_block = ProvenBlock::try_from(response.into_inner()).map_err(|err| { + // Deserialize the response bytes back into a BlockProof. + let block_proof = BlockProof::try_from(response.into_inner()).map_err(|err| { RemoteProverClientError::other_with_source( "failed to deserialize received response from remote block prover", err, ) })?; - Self::validate_tx_headers(&proven_block, &proposed_txs)?; - - Ok(proven_block) - } - - /// Validates that the proven block's transaction headers are consistent with the transactions - /// passed in the proposed block. - /// - /// This expects that transactions from the proposed block and proven block are in the same - /// order, as define by [`OrderedTransactionHeaders`]. - fn validate_tx_headers( - proven_block: &ProvenBlock, - proposed_txs: &OrderedTransactionHeaders, - ) -> Result<(), RemoteProverClientError> { - if proposed_txs.as_slice().len() != proven_block.transactions().as_slice().len() { - return Err(RemoteProverClientError::other(format!( - "remote prover returned {} transaction headers but {} transactions were passed as part of the proposed block", - proven_block.transactions().as_slice().len(), - proposed_txs.as_slice().len() - ))); - } - - // Because we checked the length matches we can zip the iterators up. - // We expect the transaction headers to be in the same order. 
- for (proposed_header, proven_header) in - proposed_txs.as_slice().iter().zip(proven_block.transactions().as_slice()) - { - if proposed_header != proven_header { - return Err(RemoteProverClientError::other(format!( - "transaction header with id {} does not match header of the transaction in the proposed block", - proposed_header.id() - ))); - } - } - - Ok(()) + Ok(block_proof) } } // CONVERSION // ================================================================================================ -impl TryFrom for ProvenBlock { +impl TryFrom for BlockProof { type Error = DeserializationError; fn try_from(value: proto::Proof) -> Result { - ProvenBlock::read_from_bytes(&value.payload) + BlockProof::read_from_bytes(&value.payload) } } diff --git a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs index 53326a3fb..1074dd5b8 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/nostd/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,16 +21,16 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. #[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. #[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. #[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs index 7f33a307f..7be124daa 100644 --- a/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/generated/std/remote_prover.rs @@ -10,7 +10,7 @@ pub struct ProofRequest { /// /// * TRANSACTION: TransactionInputs encoded. /// * BATCH: ProposedBatch encoded. - /// * BLOCK: ProposedBlock encoded. + /// * BLOCK: BlockProofRequest encoded. #[prost(bytes = "vec", tag = "2")] pub payload: ::prost::alloc::vec::Vec, } @@ -21,16 +21,16 @@ pub struct Proof { /// /// * TRANSACTION: Returns an encoded ProvenTransaction. /// * BATCH: Returns an encoded ProvenBatch. - /// * BLOCK: Returns an encoded ProvenBlock. + /// * BLOCK: Returns an encoded BlockProof. #[prost(bytes = "vec", tag = "1")] pub payload: ::prost::alloc::vec::Vec, } /// Status of an individual worker in the proxy. #[derive(Clone, PartialEq, Eq, Hash, ::prost::Message)] pub struct ProxyWorkerStatus { - /// The address of the worker. + /// The name of the worker. #[prost(string, tag = "1")] - pub address: ::prost::alloc::string::String, + pub name: ::prost::alloc::string::String, /// The version of the worker. 
#[prost(string, tag = "2")] pub version: ::prost::alloc::string::String, diff --git a/crates/remote-prover-client/src/remote_prover/tx_prover.rs b/crates/remote-prover-client/src/remote_prover/tx_prover.rs index bf6239646..aea58aa11 100644 --- a/crates/remote-prover-client/src/remote_prover/tx_prover.rs +++ b/crates/remote-prover-client/src/remote_prover/tx_prover.rs @@ -3,9 +3,9 @@ use alloc::string::{String, ToString}; use alloc::sync::Arc; use core::time::Duration; -use miden_objects::transaction::{ProvenTransaction, TransactionInputs}; -use miden_objects::utils::{Deserializable, DeserializationError, Serializable}; -use miden_objects::vm::FutureMaybeSend; +use miden_protocol::transaction::{ProvenTransaction, TransactionInputs}; +use miden_protocol::utils::{Deserializable, DeserializationError, Serializable}; +use miden_protocol::vm::FutureMaybeSend; use miden_tx::TransactionProverError; use tokio::sync::Mutex; @@ -72,7 +72,12 @@ impl RemoteTransactionProver { #[cfg(target_arch = "wasm32")] let new_client = { - let web_client = tonic_web_wasm_client::Client::new(self.endpoint.clone()); + let fetch_options = + tonic_web_wasm_client::options::FetchOptions::new().timeout(self.timeout); + let web_client = tonic_web_wasm_client::Client::new_with_options( + self.endpoint.clone(), + fetch_options, + ); ApiClient::new(web_client) }; @@ -102,7 +107,7 @@ impl RemoteTransactionProver { tx_inputs: TransactionInputs, ) -> impl FutureMaybeSend> { async move { - use miden_objects::utils::Serializable; + use miden_protocol::utils::Serializable; self.connect().await.map_err(|err| { TransactionProverError::other_with_source( "failed to connect to the remote prover", diff --git a/crates/rpc/Cargo.toml b/crates/rpc/Cargo.toml index 540f85eb2..30ec4dcb8 100644 --- a/crates/rpc/Cargo.toml +++ b/crates/rpc/Cargo.toml @@ -22,13 +22,13 @@ mediatype = { version = "0.21" } miden-node-proto = { workspace = true } miden-node-proto-build = { workspace = true } miden-node-utils = { workspace = true } -miden-objects = { default-features = true, workspace = true } +miden-protocol = { default-features = true, workspace = true } miden-tx = { default-features = true, workspace = true } semver = { version = "1.0" } thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } -tonic = { features = ["tls-native-roots", "tls-ring"], workspace = true } +tonic = { default-features = true, features = ["tls-native-roots", "tls-ring"], workspace = true } tonic-reflection = { workspace = true } tonic-web = { version = "0.14" } tower = { workspace = true } @@ -38,10 +38,10 @@ url = { workspace = true } [dev-dependencies] miden-air = { features = ["testing"], workspace = true } -miden-lib = { workspace = true } miden-node-store = { workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } -miden-objects = { default-features = true, features = ["testing"], workspace = true } +miden-protocol = { default-features = true, features = ["testing"], workspace = true } +miden-standards = { workspace = true } reqwest = { version = "0.12" } rstest = { workspace = true } tempfile = { version = "3.20" } diff --git a/crates/rpc/README.md b/crates/rpc/README.md index 408f0affb..da30b7144 100644 --- a/crates/rpc/README.md +++ b/crates/rpc/README.md @@ -19,6 +19,7 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) - [GetAccountProofs](#getaccountproofs) - 
[GetBlockByNumber](#getblockbynumber) - [GetBlockHeaderByNumber](#getblockheaderbynumber) +- [GetLimits](#getlimits) - [GetNotesById](#getnotesbyid) - [GetNoteScriptByRoot](#getnotescriptbyroot) - [SubmitProvenTransaction](#submitproventransaction) @@ -36,6 +37,8 @@ The full gRPC method definitions can be found in the [proto](../proto/README.md) Returns a nullifier proof for each of the requested nullifiers. +**Limits:** `nullifier` (1000) + #### Error Handling When nullifier checking fails, detailed error information is provided through gRPC status details. The following error codes may be returned: @@ -73,10 +76,21 @@ authenticate the block's inclusion. --- +### GetLimits + +Returns the query parameter limits configured for RPC endpoints. + +This endpoint allows clients to discover the maximum number of items that can be requested in a single call for +various endpoints. The response contains a map of endpoint names to their parameter limits. + +--- + ### GetNotesById Returns a list of notes matching the provided note IDs. +**Limits:** `note_id` (100) + #### Error Handling When note retrieval fails, detailed error information is provided through gRPC status details. The following error codes may be returned: @@ -137,6 +151,8 @@ Clients should inspect both the gRPC status code and the detailed error code in Returns nullifier synchronization data for a set of prefixes within a given block range. This method allows clients to efficiently track nullifier creation by retrieving only the nullifiers produced between two blocks. +**Limits:** `nullifier` (1000) + Caller specifies the `prefix_len` (currently only 16), the list of prefix values (`nullifiers`), and the block range (`from_start_block`, optional `to_end_block`). The response includes all matching nullifiers created within that range, the last block included in the response (`block_num`), and the current chain tip (`chain_tip`). @@ -181,6 +197,8 @@ When account vault synchronization fails, detailed error information is provided Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. +**Limits:** `note_tag` (1000) + Client specifies the `note_tags` they are interested in, and the block range from which to search for matching notes. The request will then return the next block containing any note matching the provided tags within the specified range. The response includes each note's metadata and inclusion proof. @@ -205,6 +223,8 @@ When note synchronization fails, detailed error information is provided through Returns info which can be used by the client to sync up to the latest state of the chain for the objects (accounts and notes) the client is interested in. +**Limits:** `account_id` (1000), `note_tag` (1000) + This request returns the next block containing requested data. It also returns `chain_tip` which is the latest block number in the chain. Client is expected to repeat these requests in a loop until `response.block_header.block_num == response.chain_tip`, at which point the client is fully synchronized with the chain. 
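The `GetLimits` endpoint and the per-endpoint **Limits** entries above give clients what they need to size requests before the server rejects them. A minimal client-side sketch (assuming the generated `ApiClient` over a plain tonic channel; the helper name and the zero fallback are illustrative, not part of the API):

```rust
use miden_node_proto::generated::rpc::api_client::ApiClient;
use tonic::transport::Channel;

/// Illustrative helper: ask the server how many nullifiers a single
/// `CheckNullifiers` call may contain (1000 per the documentation above),
/// falling back to 0 if the endpoint is not advertised.
async fn max_nullifiers_per_call(
    client: &mut ApiClient<Channel>,
) -> Result<u32, tonic::Status> {
    let limits = client.get_limits(()).await?.into_inner();

    // `RpcLimits.endpoints` maps endpoint names to their parameter limits,
    // e.g. "CheckNullifiers" -> { "nullifier": 1000 }.
    Ok(limits
        .endpoints
        .get("CheckNullifiers")
        .and_then(|endpoint| endpoint.parameters.get("nullifier"))
        .copied()
        .unwrap_or(0))
}
```

Clients can then chunk large nullifier sets to at most that size instead of hard-coding the documented limits.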
diff --git a/crates/rpc/src/server/accept.rs b/crates/rpc/src/server/accept.rs index 5ea5650af..01103356f 100644 --- a/crates/rpc/src/server/accept.rs +++ b/crates/rpc/src/server/accept.rs @@ -6,10 +6,16 @@ use futures::future::BoxFuture; use http::header::{ACCEPT, ToStrError}; use mediatype::{Name, ReadParams}; use miden_node_utils::{ErrorReport, FlattenResult}; -use miden_objects::{Word, WordError}; +use miden_protocol::{Word, WordError}; use semver::{Comparator, Version, VersionReq}; use tower::{Layer, Service}; +#[derive(Copy, Clone, Debug, PartialEq, Eq)] +pub enum GenesisNegotiation { + Optional, + Mandatory, +} + /// Performs content negotiation by rejecting requests which don't match our RPC version or network. /// Clients can specify these as parameters in our `application/vnd.miden` accept media range. /// @@ -29,13 +35,18 @@ use tower::{Layer, Service}; /// /// Parameters are optional and order is not important. /// -/// ``` +/// ```text /// application/vnd.miden; version=; genesis=0x1234 /// ``` #[derive(Clone)] pub struct AcceptHeaderLayer { supported_versions: VersionReq, genesis_commitment: Word, + /// RPC method names for which the `genesis` parameter is mandatory. + /// + /// These should be gRPC method names (e.g. `SubmitProvenTransaction`), + /// matched against the end of the request path like "/rpc.Api/". + require_genesis_methods: Vec<&'static str>, } #[derive(Debug, thiserror::Error)] @@ -71,7 +82,17 @@ impl AcceptHeaderLayer { }], }; - AcceptHeaderLayer { supported_versions, genesis_commitment } + AcceptHeaderLayer { + supported_versions, + genesis_commitment, + require_genesis_methods: Vec::new(), + } + } + + /// Mark a gRPC method as requiring a `genesis` parameter in the Accept header. + pub fn with_genesis_enforced_method(mut self, method: &'static str) -> Self { + self.require_genesis_methods.push(method); + self } } @@ -89,13 +110,21 @@ impl AcceptHeaderLayer { const GRPC: Name<'static> = Name::new_unchecked("grpc"); /// Parses the `Accept` header's contents, searching for any media type compatible with our - /// RPC version and genesis commitment. - fn negotiate(&self, accept: &str) -> Result<(), AcceptHeaderError> { + /// RPC version and genesis commitment, controlling whether `genesis` is optional or mandatory. + fn negotiate( + &self, + accept: &str, + genesis_mode: GenesisNegotiation, + ) -> Result<(), AcceptHeaderError> { let mut media_types = mediatype::MediaTypeList::new(accept).peekable(); // Its debatable whether an empty header value is valid. Let's err on the side of being // gracious if the client want's to be weird. if media_types.peek().is_none() { + // If there are no media types provided and genesis is required, reject. + if matches!(genesis_mode, GenesisNegotiation::Mandatory) { + return Err(AcceptHeaderError::NoSupportedMediaRange); + } return Ok(()); } @@ -150,16 +179,16 @@ impl AcceptHeaderLayer { continue; } - // Skip if the genesis commitment does not match. + // Skip if the genesis commitment does not match, or if it is required but missing. let genesis = media_type .get_param(Self::GENESIS) .map(|value| Word::try_from(value.unquoted_str().as_ref())) .transpose() .map_err(AcceptHeaderError::InvalidGenesis)?; - if let Some(genesis) = genesis - && genesis != self.genesis_commitment - { - continue; + match (genesis_mode, genesis) { + (_, Some(value)) if value != self.genesis_commitment => continue, + (GenesisNegotiation::Mandatory, None) => continue, + _ => {}, } // All preconditions met, this is a valid media type that we can serve. 
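With `GenesisNegotiation::Mandatory` in play, a client calling a genesis-enforced method must send a matching media range up front. A hedged sketch of the client side (the injection point via tonic request metadata and the helper name are assumptions; the media-range layout follows the `application/vnd.miden` convention from `AcceptHeaderLayer`'s docs above):

```rust
use tonic::Request;
use tonic::metadata::MetadataValue;

/// Illustrative helper: attach an `application/vnd.miden` media range that
/// satisfies Accept-header negotiation when `genesis` is mandatory
/// (e.g. for `SubmitProvenTransaction`). Parameter order does not matter.
fn with_miden_accept<T>(mut request: Request<T>, genesis_hex: &str) -> Request<T> {
    // `version` remains optional; `genesis` must equal the network's genesis
    // commitment or the media type is skipped during negotiation and the
    // request is rejected.
    let header = format!("application/vnd.miden; genesis={genesis_hex}");
    let value: MetadataValue<_> = header.parse().expect("header value is valid ASCII");
    request.metadata_mut().insert("accept", value);
    request
}
```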
@@ -195,14 +224,44 @@ where } fn call(&mut self, request: http::Request) -> Self::Future { + // Skip negotiation entirely for CORS preflight/non-gRPC requests. + // + // Browsers often automatically perform an `OPTIONS` check _before_ the client + // SDK can inject the appropriate `ACCEPT` header, causing a rejection. + // Since an `OPTIONS` request does nothing, it's safe for us to simply allow them. + if request.method() == http::Method::OPTIONS { + return self.inner.call(request).boxed(); + } + + // Determine if this RPC method requires the `genesis` parameter. + let path = request.uri().path(); + let method_name = path.rsplit('/').next().unwrap_or_default(); + + let requires_genesis = self.verifier.require_genesis_methods.contains(&method_name); + + // If `genesis` is required but the header is missing entirely, reject early. let Some(header) = request.headers().get(ACCEPT) else { + if requires_genesis { + let response = tonic::Status::invalid_argument( + "Accept header with 'genesis' parameter is required for write RPC methods", + ) + .into_http(); + return futures::future::ready(Ok(response)).boxed(); + } return self.inner.call(request).boxed(); }; let result = header .to_str() .map_err(AcceptHeaderError::InvalidUtf8) - .map(|header| self.verifier.negotiate(header)) + .map(|header| { + let mode = if requires_genesis { + GenesisNegotiation::Mandatory + } else { + GenesisNegotiation::Optional + }; + self.verifier.negotiate(header, mode) + }) .flatten_result(); match result { @@ -298,7 +357,7 @@ impl FromStr for QValue { #[cfg(test)] mod tests { - use miden_objects::Word; + use miden_protocol::Word; use semver::Version; use super::{AcceptHeaderLayer, QParsingError}; @@ -342,7 +401,9 @@ mod tests { #[case::quoted_network(r#"application/vnd.miden; genesis="0x00000000000000000000000000000000000000000000000000000000deadbeef""#)] #[test] fn request_should_pass(#[case] accept: &'static str) { - AcceptHeaderLayer::for_tests().negotiate(accept).unwrap(); + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Optional) + .unwrap(); } #[rstest::rstest] @@ -356,7 +417,52 @@ mod tests { #[case::wildcard_subtype("application/*")] #[test] fn request_should_be_rejected(#[case] accept: &'static str) { - AcceptHeaderLayer::for_tests().negotiate(accept).unwrap_err(); + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Optional) + .unwrap_err(); + } + + #[test] + fn write_requires_genesis_param_missing_or_empty_or_mismatch() { + let layer = AcceptHeaderLayer::for_tests(); + + // Missing genesis parameter + assert!( + layer + .negotiate("application/vnd.miden", super::GenesisNegotiation::Mandatory) + .is_err() + ); + + // Empty header value + assert!(layer.negotiate("", super::GenesisNegotiation::Mandatory).is_err()); + + // Present but mismatched genesis parameter + let mismatched = "application/vnd.miden; genesis=0x00000000000000000000000000000000000000000000000000000000deadbeee"; + assert!(layer.negotiate(mismatched, super::GenesisNegotiation::Mandatory).is_err()); + } + + #[rstest::rstest] + #[case::matching_network( + "application/vnd.miden; genesis=0x00000000000000000000000000000000000000000000000000000000deadbeef" )] + #[case::matching_network_and_version( + "application/vnd.miden; genesis=0x00000000000000000000000000000000000000000000000000000000deadbeef; version=0.2.3" )] + #[test] + fn request_with_mandatory_genesis_should_pass(#[case] accept: &'static str) { + AcceptHeaderLayer::for_tests() + .negotiate(accept,
super::GenesisNegotiation::Mandatory) .unwrap(); + } + + #[rstest::rstest] + #[case::missing_network("application/vnd.miden;")] + #[case::missing_network_wildcard("*/*")] + #[test] + fn request_with_mandatory_genesis_should_be_rejected(#[case] accept: &'static str) { + AcceptHeaderLayer::for_tests() + .negotiate(accept, super::GenesisNegotiation::Mandatory) + .unwrap_err(); } #[rstest::rstest] diff --git a/crates/rpc/src/server/api.rs index a75341f56..86bb35e59 100644 --- a/crates/rpc/src/server/api.rs +++ b/crates/rpc/src/server/api.rs @@ -1,15 +1,10 @@ -use std::sync::Arc; +use std::sync::{Arc, LazyLock}; use std::time::Duration; use anyhow::Context; -use miden_node_proto::clients::{ - BlockProducer, - BlockProducerClient, - Builder, - StoreRpc, - StoreRpcClient, -}; +use miden_node_proto::clients::{BlockProducerClient, Builder, StoreRpcClient, ValidatorClient}; use miden_node_proto::errors::ConversionError; +use miden_node_proto::generated::rpc::MempoolStats; use miden_node_proto::generated::rpc::api_server::{self, Api}; use miden_node_proto::generated::{self as proto}; use miden_node_proto::try_convert; @@ -21,25 +16,19 @@ use miden_node_utils::limiter::{ QueryParamNoteTagLimit, QueryParamNullifierLimit, }; -use miden_objects::account::AccountId; -use miden_objects::batch::ProvenBatch; -use miden_objects::block::{BlockHeader, BlockNumber}; -use miden_objects::note::{Note, NoteRecipient, NoteScript}; -use miden_objects::transaction::{ - OutputNote, - ProvenTransaction, - ProvenTransactionBuilder, - TransactionInputs, -}; -use miden_objects::utils::serde::{Deserializable, Serializable}; -use miden_objects::{MIN_PROOF_SECURITY_LEVEL, Word}; +use miden_protocol::account::AccountId; +use miden_protocol::batch::ProvenBatch; +use miden_protocol::block::{BlockHeader, BlockNumber}; +use miden_protocol::note::{Note, NoteRecipient, NoteScript}; +use miden_protocol::transaction::{OutputNote, ProvenTransaction, ProvenTransactionBuilder}; +use miden_protocol::utils::serde::{Deserializable, Serializable}; +use miden_protocol::{MIN_PROOF_SECURITY_LEVEL, Word}; use miden_tx::TransactionVerifier; use tonic::{IntoRequest, Request, Response, Status}; -use tracing::{debug, info, instrument, warn}; +use tracing::{debug, info, instrument}; use url::Url; use crate::COMPONENT; -use crate::server::validator; // RPC SERVICE // ================================================================================================ @@ -47,11 +36,12 @@ use crate::server::validator; pub struct RpcService { store: StoreRpcClient, block_producer: Option, + validator: ValidatorClient, genesis_commitment: Option, } impl RpcService { - pub(super) fn new(store_url: Url, block_producer_url: Option) -> Self { + pub(super) fn new(store_url: Url, block_producer_url: Option, validator_url: Url) -> Self { let store = { info!(target: COMPONENT, store_endpoint = %store_url, "Initializing store client"); Builder::new(store_url) @@ -59,7 +49,8 @@ impl RpcService { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::() + .with_otel_context_injection() + .connect_lazy::() }; let block_producer = block_producer_url.map(|block_producer_url| { @@ -73,12 +64,29 @@ impl RpcService { .without_timeout() .without_metadata_version() .without_metadata_genesis() - .connect_lazy::() + .with_otel_context_injection() + .connect_lazy::() }); + let validator = { + info!( + target: COMPONENT, + validator_endpoint = %validator_url, + "Initializing validator client", + );
Builder::new(validator_url) + .without_tls() + .without_timeout() + .without_metadata_version() + .without_metadata_genesis() + .with_otel_context_injection() + .connect_lazy::() + }; + Self { store, block_producer, + validator, genesis_commitment: None, } } @@ -103,7 +111,7 @@ impl RpcService { loop { let result = self .get_block_header_by_number( - proto::shared::BlockHeaderByNumberRequest { + proto::rpc::BlockHeaderByNumberRequest { block_num: Some(BlockNumber::GENESIS.as_u32()), include_mmr_proof: None, } @@ -132,7 +140,7 @@ impl RpcService { ?backoff, %retry_counter, %err, - "connection failed while subscribing to the mempool, retrying" + "connection failed while fetching genesis header, retrying" ); retry_counter += 1; @@ -156,8 +164,8 @@ impl api_server::Api for RpcService { )] async fn check_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().nullifiers.len())?; @@ -182,8 +190,8 @@ impl api_server::Api for RpcService { )] async fn sync_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().nullifiers.len())?; @@ -201,8 +209,8 @@ impl api_server::Api for RpcService { )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { info!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().get_block_header_by_number(request).await @@ -218,8 +226,8 @@ impl api_server::Api for RpcService { )] async fn sync_state( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().account_ids.len())?; @@ -238,8 +246,8 @@ impl api_server::Api for RpcService { )] async fn sync_storage_maps( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().sync_storage_maps(request).await @@ -255,8 +263,8 @@ impl api_server::Api for RpcService { )] async fn sync_notes( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); check::(request.get_ref().note_tags.len())?; @@ -303,11 +311,9 @@ impl api_server::Api for RpcService { )] async fn sync_account_vault( &self, - request: tonic::Request, - ) -> std::result::Result< - tonic::Response, - tonic::Status, - > { + request: tonic::Request, + ) -> std::result::Result, tonic::Status> + { debug!(target: COMPONENT, request = ?request.get_ref()); self.store.clone().sync_account_vault(request).await @@ -317,7 +323,7 @@ impl api_server::Api for RpcService { async fn submit_proven_transaction( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request.get_ref()); let Some(block_producer) = &self.block_producer else { @@ -384,29 +390,9 @@ impl api_server::Api for RpcService { })?; // If transaction inputs are provided, re-execute the transaction to validate it. - if let Some(tx_inputs_bytes) = &request.transaction_inputs { - // Deserialize the transaction inputs. 
- let tx_inputs = TransactionInputs::read_from_bytes(tx_inputs_bytes).map_err(|err| { - Status::invalid_argument(err.as_report_context("Invalid transaction inputs")) - })?; - // Re-execute the transaction. - match validator::re_execute_transaction(tx_inputs).await { - Ok(_executed_tx) => { - debug!( - target = COMPONENT, - tx_id = %tx.id().to_hex(), - "Transaction re-execution successful" - ); - }, - Err(e) => { - warn!( - target = COMPONENT, - tx_id = %tx.id().to_hex(), - error = %e, - "Transaction re-execution failed, but continuing with submission" - ); - }, - } + if request.transaction_inputs.is_some() { + // Re-execute the transaction via the Validator. + self.validator.clone().submit_proven_transaction(request.clone()).await?; } block_producer.clone().submit_proven_transaction(request).await @@ -416,7 +402,7 @@ impl api_server::Api for RpcService { async fn submit_proven_batch( &self, request: tonic::Request, - ) -> Result, Status> { + ) -> Result, Status> { let Some(block_producer) = &self.block_producer else { return Err(Status::unavailable("Batch submission not available in read-only mode")); }; @@ -524,8 +510,8 @@ impl api_server::Api for RpcService { )] async fn get_account_proof( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); debug!(target: COMPONENT, ?request); @@ -562,17 +548,17 @@ impl api_server::Api for RpcService { Ok(Response::new(proto::rpc::RpcStatus { version: env!("CARGO_PKG_VERSION").to_string(), - store: store_status.or(Some(proto::rpc_store::StoreStatus { + store: store_status.or(Some(proto::rpc::StoreStatus { status: "unreachable".to_string(), chain_tip: 0, version: "-".to_string(), })), - block_producer: block_producer_status.or(Some( - proto::block_producer::BlockProducerStatus { - status: "unreachable".to_string(), - version: "-".to_string(), - }, - )), + block_producer: block_producer_status.or(Some(proto::rpc::BlockProducerStatus { + status: "unreachable".to_string(), + version: "-".to_string(), + chain_tip: 0, + mempool_stats: Some(MempoolStats::default()), + })), genesis_commitment: self.genesis_commitment.map(Into::into), })) } @@ -588,7 +574,7 @@ impl api_server::Api for RpcService { async fn get_note_script_by_root( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); self.store.clone().get_note_script_by_root(request).await @@ -604,12 +590,29 @@ impl api_server::Api for RpcService { )] async fn sync_transactions( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); self.store.clone().sync_transactions(request).await } + + #[instrument( + parent = None, + target = COMPONENT, + name = "rpc.server.get_limits", + skip_all, + ret(level = "debug"), + err + )] + async fn get_limits( + &self, + request: Request<()>, + ) -> Result, Status> { + debug!(target: COMPONENT, request = ?request); + + Ok(Response::new(RPC_LIMITS.clone())) + } } // LIMIT HELPERS @@ -625,3 +628,42 @@ fn out_of_range_error(err: E) -> Status { fn check(n: usize) -> Result<(), Status> { ::check(n).map_err(out_of_range_error) } + +/// Helper to build an [`EndpointLimits`](proto::rpc::EndpointLimits) from (name, limit) pairs. 
+fn endpoint_limits(params: &[(&str, usize)]) -> proto::rpc::EndpointLimits { + proto::rpc::EndpointLimits { + parameters: params.iter().map(|(k, v)| ((*k).to_string(), *v as u32)).collect(), + } +} + +/// Cached RPC query parameter limits. +static RPC_LIMITS: LazyLock = LazyLock::new(|| { + use { + QueryParamAccountIdLimit as AccountId, + QueryParamNoteIdLimit as NoteId, + QueryParamNoteTagLimit as NoteTag, + QueryParamNullifierLimit as Nullifier, + }; + + proto::rpc::RpcLimits { + endpoints: std::collections::HashMap::from([ + ( + "CheckNullifiers".into(), + endpoint_limits(&[(Nullifier::PARAM_NAME, Nullifier::LIMIT)]), + ), + ( + "SyncNullifiers".into(), + endpoint_limits(&[(Nullifier::PARAM_NAME, Nullifier::LIMIT)]), + ), + ( + "SyncState".into(), + endpoint_limits(&[ + (AccountId::PARAM_NAME, AccountId::LIMIT), + (NoteTag::PARAM_NAME, NoteTag::LIMIT), + ]), + ), + ("SyncNotes".into(), endpoint_limits(&[(NoteTag::PARAM_NAME, NoteTag::LIMIT)])), + ("GetNotesById".into(), endpoint_limits(&[(NoteId::PARAM_NAME, NoteId::LIMIT)])), + ]), + } +}); diff --git a/crates/rpc/src/server/mod.rs b/crates/rpc/src/server/mod.rs index c6b6349be..229907207 100644 --- a/crates/rpc/src/server/mod.rs +++ b/crates/rpc/src/server/mod.rs @@ -21,17 +21,17 @@ use crate::server::health::HealthCheckLayer; mod accept; mod api; mod health; -mod validator; /// The RPC server component. /// /// On startup, binds to the provided listener and starts serving the RPC API. -/// It connects lazily to the store and block producer components as needed. +/// It connects lazily to the store, validator and block producer components as needed. /// Requests will fail if the components are not available. pub struct Rpc { pub listener: TcpListener, pub store_url: Url, pub block_producer_url: Option, + pub validator_url: Url, /// Server-side timeout for an individual gRPC request. /// /// If the handler takes longer than this duration, the server cancels the call. @@ -44,7 +44,11 @@ impl Rpc { /// Note: Executes in place (i.e. not spawned) and will run indefinitely until /// a fatal error is encountered. pub async fn serve(self) -> anyhow::Result<()> { - let mut api = api::RpcService::new(self.store_url.clone(), self.block_producer_url.clone()); + let mut api = api::RpcService::new( + self.store_url.clone(), + self.block_producer_url.clone(), + self.validator_url, + ); let genesis = api .get_genesis_header_with_retry() @@ -80,7 +84,11 @@ impl Rpc { .layer(CatchPanicLayer::custom(catch_panic_layer_fn)) .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn)) .layer(HealthCheckLayer) - .layer(AcceptHeaderLayer::new(&rpc_version, genesis.commitment())) + .layer( + AcceptHeaderLayer::new(&rpc_version, genesis.commitment()) + .with_genesis_enforced_method("SubmitProvenTransaction") + .with_genesis_enforced_method("SubmitProvenBatch"), + ) .layer(cors_for_grpc_web_layer()) // Enables gRPC-web support. 
.layer(GrpcWebLayer::new()) diff --git a/crates/rpc/src/tests.rs b/crates/rpc/src/tests.rs index 6eaec910a..e80083319 100644 --- a/crates/rpc/src/tests.rs +++ b/crates/rpc/src/tests.rs @@ -3,16 +3,22 @@ use std::time::Duration; use http::header::{ACCEPT, CONTENT_TYPE}; use http::{HeaderMap, HeaderValue}; -use miden_lib::account::wallets::BasicWallet; -use miden_node_proto::clients::{Builder, Rpc as RpcClientMarker, RpcClient}; +use miden_node_proto::clients::{Builder, RpcClient}; use miden_node_proto::generated::rpc::api_client::ApiClient as ProtoClient; use miden_node_proto::generated::{self as proto}; use miden_node_store::Store; use miden_node_store::genesis::config::GenesisConfig; use miden_node_utils::fee::test_fee; -use miden_objects::Word; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{ +use miden_node_utils::limiter::{ + QueryParamAccountIdLimit, + QueryParamLimiter, + QueryParamNoteIdLimit, + QueryParamNoteTagLimit, + QueryParamNullifierLimit, +}; +use miden_protocol::Word; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ AccountBuilder, AccountDelta, AccountId, @@ -20,10 +26,12 @@ use miden_objects::account::{ AccountStorageMode, AccountType, }; -use miden_objects::testing::noop_auth_component::NoopAuthComponent; -use miden_objects::transaction::ProvenTransactionBuilder; -use miden_objects::utils::Serializable; -use miden_objects::vm::ExecutionProof; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::testing::noop_auth_component::NoopAuthComponent; +use miden_protocol::transaction::ProvenTransactionBuilder; +use miden_protocol::utils::Serializable; +use miden_protocol::vm::ExecutionProof; +use miden_standards::account::wallets::BasicWallet; use tempfile::TempDir; use tokio::net::TcpListener; use tokio::runtime::{self, Runtime}; @@ -36,7 +44,7 @@ use crate::Rpc; async fn rpc_server_accepts_requests_without_accept_header() { // Start the RPC. let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Override the client so that the ACCEPT header is not set. let mut rpc_client = { @@ -46,7 +54,7 @@ async fn rpc_server_accepts_requests_without_accept_header() { }; // Send any request to the RPC. - let request = proto::shared::BlockHeaderByNumberRequest { + let request = proto::rpc::BlockHeaderByNumberRequest { block_num: Some(0), include_mmr_proof: None, }; @@ -63,7 +71,7 @@ async fn rpc_server_accepts_requests_without_accept_header() { async fn rpc_server_accepts_requests_with_accept_header() { // Start the RPC. let (mut rpc_client, _, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Send any request to the RPC. let response = send_request(&mut rpc_client).await; @@ -80,7 +88,7 @@ async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { for version in ["1.9.0", "0.8.1", "0.8.0", "0.999.0", "99.0.0"] { // Start the RPC. let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Recreate the RPC client with an invalid version. 
let url = rpc_addr.to_string(); @@ -91,7 +99,8 @@ async fn rpc_server_rejects_requests_with_accept_header_invalid_version() { .with_timeout(Duration::from_secs(10)) .with_metadata_version(version.to_string()) .without_metadata_genesis() - .connect::() + .without_otel_context_injection() + .connect::() .await .unwrap(); @@ -121,7 +130,7 @@ async fn rpc_startup_is_robust_to_network_failures() { assert!(response.is_err()); // Start the store. - let (store_runtime, data_directory) = start_store(store_addr).await; + let (store_runtime, data_directory, _genesis) = start_store(store_addr).await; // Test: send request against RPC api and should succeed let response = send_request(&mut rpc_client).await; @@ -159,7 +168,7 @@ async fn rpc_startup_is_robust_to_network_failures() { async fn rpc_server_has_web_support() { // Start server let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; // Send a status request let client = reqwest::Client::new(); @@ -202,14 +211,17 @@ async fn rpc_server_has_web_support() { async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { // Start the RPC. let (_, rpc_addr, store_addr) = start_rpc().await; - let (store_runtime, _data_directory) = start_store(store_addr).await; + let (store_runtime, _data_directory, genesis) = start_store(store_addr).await; // Override the client so that the ACCEPT header is not set. - let mut rpc_client = { - let endpoint = tonic::transport::Endpoint::try_from(format!("http://{rpc_addr}")).unwrap(); - - ProtoClient::connect(endpoint).await.unwrap() - }; + let mut rpc_client = + miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) + .without_tls() + .with_timeout(Duration::from_secs(5)) + .without_metadata_version() + .with_metadata_genesis(genesis.to_hex()) + .without_otel_context_injection() + .connect_lazy::(); let account_id = AccountId::dummy( [0; 15], @@ -271,13 +283,83 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { assert!(response.is_err()); // Assert that the error is due to the invalid account delta commitment. + let err = response.as_ref().unwrap_err().message(); assert!( - response - .as_ref() - .err() - .unwrap() - .message() - .contains("failed to validate account delta in transaction account update"), + err.contains("failed to validate account delta in transaction account update"), + "expected error message to contain delta commitment error but got: {err}" + ); + + // Shutdown to avoid runtime drop error. + store_runtime.shutdown_background(); +} + +#[tokio::test] +async fn rpc_server_rejects_tx_submissions_without_genesis() { + // Start the RPC. + let (_, rpc_addr, store_addr) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + + // Override the client so that the ACCEPT header is not set. 
+ let mut rpc_client = + miden_node_proto::clients::Builder::new(Url::parse(&format!("http://{rpc_addr}")).unwrap()) + .without_tls() + .with_timeout(Duration::from_secs(5)) + .without_metadata_version() + .without_metadata_genesis() + .without_otel_context_injection() + .connect_lazy::(); + + let account_id = AccountId::dummy( + [0; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let account = AccountBuilder::new([0; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_assets(vec![]) + .with_component(BasicWallet) + .with_auth_component(NoopAuthComponent) + .build_existing() + .unwrap(); + + let account_delta: AccountDelta = account.clone().try_into().unwrap(); + + // Build a proven transaction to submit. + let tx = ProvenTransactionBuilder::new( + account_id, + [8; 32].try_into().unwrap(), + account.commitment(), + account_delta.clone().to_commitment(), // delta commitment + 0.into(), + Word::default(), + test_fee(), + u32::MAX.into(), + ExecutionProof::new_dummy(), + ) + .account_update_details(AccountUpdateDetails::Delta(account_delta)) + .build() + .unwrap(); + + let request = proto::transaction::ProvenTransaction { + transaction: tx.to_bytes(), + transaction_inputs: None, + }; + + let response = rpc_client.submit_proven_transaction(request).await; + + // Assert that the server rejected our request. + assert!(response.is_err()); + + // Assert that the error is due to the missing genesis parameter in the ACCEPT header. + let err = response.as_ref().unwrap_err().message(); + assert!( + err.contains( + "server does not support any of the specified application/vnd.miden content types" + ), + "expected error message to reference incompatible content media types but got: {err:?}" ); // Shutdown to avoid runtime drop error. @@ -287,9 +369,8 @@ async fn rpc_server_rejects_proven_transactions_with_invalid_commitment() { /// Sends an arbitrary / irrelevant request to the RPC. async fn send_request( rpc_client: &mut RpcClient, -) -> std::result::Result, tonic::Status> -{ - let request = proto::shared::BlockHeaderByNumberRequest { +) -> std::result::Result, tonic::Status> { + let request = proto::rpc::BlockHeaderByNumberRequest { block_num: Some(0), include_mmr_proof: None, }; @@ -320,10 +401,13 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) let store_url = Url::parse(&format!("http://{store_addr}")).unwrap(); // SAFETY: The block_producer_addr is always valid as it is created from a `SocketAddr`. let block_producer_url = Url::parse(&format!("http://{block_producer_addr}")).unwrap(); + // SAFETY: The validator URL is a valid dummy address; it is never actually contacted in this test. + let validator_url = Url::parse("http://127.0.0.1:0").unwrap(); Rpc { listener: rpc_listener, store_url, block_producer_url: Some(block_producer_url), + validator_url, grpc_timeout: Duration::from_secs(30), } .serve() @@ -338,18 +422,21 @@ async fn start_rpc() -> (RpcClient, std::net::SocketAddr, std::net::SocketAddr) .with_timeout(Duration::from_secs(10)) .without_metadata_version() .without_metadata_genesis() - .connect::() + .without_otel_context_injection() + .connect::() .await .expect("Failed to build client"); (rpc_client, rpc_addr, store_addr) } -async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir) { +async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir, Word) { // Start the store.
let data_directory = tempfile::tempdir().expect("tempdir should be created"); - let (genesis_state, _) = GenesisConfig::default().into_state().unwrap(); + let config = GenesisConfig::default(); + let signer = SecretKey::new(); + let (genesis_state, _) = config.into_state(signer).unwrap(); Store::bootstrap(genesis_state.clone(), data_directory.path()).expect("store should bootstrap"); let dir = data_directory.path().to_path_buf(); let rpc_listener = TcpListener::bind(store_addr).await.expect("store should bind a port"); @@ -375,5 +462,64 @@ async fn start_store(store_addr: SocketAddr) -> (Runtime, TempDir) { .await .expect("store should start serving"); }); - (store_runtime, data_directory) + ( + store_runtime, + data_directory, + genesis_state.into_block().unwrap().inner().header().commitment(), + ) +} + +#[tokio::test] +async fn get_limits_endpoint() { + // Start the RPC and store + let (mut rpc_client, _rpc_addr, store_addr) = start_rpc().await; + let (store_runtime, _data_directory, _genesis) = start_store(store_addr).await; + + // Call the get_limits endpoint + let response = rpc_client.get_limits(()).await.expect("get_limits should succeed"); + let limits = response.into_inner(); + + // Verify the response contains expected endpoints and limits + assert!(!limits.endpoints.is_empty(), "endpoints should not be empty"); + + // Verify CheckNullifiers endpoint + let check_nullifiers = + limits.endpoints.get("CheckNullifiers").expect("CheckNullifiers should exist"); + + assert_eq!( + check_nullifiers.parameters.get("nullifier"), + Some(&(QueryParamNullifierLimit::LIMIT as u32)), + "CheckNullifiers nullifier limit should be {}", + QueryParamNullifierLimit::LIMIT + ); + + // Verify SyncState endpoint has multiple parameters + let sync_state = limits.endpoints.get("SyncState").expect("SyncState should exist"); + assert_eq!( + sync_state.parameters.get(QueryParamAccountIdLimit::PARAM_NAME), + Some(&(QueryParamAccountIdLimit::LIMIT as u32)), + "SyncState {} limit should be {}", + QueryParamAccountIdLimit::PARAM_NAME, + QueryParamAccountIdLimit::LIMIT + ); + assert_eq!( + sync_state.parameters.get(QueryParamNoteTagLimit::PARAM_NAME), + Some(&(QueryParamNoteTagLimit::LIMIT as u32)), + "SyncState {} limit should be {}", + QueryParamNoteTagLimit::PARAM_NAME, + QueryParamNoteTagLimit::LIMIT + ); + + // Verify GetNotesById endpoint + let get_notes_by_id = limits.endpoints.get("GetNotesById").expect("GetNotesById should exist"); + assert_eq!( + get_notes_by_id.parameters.get(QueryParamNoteIdLimit::PARAM_NAME), + Some(&(QueryParamNoteIdLimit::LIMIT as u32)), + "GetNotesById {} limit should be {}", + QueryParamNoteIdLimit::PARAM_NAME, + QueryParamNoteIdLimit::LIMIT + ); + + // Shutdown to avoid runtime drop error. 
+ store_runtime.shutdown_background(); } diff --git a/crates/store/Cargo.toml b/crates/store/Cargo.toml index f490cf4b7..22037e4b9 100644 --- a/crates/store/Cargo.toml +++ b/crates/store/Cargo.toml @@ -19,36 +19,38 @@ anyhow = { workspace = true } deadpool = { default-features = false, features = ["managed", "rt_tokio_1"], version = "0.12" } deadpool-diesel = { features = ["sqlite"], version = "0.6" } deadpool-sync = { version = "0.1" } -diesel = { features = ["numeric", "sqlite"], version = "2.2" } -diesel_migrations = { features = ["sqlite"], version = "2.2" } +diesel = { features = ["numeric", "sqlite"], version = "2.3" } +diesel_migrations = { features = ["sqlite"], version = "2.3" } +fs-err = { workspace = true } hex = { version = "0.4" } indexmap = { workspace = true } -miden-lib = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { workspace = true } -miden-objects = { features = ["std"], workspace = true } -pretty_assertions = { workspace = true } -rand = { workspace = true } -rand_chacha = { workspace = true } -serde = { features = ["derive"], version = "1" } -thiserror = { workspace = true } -tokio = { features = ["fs", "rt-multi-thread"], workspace = true } -tokio-stream = { features = ["net"], workspace = true } -toml = { version = "0.9" } -tonic = { workspace = true } -tonic-reflection = { workspace = true } -tower-http = { features = ["util"], workspace = true } -tracing = { workspace = true } +miden-standards = { workspace = true } +# TODO remove `testing` from `miden-protocol`, required for `BlockProof::new_dummy` +miden-protocol = { features = ["std", "testing"], workspace = true } +pretty_assertions = { workspace = true } +rand = { workspace = true } +rand_chacha = { workspace = true } +serde = { features = ["derive"], version = "1" } +thiserror = { workspace = true } +tokio = { features = ["fs", "rt-multi-thread"], workspace = true } +tokio-stream = { features = ["net"], workspace = true } +toml = { version = "0.9" } +tonic = { default-features = true, workspace = true } +tonic-reflection = { workspace = true } +tower-http = { features = ["util"], workspace = true } +tracing = { workspace = true } [dev-dependencies] assert_matches = { workspace = true } criterion = { version = "0.5" } fs-err = { workspace = true } -miden-lib = { features = ["testing"], workspace = true } miden-node-test-macro = { workspace = true } miden-node-utils = { features = ["testing", "tracing-forest"], workspace = true } -miden-objects = { default-features = true, features = ["testing"], workspace = true } +miden-protocol = { default-features = true, features = ["testing"], workspace = true } +miden-standards = { features = ["testing"], workspace = true } rand = { workspace = true } regex = { version = "1.11" } termtree = { version = "0.5" } diff --git a/crates/store/benches/account_tree_historical.rs b/crates/store/benches/account_tree_historical.rs index dbb538d5a..ba7a5c2cc 100644 --- a/crates/store/benches/account_tree_historical.rs +++ b/crates/store/benches/account_tree_historical.rs @@ -1,14 +1,14 @@ use std::hint::black_box; use criterion::{BenchmarkId, Criterion, criterion_group, criterion_main}; -use miden_node_store::{AccountTreeWithHistory, InMemoryAccountTree}; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; -use 
miden_objects::crypto::hash::rpo::Rpo256; -use miden_objects::crypto::merkle::{LargeSmt, MemoryStorage}; -use miden_objects::testing::account_id::AccountIdBuilder; +use miden_node_store::AccountTreeWithHistory; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; +use miden_protocol::crypto::hash::rpo::Rpo256; +use miden_protocol::crypto::merkle::smt::{LargeSmt, MemoryStorage}; +use miden_protocol::testing::account_id::AccountIdBuilder; // HELPER FUNCTIONS // ================================================================================================ @@ -70,7 +70,7 @@ fn setup_vanilla_account_tree( fn setup_account_tree_with_history( num_accounts: usize, num_blocks: usize, -) -> (AccountTreeWithHistory, Vec) { +) -> (AccountTreeWithHistory, Vec) { let mut seed = [0u8; 32]; let storage = setup_storage(); let smt = LargeSmt::with_entries(storage, std::iter::empty()) @@ -164,7 +164,7 @@ fn bench_historical_access(c: &mut Criterion) { for &num_accounts in &account_counts { for &block_depth in &block_depths { - if block_depth > AccountTreeWithHistory::::MAX_HISTORY { + if block_depth > AccountTreeWithHistory::::MAX_HISTORY { continue; } diff --git a/crates/store/src/accounts/mod.rs b/crates/store/src/accounts/mod.rs index bf18b815a..c0a37be32 100644 --- a/crates/store/src/accounts/mod.rs +++ b/crates/store/src/accounts/mod.rs @@ -2,24 +2,26 @@ use std::collections::{BTreeMap, HashMap}; -use miden_objects::account::{AccountId, AccountIdPrefix}; -use miden_objects::block::account_tree::{AccountMutationSet, AccountTree}; -use miden_objects::block::{AccountWitness, BlockNumber}; -use miden_objects::crypto::merkle::{ - EmptySubtreeRoots, +use miden_protocol::account::{AccountId, AccountIdPrefix}; +use miden_protocol::block::BlockNumber; +use miden_protocol::block::account_tree::{AccountMutationSet, AccountTree, AccountWitness}; +use miden_protocol::crypto::merkle::smt::{ LargeSmt, LeafIndex, MemoryStorage, - MerkleError, - MerklePath, - NodeIndex, NodeMutation, SMT_DEPTH, SmtLeaf, SmtStorage, +}; +use miden_protocol::crypto::merkle::{ + EmptySubtreeRoots, + MerkleError, + MerklePath, + NodeIndex, SparseMerklePath, }; -use miden_objects::{AccountTreeError, EMPTY_WORD, Word}; +use miden_protocol::{AccountTreeError, EMPTY_WORD, Word}; #[cfg(test)] mod tests; @@ -27,78 +29,6 @@ mod tests; /// Convenience for an in-memory-only account tree. pub type InMemoryAccountTree = AccountTree>; -// ACCOUNT TREE STORAGE TRAIT -// ================================================================================================ - -/// Trait abstracting operations over different account tree backends. -pub trait AccountTreeStorage { - /// Returns the root hash of the tree. - fn root(&self) -> Word; - - /// Returns the number of accounts in the tree. - fn num_accounts(&self) -> usize; - - /// Opens an account and returns its witness. - fn open(&self, account_id: AccountId) -> AccountWitness; - - /// Gets the account state commitment. - fn get(&self, account_id: AccountId) -> Word; - - /// Computes mutations for applying account updates. - fn compute_mutations( - &self, - accounts: impl IntoIterator, - ) -> Result; - - /// Applies mutations with reversion data. - fn apply_mutations_with_reversion( - &mut self, - mutations: AccountMutationSet, - ) -> Result; - - /// Checks if the tree contains an account with the given prefix. 
- fn contains_account_id_prefix(&self, prefix: AccountIdPrefix) -> bool; -} - -impl AccountTreeStorage for AccountTree> -where - S: SmtStorage, -{ - fn root(&self) -> Word { - self.root() - } - - fn num_accounts(&self) -> usize { - self.num_accounts() - } - - fn open(&self, account_id: AccountId) -> AccountWitness { - self.open(account_id) - } - - fn get(&self, account_id: AccountId) -> Word { - self.get(account_id) - } - - fn compute_mutations( - &self, - accounts: impl IntoIterator, - ) -> Result { - self.compute_mutations(accounts) - } - - fn apply_mutations_with_reversion( - &mut self, - mutations: AccountMutationSet, - ) -> Result { - self.apply_mutations_with_reversion(mutations) - } - - fn contains_account_id_prefix(&self, prefix: AccountIdPrefix) -> bool { - self.contains_account_id_prefix(prefix) - } -} - // HISTORICAL ERROR TYPES // ================================================================================================ @@ -178,31 +108,25 @@ impl HistoricalOverlay { /// This structure maintains a sliding window of historical account states by storing /// reversion data (mutations that undo changes). Historical witnesses are reconstructed /// by starting from the latest state and applying reversion overlays backwards in time. -#[derive(Debug, Clone)] -pub struct AccountTreeWithHistory -where - S: AccountTreeStorage, -{ +#[derive(Debug)] +pub struct AccountTreeWithHistory { /// The current block number (latest state). block_number: BlockNumber, /// The latest account tree state. - latest: S, + latest: AccountTree>, /// Historical overlays indexed by block number, storing reversion data. overlays: BTreeMap, } -impl AccountTreeWithHistory -where - S: AccountTreeStorage, -{ +impl AccountTreeWithHistory { /// Maximum number of historical blocks to maintain. - pub const MAX_HISTORY: usize = 33; + pub const MAX_HISTORY: usize = 50; // CONSTRUCTORS // -------------------------------------------------------------------------------------------- /// Creates a new historical tree starting at the given block number. 
- pub fn new(account_tree: S, block_number: BlockNumber) -> Self { + pub fn new(account_tree: AccountTree>, block_number: BlockNumber) -> Self { Self { block_number, latest: account_tree, diff --git a/crates/store/src/accounts/tests.rs b/crates/store/src/accounts/tests.rs index 9d90e975e..5880d3982 100644 --- a/crates/store/src/accounts/tests.rs +++ b/crates/store/src/accounts/tests.rs @@ -6,12 +6,12 @@ #[allow(clippy::uninlined_format_args)] #[allow(clippy::cast_sign_loss)] mod account_tree_with_history_tests { - use miden_objects::Word; - use miden_objects::account::AccountId; - use miden_objects::block::BlockNumber; - use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; - use miden_objects::crypto::merkle::{LargeSmt, MemoryStorage}; - use miden_objects::testing::account_id::AccountIdBuilder; + use miden_protocol::Word; + use miden_protocol::account::AccountId; + use miden_protocol::block::BlockNumber; + use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key}; + use miden_protocol::crypto::merkle::smt::{LargeSmt, MemoryStorage}; + use miden_protocol::testing::account_id::AccountIdBuilder; use super::super::*; @@ -84,8 +84,7 @@ mod account_tree_with_history_tests { #[test] fn test_history_limits() { - const MAX_HIST: u32 = - AccountTreeWithHistory::>>::MAX_HISTORY as u32; + const MAX_HIST: u32 = AccountTreeWithHistory::::MAX_HISTORY as u32; use assert_matches::assert_matches; let id = AccountIdBuilder::new().build_with_seed([30; 32]); diff --git a/crates/store/src/blocks.rs b/crates/store/src/blocks.rs index 5c9570252..e771332ba 100644 --- a/crates/store/src/blocks.rs +++ b/crates/store/src/blocks.rs @@ -2,8 +2,8 @@ use std::io::ErrorKind; use std::ops::Not; use std::path::PathBuf; -use miden_lib::utils::Serializable; -use miden_objects::block::BlockNumber; +use miden_protocol::block::BlockNumber; +use miden_protocol::utils::Serializable; use tracing::instrument; use crate::COMPONENT; @@ -31,7 +31,7 @@ impl BlockStore { fields(path = %store_dir.display()), )] pub fn bootstrap(store_dir: PathBuf, genesis_block: &GenesisBlock) -> std::io::Result { - std::fs::create_dir(&store_dir)?; + fs_err::create_dir(&store_dir)?; let block_store = Self { store_dir }; block_store.save_block_blocking(BlockNumber::GENESIS, &genesis_block.inner().to_bytes())?; @@ -55,7 +55,7 @@ impl BlockStore { /// /// See also: [`std::fs::metadata`]. pub fn load(store_dir: PathBuf) -> std::io::Result { - let meta = std::fs::metadata(&store_dir)?; + let meta = fs_err::metadata(&store_dir)?; if meta.is_dir().not() { return Err(ErrorKind::NotADirectory.into()); } @@ -101,10 +101,10 @@ impl BlockStore { ) -> Result<(), std::io::Error> { let (epoch_path, block_path) = self.epoch_block_path(block_num)?; if !epoch_path.exists() { - std::fs::create_dir_all(epoch_path)?; + fs_err::create_dir_all(epoch_path)?; } - std::fs::write(block_path, data) + fs_err::write(block_path, data) } // HELPER FUNCTIONS diff --git a/crates/store/src/db/migrations.rs b/crates/store/src/db/migrations.rs index ad78548c6..01521e578 100644 --- a/crates/store/src/db/migrations.rs +++ b/crates/store/src/db/migrations.rs @@ -3,6 +3,7 @@ use diesel_migrations::{EmbeddedMigrations, MigrationHarness, embed_migrations}; use tracing::instrument; use crate::COMPONENT; +use crate::db::schema_hash::verify_schema; // The rebuild is automatically triggered by `build.rs` as described in // . 
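The migrations hook in the next hunk calls `verify_schema` after pending migrations apply cleanly, but the `schema_hash` module itself is not part of this diff. The sketch below shows one plausible shape for such a check; the `sqlite_master` query, the digest choice, and the `EXPECTED_SCHEMA_HASH` constant are all assumptions, not the module's actual contents:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

use diesel::prelude::*;
use diesel::sql_query;
use diesel::sql_types::Text;

#[derive(QueryableByName)]
struct SchemaRow {
    #[diesel(sql_type = Text)]
    sql: String,
}

/// Hypothetical digest of the schema produced by the committed migrations.
const EXPECTED_SCHEMA_HASH: u64 = 0; // placeholder, not the real value

fn verify_schema_sketch(conn: &mut SqliteConnection) -> anyhow::Result<()> {
    // Collect the DDL of all user tables in a stable order so the digest is
    // deterministic for a given schema.
    let rows: Vec<SchemaRow> = sql_query(
        "SELECT sql FROM sqlite_master WHERE type = 'table' AND sql IS NOT NULL ORDER BY name",
    )
    .load(conn)?;

    let mut hasher = DefaultHasher::new();
    for row in rows {
        row.sql.hash(&mut hasher);
    }

    // NOTE: `DefaultHasher` is not stable across Rust releases; a real check
    // would pin a concrete hash function.
    anyhow::ensure!(
        hasher.finish() == EXPECTED_SCHEMA_HASH,
        "database schema does not match the migrations checked into the repo",
    );
    Ok(())
}
```

Failing fast here turns silent schema drift (for example, a hand-edited database) into an explicit startup error instead of undefined query behaviour.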
@@ -17,6 +18,8 @@ pub fn apply_migrations( tracing::info!(target = COMPONENT, "Applying {} migration(s)", migrations.len()); let Err(e) = conn.run_pending_migrations(MIGRATIONS) else { + // Migrations applied successfully; verify the schema hash. + verify_schema(conn)?; return Ok(()); }; tracing::warn!(target = COMPONENT, "Failed to apply migration: {e:?}"); diff --git a/crates/store/src/db/migrations/2025062000000_setup/up.sql b/crates/store/src/db/migrations/2025062000000_setup/up.sql index 7235ad1be..adf06e2a3 100644 --- a/crates/store/src/db/migrations/2025062000000_setup/up.sql +++ b/crates/store/src/db/migrations/2025062000000_setup/up.sql @@ -18,22 +18,29 @@ CREATE TABLE accounts ( block_num INTEGER NOT NULL, account_commitment BLOB NOT NULL, code_commitment BLOB, - storage BLOB, - vault BLOB, nonce INTEGER, + storage_header BLOB, -- Serialized AccountStorageHeader from miden-protocol + vault_root BLOB, -- Vault root commitment + is_latest BOOLEAN NOT NULL DEFAULT 0, -- Indicates if this is the latest state for this account_id + created_at_block INTEGER NOT NULL, - PRIMARY KEY (account_id), - FOREIGN KEY (block_num) REFERENCES block_headers(block_num), - FOREIGN KEY (code_commitment) REFERENCES account_codes(code_commitment), + PRIMARY KEY (account_id, block_num), CONSTRAINT all_null_or_none_null CHECK ( - (code_commitment IS NOT NULL AND storage IS NOT NULL AND vault IS NOT NULL AND nonce IS NOT NULL) + (code_commitment IS NOT NULL AND nonce IS NOT NULL AND storage_header IS NOT NULL AND vault_root IS NOT NULL) OR - (code_commitment IS NULL AND storage IS NULL AND vault IS NULL AND nonce IS NULL) + (code_commitment IS NULL AND nonce IS NULL AND storage_header IS NULL AND vault_root IS NULL) ) ) WITHOUT ROWID; CREATE INDEX idx_accounts_network_prefix ON accounts(network_account_id_prefix) WHERE network_account_id_prefix IS NOT NULL; +CREATE INDEX idx_accounts_id_block ON accounts(account_id, block_num DESC); +CREATE INDEX idx_accounts_latest ON accounts(account_id, is_latest) WHERE is_latest = 1; +CREATE INDEX idx_accounts_created_at_block ON accounts(created_at_block); +-- Index for joining with block_headers +CREATE INDEX idx_accounts_block_num ON accounts(block_num); +-- Index for joining with account_codes +CREATE INDEX idx_accounts_code_commitment ON accounts(code_commitment) WHERE code_commitment IS NOT NULL; CREATE TABLE notes ( committed_at INTEGER NOT NULL, -- Block number when the note was committed @@ -56,9 +63,6 @@ CREATE TABLE notes ( serial_num BLOB, PRIMARY KEY (committed_at, batch_index, note_index), - FOREIGN KEY (committed_at) REFERENCES block_headers(block_num), - FOREIGN KEY (sender) REFERENCES accounts(account_id), - FOREIGN KEY (script_root) REFERENCES note_scripts(script_root), CONSTRAINT notes_type_in_enum CHECK (note_type BETWEEN 1 AND 3), CONSTRAINT notes_execution_mode_in_enum CHECK (execution_mode BETWEEN 0 AND 1), CONSTRAINT notes_consumed_at_is_u32 CHECK (consumed_at BETWEEN 0 AND 0xFFFFFFFF), @@ -72,6 +76,12 @@ CREATE INDEX idx_notes_sender ON notes(sender, committed_at); CREATE INDEX idx_notes_tag ON notes(tag, committed_at); CREATE INDEX idx_notes_nullifier ON notes(nullifier); CREATE INDEX idx_unconsumed_network_notes ON notes(execution_mode, consumed_at); +-- Index for joining with block_headers on committed_at +CREATE INDEX idx_notes_committed_at ON notes(committed_at); +-- Index for joining with note_scripts +CREATE INDEX idx_notes_script_root ON notes(script_root) WHERE script_root IS NOT NULL; +-- Index for joining with block_headers on consumed_at
+CREATE INDEX idx_notes_consumed_at ON notes(consumed_at) WHERE consumed_at IS NOT NULL; CREATE TABLE note_scripts ( script_root BLOB NOT NULL, @@ -81,30 +91,37 @@ CREATE TABLE note_scripts ( ) WITHOUT ROWID; CREATE TABLE account_storage_map_values ( - account_id BLOB NOT NULL, + account_id BLOB NOT NULL, block_num INTEGER NOT NULL, - slot INTEGER NOT NULL, + slot_name TEXT NOT NULL, key BLOB NOT NULL, value BLOB NOT NULL, - is_latest_update BOOLEAN NOT NULL, + is_latest BOOLEAN NOT NULL, - PRIMARY KEY (account_id, block_num, slot, key), - CONSTRAINT slot_is_u8 CHECK (slot BETWEEN 0 AND 0xFF) + PRIMARY KEY (account_id, block_num, slot_name, key), + FOREIGN KEY (account_id, block_num) REFERENCES accounts(account_id, block_num) ON DELETE CASCADE ) WITHOUT ROWID; -CREATE INDEX asm_latest_by_acct_block_slot_key ON account_storage_map_values(account_id, block_num); +-- Index for joining with accounts table on compound key +CREATE INDEX idx_account_storage_account_block ON account_storage_map_values(account_id, block_num); +-- Index for querying latest values +CREATE INDEX idx_account_storage_latest ON account_storage_map_values(account_id, is_latest) WHERE is_latest = 1; CREATE TABLE account_vault_assets ( account_id BLOB NOT NULL, block_num INTEGER NOT NULL, vault_key BLOB NOT NULL, asset BLOB, - is_latest_update BOOLEAN NOT NULL, + is_latest BOOLEAN NOT NULL, - PRIMARY KEY (account_id, block_num, vault_key) + PRIMARY KEY (account_id, block_num, vault_key), + FOREIGN KEY (account_id, block_num) REFERENCES accounts(account_id, block_num) ON DELETE CASCADE ) WITHOUT ROWID; -CREATE INDEX idx_vault_assets_id_block ON account_vault_assets (account_id, block_num); +-- Index for joining with accounts table on compound key +CREATE INDEX idx_vault_assets_account_block ON account_vault_assets(account_id, block_num); +-- Index for querying latest assets +CREATE INDEX idx_vault_assets_latest ON account_vault_assets(account_id, is_latest) WHERE is_latest = 1; CREATE TABLE nullifiers ( nullifier BLOB NOT NULL, @@ -112,12 +129,12 @@ CREATE TABLE nullifiers ( block_num INTEGER NOT NULL, PRIMARY KEY (nullifier), - FOREIGN KEY (block_num) REFERENCES block_headers(block_num), CONSTRAINT nullifiers_nullifier_is_digest CHECK (length(nullifier) = 32), CONSTRAINT nullifiers_nullifier_prefix_is_u16 CHECK (nullifier_prefix BETWEEN 0 AND 0xFFFF) ) WITHOUT ROWID; CREATE INDEX idx_nullifiers_prefix ON nullifiers(nullifier_prefix); +-- Index for joining with block_headers CREATE INDEX idx_nullifiers_block_num ON nullifiers(block_num); CREATE TABLE transactions ( @@ -126,14 +143,14 @@ CREATE TABLE transactions ( block_num INTEGER NOT NULL, -- Block number in which the transaction was included. initial_state_commitment BLOB NOT NULL, -- State of the account before applying the transaction. final_state_commitment BLOB NOT NULL, -- State of the account after applying the transaction. - input_notes BLOB NOT NULL, -- Serialized vector with the Nullifier of the input notes. + nullifiers BLOB NOT NULL, -- Serialized vector with the Nullifier of the input notes. output_notes BLOB NOT NULL, -- Serialized vector with the NoteId of the output notes. size_in_bytes INTEGER NOT NULL, -- Estimated size of the row in bytes, considering the size of the input and output notes. 
- PRIMARY KEY (transaction_id), - FOREIGN KEY (account_id) REFERENCES accounts(account_id), - FOREIGN KEY (block_num) REFERENCES block_headers(block_num) + PRIMARY KEY (transaction_id) ) WITHOUT ROWID; +-- Index for joining with accounts (note: account may not exist in accounts table) CREATE INDEX idx_transactions_account_id ON transactions(account_id); +-- Index for joining with block_headers CREATE INDEX idx_transactions_block_num ON transactions(block_num); diff --git a/crates/store/src/db/mod.rs b/crates/store/src/db/mod.rs index a7701a060..8a5a835a4 100644 --- a/crates/store/src/db/mod.rs +++ b/crates/store/src/db/mod.rs @@ -1,18 +1,17 @@ -use std::collections::{BTreeMap, BTreeSet}; +use std::collections::{BTreeMap, BTreeSet, HashSet}; use std::ops::RangeInclusive; use std::path::PathBuf; use anyhow::Context; use diesel::{Connection, RunQueryDsl, SqliteConnection}; -use miden_lib::utils::{Deserializable, Serializable}; use miden_node_proto::domain::account::{AccountInfo, AccountSummary, NetworkAccountPrefix}; use miden_node_proto::generated as proto; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::asset::{Asset, AssetVaultKey}; -use miden_objects::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; -use miden_objects::crypto::merkle::SparseMerklePath; -use miden_objects::note::{ +use miden_protocol::Word; +use miden_protocol::account::{AccountHeader, AccountId, AccountStorage}; +use miden_protocol::asset::{Asset, AssetVaultKey}; +use miden_protocol::block::{BlockHeader, BlockNoteIndex, BlockNumber, ProvenBlock}; +use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::note::{ NoteDetails, NoteId, NoteInclusionProof, @@ -20,7 +19,8 @@ use miden_objects::note::{ NoteScript, Nullifier, }; -use miden_objects::transaction::TransactionId; +use miden_protocol::transaction::TransactionId; +use miden_protocol::utils::{Deserializable, Serializable}; use tokio::sync::oneshot; use tracing::{info, info_span, instrument}; @@ -36,6 +36,7 @@ use crate::genesis::GenesisBlock; pub(crate) mod manager; mod migrations; +mod schema_hash; #[cfg(test)] mod tests; @@ -100,8 +101,8 @@ pub struct TransactionRecord { pub account_id: AccountId, pub initial_state_commitment: Word, pub final_state_commitment: Word, - pub input_notes: Vec<Nullifier>, // Store nullifiers for input notes - pub output_notes: Vec<NoteId>, // Store note IDs for output notes + pub nullifiers: Vec<Nullifier>, // Store nullifiers for input notes + pub output_notes: Vec<NoteId>, // Store note IDs for output notes } impl TransactionRecord { @@ -111,16 +112,16 @@ impl TransactionRecord { pub fn into_proto_with_note_records( self, note_records: Vec<NoteRecord>, - ) -> proto::rpc_store::TransactionRecord { + ) -> proto::rpc::TransactionRecord { let output_notes: Vec = note_records.into_iter().map(Into::into).collect(); - proto::rpc_store::TransactionRecord { - transaction_header: Some(proto::transaction::TransactionHeader { + proto::rpc::TransactionRecord { + header: Some(proto::transaction::TransactionHeader { account_id: Some(self.account_id.into()), initial_state_commitment: Some(self.initial_state_commitment.into()), final_state_commitment: Some(self.final_state_commitment.into()), - input_notes: self.input_notes.into_iter().map(From::from).collect(), + nullifiers: self.nullifiers.into_iter().map(From::from).collect(), output_notes, }), block_num: self.block_num.as_u32(), @@ -250,8 +251,8 @@ impl Db { genesis.header(), &[], &[], - genesis.updated_accounts(), - genesis.transactions(), + genesis.body().updated_accounts(), +
genesis.body().transactions(), ) }) .context("failed to insert genesis block")?; @@ -392,7 +393,7 @@ impl Db { .await } - /// Loads all the account commitments from the DB. + /// TODO: marked for removal; replace with a paged version. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_all_account_commitments(&self) -> Result<Vec<(AccountId, Word)>> { self.transact("read all account commitments", move |conn| { @@ -401,6 +402,16 @@ impl Db { .await } + /// Returns all account IDs that have public state. + #[allow(dead_code)] // Will be used by InnerForest in next PR + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_public_account_ids(&self) -> Result<Vec<AccountId>> { + self.transact("read all public account IDs", move |conn| { + queries::select_all_public_account_ids(conn) + }) + .await + } + /// Loads public account details from the DB. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_account(&self, id: AccountId) -> Result<AccountInfo> { @@ -420,6 +431,88 @@ impl Db { .await } + /// Returns network account IDs within the specified block range (based on account creation + /// block). + /// + /// The function may return fewer accounts than exist in the range if the result would exceed + /// `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows. In this case, the result is + /// truncated at a block boundary to ensure all accounts from included blocks are returned. + /// + /// # Returns + /// + /// A tuple containing: + /// - A vector of network account IDs. + /// - The last block number that was fully included in the result. When truncated, this will be + /// less than the requested range end. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_all_network_account_ids( + &self, + block_range: RangeInclusive<BlockNumber>, + ) -> Result<(Vec<AccountId>, BlockNumber)> { + self.transact("Get all network account IDs", move |conn| { + queries::select_all_network_account_ids(conn, block_range) + }) + .await + } + + /// Reconstructs account storage at a specific block from the database. + /// + /// This method queries the decomposed storage tables and reconstructs the full + /// `AccountStorage` with SMT backing for Map slots. + // TODO: split querying the header from the content + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_account_storage_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result<AccountStorage> { + self.transact("Get account storage at block", move |conn| { + queries::select_account_storage_at_block(conn, account_id, block_num) + }) + .await + } + + /// Queries vault assets at a specific block. + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] + pub async fn select_account_vault_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result<Vec<Asset>> { + self.transact("Get account vault at block", move |conn| { + queries::select_account_vault_at_block(conn, account_id, block_num) + }) + .await + } + + /// Queries the account code by its commitment hash. + /// + /// Returns `None` if no code exists with that commitment.
+ pub async fn select_account_code_by_commitment( + &self, + code_commitment: Word, + ) -> Result<Option<Vec<u8>>> { + self.transact("Get account code by commitment", move |conn| { + queries::select_account_code_by_commitment(conn, code_commitment) + }) + .await + } + + /// Queries the account header for a specific account at a specific block number. + /// + /// Returns `None` if the account doesn't exist at that block. + pub async fn select_account_header_at_block( + &self, + account_id: AccountId, + block_num: BlockNumber, + ) -> Result<Option<AccountHeader>> { + self.transact("Get account header at block", move |conn| { + queries::select_account_header_at_block(conn, account_id, block_num) + .map(|opt| opt.map(|(header, _storage_header)| header)) + }) + .await + } + #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn get_state_sync( &self, @@ -445,7 +538,7 @@ impl Db { .await } - /// Loads all the [`miden_objects::note::Note`]s matching a certain [`NoteId`] from the + /// Loads all the [`miden_protocol::note::Note`]s matching a certain [`NoteId`] from the /// database. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] pub async fn select_notes_by_id(&self, note_ids: Vec<NoteId>) -> Result<Vec<NoteRecord>> { self.transact("note by id", move |conn| { @@ -455,15 +548,14 @@ impl Db { .await } - /// Loads all the [`NoteRecord`]s matching a certain note commitment from the - /// database. + /// Returns all note commitments from the DB that match the provided ones. #[instrument(level = "debug", target = COMPONENT, skip_all, ret(level = "debug"), err)] - pub async fn select_notes_by_commitment( + pub async fn select_existing_note_commitments( &self, note_commitments: Vec<Word>, - ) -> Result<Vec<NoteRecord>> { + ) -> Result<HashSet<Word>> { self.transact("note by commitment", move |conn| { - queries::select_notes_by_commitment(conn, note_commitments.as_slice()) + queries::select_existing_note_commitments(conn, note_commitments.as_slice()) }) .await } @@ -501,9 +593,9 @@ impl Db { conn, block.header(), &notes, - block.created_nullifiers(), - block.updated_accounts(), - block.transactions(), + block.body().created_nullifiers(), + block.body().updated_accounts(), + block.body().transactions(), )?; // XXX FIXME TODO free floating mutex MUST NOT exist @@ -534,33 +626,9 @@ impl Db { .await } - /// Runs database optimization. - #[instrument(level = "debug", target = COMPONENT, skip_all, err)] - pub async fn optimize(&self) -> Result<(), DatabaseError> { - self.transact("db optimization", |conn| { - diesel::sql_query("PRAGMA optimize") - .execute(conn) - .map_err(DatabaseError::Diesel) - }) - .await?; - Ok(()) - } - - /// Loads the network notes that have not been consumed yet, using pagination to limit the - /// number of notes returned. - pub(crate) async fn select_unconsumed_network_notes( - &self, - page: Page, - ) -> Result<(Vec<NoteRecord>, Page)> { - self.transact("unconsumed network notes", move |conn| { - models::queries::unconsumed_network_notes(conn, page) - }) - .await - } - /// Loads the network notes for an account that are unconsumed by a specified block number. /// Pagination is used to limit the number of notes returned.
- pub(crate) async fn select_unconsumed_network_notes_for_account( + pub(crate) async fn select_unconsumed_network_notes( &self, network_account_id_prefix: NetworkAccountPrefix, block_num: BlockNumber, diff --git a/crates/store/src/db/models/conv.rs b/crates/store/src/db/models/conv.rs index ffc7b80f6..37a9b019f 100644 --- a/crates/store/src/db/models/conv.rs +++ b/crates/store/src/db/models/conv.rs @@ -32,35 +32,47 @@ on relevant platforms" )] -use std::any::type_name; - -use miden_node_proto::domain::account::{NetworkAccountError, NetworkAccountPrefix}; -use miden_objects::Felt; -use miden_objects::block::BlockNumber; -use miden_objects::note::{NoteExecutionMode, NoteTag}; +use miden_node_proto::domain::account::NetworkAccountPrefix; +use miden_protocol::Felt; +use miden_protocol::account::{StorageSlotName, StorageSlotType}; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::{NoteExecutionMode, NoteTag}; #[derive(Debug, thiserror::Error)] -#[error("failed to convert a database value to it's in memory type {0}")] -pub struct DatabaseTypeConversionError(&'static str); +#[error("failed to convert from database type {from_type} into {into_type}")] +pub struct DatabaseTypeConversionError { + source: Box<dyn std::error::Error + Send + Sync>, + from_type: &'static str, + into_type: &'static str, +} /// Convert from and to it's database representation and back /// /// We do not assume sanity of DB types. pub(crate) trait SqlTypeConvert: Sized { type Raw: Sized; - type Error: std::error::Error + Send + Sync + 'static; + fn to_raw_sql(self) -> Self::Raw; - fn from_raw_sql(_raw: Self::Raw) -> Result<Self, Self::Error>; + fn from_raw_sql(_raw: Self::Raw) -> Result<Self, DatabaseTypeConversionError>; + + fn map_err<E: std::error::Error + Send + Sync + 'static>( + source: E, + ) -> DatabaseTypeConversionError { + DatabaseTypeConversionError { + source: Box::new(source), + from_type: std::any::type_name::<Self::Raw>(), + into_type: std::any::type_name::<Self>(), + } + } } impl SqlTypeConvert for BlockNumber { type Raw = i64; - type Error = DatabaseTypeConversionError; - fn from_raw_sql(raw: Self::Raw) -> Result<Self, Self::Error> { - u32::try_from(raw) - .map(BlockNumber::from) - .map_err(|_| DatabaseTypeConversionError(type_name::<Self>())) + + fn from_raw_sql(raw: Self::Raw) -> Result<Self, DatabaseTypeConversionError> { + u32::try_from(raw).map(BlockNumber::from).map_err(Self::map_err) } + fn to_raw_sql(self) -> Self::Raw { i64::from(self.as_u32()) } @@ -68,10 +80,9 @@ impl SqlTypeConvert for BlockNumber { impl SqlTypeConvert for NetworkAccountPrefix { type Raw = i64; - type Error = DatabaseTypeConversionError; - fn from_raw_sql(raw: Self::Raw) -> Result<Self, Self::Error> { - NetworkAccountPrefix::try_from(raw as u32) - .map_err(|_e| DatabaseTypeConversionError(type_name::<Self>())) + + fn from_raw_sql(raw: Self::Raw) -> Result<Self, DatabaseTypeConversionError> { + NetworkAccountPrefix::try_from(raw as u32).map_err(Self::map_err) } fn to_raw_sql(self) -> Self::Raw { i64::from(self.inner()) @@ -80,14 +91,19 @@ impl SqlTypeConvert for NetworkAccountPrefix { impl SqlTypeConvert for NoteExecutionMode { type Raw = i32; - type Error = DatabaseTypeConversionError; #[inline(always)] - fn from_raw_sql(raw: Self::Raw) -> Result<Self, Self::Error> { + fn from_raw_sql(raw: Self::Raw) -> Result<Self, DatabaseTypeConversionError> { + #[derive(Debug, thiserror::Error)] + #[error("valid values are 0 or 1 but found {0}")] + struct ValueError(i32); + Ok(match raw { 0 => Self::Network, 1 => Self::Local, - _ => return Err(DatabaseTypeConversionError(type_name::<Self>())), + invalid => { + return Err(Self::map_err(ValueError(invalid))); + }, }) } @@ -102,10 +118,9 @@ impl SqlTypeConvert for NoteTag { type Raw = i32; - type Error = DatabaseTypeConversionError; #[inline(always)] - fn
from_raw_sql(raw: Self::Raw) -> Result<Self, Self::Error> { + fn from_raw_sql(raw: Self::Raw) -> Result<Self, DatabaseTypeConversionError> { #[allow(clippy::cast_sign_loss)] Ok(NoteTag::from(raw as u32)) } @@ -116,6 +131,45 @@ impl SqlTypeConvert for NoteTag { } } +impl SqlTypeConvert for StorageSlotType { + type Raw = i32; + + #[inline(always)] + fn from_raw_sql(raw: Self::Raw) -> Result<Self, DatabaseTypeConversionError> { + #[derive(Debug, thiserror::Error)] + #[error("invalid storage slot type value {0}")] + struct ValueError(i32); + + Ok(match raw { + 0 => StorageSlotType::Value, + 1 => StorageSlotType::Map, + invalid => { + return Err(Self::map_err(ValueError(invalid))); + }, + }) + } + + #[inline(always)] + fn to_raw_sql(self) -> Self::Raw { + match self { + StorageSlotType::Value => 0, + StorageSlotType::Map => 1, + } + } +} + +impl SqlTypeConvert for StorageSlotName { + type Raw = String; + + fn from_raw_sql(raw: Self::Raw) -> Result<Self, DatabaseTypeConversionError> { + StorageSlotName::new(raw).map_err(Self::map_err) + } + + fn to_raw_sql(self) -> Self::Raw { + String::from(self) + } +} + // Raw type conversions - eventually introduce wrapper types // =========================================================== @@ -130,25 +184,15 @@ pub(crate) fn nullifier_prefix_to_raw_sql(prefix: u16) -> i32 { } #[inline(always)] -pub(crate) fn raw_sql_to_nonce(raw: i64) -> u64 { +pub(crate) fn raw_sql_to_nonce(raw: i64) -> Felt { debug_assert!(raw >= 0); - raw as u64 + Felt::new(raw as u64) } #[inline(always)] pub(crate) fn nonce_to_raw_sql(nonce: Felt) -> i64 { nonce.as_int() as i64 } -#[inline(always)] -pub(crate) fn raw_sql_to_slot(raw: i32) -> u8 { - debug_assert!(raw >= 0); - raw as u8 -} -#[inline(always)] -pub(crate) fn slot_to_raw_sql(slot: u8) -> i32 { - i32::from(slot) -} - #[inline(always)] pub(crate) fn raw_sql_to_fungible_delta(raw: i64) -> i64 { raw diff --git a/crates/store/src/db/models/queries/accounts.rs b/crates/store/src/db/models/queries/accounts.rs index b658d9275..c1ad88d2b 100644 --- a/crates/store/src/db/models/queries/accounts.rs +++ b/crates/store/src/db/models/queries/accounts.rs @@ -1,3 +1,4 @@ +use std::collections::BTreeMap; use std::ops::RangeInclusive; use diesel::prelude::{Queryable, QueryableByName}; @@ -8,8 +9,6 @@ use diesel::{ BoolExpressionMethods, ExpressionMethods, Insertable, - JoinOnDsl, - NullableExpressionMethods, OptionalExtension, QueryDsl, RunQueryDsl, @@ -17,41 +16,90 @@ use diesel::{ SelectableHelper, SqliteConnection, }; -use miden_lib::utils::{Deserializable, Serializable}; use miden_node_proto as proto; use miden_node_proto::domain::account::{AccountInfo, AccountSummary}; -use miden_node_utils::limiter::{QueryParamAccountIdLimit, QueryParamLimiter}; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{ +use miden_node_utils::limiter::{ + MAX_RESPONSE_PAYLOAD_BYTES, + QueryParamAccountIdLimit, + QueryParamLimiter, +}; +use miden_protocol::Word; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ Account, AccountCode, AccountDelta, AccountId, AccountStorage, + AccountStorageHeader, NonFungibleDeltaAction, + StorageMap, StorageSlot, + StorageSlotContent, + StorageSlotName, + StorageSlotType, }; -use miden_objects::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; -use miden_objects::block::{BlockAccountUpdate, BlockNumber}; -use miden_objects::{Felt, Word}; - -use crate::db::models::conv::{ - SqlTypeConvert, - nonce_to_raw_sql, - raw_sql_to_nonce, - raw_sql_to_slot, - slot_to_raw_sql, -}; +use miden_protocol::asset::{Asset, AssetVault, AssetVaultKey, FungibleAsset}; +use
miden_protocol::block::{BlockAccountUpdate, BlockNumber}; +use miden_protocol::utils::{Deserializable, Serializable}; + +use crate::db::models::conv::{SqlTypeConvert, nonce_to_raw_sql, raw_sql_to_nonce}; use crate::db::models::{serialize_vec, vec_raw_try_into}; use crate::db::{AccountVaultValue, schema}; use crate::errors::DatabaseError; -/// Select the latest account details by account id from the DB using the given -/// [`SqliteConnection`]. +mod at_block; +pub(crate) use at_block::{ + select_account_header_at_block, + select_account_storage_at_block, + select_account_vault_at_block, +}; + +#[cfg(test)] +mod tests; + +type StorageMapValueRow = (i64, String, Vec<u8>, Vec<u8>); + +// ACCOUNT CODE +// ================================================================================================ + +/// Select account code by its commitment hash from the `account_codes` table. /// /// # Returns /// -/// The latest account details, or an error. +/// The account code bytes if found, or `None` if no code exists with that commitment. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT code FROM account_codes WHERE code_commitment = ?1 +/// ``` +pub(crate) fn select_account_code_by_commitment( + conn: &mut SqliteConnection, + code_commitment: Word, +) -> Result<Option<Vec<u8>>, DatabaseError> { + use schema::account_codes; + + let code_commitment_bytes = code_commitment.to_bytes(); + + let result: Option<Vec<u8>> = SelectDsl::select( + account_codes::table.filter(account_codes::code_commitment.eq(&code_commitment_bytes)), + account_codes::code, + ) + .first(conn) + .optional()?; + + Ok(result) +} + +// ACCOUNT RETRIEVAL +// ================================================================================================ + +/// Select account by ID from the DB using the given [`SqliteConnection`]. +/// +/// # Returns +/// +/// The latest account info, or an error. /// /// # Raw SQL /// @@ -59,46 +107,107 @@ use crate::errors::DatabaseError; /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// account_id = ?1 +/// AND is_latest = 1 /// ``` pub(crate) fn select_account( conn: &mut SqliteConnection, account_id: AccountId, ) -> Result<AccountInfo, DatabaseError> { - let raw = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), + let raw = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .get_result::<AccountSummaryRaw>(conn) + .optional()? + .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; + + let summary: AccountSummary = raw.try_into()?; + + // Backfill account details from database + // For private accounts, we don't store full details in the database + let details = if account_id.has_public_state() { + Some(select_full_account(conn, account_id)?)
+ } else { + None + }; + + Ok(AccountInfo { summary, details }) +} + +/// Reconstruct full Account from database tables for the latest account state. +/// +/// This function queries the database tables to reconstruct a complete Account object: +/// - Code from `account_codes` table +/// - Nonce and storage header from `accounts` table +/// - Storage map entries from `account_storage_map_values` table +/// - Vault from `account_vault_assets` table +/// +/// # Note +/// +/// A stop-gap solution to retain store API and construct `AccountInfo` types. +/// The function should ultimately be removed, and any queries be served from the +/// `State` which contains an `SmtForest` to serve the latest and most recent +/// historical data. +// TODO: remove eventually once refactoring is complete +fn select_full_account( + conn: &mut SqliteConnection, + account_id: AccountId, ) -> Result<Account, DatabaseError> { + // Get account metadata (nonce, code_commitment) and code in a single join query + let (nonce, code_bytes): (Option<i64>, Vec<u8>) = SelectDsl::select( + schema::accounts::table.inner_join(schema::account_codes::table), + (schema::accounts::nonce, schema::account_codes::code), ) .filter(schema::accounts::account_id.eq(account_id.to_bytes())) - .get_result::<(AccountRaw, Option<Vec<u8>>)>(conn) + .filter(schema::accounts::is_latest.eq(true)) + .get_result(conn) .optional()? .ok_or(DatabaseError::AccountNotFoundInDb(account_id))?; - let info = AccountWithCodeRawJoined::from(raw).try_into()?; - Ok(info) + + let nonce = raw_sql_to_nonce(nonce.ok_or_else(|| { + DatabaseError::DataCorrupted(format!("No nonce found for account {account_id}")) + })?); + + let code = AccountCode::read_from_bytes(&code_bytes)?; + + // Reconstruct storage using existing helper function + let storage = select_latest_account_storage(conn, account_id)?; + + // Reconstruct vault from account_vault_assets table + let vault_entries: Vec<(Vec<u8>, Option<Vec<u8>>)> = SelectDsl::select( + schema::account_vault_assets::table, + (schema::account_vault_assets::vault_key, schema::account_vault_assets::asset), + ) + .filter(schema::account_vault_assets::account_id.eq(account_id.to_bytes())) + .filter(schema::account_vault_assets::is_latest.eq(true)) + .load(conn)?; + + let mut assets = Vec::new(); + for (_key_bytes, maybe_asset_bytes) in vault_entries { + if let Some(asset_bytes) = maybe_asset_bytes { + let asset = Asset::read_from_bytes(&asset_bytes)?; + assets.push(asset); + } + } + + let vault = AssetVault::new(&assets)?; + + Ok(Account::new(account_id, vault, storage, code, nonce, None)?) } -/// Select the latest account details by account ID prefix from the DB using the given -/// [`SqliteConnection`] This method is meant to be used by the network transaction builder. Because -/// network notes get matched through accounts through the account's 30-bit prefix, it is possible -/// that multiple accounts match against a single prefix. In this scenario, the first account is -/// returned. +/// Select the latest account info by account ID prefix from the DB using the given +/// [`SqliteConnection`]. Meant to be used by the network transaction builder. +/// Because network notes get matched to accounts through the account's 30-bit prefix, it is +/// possible that multiple accounts match against a single prefix. In this scenario, the first +/// account is returned. /// /// # Returns /// -/// The latest account details, `None` if the account was not found, or an error. +/// The latest account info, `None` if the account was not found, or an error.
/// /// # Raw SQL /// /// ```sql /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment /// WHERE /// network_account_id_prefix = ?1 +/// AND is_latest = 1 /// ``` pub(crate) fn select_account_by_id_prefix( conn: &mut SqliteConnection, id_prefix: u32, ) -> Result<Option<AccountInfo>, DatabaseError> { - let maybe_info = SelectDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) - .get_result::<(AccountRaw, Option<Vec<u8>>)>(conn) - .optional() - .map_err(DatabaseError::Diesel)?; - - let result: Result<Option<AccountInfo>, DatabaseError> = maybe_info - .map(AccountWithCodeRawJoined::from) - .map(std::convert::TryInto::<AccountInfo>::try_into) - .transpose(); - - result + let maybe_summary = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::network_account_id_prefix.eq(Some(i64::from(id_prefix)))) + .get_result::<AccountSummaryRaw>(conn) + .optional() + .map_err(DatabaseError::Diesel)?; + + match maybe_summary { + None => Ok(None), + Some(raw) => { + let summary: AccountSummary = raw.try_into()?; + let account_id = summary.account_id; + // Backfill account details from database + let details = select_full_account(conn, account_id).ok(); + Ok(Some(AccountInfo { summary, details })) + }, + } } /// Select all account commitments from the DB using the given [`SqliteConnection`]. @@ -156,6 +259,8 @@ pub(crate) fn select_account_by_id_prefix( /// account_commitment /// FROM /// accounts +/// WHERE +/// is_latest = 1 /// ORDER BY /// block_num ASC /// ``` @@ -166,6 +271,7 @@ pub(crate) fn select_all_account_commitments( schema::accounts::table, (schema::accounts::account_id, schema::accounts::account_commitment), ) + .filter(schema::accounts::is_latest.eq(true)) .order_by(schema::accounts::block_num.asc()) .load::<(Vec<u8>, Vec<u8>)>(conn)?; @@ -176,6 +282,48 @@ pub(crate) fn select_all_account_commitments( )) } +/// Select all account IDs that have public state. +/// +/// This filters accounts in-memory after loading only the account IDs (not commitments), +/// which is more efficient than loading full commitments when only IDs are needed. +/// +/// # Raw SQL +/// +/// ```sql +/// SELECT +/// account_id +/// FROM +/// accounts +/// WHERE +/// is_latest = 1 +/// ORDER BY +/// block_num ASC +/// ``` +#[allow(dead_code)] // Will be used by InnerForest in next PR +pub(crate) fn select_all_public_account_ids( + conn: &mut SqliteConnection, +) -> Result<Vec<AccountId>, DatabaseError> { + // We could technically use a `LIKE` constraint for both postgres and sqlite backends, + // but diesel doesn't expose that.
+ let raw: Vec<Vec<u8>> = + SelectDsl::select(schema::accounts::table, schema::accounts::account_id) + .filter(schema::accounts::is_latest.eq(true)) + .order_by(schema::accounts::block_num.asc()) + .load::<Vec<u8>>(conn)?; + + Result::from_iter( + raw.into_iter() + .map(|bytes| { + AccountId::read_from_bytes(&bytes).map_err(DatabaseError::DeserializationError) + }) + .filter_map(|result| match result { + Ok(id) if id.has_public_state() => Some(Ok(id)), + Ok(_) => None, + Err(e) => Some(Err(e)), + }), + ) +} + /// Select account vault assets within a block range (inclusive). /// /// # Parameters @@ -211,9 +359,8 @@ pub(crate) fn select_account_vault_assets( use schema::account_vault_assets as t; // TODO: These limits should be given by the protocol. // See miden-base/issues/1770 for more details - const MAX_PAYLOAD_BYTES: usize = 2 * 1024 * 1024; // 2 MB const ROW_OVERHEAD_BYTES: usize = 2 * size_of::<Word>() + size_of::<BlockNumber>(); // key + asset + block_num - const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; if !account_id.is_public() { return Err(DatabaseError::AccountNotPublic(account_id)); @@ -240,7 +387,7 @@ pub(crate) fn select_account_vault_assets( // Discard the last block in the response (assumes more than one block may be present) let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last() - && raw.len() >= MAX_ROWS + && raw.len() > MAX_ROWS { // NOTE: If the query contains at least one more row than the amount of storage map updates // allowed in a single block for an account, then the response is guaranteed to have at @@ -318,16 +465,11 @@ pub fn select_accounts_by_block_range( /// SELECT /// accounts.account_id, /// accounts.account_commitment, -/// accounts.block_num, -/// accounts.storage, -/// accounts.vault, -/// accounts.nonce, -/// accounts.code_commitment, -/// account_codes.code +/// accounts.block_num /// FROM /// accounts -/// LEFT JOIN -/// account_codes ON accounts.code_commitment = account_codes.code_commitment +/// WHERE +/// is_latest = 1 /// ORDER BY /// block_num ASC /// ``` @@ -335,23 +477,105 @@ pub fn select_accounts_by_block_range( pub(crate) fn select_all_accounts( conn: &mut SqliteConnection, ) -> Result<Vec<AccountInfo>, DatabaseError> { - let accounts_raw = QueryDsl::select( - schema::accounts::table.left_join(schema::account_codes::table.on( - schema::accounts::code_commitment.eq(schema::account_codes::code_commitment.nullable()), - )), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .load::<(AccountRaw, Option<Vec<u8>>)>(conn)?; - let account_infos = vec_raw_try_into::( - accounts_raw.into_iter().map(AccountWithCodeRawJoined::from), - )?; + let raw = SelectDsl::select(schema::accounts::table, AccountSummaryRaw::as_select()) + .filter(schema::accounts::is_latest.eq(true)) + .order_by(schema::accounts::block_num.asc()) + .load::<AccountSummaryRaw>(conn)?; + + let summaries: Vec<AccountSummary> = vec_raw_try_into(raw)?; + + // Backfill account details from database + let account_infos = summaries + .into_iter() + .map(|summary| { + let account_id = summary.account_id; + let details = select_full_account(conn, account_id).ok(); + AccountInfo { summary, details } + }) + .collect(); + Ok(account_infos) } +/// Returns network account IDs within the specified block range (based on account creation +/// block). +/// +/// The function may return fewer accounts than exist in the range if the result would exceed +/// `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows.
In this case, the result is +/// truncated at a block boundary to ensure all accounts from included blocks are returned. +/// +/// # Returns +/// +/// A tuple containing: +/// - A vector of network account IDs. +/// - The last block number that was fully included in the result. When truncated, this will be less +/// than the requested range end. +pub(crate) fn select_all_network_account_ids( + conn: &mut SqliteConnection, + block_range: RangeInclusive<BlockNumber>, +) -> Result<(Vec<AccountId>, BlockNumber), DatabaseError> { + const ROW_OVERHEAD_BYTES: usize = AccountId::SERIALIZED_SIZE; + const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + + const _: () = assert!( + MAX_ROWS > miden_protocol::MAX_ACCOUNTS_PER_BLOCK, + "Block pagination limit must exceed maximum block capacity to uphold assumed logic invariant" + ); + + if block_range.is_empty() { + return Err(DatabaseError::InvalidBlockRange { + from: *block_range.start(), + to: *block_range.end(), + }); + } + + let account_ids_raw: Vec<(Vec<u8>, i64)> = Box::new( + QueryDsl::select( + schema::accounts::table + .filter(schema::accounts::network_account_id_prefix.is_not_null()), + (schema::accounts::account_id, schema::accounts::created_at_block), + ) + .filter( + schema::accounts::block_num + .between(block_range.start().to_raw_sql(), block_range.end().to_raw_sql()), + ) + .order(schema::accounts::created_at_block.asc()) + .limit(i64::try_from(MAX_ROWS + 1).expect("limit fits within i64")), + ) + .load::<(Vec<u8>, i64)>(conn)?; + + if account_ids_raw.len() > MAX_ROWS { + // SAFETY: We just checked that len > MAX_ROWS, so the vec is not empty. + let last_created_at_block = account_ids_raw.last().expect("vec is not empty").1; + + let account_ids = account_ids_raw + .into_iter() + .take_while(|(_, created_at_block)| *created_at_block != last_created_at_block) + .map(|(id_bytes, _)| { + AccountId::read_from_bytes(&id_bytes).map_err(DatabaseError::DeserializationError) + }) + .collect::<Result<Vec<_>, DatabaseError>>()?; + + let last_block_included = + BlockNumber::from_raw_sql(last_created_at_block.saturating_sub(1))?; + + Ok((account_ids, last_block_included)) + } else { + let account_ids = account_ids_raw + .into_iter() + .map(|(id_bytes, _)| { + AccountId::read_from_bytes(&id_bytes).map_err(DatabaseError::DeserializationError) + }) + .collect::<Result<Vec<_>, DatabaseError>>()?; + + Ok((account_ids, *block_range.end())) + } +} + #[derive(Debug, Clone, PartialEq, Eq)] pub struct StorageMapValue { pub block_num: BlockNumber, - pub slot_index: u8, + pub slot_name: StorageSlotName, pub key: Word, pub value: Word, } @@ -365,11 +589,11 @@ pub struct StorageMapValuesPage { } impl StorageMapValue { - pub fn from_raw_row(row: (i64, i32, Vec<u8>, Vec<u8>)) -> Result<Self, DatabaseError> { - let (block_num, slot_index, key, value) = row; + pub fn from_raw_row(row: StorageMapValueRow) -> Result<Self, DatabaseError> { + let (block_num, slot_name, key, value) = row; Ok(Self { block_num: BlockNumber::from_raw_sql(block_num)?, - slot_index: raw_sql_to_slot(slot_index), + slot_name: StorageSlotName::from_raw_sql(slot_name)?, key: Word::read_from_bytes(&key)?, value: Word::read_from_bytes(&value)?, }) @@ -380,7 +604,7 @@ impl StorageMapValue { /// /// # Returns /// -/// A vector of tuples containing `(slot, key, value, is_latest_update)` for the given account. +/// A vector of tuples containing `(slot, key, value, is_latest)` for the given account.
/// Each row contains one of: /// /// - the historical value for a slot and key specifically on block `block_to` @@ -426,10 +650,9 @@ pub(crate) fn select_account_storage_map_values( // TODO: These limits should be given by the protocol. // See miden-base/issues/1770 for more details - pub const MAX_PAYLOAD_BYTES: usize = 2 * 1024 * 1024; // 2 MB pub const ROW_OVERHEAD_BYTES: usize = 2 * size_of::<Word>() + size_of::<BlockNumber>() + size_of::<u8>(); // key + value + block_num + slot_idx - pub const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; + pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES; if !account_id.is_public() { return Err(DatabaseError::AccountNotPublic(account_id)); @@ -442,8 +665,8 @@ pub(crate) fn select_account_storage_map_values( }); } - let raw: Vec<(i64, i32, Vec<u8>, Vec<u8>)> = - SelectDsl::select(t::table, (t::block_num, t::slot, t::key, t::value)) + let raw: Vec<StorageMapValueRow> = + SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value)) .filter( t::account_id .eq(account_id.to_bytes()) @@ -457,7 +680,7 @@ pub(crate) fn select_account_storage_map_values( // Discard the last block in the response (assumes more than one block may be present) let (last_block_included, values) = if let Some(&(last_block_num, ..)) = raw.last() - && raw.len() >= MAX_ROWS + && raw.len() > MAX_ROWS { // NOTE: If the query contains at least one more row than the amount of storage map updates // allowed in a single block for an account, then the response is guaranteed to have at @@ -480,6 +703,76 @@ pub(crate) fn select_account_storage_map_values( Ok(StorageMapValuesPage { last_block_included, values }) } +/// Select latest account storage by querying `accounts.storage_header` where `is_latest=true` +/// and reconstructing full storage from the header plus map values from +/// `account_storage_map_values`. +pub(crate) fn select_latest_account_storage( + conn: &mut SqliteConnection, + account_id: AccountId, +) -> Result<AccountStorage, DatabaseError> { + use schema::account_storage_map_values as t; + + let account_id_bytes = account_id.to_bytes(); + + // Query storage header blob for this account where is_latest = true + let storage_blob: Option<Vec<u8>> = + SelectDsl::select(schema::accounts::table, schema::accounts::storage_header) + .filter(schema::accounts::account_id.eq(&account_id_bytes)) + .filter(schema::accounts::is_latest.eq(true)) + .first(conn) + .optional()?
+ .flatten(); + + let Some(blob) = storage_blob else { + // No storage means empty storage + return Ok(AccountStorage::new(Vec::new())?); + }; + + // Deserialize the AccountStorageHeader from the blob + let header = AccountStorageHeader::read_from_bytes(&blob)?; + + // Query all latest map values for this account + let map_values: Vec<(String, Vec<u8>, Vec<u8>)> = + SelectDsl::select(t::table, (t::slot_name, t::key, t::value)) + .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::is_latest.eq(true)) + .load(conn)?; + + // Group map values by slot name + let mut map_entries_by_slot: BTreeMap<StorageSlotName, Vec<(Word, Word)>> = BTreeMap::new(); + for (slot_name_str, key_bytes, value_bytes) in map_values { + let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| { + DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}")) + })?; + let key = Word::read_from_bytes(&key_bytes)?; + let value = Word::read_from_bytes(&value_bytes)?; + map_entries_by_slot.entry(slot_name).or_default().push((key, value)); + } + + // Reconstruct StorageSlots from header slots + map entries + let mut slots = Vec::new(); + for slot_header in header.slots() { + let slot = match slot_header.slot_type() { + StorageSlotType::Value => { + // For value slots, the header value IS the slot value + StorageSlot::with_value(slot_header.name().clone(), slot_header.value()) + }, + StorageSlotType::Map => { + // For map slots, reconstruct from map entries + let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default(); + let storage_map = StorageMap::with_entries(entries)?; + StorageSlot::with_map(slot_header.name().clone(), storage_map) + }, + }; + slots.push(slot); + } + + Ok(AccountStorage::new(slots)?) +} + +// ACCOUNT MUTATION +// ================================================================================================ + #[derive(Queryable, Selectable)] #[diesel(table_name = crate::db::schema::account_vault_assets)] #[diesel(check_for_backend(diesel::sqlite::Sqlite))] @@ -501,73 +794,6 @@ impl TryFrom for AccountVaultValue { } } -#[derive(Debug, Clone, Queryable, QueryableByName, Selectable)] -#[diesel(table_name = schema::accounts)] -#[diesel(check_for_backend(diesel::sqlite::Sqlite))] -pub struct AccountRaw { - pub account_id: Vec<u8>, - pub account_commitment: Vec<u8>, - pub block_num: i64, - pub storage: Option<Vec<u8>>, - pub vault: Option<Vec<u8>>, - pub nonce: Option<i64>, -} - -#[derive(Debug, Clone, QueryableByName)] -pub struct AccountWithCodeRawJoined { - #[diesel(embed)] - pub account: AccountRaw, - #[diesel(embed)] - pub code: Option<Vec<u8>>, -} - -impl From<(AccountRaw, Option<Vec<u8>>)> for AccountWithCodeRawJoined { - fn from((account, code): (AccountRaw, Option<Vec<u8>>)) -> Self { - Self { account, code } - } -} - -impl TryInto<AccountInfo> for AccountWithCodeRawJoined { - type Error = DatabaseError; - fn try_into(self) -> Result<AccountInfo, Self::Error> { - use proto::domain::account::{AccountInfo, AccountSummary}; - - let account_id = AccountId::read_from_bytes(&self.account.account_id[..])?; - let account_commitment = Word::read_from_bytes(&self.account.account_commitment[..])?; - let block_num = BlockNumber::from_raw_sql(self.account.block_num)?; - let summary = AccountSummary { - account_id, - account_commitment, - block_num, - }; - let maybe_account = self.try_into()?; - Ok(AccountInfo { summary, details: maybe_account }) - } -} - -impl TryInto<Option<Account>> for AccountWithCodeRawJoined { - type Error = DatabaseError; - fn try_into(self) -> Result<Option<Account>, Self::Error> { - let account_id = AccountId::read_from_bytes(&self.account.account_id[..])?; - - let details = if let (Some(vault),
Some(storage), Some(nonce), Some(code)) = - (self.account.vault, self.account.storage, self.account.nonce, self.code) - { - let vault = AssetVault::read_from_bytes(&vault)?; - let storage = AccountStorage::read_from_bytes(&storage)?; - let code = AccountCode::read_from_bytes(&code)?; - let nonce = raw_sql_to_nonce(nonce); - let nonce = Felt::new(nonce); - let account = Account::new_unchecked(account_id, vault, storage, code, nonce, None); - Some(account) - } else { - // a private account - None - }; - Ok(details) - } -} - #[derive(Debug, Clone, PartialEq, Eq, Selectable, Queryable, QueryableByName)] #[diesel(table_name = schema::accounts)] #[diesel(check_for_backend(Sqlite))] @@ -594,8 +820,8 @@ impl TryInto<AccountSummary> for AccountSummaryRaw { /// Insert an account vault asset row into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest_update=true` for the new row and update any existing -/// row with the same `(account_id, vault_key)` tuple to `is_latest_update=false`. +/// Sets `is_latest=true` for the new row and updates any existing +/// row with the same `(account_id, vault_key)` tuple to `is_latest=false`. /// /// # Returns /// @@ -611,16 +837,16 @@ pub(crate) fn insert_account_vault_asset( diesel::Connection::transaction(conn, |conn| { // First, update any existing rows with the same (account_id, vault_key) to set - // is_latest_update=false + // is_latest=false let vault_key: Word = vault_key.into(); let update_count = diesel::update(schema::account_vault_assets::table) .filter( schema::account_vault_assets::account_id .eq(&account_id.to_bytes()) .and(schema::account_vault_assets::vault_key.eq(&vault_key.to_bytes())) - .and(schema::account_vault_assets::is_latest_update.eq(true)), + .and(schema::account_vault_assets::is_latest.eq(true)), ) - .set(schema::account_vault_assets::is_latest_update.eq(false)) + .set(schema::account_vault_assets::is_latest.eq(false)) .execute(conn)?; // Insert the new latest row @@ -634,8 +860,8 @@ pub(crate) fn insert_account_vault_asset( /// Insert an account storage map value into the DB using the given [`SqliteConnection`]. /// -/// This function will set `is_latest_update=true` for the new row and update any existing -/// row with the same `(account_id, slot, key)` tuple to `is_latest_update=false`. +/// Sets `is_latest=true` for the new row and updates any existing +/// row with the same `(account_id, slot_name, key)` tuple to `is_latest=false`.
/// /// # Returns /// @@ -644,34 +870,34 @@ pub(crate) fn insert_account_storage_map_value( conn: &mut SqliteConnection, account_id: AccountId, block_num: BlockNumber, - slot: u8, + slot_name: StorageSlotName, key: Word, value: Word, ) -> Result<usize, DatabaseError> { let account_id = account_id.to_bytes(); let key = key.to_bytes(); let value = value.to_bytes(); - let slot = slot_to_raw_sql(slot); + let slot_name = slot_name.to_raw_sql(); let block_num = block_num.to_raw_sql(); let update_count = diesel::update(schema::account_storage_map_values::table) .filter( schema::account_storage_map_values::account_id .eq(&account_id) - .and(schema::account_storage_map_values::slot.eq(slot)) + .and(schema::account_storage_map_values::slot_name.eq(&slot_name)) .and(schema::account_storage_map_values::key.eq(&key)) - .and(schema::account_storage_map_values::is_latest_update.eq(true)), + .and(schema::account_storage_map_values::is_latest.eq(true)), ) - .set(schema::account_storage_map_values::is_latest_update.eq(false)) + .set(schema::account_storage_map_values::is_latest.eq(false)) .execute(conn)?; let record = AccountStorageMapRowInsert { account_id, key, value, - slot, + slot_name, block_num, - is_latest_update: true, + is_latest: true, }; let insert_count = diesel::insert_into(schema::account_storage_map_values::table) .values(record) @@ -689,44 +915,40 @@ pub(crate) fn upsert_accounts( ) -> Result<usize, DatabaseError> { use proto::domain::account::NetworkAccountPrefix; - fn select_details_stmt( - conn: &mut SqliteConnection, - account_id: AccountId, - ) -> Result<Vec<Account>, DatabaseError> { - let account_id = account_id.to_bytes(); - let accounts = SelectDsl::select( - schema::accounts::table.left_join( - schema::account_codes::table.on(schema::accounts::code_commitment - .eq(schema::account_codes::code_commitment.nullable())), - ), - (AccountRaw::as_select(), schema::account_codes::code.nullable()), - ) - .filter(schema::accounts::account_id.eq(account_id)) - .get_results::<(AccountRaw, Option<Vec<u8>>)>(conn)?; - - // SELECT .. FROM accounts LEFT JOIN account_codes - // ON accounts.code_commitment == account_codes.code_commitment - - let accounts = Result::from_iter(accounts.into_iter().filter_map(|x| { - let account_with_code = AccountWithCodeRawJoined::from(x); - account_with_code.try_into().transpose() - }))?; - Ok(accounts) - } - let mut count = 0; for update in accounts { let account_id = update.account_id(); - // Extract the 30-bit prefix to provide easy look ups for NTB - // Do not store prefix for accounts that are not network + let account_id_bytes = account_id.to_bytes(); + let block_num_raw = block_num.to_raw_sql(); + let network_account_id_prefix = if account_id.is_network() { Some(NetworkAccountPrefix::try_from(account_id)?) } else { None }; - let full_account: Option<Account> = match update.details() { - AccountUpdateDetails::Private => None, + // Preserve the original creation block when updating existing accounts. + let created_at_block = QueryDsl::select( + schema::accounts::table.filter( + schema::accounts::account_id + .eq(&account_id_bytes) + .and(schema::accounts::is_latest.eq(true)), + ), + schema::accounts::created_at_block, + ) + .first::<i64>(conn) + .optional() + .map_err(DatabaseError::Diesel)? + .unwrap_or(block_num_raw); + + // NOTE: we collect storage / asset inserts to apply them only after the account row is + // written. The storage and vault tables have FKs pointing to `accounts (account_id, + // block_num)`, so inserting them earlier would violate those constraints when inserting a + // brand-new account.
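As an aside on the NOTE above: the ordering requirement comes from the composite foreign keys introduced in the new `up.sql`, which reference `accounts (account_id, block_num)`. A minimal SQL sketch of the failure mode, assuming foreign-key enforcement is enabled; the blob values and slot name are made up for illustration and are not part of this diff:

```sql
-- Hypothetical illustration only; table shapes follow the migration above.
PRAGMA foreign_keys = ON;

-- Fails with "FOREIGN KEY constraint failed": no parent row exists yet in
-- `accounts` for the composite key (account_id, block_num).
INSERT INTO account_storage_map_values
    (account_id, block_num, slot_name, key, value, is_latest)
VALUES (x'0a', 7, 'example_slot', x'00', x'01', 1);

-- Correct order: write the parent account row first, then the child rows,
-- ideally inside a single transaction.
INSERT INTO accounts (account_id, block_num, account_commitment, is_latest, created_at_block)
VALUES (x'0a', 7, x'ff', 1, 7);

INSERT INTO account_storage_map_values
    (account_id, block_num, slot_name, key, value, is_latest)
VALUES (x'0a', 7, 'example_slot', x'00', x'01', 1);
```

This is why the code below buffers `pending_storage_inserts` and `pending_asset_inserts` and flushes them only after the `accounts` row is written.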
+ let (full_account, pending_storage_inserts, pending_asset_inserts) = match update.details() + { + AccountUpdateDetails::Private => (None, vec![], vec![]), + AccountUpdateDetails::Delta(delta) if delta.is_full_state() => { let account = Account::try_from(delta)?; debug_assert_eq!(account_id, account.id()); @@ -738,69 +960,60 @@ pub(crate) fn upsert_accounts( }); } - for (slot_idx, slot) in account.storage().slots().iter().enumerate() { - match slot { - StorageSlot::Value(_) => {}, - StorageSlot::Map(storage_map) => { - for (key, value) in storage_map.entries() { - // SAFETY: We can safely unwrap the conversion to u8 because - // accounts have a limit of 255 storage elements - insert_account_storage_map_value( - conn, - account_id, - block_num, - u8::try_from(slot_idx).unwrap(), - *key, - *value, - )?; - } - }, + // collect storage-map inserts to apply after account upsert + let mut storage = Vec::new(); + for slot in account.storage().slots() { + if let StorageSlotContent::Map(storage_map) = slot.content() { + for (key, value) in storage_map.entries() { + storage.push((account_id, slot.name().clone(), *key, *value)); + } + } + } + + // collect vault-asset inserts to apply after account upsert + let mut assets = Vec::new(); + for asset in account.vault().assets() { + // Only insert assets with non-zero values for fungible assets + let should_insert = match asset { + Asset::Fungible(fungible) => fungible.amount() > 0, + Asset::NonFungible(_) => true, + }; + if should_insert { + assets.push((account_id, asset.vault_key(), Some(asset))); } } - Some(account) + (Some(account), storage, assets) }, + AccountUpdateDetails::Delta(delta) => { - let mut rows = select_details_stmt(conn, account_id)?.into_iter(); - let Some(account) = rows.next() else { - return Err(DatabaseError::AccountNotFoundInDb(account_id)); - }; + // Reconstruct the full account from database tables + let account = select_full_account(conn, account_id)?; - // --- process storage map updates ---------------------------- + // --- collect storage map updates ---------------------------- - for (&slot, map_delta) in delta.storage().maps() { + let mut storage = Vec::new(); + for (slot_name, map_delta) in delta.storage().maps() { for (key, value) in map_delta.entries() { - insert_account_storage_map_value( - conn, - account_id, - block_num, - slot, - (*key).into(), - *value, - )?; + storage.push((account_id, slot_name.clone(), (*key).into(), *value)); } } // apply delta to the account; we need to do this before we process asset updates // because we currently need to get the current value of fungible assets from the // account - let account = apply_delta(account, delta, &update.final_state_commitment())?; + let account_after = apply_delta(account, delta, &update.final_state_commitment())?; // --- process asset updates ---------------------------------- + let mut assets = Vec::new(); + for (faucet_id, _) in delta.vault().fungible().iter() { - let current_amount = account.vault().get_balance(*faucet_id).unwrap(); + let current_amount = account_after.vault().get_balance(*faucet_id).unwrap(); let asset: Asset = FungibleAsset::new(*faucet_id, current_amount)?.into(); - let asset_update_or_removal = - if current_amount == 0 { None } else { Some(asset) }; - - insert_account_vault_asset( - conn, - account.id(), - block_num, - asset.vault_key(), - asset_update_or_removal, - )?; + let update_or_remove = if current_amount == 0 { None } else { Some(asset) }; + + assets.push((account_id, asset.vault_key(), update_or_remove)); } for (asset, 
delta_action) in delta.vault().non_fungible().iter() { @@ -808,16 +1021,10 @@ NonFungibleDeltaAction::Add => Some(Asset::NonFungible(*asset)), NonFungibleDeltaAction::Remove => None, }; - insert_account_vault_asset( - conn, - account.id(), - block_num, - asset.vault_key(), - asset_update, - )?; + assets.push((account_id, asset.vault_key(), asset_update)); } - Some(account) + (Some(account_after), storage, assets) }, }; @@ -833,31 +1040,49 @@ pub(crate) fn upsert_accounts( .execute(conn)?; } + // mark previous rows as non-latest and insert NEW account row + diesel::update(schema::accounts::table) + .filter( + schema::accounts::account_id + .eq(&account_id_bytes) + .and(schema::accounts::is_latest.eq(true)), + ) + .set(schema::accounts::is_latest.eq(false)) + .execute(conn)?; + let account_value = AccountRowInsert { - account_id: account_id.to_bytes(), + account_id: account_id_bytes, network_account_id_prefix: network_account_id_prefix .map(NetworkAccountPrefix::to_raw_sql), account_commitment: update.final_state_commitment().to_bytes(), - block_num: block_num.to_raw_sql(), + block_num: block_num_raw, nonce: full_account.as_ref().map(|account| nonce_to_raw_sql(account.nonce())), - storage: full_account.as_ref().map(|account| account.storage().to_bytes()), - vault: full_account.as_ref().map(|account| account.vault().to_bytes()), code_commitment: full_account .as_ref() .map(|account| account.code().commitment().to_bytes()), + // Store only the header (slot metadata + map roots), not full storage with map contents + storage_header: full_account + .as_ref() + .map(|account| account.storage().to_header().to_bytes()), + vault_root: full_account.as_ref().map(|account| account.vault().root().to_bytes()), + is_latest: true, + created_at_block, }; - let v = account_value.clone(); - let inserted = diesel::insert_into(schema::accounts::table) - .values(&v) - .on_conflict(schema::accounts::account_id) - .do_update() - .set(account_value) + diesel::insert_into(schema::accounts::table) + .values(&account_value) .execute(conn)?; - debug_assert_eq!(inserted, 1); + // insert pending storage map entries + for (acc_id, slot_name, key, value) in pending_storage_inserts { + insert_account_storage_map_value(conn, acc_id, block_num, slot_name, key, value)?; + } + + for (acc_id, vault_key, update) in pending_asset_inserts { + insert_account_vault_asset(conn, acc_id, block_num, vault_key, update)?; + } - count += inserted; + count += 1; } Ok(count) @@ -897,9 +1122,11 @@ pub(crate) struct AccountRowInsert { pub(crate) block_num: i64, pub(crate) account_commitment: Vec<u8>, pub(crate) code_commitment: Option<Vec<u8>>, - pub(crate) storage: Option<Vec<u8>>, - pub(crate) vault: Option<Vec<u8>>, pub(crate) nonce: Option<i64>, + pub(crate) storage_header: Option<Vec<u8>>, + pub(crate) vault_root: Option<Vec<u8>>, + pub(crate) is_latest: bool, + pub(crate) created_at_block: i64, } #[derive(Insertable, AsChangeset, Debug, Clone)] @@ -909,7 +1136,7 @@ pub(crate) struct AccountAssetRowInsert { pub(crate) block_num: i64, pub(crate) vault_key: Vec<u8>, pub(crate) asset: Option<Vec<u8>>, - pub(crate) is_latest_update: bool, + pub(crate) is_latest: bool, } impl AccountAssetRowInsert { @@ -918,7 +1145,7 @@ impl AccountAssetRowInsert { vault_key: &AssetVaultKey, block_num: BlockNumber, asset: Option<Asset>, - is_latest_update: bool, + is_latest: bool, ) -> Self { let account_id = account_id.to_bytes(); let vault_key: Word = (*vault_key).into(); @@ -930,7 +1157,7 @@ impl AccountAssetRowInsert { block_num, vault_key, asset, - is_latest_update, + is_latest, } } } @@
-940,8 +1167,8 @@ pub(crate) struct AccountStorageMapRowInsert { pub(crate) account_id: Vec<u8>, pub(crate) block_num: i64, - pub(crate) slot: i32, + pub(crate) slot_name: String, pub(crate) key: Vec<u8>, pub(crate) value: Vec<u8>, - pub(crate) is_latest_update: bool, + pub(crate) is_latest: bool, } diff --git a/crates/store/src/db/models/queries/accounts/at_block.rs b/crates/store/src/db/models/queries/accounts/at_block.rs new file mode 100644 index 000000000..dc613a9c6 --- /dev/null +++ b/crates/store/src/db/models/queries/accounts/at_block.rs @@ -0,0 +1,269 @@ +use std::collections::BTreeMap; + +use diesel::prelude::Queryable; +use diesel::query_dsl::methods::SelectDsl; +use diesel::{ + BoolExpressionMethods, + ExpressionMethods, + OptionalExtension, + QueryDsl, + RunQueryDsl, + SqliteConnection, +}; +use miden_protocol::account::{ + AccountHeader, + AccountId, + AccountStorage, + AccountStorageHeader, + StorageMap, + StorageSlot, + StorageSlotName, + StorageSlotType, +}; +use miden_protocol::asset::Asset; +use miden_protocol::block::BlockNumber; +use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{Felt, FieldElement, Word}; + +use crate::db::models::conv::{SqlTypeConvert, raw_sql_to_nonce}; +use crate::db::schema; +use crate::errors::DatabaseError; + +// ACCOUNT HEADER +// ================================================================================================ + +#[derive(Debug, Clone, Queryable)] +struct AccountHeaderDataRaw { + code_commitment: Option<Vec<u8>>, + nonce: Option<i64>, + storage_header: Option<Vec<u8>>, + vault_root: Option<Vec<u8>>, +} + +/// Queries the account header for a specific account at a specific block number. +/// +/// This reconstructs the `AccountHeader` by reading from the `accounts` table: +/// - `account_id`, `nonce`, `code_commitment`, `storage_header`, `vault_root` +/// +/// Returns `None` if the account doesn't exist at that block.
+/// +/// # Arguments +/// +/// * `conn` - Database connection +/// * `account_id` - The account ID to query +/// * `block_num` - The block number at which to query the account header +/// +/// # Returns +/// +/// * `Ok(Some((AccountHeader, AccountStorageHeader)))` - The account and storage headers if found +/// * `Ok(None)` - If account doesn't exist at that block +/// * `Err(DatabaseError)` - If there's a database error +pub(crate) fn select_account_header_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result<Option<(AccountHeader, AccountStorageHeader)>, DatabaseError> { + use schema::accounts; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + let account_data: Option<AccountHeaderDataRaw> = SelectDsl::select( + accounts::table + .filter(accounts::account_id.eq(&account_id_bytes)) + .filter(accounts::block_num.le(block_num_sql)) + .order(accounts::block_num.desc()) + .limit(1), + ( + accounts::code_commitment, + accounts::nonce, + accounts::storage_header, + accounts::vault_root, + ), + ) + .first(conn) + .optional()?; + + let Some(AccountHeaderDataRaw { + code_commitment: code_commitment_bytes, + nonce: nonce_raw, + storage_header: storage_header_blob, + vault_root: vault_root_bytes, + }) = account_data + else { + return Ok(None); + }; + + let (storage_commitment, storage_header) = match storage_header_blob { + Some(blob) => { + let header = AccountStorageHeader::read_from_bytes(&blob)?; + let commitment = header.to_commitment(); + (commitment, header) + }, + None => (Word::default(), AccountStorageHeader::new(Vec::new())?), + }; + + let code_commitment = code_commitment_bytes + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? + .unwrap_or(Word::default()); + + let nonce = nonce_raw.map_or(Felt::ZERO, raw_sql_to_nonce); + + let vault_root = vault_root_bytes + .map(|bytes| Word::read_from_bytes(&bytes)) + .transpose()? + .unwrap_or(Word::default()); + + Ok(Some(( + AccountHeader::new(account_id, nonce, vault_root, storage_commitment, code_commitment), + storage_header, + ))) +} + +// ACCOUNT VAULT +// ================================================================================================ + +/// Query vault assets at a specific block by finding the most recent update for each `vault_key`. +pub(crate) fn select_account_vault_at_block( + conn: &mut SqliteConnection, + account_id: AccountId, + block_num: BlockNumber, +) -> Result<Vec<Asset>, DatabaseError> { + use schema::account_vault_assets as t; + + let account_id_bytes = account_id.to_bytes(); + let block_num_sql = block_num.to_raw_sql(); + + // Since Diesel doesn't support composite keys in subqueries easily, we use a two-step approach: + // Step 1: Get max block_num for each vault_key + let latest_blocks_per_vault_key = Vec::from_iter( + QueryDsl::select( + t::table + .filter(t::account_id.eq(&account_id_bytes)) + .filter(t::block_num.le(block_num_sql)) + .group_by(t::vault_key), + (t::vault_key, diesel::dsl::max(t::block_num)), + ) + .load::<(Vec<u8>, Option<i64>)>(conn)?
+        .into_iter()
+        .filter_map(|(key, maybe_block)| maybe_block.map(|block| (key, block))),
+    );
+
+    if latest_blocks_per_vault_key.is_empty() {
+        return Ok(Vec::new());
+    }
+
+    // Step 2: Fetch the full rows matching (vault_key, block_num) pairs
+    let mut assets = Vec::new();
+    for (vault_key_bytes, max_block) in latest_blocks_per_vault_key {
+        // TODO: we should not make a query per vault key; instead, query many at once
+        // or find an alternative approach
+        let result: Option<Option<Vec<u8>>> = QueryDsl::select(
+            t::table.filter(
+                t::account_id
+                    .eq(&account_id_bytes)
+                    .and(t::vault_key.eq(&vault_key_bytes))
+                    .and(t::block_num.eq(max_block)),
+            ),
+            t::asset,
+        )
+        .first(conn)
+        .optional()?;
+        if let Some(Some(asset_bytes)) = result {
+            let asset = Asset::read_from_bytes(&asset_bytes)?;
+            assets.push(asset);
+        }
+    }
+
+    // Sort by vault_key for consistent ordering
+    assets.sort_by_key(Asset::vault_key);
+
+    Ok(assets)
+}
+
+// ACCOUNT STORAGE
+// ================================================================================================
+
+/// Returns account storage at a given block by reading from `accounts.storage_header`
+/// (which contains the `AccountStorageHeader`) and reconstructing full storage from
+/// map values in the `account_storage_map_values` table.
+pub(crate) fn select_account_storage_at_block(
+    conn: &mut SqliteConnection,
+    account_id: AccountId,
+    block_num: BlockNumber,
+) -> Result<AccountStorage, DatabaseError> {
+    use schema::account_storage_map_values as t;
+
+    let account_id_bytes = account_id.to_bytes();
+    let block_num_sql = block_num.to_raw_sql();
+
+    // Query storage header blob for this account at or before this block
+    let storage_blob: Option<Vec<u8>> =
+        SelectDsl::select(schema::accounts::table, schema::accounts::storage_header)
+            .filter(schema::accounts::account_id.eq(&account_id_bytes))
+            .filter(schema::accounts::block_num.le(block_num_sql))
+            .order(schema::accounts::block_num.desc())
+            .limit(1)
+            .first(conn)
+            .optional()?
+            .flatten();
+
+    let Some(blob) = storage_blob else {
+        // No storage means empty storage
+        return Ok(AccountStorage::new(Vec::new())?);
+    };
+
+    // Deserialize the AccountStorageHeader from the blob
+    let header = AccountStorageHeader::read_from_bytes(&blob)?;
+
+    // Query all map values for this account up to and including this block.
+    // For each (slot_name, key), we need the latest value at or before block_num.
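+    // Raw SQL sketch of the query below (illustrative, not verbatim):
+    //
+    //   SELECT block_num, slot_name, key, value
+    //   FROM account_storage_map_values
+    //   WHERE account_id = ?1 AND block_num <= ?2
+    //   ORDER BY slot_name ASC, key ASC, block_num DESC
+    //
+    // The descending block_num order means the first row seen per (slot_name, key)
+    // is the latest one, which the loop below relies on.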
+    // First, get all entries up to block_num
+    let map_values: Vec<(i64, String, Vec<u8>, Vec<u8>)> =
+        SelectDsl::select(t::table, (t::block_num, t::slot_name, t::key, t::value))
+            .filter(t::account_id.eq(&account_id_bytes).and(t::block_num.le(block_num_sql)))
+            .order((t::slot_name.asc(), t::key.asc(), t::block_num.desc()))
+            .load(conn)?;
+
+    // For each (slot_name, key) pair, keep only the latest entry (highest block_num)
+    let mut latest_map_entries: BTreeMap<(StorageSlotName, Word), Word> = BTreeMap::new();
+
+    for (_, slot_name_str, key_bytes, value_bytes) in map_values {
+        let slot_name: StorageSlotName = slot_name_str.parse().map_err(|_| {
+            DatabaseError::DataCorrupted(format!("Invalid slot name: {slot_name_str}"))
+        })?;
+        let key = Word::read_from_bytes(&key_bytes)?;
+        let value = Word::read_from_bytes(&value_bytes)?;
+
+        // Only insert if we haven't seen this (slot_name, key) yet
+        // (since results are ordered by block_num desc, first one is latest)
+        latest_map_entries.entry((slot_name, key)).or_insert(value);
+    }
+
+    // Group entries by slot name
+    let mut map_entries_by_slot: BTreeMap<StorageSlotName, Vec<(Word, Word)>> = BTreeMap::new();
+    for ((slot_name, key), value) in latest_map_entries {
+        map_entries_by_slot.entry(slot_name).or_default().push((key, value));
+    }
+
+    // Reconstruct StorageSlots from header slots + map entries
+    let mut slots = Vec::new();
+    for slot_header in header.slots() {
+        let slot = match slot_header.slot_type() {
+            StorageSlotType::Value => {
+                // For value slots, the header value IS the slot value
+                StorageSlot::with_value(slot_header.name().clone(), slot_header.value())
+            },
+            StorageSlotType::Map => {
+                // For map slots, reconstruct from map entries
+                let entries = map_entries_by_slot.remove(slot_header.name()).unwrap_or_default();
+                let storage_map = StorageMap::with_entries(entries)?;
+                StorageSlot::with_map(slot_header.name().clone(), storage_map)
+            },
+        };
+        slots.push(slot);
+    }
+
+    Ok(AccountStorage::new(slots)?)
+}
diff --git a/crates/store/src/db/models/queries/accounts/tests.rs b/crates/store/src/db/models/queries/accounts/tests.rs
new file mode 100644
index 000000000..67eb24c1f
--- /dev/null
+++ b/crates/store/src/db/models/queries/accounts/tests.rs
@@ -0,0 +1,552 @@
+//! Tests for the `accounts` module, specifically for account storage and historical queries.
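+//!
+//! All tests run against a fresh in-memory SQLite database with the full migration
+//! set applied (see `setup_test_db` below).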
+ +use diesel::query_dsl::methods::SelectDsl; +use diesel::{Connection, OptionalExtension, QueryDsl, RunQueryDsl}; +use diesel_migrations::MigrationHarness; +use miden_node_utils::fee::test_fee_params; +use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ + Account, + AccountBuilder, + AccountComponent, + AccountDelta, + AccountId, + AccountIdVersion, + AccountStorageMode, + AccountType, + StorageSlot, + StorageSlotName, +}; +use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::utils::Serializable; +use miden_protocol::{EMPTY_WORD, Felt, Word}; +use miden_standards::account::auth::AuthRpoFalcon512; +use miden_standards::code_builder::CodeBuilder; + +use super::*; +use crate::db::migrations::MIGRATIONS; + +fn setup_test_db() -> SqliteConnection { + let mut conn = + SqliteConnection::establish(":memory:").expect("Failed to create in-memory database"); + + conn.run_pending_migrations(MIGRATIONS).expect("Failed to run migrations"); + + conn +} + +fn create_test_account_with_storage() -> (Account, AccountId) { + // Create a simple public account with one value storage slot + let account_id = AccountId::dummy( + [1u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let storage_value = Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]); + let component_storage = vec![StorageSlot::with_value(StorageSlotName::mock(0), storage_value)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + (account, account_id) +} + +fn insert_block_header(conn: &mut SqliteConnection, block_num: BlockNumber) { + use crate::db::schema::block_headers; + + let block_header = BlockHeader::new( + 1_u8.into(), + Word::default(), + block_num, + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + Word::default(), + SecretKey::new().public_key(), + test_fee_params(), + 0_u8.into(), + ); + + diesel::insert_into(block_headers::table) + .values(( + block_headers::block_num.eq(i64::from(block_num.as_u32())), + block_headers::block_header.eq(block_header.to_bytes()), + )) + .execute(conn) + .expect("Failed to insert block header"); +} + +// ACCOUNT HEADER AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_header_at_block_returns_none_for_nonexistent() { + let mut conn = setup_test_db(); + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let account_id = AccountId::dummy( + [99u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + // Query for a non-existent account + let result = select_account_header_at_block(&mut conn, account_id, block_num) + 
.expect("Query should succeed"); + + assert!(result.is_none(), "Should return None for non-existent account"); +} + +#[test] +fn test_select_account_header_at_block_returns_correct_header() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + // Insert the account + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query the account header + let (header, _storage_header) = + select_account_header_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed") + .expect("Header should exist"); + + assert_eq!(header.id(), account_id, "Account ID should match"); + assert_eq!(header.nonce(), account.nonce(), "Nonce should match"); + assert_eq!( + header.code_commitment(), + account.code().commitment(), + "Code commitment should match" + ); +} + +#[test] +fn test_select_account_header_at_block_historical_query() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num_1 = BlockNumber::from_epoch(0); + let block_num_2 = BlockNumber::from_epoch(1); + insert_block_header(&mut conn, block_num_1); + insert_block_header(&mut conn, block_num_2); + + // Insert the account at block 1 + let nonce_1 = account.nonce(); + let delta_1 = AccountDelta::try_from(account.clone()).unwrap(); + let account_update_1 = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta_1), + ); + + upsert_accounts(&mut conn, &[account_update_1], block_num_1).expect("First upsert failed"); + + // Query at block 1 - should return the account + let (header_1, _) = select_account_header_at_block(&mut conn, account_id, block_num_1) + .expect("Query should succeed") + .expect("Header should exist at block 1"); + + assert_eq!(header_1.nonce(), nonce_1, "Nonce at block 1 should match"); + + // Query at block 2 - should return the same account (most recent before block 2) + let (header_2, _) = select_account_header_at_block(&mut conn, account_id, block_num_2) + .expect("Query should succeed") + .expect("Header should exist at block 2"); + + assert_eq!(header_2.nonce(), nonce_1, "Nonce at block 2 should match block 1"); +} + +// ACCOUNT VAULT AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_vault_at_block_empty() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + // Insert account without vault assets + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query vault - should return empty (the test account has no assets) + let assets = select_account_vault_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); + + assert!(assets.is_empty(), "Account should have no 
assets"); +} + +// ACCOUNT STORAGE AT BLOCK TESTS +// ================================================================================================ + +#[test] +fn test_select_account_storage_at_block_returns_storage() { + let mut conn = setup_test_db(); + let (account, _) = create_test_account_with_storage(); + let account_id = account.id(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let original_storage_commitment = account.storage().to_commitment(); + + // Insert the account + let delta = AccountDelta::try_from(account.clone()).unwrap(); + let account_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(delta), + ); + + upsert_accounts(&mut conn, &[account_update], block_num).expect("upsert_accounts failed"); + + // Query storage + let storage = select_account_storage_at_block(&mut conn, account_id, block_num) + .expect("Query should succeed"); + + assert_eq!( + storage.to_commitment(), + original_storage_commitment, + "Storage commitment should match" + ); +} + +#[test] +fn test_upsert_accounts_inserts_storage_header() { + let mut conn = setup_test_db(); + let (account, account_id) = create_test_account_with_storage(); + + // Block 1 + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let storage_commitment_original = account.storage().to_commitment(); + let storage_slots_len = account.storage().slots().len(); + let account_commitment = account.commitment(); + + // Create full state delta from the account + let delta = AccountDelta::try_from(account).unwrap(); + assert!(delta.is_full_state(), "Delta should be full state"); + + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + // Upsert account + let result = upsert_accounts(&mut conn, &[account_update], block_num); + assert!(result.is_ok(), "upsert_accounts failed: {:?}", result.err()); + assert_eq!(result.unwrap(), 1, "Expected 1 account to be inserted"); + + // Query storage header back + let queried_storage = select_latest_account_storage(&mut conn, account_id) + .expect("Failed to query storage header"); + + // Verify storage commitment matches + assert_eq!( + queried_storage.to_commitment(), + storage_commitment_original, + "Storage commitment mismatch" + ); + + // Verify number of slots matches + assert_eq!(queried_storage.slots().len(), storage_slots_len, "Storage slots count mismatch"); + + // Verify exactly 1 latest account with storage exists + let header_count: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .filter(schema::accounts::storage_header.is_not_null()) + .count() + .get_result(&mut conn) + .expect("Failed to count accounts with storage"); + + assert_eq!(header_count, 1, "Expected exactly 1 latest account with storage"); +} + +#[test] +fn test_upsert_accounts_updates_is_latest_flag() { + let mut conn = setup_test_db(); + let (account, account_id) = create_test_account_with_storage(); + + // Block 1 and 2 + let block_num_1 = BlockNumber::from_epoch(0); + let block_num_2 = BlockNumber::from_epoch(1); + + insert_block_header(&mut conn, block_num_1); + insert_block_header(&mut conn, block_num_2); + + // Save storage commitment before moving account + let storage_commitment_1 = account.storage().to_commitment(); + let account_commitment_1 = account.commitment(); + + // First update with original account - full 
state delta + let delta_1 = AccountDelta::try_from(account).unwrap(); + + let account_update_1 = BlockAccountUpdate::new( + account_id, + account_commitment_1, + AccountUpdateDetails::Delta(delta_1), + ); + + upsert_accounts(&mut conn, &[account_update_1], block_num_1).expect("First upsert failed"); + + // Create modified account with different storage value + let storage_value_modified = + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]); + let component_storage_modified = + vec![StorageSlot::with_value(StorageSlotName::mock(0), storage_value_modified)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component_2 = AccountComponent::new(account_component_code, component_storage_modified) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account_2 = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component_2) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let storage_commitment_2 = account_2.storage().to_commitment(); + let account_commitment_2 = account_2.commitment(); + + // Second update with modified account - full state delta + let delta_2 = AccountDelta::try_from(account_2).unwrap(); + + let account_update_2 = BlockAccountUpdate::new( + account_id, + account_commitment_2, + AccountUpdateDetails::Delta(delta_2), + ); + + upsert_accounts(&mut conn, &[account_update_2], block_num_2).expect("Second upsert failed"); + + // Verify 2 total account rows exist (both historical records) + let total_accounts: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .count() + .get_result(&mut conn) + .expect("Failed to count total accounts"); + + assert_eq!(total_accounts, 2, "Expected 2 total account records"); + + // Verify only 1 is marked as latest + let latest_accounts: i64 = schema::accounts::table + .filter(schema::accounts::account_id.eq(account_id.to_bytes())) + .filter(schema::accounts::is_latest.eq(true)) + .count() + .get_result(&mut conn) + .expect("Failed to count latest accounts"); + + assert_eq!(latest_accounts, 1, "Expected exactly 1 latest account"); + + // Verify latest storage matches second update + let latest_storage = select_latest_account_storage(&mut conn, account_id) + .expect("Failed to query latest storage"); + + assert_eq!( + latest_storage.to_commitment(), + storage_commitment_2, + "Latest storage should match second update" + ); + + // Verify historical query returns first update + let storage_at_block_1 = select_account_storage_at_block(&mut conn, account_id, block_num_1) + .expect("Failed to query storage at block 1"); + + assert_eq!( + storage_at_block_1.to_commitment(), + storage_commitment_1, + "Storage at block 1 should match first update" + ); +} + +#[test] +fn test_upsert_accounts_with_multiple_storage_slots() { + let mut conn = setup_test_db(); + + // Create account with 3 storage slots + let account_id = AccountId::dummy( + [2u8; 15], + AccountIdVersion::Version0, + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + ); + + let slot_value_1 = Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]); + let slot_value_2 = Word::from([Felt::new(5), Felt::new(6), Felt::new(7), Felt::new(8)]); + let slot_value_3 = Word::from([Felt::new(9), 
Felt::new(10), Felt::new(11), Felt::new(12)]); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), slot_value_1), + StorageSlot::with_value(StorageSlotName::mock(1), slot_value_2), + StorageSlot::with_value(StorageSlotName::mock(2), slot_value_3), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", "pub proc foo push.1 end") + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountImmutableCode); + + let account = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let block_num = BlockNumber::from_epoch(0); + insert_block_header(&mut conn, block_num); + + let storage_commitment = account.storage().to_commitment(); + let account_commitment = account.commitment(); + let delta = AccountDelta::try_from(account).unwrap(); + + let account_update = + BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta)); + + upsert_accounts(&mut conn, &[account_update], block_num) + .expect("Upsert with multiple storage slots failed"); + + // Query back and verify + let queried_storage = + select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage"); + + assert_eq!( + queried_storage.to_commitment(), + storage_commitment, + "Storage commitment mismatch" + ); + + // Note: Auth component adds 1 storage slot, so 3 component slots + 1 auth = 4 total + assert_eq!( + queried_storage.slots().len(), + 4, + "Expected 4 storage slots (3 component + 1 auth)" + ); + + // The storage commitment matching proves that all values are correctly preserved. + // We don't check individual slot values by index since slot ordering may vary. 
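+    // (Illustrative, assuming slots expose their name via a `name()` accessor)
+    // a single slot could instead be looked up by name rather than by index, e.g.:
+    //   let slot = queried_storage.slots().iter().find(|s| s.name() == &StorageSlotName::mock(0));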
+}
+
+#[test]
+fn test_upsert_accounts_with_empty_storage() {
+    let mut conn = setup_test_db();
+
+    // Create account with no component storage slots (only auth slot)
+    let account_id = AccountId::dummy(
+        [3u8; 15],
+        AccountIdVersion::Version0,
+        AccountType::RegularAccountImmutableCode,
+        AccountStorageMode::Public,
+    );
+
+    let account_component_code = CodeBuilder::default()
+        .compile_component_code("test::interface", "pub proc foo push.1 end")
+        .unwrap();
+
+    let component = AccountComponent::new(account_component_code, vec![])
+        .unwrap()
+        .with_supported_type(AccountType::RegularAccountImmutableCode);
+
+    let account = AccountBuilder::new([3u8; 32])
+        .account_type(AccountType::RegularAccountImmutableCode)
+        .storage_mode(AccountStorageMode::Public)
+        .with_component(component)
+        .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD)))
+        .build_existing()
+        .unwrap();
+
+    let block_num = BlockNumber::from_epoch(0);
+    insert_block_header(&mut conn, block_num);
+
+    let storage_commitment = account.storage().to_commitment();
+    let account_commitment = account.commitment();
+    let delta = AccountDelta::try_from(account).unwrap();
+
+    let account_update =
+        BlockAccountUpdate::new(account_id, account_commitment, AccountUpdateDetails::Delta(delta));
+
+    upsert_accounts(&mut conn, &[account_update], block_num)
+        .expect("Upsert with empty storage failed");
+
+    // Query back and verify
+    let queried_storage =
+        select_latest_account_storage(&mut conn, account_id).expect("Failed to query storage");
+
+    assert_eq!(
+        queried_storage.to_commitment(),
+        storage_commitment,
+        "Storage commitment mismatch for empty storage"
+    );
+
+    // Note: Auth component adds 1 storage slot, so even "empty" accounts have 1 slot
+    assert_eq!(queried_storage.slots().len(), 1, "Expected 1 storage slot (auth component)");
+
+    // Verify the storage header blob exists in database
+    let storage_header_exists: Option<bool> = SelectDsl::select(
+        schema::accounts::table
+            .filter(schema::accounts::account_id.eq(account_id.to_bytes()))
+            .filter(schema::accounts::is_latest.eq(true)),
+        schema::accounts::storage_header.is_not_null(),
+    )
+    .first(&mut conn)
+    .optional()
+    .expect("Failed to check storage header existence");
+
+    assert_eq!(
+        storage_header_exists,
+        Some(true),
+        "Storage header blob should exist even for empty storage"
+    );
+}
diff --git a/crates/store/src/db/models/queries/block_headers.rs b/crates/store/src/db/models/queries/block_headers.rs
index 42ec3b0e5..ae12f18d5 100644
--- a/crates/store/src/db/models/queries/block_headers.rs
+++ b/crates/store/src/db/models/queries/block_headers.rs
@@ -11,9 +11,9 @@ use diesel::{
     SelectableHelper,
     SqliteConnection,
 };
-use miden_lib::utils::{Deserializable, Serializable};
 use miden_node_utils::limiter::{QueryParamBlockLimit, QueryParamLimiter};
-use miden_objects::block::{BlockHeader, BlockNumber};
+use miden_protocol::block::{BlockHeader, BlockNumber};
+use miden_protocol::utils::{Deserializable, Serializable};
 
 use super::DatabaseError;
 use crate::db::models::conv::SqlTypeConvert;
diff --git a/crates/store/src/db/models/queries/mod.rs b/crates/store/src/db/models/queries/mod.rs
index 0d40dd8c4..0f29b0015 100644
--- a/crates/store/src/db/models/queries/mod.rs
+++ b/crates/store/src/db/models/queries/mod.rs
@@ -31,10 +31,10 @@
 )]
 
 use diesel::SqliteConnection;
-use miden_objects::account::AccountId;
-use miden_objects::block::{BlockAccountUpdate, BlockHeader, BlockNumber};
-use miden_objects::note::Nullifier;
-use miden_objects::transaction::OrderedTransactionHeaders;
+use miden_protocol::account::AccountId;
+use miden_protocol::block::{BlockAccountUpdate, BlockHeader, BlockNumber};
+use miden_protocol::note::Nullifier;
+use miden_protocol::transaction::OrderedTransactionHeaders;
 
 use super::DatabaseError;
 use crate::db::{NoteRecord, StateSyncUpdate};
diff --git a/crates/store/src/db/models/queries/notes.rs b/crates/store/src/db/models/queries/notes.rs
index 01c981dc1..0daed04c1 100644
--- a/crates/store/src/db/models/queries/notes.rs
+++ b/crates/store/src/db/models/queries/notes.rs
@@ -3,7 +3,7 @@
     reason = "We will not approach the item count where i64 and usize cause issues"
 )]
 
-use std::collections::{BTreeMap, BTreeSet};
+use std::collections::{BTreeMap, BTreeSet, HashSet};
 use std::ops::RangeInclusive;
 
 use diesel::prelude::{
@@ -25,17 +25,16 @@ use diesel::{
     SelectableHelper,
     SqliteConnection,
 };
-use miden_lib::utils::{Deserializable, Serializable};
 use miden_node_utils::limiter::{
     QueryParamAccountIdLimit,
     QueryParamLimiter,
     QueryParamNoteCommitmentLimit,
     QueryParamNoteTagLimit,
 };
-use miden_objects::account::AccountId;
-use miden_objects::block::{BlockNoteIndex, BlockNumber};
-use miden_objects::crypto::merkle::SparseMerklePath;
-use miden_objects::note::{
+use miden_protocol::account::AccountId;
+use miden_protocol::block::{BlockNoteIndex, BlockNumber};
+use miden_protocol::crypto::merkle::SparseMerklePath;
+use miden_protocol::note::{
     NoteAssets,
     NoteDetails,
     NoteExecutionHint,
@@ -50,7 +49,8 @@ use miden_objects::note::{
     NoteType,
     Nullifier,
 };
-use miden_objects::{Felt, Word};
+use miden_protocol::utils::{Deserializable, Serializable};
+use miden_protocol::{Felt, Word};
 
 use crate::db::models::conv::{
     SqlTypeConvert,
@@ -217,26 +217,34 @@ pub(crate) fn select_notes_by_id(
     Ok(records)
 }
 
-pub(crate) fn select_notes_by_commitment(
+/// Select the subset of note commitments that already exist in the notes table
+///
+/// # Raw SQL
+///
+/// ```sql
+/// SELECT
+///     notes.note_commitment
+/// FROM notes
+/// WHERE note_commitment IN (?1)
+/// ```
+pub(crate) fn select_existing_note_commitments(
     conn: &mut SqliteConnection,
     note_commitments: &[Word],
-) -> Result<Vec<NoteRecord>, DatabaseError> {
+) -> Result<HashSet<Word>, DatabaseError> {
+    QueryParamNoteCommitmentLimit::check(note_commitments.len())?;
+
     let note_commitments = serialize_vec(note_commitments.iter());
-    let q = schema::notes::table
-        .left_join(
-            schema::note_scripts::table
-                .on(schema::notes::script_root.eq(schema::note_scripts::script_root.nullable())),
-        )
-        .filter(schema::notes::note_commitment.eq_any(&note_commitments));
-    let raw: Vec<_> = SelectDsl::select(
-        q,
-        (NoteRecordRawRow::as_select(), schema::note_scripts::script.nullable()),
-    )
-    .load::<(NoteRecordRawRow, Option<Vec<u8>>)>(conn)?;
-    let records = vec_raw_try_into::<NoteRecord>(
-        raw.into_iter().map(NoteRecordWithScriptRawJoined::from),
-    )?;
-    Ok(records)
+
+    let raw_commitments = SelectDsl::select(schema::notes::table, schema::notes::note_commitment)
+        .filter(schema::notes::note_commitment.eq_any(&note_commitments))
+        .load::<Vec<u8>>(conn)?;
+
+    let commitments = raw_commitments
+        .into_iter()
+        .map(|commitment| Word::read_from_bytes(&commitment[..]))
+        .collect::<Result<HashSet<_>, _>>()?;
+
+    Ok(commitments)
 }
 
 /// Select all notes from the DB using the given [`SqliteConnection`].
@@ -378,115 +386,6 @@ pub(crate) fn select_note_script_by_root(
         .map_err(Into::into)
 }
 
-/// Returns a paginated batch of network notes that have not yet been consumed.
-///
-/// # Returns
-///
-/// A set of unconsumed network notes with maximum length of `size` and the page to get
-/// the next set.
-///
-/// Attention: uses the _implicit_ column `rowid`, which requires to use a few raw SQL nugget
-/// statements
-///
-/// # Raw SQL
-///
-/// ```sql
-/// SELECT
-///     notes.committed_at,
-///     notes.batch_index,
-///     notes.note_index,
-///     notes.note_id,
-///     notes.note_type,
-///     notes.sender,
-///     notes.tag,
-///     notes.aux,
-///     notes.execution_hint,
-///     notes.assets,
-///     notes.inputs,
-///     notes.serial_num,
-///     notes.inclusion_path,
-///     note_scripts.script,
-///     notes.rowid
-/// FROM notes
-/// LEFT JOIN note_scripts ON notes.script_root = note_scripts.script_root
-/// WHERE
-///     execution_mode = 0 AND consumed_at IS NULL AND notes.rowid >= ?1
-/// ORDER BY notes.rowid ASC
-/// LIMIT ?2
-/// ```
-#[allow(
-    clippy::cast_sign_loss,
-    reason = "We need custom SQL statements which has given types that we need to convert"
-)]
-pub(crate) fn unconsumed_network_notes(
-    conn: &mut SqliteConnection,
-    mut page: Page,
-) -> Result<(Vec<NoteRecord>, Page), DatabaseError> {
-    assert_eq!(
-        NoteExecutionMode::Network as u8,
-        0,
-        "Hardcoded execution value must match query"
-    );
-
-    let rowid_sel = diesel::dsl::sql::<diesel::sql_types::BigInt>("notes.rowid");
-    let rowid_sel_ge =
-        diesel::dsl::sql::<diesel::sql_types::Bool>("notes.rowid >= ")
-            .bind::<diesel::sql_types::BigInt, _>(page.token.unwrap_or_default() as i64);
-
-    #[allow(
-        clippy::items_after_statements,
-        reason = "It's only relevant for a single call function"
-    )]
-    type RawLoadedTuple = (
-        NoteRecordRawRow,
-        Option<Vec<u8>>, // script
-        i64,             // rowid (from sql::<BigInt>("notes.rowid"))
-    );
-
-    #[allow(
-        clippy::items_after_statements,
-        reason = "It's only relevant for a single call function"
-    )]
-    fn split_into_raw_note_record_and_implicit_row_id(
-        tuple: RawLoadedTuple,
-    ) -> (NoteRecordWithScriptRawJoined, i64) {
-        let (note, script, row) = tuple;
-        let combined = NoteRecordWithScriptRawJoined::from((note, script));
-        (combined, row)
-    }
-
-    let raw = SelectDsl::select(
-        schema::notes::table.left_join(
-            schema::note_scripts::table
-                .on(schema::notes::script_root.eq(schema::note_scripts::script_root.nullable())),
-        ),
-        (
-            NoteRecordRawRow::as_select(),
-            schema::note_scripts::script.nullable(),
-            rowid_sel.clone(),
-        ),
-    )
-    .filter(schema::notes::execution_mode.eq(NoteExecutionMode::Network.to_raw_sql()))
-    .filter(schema::notes::consumed_at.is_null())
-    .filter(rowid_sel_ge)
-    .order(rowid_sel.asc())
-    .limit(page.size.get() as i64 + 1)
-    .load::<RawLoadedTuple>(conn)?;
-
-    let mut notes = Vec::with_capacity(page.size.into());
-    for raw_item in raw {
-        let (raw_item, row_id) = split_into_raw_note_record_and_implicit_row_id(raw_item);
-        page.token = None;
-        if notes.len() == page.size.get() {
-            page.token = Some(row_id as u64);
-            break;
-        }
-        notes.push(TryInto::<NoteRecord>::try_into(raw_item)?);
-    }
-
-    Ok((notes, page))
-}
-
 /// Returns a paginated batch of network notes for an account that are unconsumed by a specified
 /// block number.
 ///
diff --git a/crates/store/src/db/models/queries/nullifiers.rs b/crates/store/src/db/models/queries/nullifiers.rs
index a81ca050e..9b4a1029f 100644
--- a/crates/store/src/db/models/queries/nullifiers.rs
+++ b/crates/store/src/db/models/queries/nullifiers.rs
@@ -11,14 +11,15 @@ use diesel::{
     SelectableHelper,
     SqliteConnection,
 };
-use miden_lib::utils::{Deserializable, Serializable};
 use miden_node_utils::limiter::{
+    MAX_RESPONSE_PAYLOAD_BYTES,
     QueryParamLimiter,
     QueryParamNullifierLimit,
     QueryParamNullifierPrefixLimit,
 };
-use miden_objects::block::BlockNumber;
-use miden_objects::note::Nullifier;
+use miden_protocol::block::BlockNumber;
+use miden_protocol::note::Nullifier;
+use miden_protocol::utils::{Deserializable, Serializable};
 
 use super::DatabaseError;
 use crate::db::models::conv::{SqlTypeConvert, nullifier_prefix_to_raw_sql};
@@ -65,12 +66,10 @@ pub(crate) fn select_nullifiers_by_prefix(
     block_range: RangeInclusive<BlockNumber>,
 ) -> Result<(Vec<NullifierInfo>, BlockNumber), DatabaseError> {
     // Size calculation: max 2^16 nullifiers per block × 36 bytes per nullifier = ~2.25MB
-    // We use 2.5MB to provide a safety margin for the unlikely case of hitting the maximum
-    pub const MAX_PAYLOAD_BYTES: usize = 2_500_000; // 2.5 MB - allows for max block size of ~2.25MB
     pub const NULLIFIER_BYTES: usize = 32; // digest size (nullifier)
     pub const BLOCK_NUM_BYTES: usize = 4; // 32 bits per block number
     pub const ROW_OVERHEAD_BYTES: usize = NULLIFIER_BYTES + BLOCK_NUM_BYTES; // 36 bytes
-    pub const MAX_ROWS: usize = MAX_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES;
+    pub const MAX_ROWS: usize = MAX_RESPONSE_PAYLOAD_BYTES / ROW_OVERHEAD_BYTES;
 
     assert_eq!(prefix_len, 16, "Only 16-bit prefixes are supported");
diff --git a/crates/store/src/db/models/queries/transactions.rs b/crates/store/src/db/models/queries/transactions.rs
index b011c2d72..720404f67 100644
--- a/crates/store/src/db/models/queries/transactions.rs
+++ b/crates/store/src/db/models/queries/transactions.rs
@@ -12,12 +12,16 @@ use diesel::{
     SelectableHelper,
     SqliteConnection,
 };
-use miden_lib::utils::Deserializable;
-use miden_node_utils::limiter::{QueryParamAccountIdLimit, QueryParamLimiter};
-use miden_objects::account::AccountId;
-use miden_objects::block::BlockNumber;
-use miden_objects::note::{NoteId, Nullifier};
-use miden_objects::transaction::{OrderedTransactionHeaders, TransactionId};
+use miden_node_utils::limiter::{
+    MAX_RESPONSE_PAYLOAD_BYTES,
+    QueryParamAccountIdLimit,
+    QueryParamLimiter,
+};
+use miden_protocol::account::AccountId;
+use miden_protocol::block::BlockNumber;
+use miden_protocol::note::{NoteId, Nullifier};
+use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionId};
+use miden_protocol::utils::{Deserializable, Serializable};
 
 use super::DatabaseError;
 use crate::db::models::conv::SqlTypeConvert;
@@ -93,7 +97,7 @@ pub struct TransactionRecordRaw {
     transaction_id: Vec<u8>,
     initial_state_commitment: Vec<u8>,
     final_state_commitment: Vec<u8>,
-    input_notes: Vec<u8>,
+    nullifiers: Vec<u8>,
     output_notes: Vec<u8>,
     size_in_bytes: i64,
 }
@@ -112,16 +116,15 @@ impl TryInto<TransactionSummary> for TransactionSummaryRaw {
 impl TryInto<crate::db::TransactionRecord> for TransactionRecordRaw {
     type Error = DatabaseError;
     fn try_into(self) -> Result<crate::db::TransactionRecord, Self::Error> {
-        use miden_lib::utils::Deserializable;
-        use miden_objects::Word;
+        use miden_protocol::Word;
 
         let initial_state_commitment = self.initial_state_commitment;
         let final_state_commitment = self.final_state_commitment;
-        let input_notes_binary = self.input_notes;
+        let nullifiers_binary = self.nullifiers;
         let output_notes_binary = self.output_notes;
 
         // Deserialize input notes as nullifiers and output notes as note IDs
-        let input_notes: Vec<Nullifier> = Deserializable::read_from_bytes(&input_notes_binary)?;
+        let nullifiers: Vec<Nullifier> = Deserializable::read_from_bytes(&nullifiers_binary)?;
         let output_notes: Vec<NoteId> = Deserializable::read_from_bytes(&output_notes_binary)?;
 
         Ok(crate::db::TransactionRecord {
@@ -130,7 +133,7 @@ impl TryInto<crate::db::TransactionRecord> for TransactionRecordRaw {
             transaction_id: TransactionId::read_from_bytes(&self.transaction_id[..])?,
             initial_state_commitment: Word::read_from_bytes(&initial_state_commitment)?,
             final_state_commitment: Word::read_from_bytes(&final_state_commitment)?,
-            input_notes,
+            nullifiers,
             output_notes,
         })
     }
@@ -171,7 +174,7 @@ pub struct TransactionSummaryRowInsert {
     block_num: i64,
     initial_state_commitment: Vec<u8>,
     final_state_commitment: Vec<u8>,
-    input_notes: Vec<u8>,
+    nullifiers: Vec<u8>,
     output_notes: Vec<u8>,
     size_in_bytes: i64,
 }
@@ -182,15 +185,13 @@ impl TransactionSummaryRowInsert {
         reason = "We will not approach the item count where i64 and usize cause issues"
     )]
     fn new(
-        transaction_header: &miden_objects::transaction::TransactionHeader,
+        transaction_header: &miden_protocol::transaction::TransactionHeader,
         block_num: BlockNumber,
     ) -> Self {
-        use miden_lib::utils::Serializable;
-
         const HEADER_BASE_SIZE: usize = 4 + 32 + 16 + 64; // block_num + tx_id + account_id + commitments
 
         // Serialize input notes using binary format (store nullifiers)
-        let input_notes_binary = transaction_header.input_notes().to_bytes();
+        let nullifiers_binary = transaction_header.input_notes().to_bytes();
 
         // Serialize output notes using binary format (store note IDs)
         let output_notes_binary = transaction_header.output_notes().to_bytes();
@@ -206,9 +207,9 @@ impl TransactionSummaryRowInsert {
         //
        // Note: 500 bytes per output note is an over-estimate but ensures we don't
        // exceed memory limits when these transactions are later converted to proto records.
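+        // Illustrative example of the estimate below: a transaction with 10 input
+        // notes and 2 output notes is budgeted as
+        //   116 + 10 * 32 + 2 * 500 = 1436 bytes
+        // (HEADER_BASE_SIZE = 4 + 32 + 16 + 64 = 116).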
-        let input_notes_size = (transaction_header.input_notes().num_notes() * 32) as usize;
+        let nullifiers_size = (transaction_header.input_notes().num_notes() * 32) as usize;
         let output_notes_size = transaction_header.output_notes().len() * 500;
-        let size_in_bytes = (HEADER_BASE_SIZE + input_notes_size + output_notes_size) as i64;
+        let size_in_bytes = (HEADER_BASE_SIZE + nullifiers_size + output_notes_size) as i64;
 
         Self {
             transaction_id: transaction_header.id().to_bytes(),
@@ -216,7 +217,7 @@ impl TransactionSummaryRowInsert {
             block_num: block_num.to_raw_sql(),
             initial_state_commitment: transaction_header.initial_state_commitment().to_bytes(),
             final_state_commitment: transaction_header.final_state_commitment().to_bytes(),
-            input_notes: input_notes_binary,
+            nullifiers: nullifiers_binary,
             output_notes: output_notes_binary,
             size_in_bytes,
         }
@@ -280,11 +281,13 @@ pub fn select_transactions_records(
     account_ids: &[AccountId],
     block_range: RangeInclusive<BlockNumber>,
 ) -> Result<(BlockNumber, Vec<TransactionRecord>), DatabaseError> {
-    const MAX_PAYLOAD_BYTES: i64 = 4 * 1024 * 1024; // 4 MB
     const NUM_TXS_PER_CHUNK: i64 = 1000; // Read 1000 transactions at a time
 
     QueryParamAccountIdLimit::check(account_ids.len())?;
 
+    let max_payload_bytes =
+        i64::try_from(MAX_RESPONSE_PAYLOAD_BYTES).expect("payload limit fits within i64");
+
     if block_range.is_empty() {
         return Err(DatabaseError::InvalidBlockRange {
             from: *block_range.start(),
@@ -334,7 +337,7 @@ pub fn select_transactions_records(
         let mut last_added_tx: Option<&TransactionRecordRaw> = None;
 
         for tx in chunk {
-            if total_size + tx.size_in_bytes <= MAX_PAYLOAD_BYTES {
+            if total_size + tx.size_in_bytes <= max_payload_bytes {
                 total_size += tx.size_in_bytes;
                 last_added_tx = Some(tx);
                 added_from_chunk += 1;
@@ -359,7 +362,7 @@ pub fn select_transactions_records(
 
         // Ensure block consistency: remove the last block if it's incomplete
         // (we may have stopped loading mid-block due to size constraints)
-        if total_size >= MAX_PAYLOAD_BYTES {
+        if total_size >= max_payload_bytes {
             // SAFETY: We're guaranteed to have at least one transaction since total_size > 0
             let last_block_num = last_block_num.expect(
                 "guaranteed to have processed at least one transaction when size limit is reached",
diff --git a/crates/store/src/db/models/utils.rs b/crates/store/src/db/models/utils.rs
index 5124beabc..c472940e4 100644
--- a/crates/store/src/db/models/utils.rs
+++ b/crates/store/src/db/models/utils.rs
@@ -1,6 +1,6 @@
 use diesel::{Connection, RunQueryDsl, SqliteConnection};
-use miden_lib::utils::{Deserializable, DeserializationError, Serializable};
-use miden_objects::note::Nullifier;
+use miden_protocol::note::Nullifier;
+use miden_protocol::utils::{Deserializable, DeserializationError, Serializable};
 
 use crate::errors::DatabaseError;
diff --git a/crates/store/src/db/schema.rs b/crates/store/src/db/schema.rs
index 9fadd0175..6bf6af3cf 100644
--- a/crates/store/src/db/schema.rs
+++ b/crates/store/src/db/schema.rs
@@ -1,13 +1,13 @@
 // @generated automatically by Diesel CLI.
 
 diesel::table! {
-    account_storage_map_values (account_id, block_num, slot, key) {
+    account_storage_map_values (account_id, block_num, slot_name, key) {
         account_id -> Binary,
         block_num -> BigInt,
-        slot -> Integer,
+        slot_name -> Text,
         key -> Binary,
         value -> Binary,
-        is_latest_update -> Bool,
+        is_latest -> Bool,
     }
 }
 
@@ -17,20 +17,22 @@ diesel::table! {
         block_num -> BigInt,
         vault_key -> Binary,
         asset -> Nullable<Binary>,
-        is_latest_update -> Bool,
+        is_latest -> Bool,
     }
 }
 
 diesel::table! {
-    accounts (account_id) {
+    accounts (account_id, block_num) {
         account_id -> Binary,
         network_account_id_prefix -> Nullable<BigInt>,
         account_commitment -> Binary,
         code_commitment -> Nullable<Binary>,
-        storage -> Nullable<Binary>,
-        vault -> Nullable<Binary>,
         nonce -> Nullable<BigInt>,
+        storage_header -> Nullable<Binary>,
+        vault_root -> Nullable<Binary>,
         block_num -> BigInt,
+        is_latest -> Bool,
+        created_at_block -> BigInt,
     }
 }
 
@@ -93,7 +95,7 @@ diesel::table! {
         block_num -> BigInt,
         initial_state_commitment -> Binary,
         final_state_commitment -> Binary,
-        input_notes -> Binary,
+        nullifiers -> Binary,
         output_notes -> Binary,
         size_in_bytes -> BigInt,
     }
@@ -101,11 +103,12 @@ diesel::table! {
 
 diesel::joinable!(accounts -> account_codes (code_commitment));
 diesel::joinable!(accounts -> block_headers (block_num));
-diesel::joinable!(notes -> accounts (sender));
+// Note: Cannot use diesel::joinable! with accounts table due to composite primary key
+// diesel::joinable!(notes -> accounts (sender));
+// diesel::joinable!(transactions -> accounts (account_id));
 diesel::joinable!(notes -> block_headers (committed_at));
 diesel::joinable!(notes -> note_scripts (script_root));
 diesel::joinable!(nullifiers -> block_headers (block_num));
-diesel::joinable!(transactions -> accounts (account_id));
 diesel::joinable!(transactions -> block_headers (block_num));
 
 diesel::allow_tables_to_appear_in_same_query!(
diff --git a/crates/store/src/db/schema_hash.rs b/crates/store/src/db/schema_hash.rs
new file mode 100644
index 000000000..28e480fc0
--- /dev/null
+++ b/crates/store/src/db/schema_hash.rs
@@ -0,0 +1,186 @@
+//! Schema verification to detect database schema changes.
+//!
+//! Detects:
+//!
+//! - Direct modifications to the database schema outside of migrations
+//! - Running a node against a database created with a different set of migrations
+//! - Forgetting to reset the database after schema changes, i.e. for a specific migration
+//!
+//! The verification works by creating an in-memory reference database, applying all
+//! migrations to it, and comparing its schema against the actual database schema.
+
+use diesel::{Connection, RunQueryDsl, SqliteConnection};
+use diesel_migrations::MigrationHarness;
+use tracing::instrument;
+
+use crate::COMPONENT;
+use crate::db::migrations::MIGRATIONS;
+use crate::errors::SchemaVerificationError;
+
+/// Represents a schema object for comparison.
+#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
+struct SchemaObject {
+    object_type: String,
+    name: String,
+    sql: String,
+}
+
+/// Represents a row from the `sqlite_schema` table.
+#[derive(diesel::QueryableByName, Debug)]
+struct SqliteSchemaRow {
+    #[diesel(sql_type = diesel::sql_types::Text)]
+    schema_type: String,
+    #[diesel(sql_type = diesel::sql_types::Text)]
+    name: String,
+    #[diesel(sql_type = diesel::sql_types::Nullable<diesel::sql_types::Text>)]
+    sql: Option<String>,
+}
+
+/// Extracts all schema objects from a database connection.
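+///
+/// Note: rows in `sqlite_schema` whose `sql` column is NULL (e.g. the indexes SQLite
+/// auto-creates to back UNIQUE or PRIMARY KEY constraints) are skipped below, since
+/// they carry no comparable DDL text.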
+fn extract_schema(
+    conn: &mut SqliteConnection,
+) -> Result<Vec<SchemaObject>, SchemaVerificationError> {
+    let rows: Vec<SqliteSchemaRow> = diesel::sql_query(
+        "SELECT type as schema_type, name, sql FROM sqlite_schema \
+         WHERE type IN ('table', 'index') \
+         AND name NOT LIKE 'sqlite_%' \
+         AND name NOT LIKE '__diesel_%' \
+         ORDER BY type, name",
+    )
+    .load(conn)
+    .map_err(SchemaVerificationError::SchemaExtraction)?;
+
+    let mut objects: Vec<SchemaObject> = rows
+        .into_iter()
+        .filter_map(|row| {
+            row.sql.map(|sql| SchemaObject {
+                object_type: row.schema_type,
+                name: row.name,
+                sql,
+            })
+        })
+        .collect();
+
+    objects.sort();
+    Ok(objects)
+}
+
+/// Computes the expected schema by applying migrations to an in-memory database.
+fn compute_expected_schema() -> Result<Vec<SchemaObject>, SchemaVerificationError> {
+    let mut conn = SqliteConnection::establish(":memory:")
+        .map_err(SchemaVerificationError::InMemoryDbCreation)?;
+
+    conn.run_pending_migrations(MIGRATIONS)
+        .map_err(SchemaVerificationError::MigrationApplication)?;
+
+    extract_schema(&mut conn)
+}
+
+/// Verifies that the database schema matches the expected schema.
+///
+/// Creates an in-memory database, applies all migrations, and compares schemas.
+///
+/// # Errors
+///
+/// Returns `SchemaVerificationError::Mismatch` if schemas differ.
+#[instrument(level = "info", target = COMPONENT, skip_all, err)]
+pub fn verify_schema(conn: &mut SqliteConnection) -> Result<(), SchemaVerificationError> {
+    let expected = compute_expected_schema()?;
+    let actual = extract_schema(conn)?;
+
+    if actual != expected {
+        let expected_names: Vec<_> = expected.iter().map(|o| &o.name).collect();
+        let actual_names: Vec<_> = actual.iter().map(|o| &o.name).collect();
+
+        // Find differences for better error messages
+        let missing: Vec<_> = expected.iter().filter(|e| !actual.contains(e)).collect();
+        let extra: Vec<_> = actual.iter().filter(|a| !expected.contains(a)).collect();
+
+        tracing::error!(
+            target: COMPONENT,
+            ?expected_names,
+            ?actual_names,
+            missing_count = missing.len(),
+            extra_count = extra.len(),
+            "Database schema mismatch detected"
+        );
+
+        // Log specific differences at debug level
+        for obj in &missing {
+            tracing::debug!(target: COMPONENT, name = %obj.name, "Missing or modified: {}", obj.sql);
+        }
+        for obj in &extra {
+            tracing::debug!(target: COMPONENT, name = %obj.name, "Extra or modified: {}", obj.sql);
+        }
+
+        return Err(SchemaVerificationError::Mismatch {
+            expected_count: expected.len(),
+            actual_count: actual.len(),
+            missing_count: missing.len(),
+            extra_count: extra.len(),
+        });
+    }
+
+    tracing::info!(target: COMPONENT, objects = expected.len(), "Database schema verification passed");
+    Ok(())
+}
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::db::migrations::apply_migrations;
+    use crate::errors::DatabaseError;
+
+    #[test]
+    fn verify_schema_passes_for_correct_schema() {
+        let mut conn = SqliteConnection::establish(":memory:").unwrap();
+        conn.run_pending_migrations(MIGRATIONS).unwrap();
+        verify_schema(&mut conn).expect("Should pass for correct schema");
+    }
+
+    #[test]
+    fn verify_schema_fails_for_added_object() {
+        let mut conn = SqliteConnection::establish(":memory:").unwrap();
+        conn.run_pending_migrations(MIGRATIONS).unwrap();
+
+        diesel::sql_query("CREATE TABLE rogue_table (id INTEGER PRIMARY KEY)")
+            .execute(&mut conn)
+            .unwrap();
+
+        assert!(matches!(
+            verify_schema(&mut conn),
+            Err(SchemaVerificationError::Mismatch { ..
}) + )); + } + + #[test] + fn verify_schema_fails_for_removed_object() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("DROP TABLE transactions").execute(&mut conn).unwrap(); + + assert!(matches!( + verify_schema(&mut conn), + Err(SchemaVerificationError::Mismatch { .. }) + )); + } + + #[test] + fn apply_migrations_succeeds_on_fresh_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + apply_migrations(&mut conn).expect("Should succeed on fresh database"); + } + + #[test] + fn apply_migrations_fails_on_tampered_database() { + let mut conn = SqliteConnection::establish(":memory:").unwrap(); + conn.run_pending_migrations(MIGRATIONS).unwrap(); + + diesel::sql_query("CREATE TABLE tampered (id INTEGER)") + .execute(&mut conn) + .unwrap(); + + assert!(matches!(apply_migrations(&mut conn), Err(DatabaseError::SchemaVerification(_)))); + } +} diff --git a/crates/store/src/db/tests.rs b/crates/store/src/db/tests.rs index aa8a5617c..3988e160d 100644 --- a/crates/store/src/db/tests.rs +++ b/crates/store/src/db/tests.rs @@ -5,16 +5,14 @@ use std::num::NonZeroUsize; use std::sync::{Arc, Mutex}; use diesel::{Connection, SqliteConnection}; -use miden_lib::account::auth::AuthRpoFalcon512; -use miden_lib::note::create_p2id_note; -use miden_lib::transaction::TransactionKernel; use miden_node_proto::domain::account::AccountSummary; -use miden_node_utils::fee::test_fee_params; -use miden_objects::account::auth::PublicKeyCommitment; -use miden_objects::account::delta::AccountUpdateDetails; -use miden_objects::account::{ +use miden_node_utils::fee::{test_fee, test_fee_params}; +use miden_protocol::account::auth::PublicKeyCommitment; +use miden_protocol::account::delta::AccountUpdateDetails; +use miden_protocol::account::{ Account, AccountBuilder, + AccountCode, AccountComponent, AccountDelta, AccountId, @@ -24,18 +22,22 @@ use miden_objects::account::{ AccountType, AccountVaultDelta, StorageSlot, + StorageSlotContent, + StorageSlotDelta, + StorageSlotName, }; -use miden_objects::asset::{Asset, AssetVaultKey, FungibleAsset}; -use miden_objects::block::{ +use miden_protocol::asset::{Asset, AssetVaultKey, FungibleAsset}; +use miden_protocol::block::{ BlockAccountUpdate, BlockHeader, BlockNoteIndex, BlockNoteTree, BlockNumber, }; -use miden_objects::crypto::merkle::SparseMerklePath; -use miden_objects::crypto::rand::RpoRandomCoin; -use miden_objects::note::{ +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey; +use miden_protocol::crypto::merkle::SparseMerklePath; +use miden_protocol::crypto::rand::RpoRandomCoin; +use miden_protocol::note::{ Note, NoteDetails, NoteExecutionHint, @@ -46,21 +48,26 @@ use miden_objects::note::{ NoteType, Nullifier, }; -use miden_objects::testing::account_id::{ +use miden_protocol::testing::account_id::{ ACCOUNT_ID_PRIVATE_SENDER, ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET, ACCOUNT_ID_REGULAR_PRIVATE_ACCOUNT_UPDATABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE, ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2, }; -use miden_objects::transaction::{ +use miden_protocol::testing::random_signer::RandomBlockSigner; +use miden_protocol::transaction::{ InputNoteCommitment, InputNotes, OrderedTransactionHeaders, TransactionHeader, TransactionId, }; -use miden_objects::{EMPTY_WORD, Felt, FieldElement, Word, ZERO}; +use miden_protocol::utils::{Deserializable, Serializable}; +use miden_protocol::{EMPTY_WORD, Felt, FieldElement, Word, ZERO}; +use 
miden_standards::account::auth::AuthRpoFalcon512;
+use miden_standards::code_builder::CodeBuilder;
+use miden_standards::note::create_p2id_note;
 use pretty_assertions::assert_eq;
 use rand::Rng;
 
@@ -88,7 +95,7 @@ fn create_block(conn: &mut SqliteConnection, block_num: BlockNumber) {
         num_to_word(7),
         num_to_word(8),
         num_to_word(9),
-        num_to_word(10),
+        SecretKey::new().public_key(),
         test_fee_params(),
         11_u8.into(),
     );
@@ -317,7 +324,8 @@ fn sql_select_notes_different_execution_hints() {
     let res = queries::insert_notes(conn, &[(note_none, None)]);
     assert_eq!(res.unwrap(), 1, "One element must have been inserted");
 
-    let note = &queries::select_notes_by_id(conn, &[num_to_word(0).into()]).unwrap()[0];
+    let note_id = NoteId::from_raw(num_to_word(0));
+    let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0];
 
     assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::none());
 
@@ -342,7 +350,8 @@ fn sql_select_notes_different_execution_hints() {
     let res = queries::insert_notes(conn, &[(note_always, None)]);
     assert_eq!(res.unwrap(), 1, "One element must have been inserted");
 
-    let note = &queries::select_notes_by_id(conn, &[num_to_word(1).into()]).unwrap()[0];
+    let note_id = NoteId::from_raw(num_to_word(1));
+    let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0];
 
     assert_eq!(note.metadata.execution_hint(), NoteExecutionHint::always());
 
     let note_after_block = NoteRecord {
@@ -365,7 +374,8 @@ fn sql_select_notes_different_execution_hints() {
     let res = queries::insert_notes(conn, &[(note_after_block, None)]);
     assert_eq!(res.unwrap(), 1, "One element must have been inserted");
 
-    let note = &queries::select_notes_by_id(conn, &[num_to_word(2).into()]).unwrap()[0];
+    let note_id = NoteId::from_raw(num_to_word(2));
+    let note = &queries::select_notes_by_id(conn, &[note_id]).unwrap()[0];
 
     assert_eq!(
         note.metadata.execution_hint(),
         NoteExecutionHint::after_block(12.into()).unwrap()
@@ -446,139 +456,6 @@ fn make_account_and_note(
 #[test]
 #[miden_node_test_macro::enable_logging]
 fn sql_unconsumed_network_notes() {
-    // Number of notes to generate.
-    const N: u64 = 32;
-
-    let mut conn = create_db();
-    let conn = &mut conn;
-
-    let block_num = BlockNumber::from(1);
-    // An arbitrary public account (network note tag requires public account).
-    create_block(conn, block_num);
-
-    let account_notes = [
-        make_account_and_note(conn, block_num, [0u8; 32], AccountStorageMode::Public),
-        make_account_and_note(conn, block_num, [1u8; 32], AccountStorageMode::Network),
-    ];
-    let network_account_id = account_notes[1].0;
-
-    // Create some notes, of which half are network notes.
-    let notes = (0..N)
-        .map(|i| {
-            let index = (i % 2) as usize;
-            let is_network = account_notes[index].0.storage_mode() == AccountStorageMode::Network;
-            let account_id = account_notes[index].0;
-            let new_note = &account_notes[index].1;
-            let note = NoteRecord {
-                block_num,
-                note_index: BlockNoteIndex::new(0, i as usize).unwrap(),
-                note_id: num_to_word(i),
-                note_commitment: num_to_word(i),
-                metadata: NoteMetadata::new(
-                    account_notes[index].0,
-                    NoteType::Public,
-                    NoteTag::from_account_id(account_id),
-                    NoteExecutionHint::none(),
-                    Felt::default(),
-                )
-                .unwrap(),
-                details: is_network.then_some(NoteDetails::from(new_note)),
-                inclusion_path: SparseMerklePath::default(),
-            };
-
-            (note, is_network.then_some(num_to_nullifier(i)))
-        })
-        .collect::<Vec<_>>();
-
-    // Copy out all network notes to assert against. These will be in chronological order already.
-    let network_notes = notes
-        .iter()
-        .filter_map(|(note, nullifier)| nullifier.is_some().then_some(note.clone()))
-        .collect::<Vec<_>>();
-
-    // Insert the set of notes.
-    queries::insert_scripts(conn, notes.iter().map(|(note, _)| note)).unwrap();
-    queries::insert_notes(conn, &notes).unwrap();
-
-    // Fetch all network notes by setting a limit larger than the amount available.
-    let (result, _) = queries::unconsumed_network_notes(
-        conn,
-        Page {
-            token: None,
-            size: NonZeroUsize::new(N as usize * 10).unwrap(),
-        },
-    )
-    .unwrap();
-    assert_eq!(result, network_notes);
-    let (result, _) = queries::select_unconsumed_network_notes_by_tag(
-        conn,
-        NoteTag::from_account_id(network_account_id).into(),
-        block_num,
-        Page {
-            token: None,
-            size: NonZeroUsize::new(N as usize * 10).unwrap(),
-        },
-    )
-    .unwrap();
-    assert_eq!(result, network_notes);
-
-    // Check pagination works as expected.
-    let limit = 5;
-    let mut page = Page {
-        token: None,
-        size: NonZeroUsize::new(limit).unwrap(),
-    };
-    network_notes.chunks(limit).for_each(|expected| {
-        let (result, new_page) = queries::unconsumed_network_notes(conn, page).unwrap();
-        page = new_page;
-        assert_eq!(result, expected);
-    });
-    network_notes.chunks(limit).for_each(|expected| {
-        let (result, new_page) = queries::select_unconsumed_network_notes_by_tag(
-            conn,
-            NoteTag::from_account_id(network_account_id).into(),
-            block_num,
-            page,
-        )
-        .unwrap();
-        page = new_page;
-        assert_eq!(result, expected);
-    });
-    assert!(page.token.is_none());
-
-    // Consume every third network note and ensure these are now excluded from the results.
-    let consumed = notes
-        .iter()
-        .filter_map(|(_, nullifier)| *nullifier)
-        .step_by(3)
-        .collect::<Vec<_>>();
-    queries::insert_nullifiers_for_block(conn, &consumed, block_num).unwrap();
-
-    let expected = network_notes
-        .iter()
-        .enumerate()
-        .filter(|(i, _)| i % 3 != 0)
-        .map(|(_, note)| note.clone())
-        .collect::<Vec<_>>();
-    let page = Page {
-        token: None,
-        size: NonZeroUsize::new(N as usize * 10).unwrap(),
-    };
-    let (result, _) = queries::unconsumed_network_notes(conn, page).unwrap();
-    assert_eq!(result, expected);
-    let (result, _) = queries::select_unconsumed_network_notes_by_tag(
-        conn,
-        NoteTag::from_account_id(network_account_id).into(),
-        block_num,
-        page,
-    )
-    .unwrap();
-    assert_eq!(result, expected);
-}
-
-#[test]
-#[miden_node_test_macro::enable_logging]
-fn sql_unconsumed_network_notes_for_account() {
     let mut conn = create_db();
 
     // Create account.
@@ -590,27 +467,25 @@ fn sql_unconsumed_network_notes_for_account() {
     create_block(&mut conn, 1.into());
 
     // Create an unconsumed note in each block.
-    let notes = (0..2)
-        .map(|i: u32| {
-            let note = NoteRecord {
-                block_num: 0.into(), // Created on same block.
-                note_index: BlockNoteIndex::new(0, i as usize).unwrap(),
-                note_id: num_to_word(i.into()),
-                note_commitment: num_to_word(i.into()),
-                metadata: NoteMetadata::new(
-                    account_note.0,
-                    NoteType::Public,
-                    NoteTag::from_account_id(account_note.0),
-                    NoteExecutionHint::none(),
-                    Felt::default(),
-                )
-                .unwrap(),
-                details: None,
-                inclusion_path: SparseMerklePath::default(),
-            };
-            (note, Some(num_to_nullifier(i.into())))
-        })
-        .collect::<Vec<_>>();
+    let notes = Vec::from_iter((0..2).map(|i: u32| {
+        let note = NoteRecord {
+            block_num: 0.into(), // Created on same block.
+            note_index: BlockNoteIndex::new(0, i as usize).unwrap(),
+            note_id: num_to_word(i.into()),
+            note_commitment: num_to_word(i.into()),
+            metadata: NoteMetadata::new(
+                account_note.0,
+                NoteType::Public,
+                NoteTag::from_account_id(account_note.0),
+                NoteExecutionHint::none(),
+                Felt::default(),
+            )
+            .unwrap(),
+            details: None,
+            inclusion_path: SparseMerklePath::default(),
+        };
+        (note, Some(num_to_nullifier(i.into())))
+    }));
 
     queries::insert_scripts(&mut conn, notes.iter().map(|(note, _)| note)).unwrap();
     queries::insert_notes(&mut conn, &notes).unwrap();
@@ -984,7 +859,7 @@ fn db_block_header() {
         num_to_word(7),
         num_to_word(8),
         num_to_word(9),
-        num_to_word(10),
+        SecretKey::new().public_key(),
         test_fee_params(),
         11_u8.into(),
     );
@@ -1016,7 +891,7 @@ fn db_block_header() {
         num_to_word(17),
         num_to_word(18),
         num_to_word(19),
-        num_to_word(20),
+        SecretKey::new().public_key(),
         test_fee_params(),
         21_u8.into(),
     );
@@ -1152,7 +1027,7 @@ fn notes() {
     let note = NoteRecord {
         block_num: block_num_1,
         note_index,
-        note_id: new_note.id().into(),
+        note_id: new_note.id().as_word(),
         note_commitment: new_note.commitment(),
         metadata: NoteMetadata::new(
             sender,
@@ -1199,7 +1074,7 @@ fn notes() {
     let note2 = NoteRecord {
         block_num: block_num_2,
         note_index: note.note_index,
-        note_id: new_note.id().into(),
+        note_id: new_note.id().as_word(),
         note_commitment: new_note.commitment(),
         metadata: note.metadata,
         details: None,
@@ -1229,7 +1104,7 @@ fn notes() {
 
     // test query notes by id
     let notes = vec![note.clone(), note2];
-    let note_ids = Vec::from_iter(notes.iter().map(|note| NoteId::from(note.note_id)));
+    let note_ids = Vec::from_iter(notes.iter().map(|note| NoteId::from_raw(note.note_id)));
     let res = queries::select_notes_by_id(conn, &note_ids).unwrap();
     assert_eq!(res, notes);
 
@@ -1247,10 +1122,17 @@ fn insert_account_delta(
     block_number: BlockNumber,
     delta: &AccountDelta,
 ) {
-    for (slot, slot_delta) in delta.storage().maps() {
+    for (slot_name, slot_delta) in delta.storage().maps() {
         for (k, v) in slot_delta.entries() {
-            insert_account_storage_map_value(conn, account_id, block_number, *slot, *k.inner(), *v)
-                .unwrap();
+            insert_account_storage_map_value(
+                conn,
+                account_id,
+                block_number,
+                slot_name.clone(),
+                *k.inner(),
+                *v,
+            )
+            .unwrap();
         }
     }
 }
@@ -1260,7 +1142,7 @@ fn insert_account_delta(
 fn sql_account_storage_map_values_insertion() {
     use std::collections::BTreeMap;
 
-    use miden_objects::account::StorageMapDelta;
+    use miden_protocol::account::StorageMapDelta;
 
     let mut conn = create_db();
     let conn = &mut conn;
@@ -1273,7 +1155,7 @@ fn sql_account_storage_map_values_insertion() {
     let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE_2).unwrap();
 
-    let slot = 3u8;
+    let slot_name = StorageSlotName::mock(3);
     let key1 = Word::from([1u32, 2, 3, 4]);
     let key2 = Word::from([5u32, 6, 7, 8]);
     let value1 = Word::from([10u32, 11, 12, 13]);
@@ -1284,8 +1166,8 @@ fn sql_account_storage_map_values_insertion() {
     let mut map1 = StorageMapDelta::default();
     map1.insert(key1, value1);
     map1.insert(key2, value2);
-    let maps1: BTreeMap<_, _> = [(slot, map1)].into_iter().collect();
-    let storage1 = AccountStorageDelta::from_parts(BTreeMap::new(), maps1).unwrap();
+    let delta1 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map1))]);
+    let storage1 = AccountStorageDelta::from_raw(delta1);
     let delta1 =
         AccountDelta::new(account_id, storage1, AccountVaultDelta::default(), Felt::ONE).unwrap();
     insert_account_delta(conn, account_id, block1, &delta1);
@@ -1298,8 +1180,8 @@ fn sql_account_storage_map_values_insertion() {
fn sql_account_storage_map_values_insertion() { // Update key1 at block 2 let mut map2 = StorageMapDelta::default(); map2.insert(key1, value3); - let maps2 = BTreeMap::from_iter([(slot, map2)]); - let storage2 = AccountStorageDelta::from_parts(BTreeMap::new(), maps2).unwrap(); + let delta2 = BTreeMap::from_iter([(slot_name.clone(), StorageSlotDelta::Map(map2))]); + let storage2 = AccountStorageDelta::from_raw(delta2); let delta2 = AccountDelta::new(account_id, storage2, AccountVaultDelta::default(), Felt::new(2)) .unwrap(); @@ -1315,14 +1197,14 @@ fn sql_account_storage_map_values_insertion() { storage_map_values .values .iter() - .any(|val| val.slot_index == slot && val.key == key1 && val.value == value3), + .any(|val| val.slot_name == slot_name && val.key == key1 && val.value == value3), "key1 should point to new value at block2" ); assert!( storage_map_values .values .iter() - .any(|val| val.slot_index == slot && val.key == key2 && val.value == value2), + .any(|val| val.slot_name == slot_name && val.key == key2 && val.value == value2), "key2 should stay the same (from block1)" ); } @@ -1331,7 +1213,7 @@ fn sql_account_storage_map_values_insertion() { fn select_storage_map_sync_values() { let mut conn = create_db(); let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); - let slot = 5u8; + let slot_name = StorageSlotName::mock(5); let key1 = num_to_word(1); let key2 = num_to_word(2); @@ -1346,20 +1228,55 @@ fn select_storage_map_sync_values() { // Insert data across multiple blocks using individual inserts // Block 1: key1 -> value1, key2 -> value2 - queries::insert_account_storage_map_value(&mut conn, account_id, block1, slot, key1, value1) - .unwrap(); - queries::insert_account_storage_map_value(&mut conn, account_id, block1, slot, key2, value2) - .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block1, + slot_name.clone(), + key1, + value1, + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block1, + slot_name.clone(), + key2, + value2, + ) + .unwrap(); // Block 2: key2 -> value3 (update), key3 -> value3 (new) - queries::insert_account_storage_map_value(&mut conn, account_id, block2, slot, key2, value3) - .unwrap(); - queries::insert_account_storage_map_value(&mut conn, account_id, block2, slot, key3, value3) - .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block2, + slot_name.clone(), + key2, + value3, + ) + .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block2, + slot_name.clone(), + key3, + value3, + ) + .unwrap(); // Block 3: key1 -> value2 (update) - queries::insert_account_storage_map_value(&mut conn, account_id, block3, slot, key1, value2) - .unwrap(); + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block3, + slot_name.clone(), + key1, + value2, + ) + .unwrap(); let page = queries::select_account_storage_map_values( &mut conn, @@ -1373,19 +1290,19 @@ fn select_storage_map_sync_values() { // Compare ordered by key using a tuple view to avoid relying on the concrete struct name let expected = vec![ StorageMapValue { - slot_index: slot, + slot_name: slot_name.clone(), key: key2, value: value3, block_num: block2, }, StorageMapValue { - slot_index: slot, + slot_name: slot_name.clone(), key: key3, value: value3, block_num: block2, }, StorageMapValue { - slot_index: slot, + slot_name, key: key1, value: value2, block_num: block3, @@ -1402,13 +1319,37 @@ fn 
num_to_word(n: u64) -> Word { } fn num_to_nullifier(n: u64) -> Nullifier { - Nullifier::from(num_to_word(n)) + Nullifier::from_raw(num_to_word(n)) } fn mock_block_account_update(account_id: AccountId, num: u64) -> BlockAccountUpdate { BlockAccountUpdate::new(account_id, num_to_word(num), AccountUpdateDetails::Private) } +// Helper function to create account with specific code for tests +fn create_account_with_code(code_str: &str, seed: [u8; 32]) -> Account { + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(1), num_to_word(1)), + ]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", code_str) + .unwrap(); + + let component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supported_type(AccountType::RegularAccountUpdatableCode); + + AccountBuilder::new(seed) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap() +} + fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader { let initial_state_commitment = Word::try_from([num, 0, 0, 0]).unwrap(); let final_account_commitment = Word::try_from([0, num, 0, 0]).unwrap(); @@ -1445,6 +1386,7 @@ fn mock_block_transaction(account_id: AccountId, num: u64) -> TransactionHeader final_account_commitment, input_notes, output_notes, + test_fee(), ) } @@ -1478,35 +1420,931 @@ fn mock_account_code_and_storage( init_seed: Option<[u8; 32]>, ) -> Account { let component_code = "\ - export.account_procedure_1 + pub proc account_procedure_1 push.1.2 add end "; let component_storage = vec![ - StorageSlot::Value(Word::empty()), - StorageSlot::Value(num_to_word(1)), - StorageSlot::Value(Word::empty()), - StorageSlot::Value(num_to_word(3)), - StorageSlot::Value(Word::empty()), - StorageSlot::Value(num_to_word(5)), + StorageSlot::with_value(StorageSlotName::mock(0), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(1), num_to_word(1)), + StorageSlot::with_value(StorageSlotName::mock(2), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(3), num_to_word(3)), + StorageSlot::with_value(StorageSlotName::mock(4), Word::empty()), + StorageSlot::with_value(StorageSlotName::mock(5), num_to_word(5)), ]; - let component = AccountComponent::compile( - component_code, - TransactionKernel::assembler(), - component_storage, - ) - .unwrap() - .with_supported_type(account_type); + let account_component_code = CodeBuilder::default() + .compile_component_code("counter_contract::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); AccountBuilder::new(init_seed.unwrap_or([0; 32])) .account_type(account_type) .storage_mode(storage_mode) .with_assets(assets) - .with_component(component) + .with_component(account_component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap() +} + +// ACCOUNT CODE TESTS +// ================================================================================================ + +#[test] +fn test_select_account_code_by_commitment() { + let mut conn = create_db(); + + let block_num_1 = BlockNumber::from(1); + + // Create block 1 + create_block(&mut conn, 
block_num_1); + + // Create an account with code at block 1 using the existing mock function + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + None, + ); + + // Get the code commitment and bytes before inserting + let code_commitment = account.code().commitment(); + let expected_code = account.code().to_bytes(); + + // Insert the account at block 1 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account.id(), + account.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account).unwrap()), + )], + block_num_1, + ) + .unwrap(); + + // Query code by commitment - should return the code + let code = queries::select_account_code_by_commitment(&mut conn, code_commitment) + .unwrap() + .expect("Code should exist"); + assert_eq!(code, expected_code); + + // Query code for non-existent commitment - should return None + let non_existent_commitment = [0u8; 32]; + let non_existent_commitment = Word::read_from_bytes(&non_existent_commitment).unwrap(); + let code_other = + queries::select_account_code_by_commitment(&mut conn, non_existent_commitment).unwrap(); + assert!(code_other.is_none(), "Code should not exist for non-existent commitment"); +} + +#[test] +fn test_select_account_code_by_commitment_multiple_codes() { + let mut conn = create_db(); + + let block_num_1 = BlockNumber::from(1); + let block_num_2 = BlockNumber::from(2); + + // Create blocks + create_block(&mut conn, block_num_1); + create_block(&mut conn, block_num_2); + + // Create account with code v1 at block 1 + let code_v1_str = "\ + pub proc account_procedure_1 + push.1.2 + add + end + "; + let account_v1 = create_account_with_code(code_v1_str, [1u8; 32]); + let code_v1_commitment = account_v1.code().commitment(); + let code_v1 = account_v1.code().to_bytes(); + + // Insert the account at block 1 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account_v1.id(), + account_v1.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account_v1).unwrap()), + )], + block_num_1, + ) + .unwrap(); + + // Create account with different code v2 at block 2 + let code_v2_str = "\ + pub proc account_procedure_1 + push.3.4 + mul + end + "; + let account_v2 = create_account_with_code(code_v2_str, [1u8; 32]); // Same seed to keep same account_id + let code_v2_commitment = account_v2.code().commitment(); + let code_v2 = account_v2.code().to_bytes(); + + // Verify that the codes are actually different + assert_ne!( + code_v1, code_v2, + "Test setup error: codes should be different for different code strings" + ); + assert_ne!( + code_v1_commitment, code_v2_commitment, + "Test setup error: code commitments should be different" + ); + + // Insert the updated account at block 2 + queries::upsert_accounts( + &mut conn, + &[BlockAccountUpdate::new( + account_v2.id(), + account_v2.commitment(), + AccountUpdateDetails::Delta(AccountDelta::try_from(account_v2).unwrap()), + )], + block_num_2, + ) + .unwrap(); + + // Both codes should be retrievable by their respective commitments + let code_from_v1_commitment = + queries::select_account_code_by_commitment(&mut conn, code_v1_commitment) + .unwrap() + .expect("v1 code should exist"); + assert_eq!(code_from_v1_commitment, code_v1, "v1 commitment should return v1 code"); + + let code_from_v2_commitment = + queries::select_account_code_by_commitment(&mut conn, code_v2_commitment) + .unwrap() + .expect("v2 code should exist"); + assert_eq!(code_from_v2_commitment, 
code_v2, "v2 commitment should return v2 code"); +} + +// GENESIS REGRESSION TESTS +// ================================================================================================ + +/// Verifies genesis block with account containing vault assets can be inserted. +#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_account_assets() { + use crate::genesis::GenesisState; + let component_code = "pub proc foo push.1 end"; + + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, Vec::new()) + .unwrap() + .with_supports_all_types(); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 1000).unwrap(); + + let account = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_assets([fungible_asset.into()]) .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) .build_existing() + .unwrap(); + + let genesis_state = + GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +/// Verifies genesis block with account containing storage maps can be inserted. +#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_account_storage_map() { + use miden_protocol::account::StorageMap; + + use crate::genesis::GenesisState; + + let storage_map = StorageMap::with_entries(vec![ + ( + Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), + ), + ( + Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), + ), + ]) + .unwrap(); + + let component_storage = vec![ + StorageSlot::with_map(StorageSlotName::mock(0), storage_map), + StorageSlot::with_empty_value(StorageSlotName::mock(1)), + ]; + + let component_code = "pub proc foo push.1 end"; + + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) .unwrap() + .with_supports_all_types(); + + let account = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = + GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +/// Verifies genesis block with account containing both vault assets and storage maps. 
+#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_account_assets_and_storage() { + use miden_protocol::account::StorageMap; + + use crate::genesis::GenesisState; + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); + + let storage_map = StorageMap::with_entries(vec![( + Word::from([Felt::new(100), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(1), Felt::new(2), Felt::new(3), Felt::new(4)]), + )]) + .unwrap(); + + let component_storage = vec![ + StorageSlot::with_empty_value(StorageSlotName::mock(0)), + StorageSlot::with_map(StorageSlotName::mock(2), storage_map), + ]; + + let component_code = "pub proc foo push.1 end"; + + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); + + let account = AccountBuilder::new([3u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_assets([fungible_asset.into()]) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = + GenesisState::new(vec![account], test_fee_params(), 1, 0, SecretKey::random()); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +/// Verifies genesis block with multiple accounts of different types. +/// Tests realistic genesis scenario with basic accounts, assets, and storage. +#[test] +#[miden_node_test_macro::enable_logging] +fn genesis_with_multiple_accounts() { + use miden_protocol::account::StorageMap; + + use crate::genesis::GenesisState; + + let account_component_code = CodeBuilder::default() + .compile_component_code("foo::interface", "pub proc foo push.1 end") + .unwrap(); + let account_component1 = AccountComponent::new(account_component_code, Vec::new()) + .unwrap() + .with_supports_all_types(); + + let account1 = AccountBuilder::new([1u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component1) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 2000).unwrap(); + + let account_component_code = CodeBuilder::default() + .compile_component_code("bar::interface", "pub proc bar push.2 end") + .unwrap(); + let account_component2 = AccountComponent::new(account_component_code, Vec::new()) + .unwrap() + .with_supports_all_types(); + + let account2 = AccountBuilder::new([2u8; 32]) + .account_type(AccountType::RegularAccountImmutableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component2) + .with_assets([fungible_asset.into()]) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let storage_map = StorageMap::with_entries(vec![( + Word::from([Felt::new(5), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(15), Felt::new(25), Felt::new(35), Felt::new(45)]), + )]) + .unwrap(); + + let component_storage = 
vec![StorageSlot::with_map(StorageSlotName::mock(0), storage_map)]; + + let account_component_code = CodeBuilder::default() + .compile_component_code("baz::interface", "pub proc baz push.3 end") + .unwrap(); + let account_component3 = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); + + let account3 = AccountBuilder::new([3u8; 32]) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component3) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let genesis_state = GenesisState::new( + vec![account1, account2, account3], + test_fee_params(), + 1, + 0, + SecretKey::random(), + ); + let genesis_block = genesis_state.into_block().unwrap(); + + crate::db::Db::bootstrap(":memory:".into(), &genesis_block).unwrap(); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn regression_1461_full_state_delta_inserts_vault_assets() { + let mut conn = create_db(); + let block_num: BlockNumber = 1.into(); + create_block(&mut conn, block_num); + + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap(); + + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [fungible_asset.into()], + Some([42u8; 32]), + ); + let account_id = account.id(); + + // Convert to full state delta, same as genesis + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + assert!(account_delta.is_full_state()); + + let block_update = BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(account_delta), + ); + + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + let (_, vault_assets) = queries::select_account_vault_assets( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + // Before the fix, vault_assets was empty + let vault_asset = vault_assets.first().unwrap(); + let expected_asset: Asset = fungible_asset.into(); + assert_eq!(vault_asset.block_num, block_num); + assert_eq!(vault_asset.asset, Some(expected_asset)); + assert_eq!(vault_asset.vault_key, expected_asset.vault_key()); +} + +// SERIALIZATION SYMMETRY TESTS +// ================================================================================================ +// +// These tests ensure that `to_bytes` and `from_bytes`/`read_from_bytes` are symmetric for all +// types used in database operations. This guarantees that data inserted into the database can +// always be correctly retrieved. 
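+//
+// The roundtrip pattern shared by the tests below can be captured in one generic
+// helper. NOTE: this is an illustrative sketch only -- an `assert_roundtrip` helper
+// like this is hypothetical and not part of this module; it assumes the
+// `Serializable`/`Deserializable` traits from `miden_protocol::utils`:
+//
+// fn assert_roundtrip<T>(value: &T, what: &str)
+// where
+//     T: Serializable + Deserializable + PartialEq + core::fmt::Debug,
+// {
+//     // Serialize, deserialize, and require the restored value to compare equal.
+//     let bytes = value.to_bytes();
+//     let restored = T::read_from_bytes(&bytes).unwrap();
+//     assert_eq!(*value, restored, "{what} serialization must be symmetric");
+// }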
+ +#[test] +fn serialization_symmetry_core_types() { + // AccountId + let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + let bytes = account_id.to_bytes(); + let restored = AccountId::read_from_bytes(&bytes).unwrap(); + assert_eq!(account_id, restored, "AccountId serialization must be symmetric"); + + // Word + let word = num_to_word(0x1234_5678_9ABC_DEF0); + let bytes = word.to_bytes(); + let restored = Word::read_from_bytes(&bytes).unwrap(); + assert_eq!(word, restored, "Word serialization must be symmetric"); + + // Nullifier + let nullifier = num_to_nullifier(0xDEAD_BEEF); + let bytes = nullifier.to_bytes(); + let restored = Nullifier::read_from_bytes(&bytes).unwrap(); + assert_eq!(nullifier, restored, "Nullifier serialization must be symmetric"); + + // TransactionId + let tx_id = TransactionId::new(num_to_word(1), num_to_word(2), num_to_word(3), num_to_word(4)); + let bytes = tx_id.to_bytes(); + let restored = TransactionId::read_from_bytes(&bytes).unwrap(); + assert_eq!(tx_id, restored, "TransactionId serialization must be symmetric"); + + // NoteId + let note_id = NoteId::new(num_to_word(1), num_to_word(2)); + let bytes = note_id.to_bytes(); + let restored = NoteId::read_from_bytes(&bytes).unwrap(); + assert_eq!(note_id, restored, "NoteId serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_block_header() { + let block_header = BlockHeader::new( + 1_u8.into(), + num_to_word(2), + 3.into(), + num_to_word(4), + num_to_word(5), + num_to_word(6), + num_to_word(7), + num_to_word(8), + num_to_word(9), + SecretKey::new().public_key(), + test_fee_params(), + 11_u8.into(), + ); + + let bytes = block_header.to_bytes(); + let restored = BlockHeader::read_from_bytes(&bytes).unwrap(); + assert_eq!(block_header, restored, "BlockHeader serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_assets() { + let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap(); + + // FungibleAsset + let fungible = FungibleAsset::new(faucet_id, 1000).unwrap(); + let asset: Asset = fungible.into(); + let bytes = asset.to_bytes(); + let restored = Asset::read_from_bytes(&bytes).unwrap(); + assert_eq!(asset, restored, "Asset (fungible) serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_account_code() { + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + None, + ); + + let code = account.code(); + let bytes = code.to_bytes(); + let restored = AccountCode::read_from_bytes(&bytes).unwrap(); + assert_eq!(*code, restored, "AccountCode serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_sparse_merkle_path() { + let path = SparseMerklePath::default(); + let bytes = path.to_bytes(); + let restored = SparseMerklePath::read_from_bytes(&bytes).unwrap(); + assert_eq!(path, restored, "SparseMerklePath serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_note_metadata() { + let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap(); + // Use a tag that roundtrips properly - NoteTag::LocalAny stores the full u32 including type + // bits + let tag = NoteTag::from_account_id(sender); + let metadata = NoteMetadata::new( + sender, + NoteType::Public, + tag, + NoteExecutionHint::always(), + Felt::new(42), + ) + .unwrap(); + + let bytes = metadata.to_bytes(); + let restored = NoteMetadata::read_from_bytes(&bytes).unwrap(); + assert_eq!(metadata, restored, "NoteMetadata serialization must be 
symmetric"); +} + +#[test] +fn serialization_symmetry_nullifier_vec() { + let nullifiers: Vec = (0..5).map(num_to_nullifier).collect(); + let bytes = nullifiers.to_bytes(); + let restored: Vec = Deserializable::read_from_bytes(&bytes).unwrap(); + assert_eq!(nullifiers, restored, "Vec serialization must be symmetric"); +} + +#[test] +fn serialization_symmetry_note_id_vec() { + let note_ids: Vec = + (0..5).map(|i| NoteId::new(num_to_word(i), num_to_word(i + 100))).collect(); + let bytes = note_ids.to_bytes(); + let restored: Vec = Deserializable::read_from_bytes(&bytes).unwrap(); + assert_eq!(note_ids, restored, "Vec serialization must be symmetric"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_block_header() { + let mut conn = create_db(); + + let block_header = BlockHeader::new( + 1_u8.into(), + num_to_word(2), + BlockNumber::from(42), + num_to_word(4), + num_to_word(5), + num_to_word(6), + num_to_word(7), + num_to_word(8), + num_to_word(9), + SecretKey::new().public_key(), + test_fee_params(), + 11_u8.into(), + ); + + // Insert + queries::insert_block_header(&mut conn, &block_header).unwrap(); + + // Retrieve + let retrieved = + queries::select_block_header_by_block_num(&mut conn, Some(block_header.block_num())) + .unwrap() + .expect("Block header should exist"); + + assert_eq!(block_header, retrieved, "BlockHeader DB roundtrip must be symmetric"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_nullifiers() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let nullifiers: Vec = (0..5).map(|i| num_to_nullifier(i << 48)).collect(); + + // Insert + queries::insert_nullifiers_for_block(&mut conn, &nullifiers, block_num).unwrap(); + + // Retrieve + let retrieved = queries::select_all_nullifiers(&mut conn).unwrap(); + + assert_eq!(nullifiers.len(), retrieved.len(), "Should retrieve same number of nullifiers"); + for (orig, info) in nullifiers.iter().zip(retrieved.iter()) { + assert_eq!(*orig, info.nullifier, "Nullifier DB roundtrip must be symmetric"); + assert_eq!(block_num, info.block_num, "Block number must match"); + } +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_account() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account = mock_account_code_and_storage( + AccountType::RegularAccountImmutableCode, + AccountStorageMode::Public, + [], + Some([99u8; 32]), + ); + let account_id = account.id(); + let account_commitment = account.commitment(); + + // Insert with full delta (like genesis) + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + let block_update = BlockAccountUpdate::new( + account_id, + account_commitment, + AccountUpdateDetails::Delta(account_delta), + ); + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + // Retrieve + let retrieved = queries::select_all_accounts(&mut conn).unwrap(); + assert_eq!(retrieved.len(), 1, "Should have one account"); + + let retrieved_info = &retrieved[0]; + assert_eq!( + retrieved_info.summary.account_id, account_id, + "AccountId DB roundtrip must be symmetric" + ); + assert_eq!( + retrieved_info.summary.account_commitment, account_commitment, + "Account commitment DB roundtrip must be symmetric" + ); + assert_eq!(retrieved_info.summary.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_notes() { + let mut conn = 
+    let block_num = BlockNumber::from(1);
+    create_block(&mut conn, block_num);
+
+    let sender = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap();
+    queries::upsert_accounts(&mut conn, &[mock_block_account_update(sender, 0)], block_num)
+        .unwrap();
+
+    let new_note = create_note(sender);
+    let note_index = BlockNoteIndex::new(0, 0).unwrap();
+
+    let note = NoteRecord {
+        block_num,
+        note_index,
+        note_id: new_note.id().as_word(),
+        note_commitment: new_note.commitment(),
+        metadata: *new_note.metadata(),
+        details: Some(NoteDetails::from(&new_note)),
+        inclusion_path: SparseMerklePath::default(),
+    };
+
+    // Insert
+    queries::insert_scripts(&mut conn, [&note]).unwrap();
+    queries::insert_notes(&mut conn, &[(note.clone(), None)]).unwrap();
+
+    // Retrieve
+    let note_ids = vec![NoteId::from_raw(note.note_id)];
+    let retrieved = queries::select_notes_by_id(&mut conn, &note_ids).unwrap();
+
+    assert_eq!(retrieved.len(), 1, "Should have one note");
+    let retrieved_note = &retrieved[0];
+
+    assert_eq!(note.note_id, retrieved_note.note_id, "NoteId DB roundtrip must be symmetric");
+    assert_eq!(
+        note.note_commitment, retrieved_note.note_commitment,
+        "Note commitment DB roundtrip must be symmetric"
+    );
+    assert_eq!(
+        note.metadata, retrieved_note.metadata,
+        "Metadata DB roundtrip must be symmetric"
+    );
+    assert_eq!(
+        note.inclusion_path, retrieved_note.inclusion_path,
+        "Inclusion path DB roundtrip must be symmetric"
+    );
+    assert_eq!(
+        note.details, retrieved_note.details,
+        "Note details DB roundtrip must be symmetric"
+    );
+}
+
+#[test]
+#[miden_node_test_macro::enable_logging]
+fn db_roundtrip_transactions() {
+    let mut conn = create_db();
+    let block_num = BlockNumber::from(1);
+    create_block(&mut conn, block_num);
+
+    let account_id = AccountId::try_from(ACCOUNT_ID_PRIVATE_SENDER).unwrap();
+    queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 1)], block_num)
+        .unwrap();
+
+    let tx = mock_block_transaction(account_id, 1);
+    let ordered_tx = OrderedTransactionHeaders::new_unchecked(vec![tx.clone()]);
+
+    // Insert
+    queries::insert_transactions(&mut conn, block_num, &ordered_tx).unwrap();
+
+    // Retrieve
+    let retrieved = queries::select_transactions_by_accounts_and_block_range(
+        &mut conn,
+        &[account_id],
+        BlockNumber::from(0)..=BlockNumber::from(2),
+    )
+    .unwrap();
+
+    assert_eq!(retrieved.len(), 1, "Should have one transaction");
+    let retrieved_tx = &retrieved[0];
+
+    assert_eq!(
+        tx.account_id(),
+        retrieved_tx.account_id,
+        "AccountId DB roundtrip must be symmetric"
+    );
+    assert_eq!(
+        tx.id(),
+        retrieved_tx.transaction_id,
+        "TransactionId DB roundtrip must be symmetric"
+    );
+    assert_eq!(block_num, retrieved_tx.block_num, "Block number must match");
+}
+
+#[test]
+#[miden_node_test_macro::enable_logging]
+fn db_roundtrip_vault_assets() {
+    let mut conn = create_db();
+    let block_num = BlockNumber::from(1);
+    create_block(&mut conn, block_num);
+
+    let faucet_id = AccountId::try_from(ACCOUNT_ID_PUBLIC_FUNGIBLE_FAUCET).unwrap();
+    let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap();
+
+    // Create account first
+    queries::upsert_accounts(&mut conn, &[mock_block_account_update(account_id, 0)], block_num)
+        .unwrap();
+
+    let fungible_asset = FungibleAsset::new(faucet_id, 5000).unwrap();
+    let asset: Asset = fungible_asset.into();
+    let vault_key = asset.vault_key();
+
+    // Insert vault asset
+    queries::insert_account_vault_asset(&mut conn, account_id, block_num, vault_key, Some(asset))
.unwrap(); + + // Retrieve + let (_, vault_assets) = queries::select_account_vault_assets( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + assert_eq!(vault_assets.len(), 1, "Should have one vault asset"); + let retrieved = &vault_assets[0]; + + assert_eq!(retrieved.asset, Some(asset), "Asset DB roundtrip must be symmetric"); + assert_eq!(retrieved.vault_key, vault_key, "VaultKey DB roundtrip must be symmetric"); + assert_eq!(retrieved.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_storage_map_values() { + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + let account_id = AccountId::try_from(ACCOUNT_ID_REGULAR_PUBLIC_ACCOUNT_IMMUTABLE_CODE).unwrap(); + let slot_name = StorageSlotName::mock(5); + let key = num_to_word(12345); + let value = num_to_word(67890); + + // Insert + queries::insert_account_storage_map_value( + &mut conn, + account_id, + block_num, + slot_name.clone(), + key, + value, + ) + .unwrap(); + + // Retrieve + let page = queries::select_account_storage_map_values( + &mut conn, + account_id, + BlockNumber::GENESIS..=block_num, + ) + .unwrap(); + + assert_eq!(page.values.len(), 1, "Should have one storage map value"); + let retrieved = &page.values[0]; + + assert_eq!(retrieved.slot_name, slot_name, "StorageSlotName DB roundtrip must be symmetric"); + assert_eq!(retrieved.key, key, "Key (Word) DB roundtrip must be symmetric"); + assert_eq!(retrieved.value, value, "Value (Word) DB roundtrip must be symmetric"); + assert_eq!(retrieved.block_num, block_num, "Block number must match"); +} + +#[test] +#[miden_node_test_macro::enable_logging] +fn db_roundtrip_account_storage_with_maps() { + use miden_protocol::account::StorageMap; + + let mut conn = create_db(); + let block_num = BlockNumber::from(1); + create_block(&mut conn, block_num); + + // Create storage with both value slots and map slots + let storage_map = StorageMap::with_entries(vec![ + ( + Word::from([Felt::new(1), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(10), Felt::new(20), Felt::new(30), Felt::new(40)]), + ), + ( + Word::from([Felt::new(2), Felt::ZERO, Felt::ZERO, Felt::ZERO]), + Word::from([Felt::new(50), Felt::new(60), Felt::new(70), Felt::new(80)]), + ), + ]) + .unwrap(); + + let component_storage = vec![ + StorageSlot::with_value(StorageSlotName::mock(0), num_to_word(42)), + StorageSlot::with_map(StorageSlotName::mock(1), storage_map), + StorageSlot::with_empty_value(StorageSlotName::mock(2)), + ]; + + let component_code = "pub proc foo push.1 end"; + let account_component_code = CodeBuilder::default() + .compile_component_code("test::interface", component_code) + .unwrap(); + let account_component = AccountComponent::new(account_component_code, component_storage) + .unwrap() + .with_supports_all_types(); + + let account = AccountBuilder::new([50u8; 32]) + .account_type(AccountType::RegularAccountUpdatableCode) + .storage_mode(AccountStorageMode::Public) + .with_component(account_component) + .with_auth_component(AuthRpoFalcon512::new(PublicKeyCommitment::from(EMPTY_WORD))) + .build_existing() + .unwrap(); + + let account_id = account.id(); + let original_storage = account.storage().clone(); + let original_commitment = original_storage.to_commitment(); + + // Insert the account (this should store header + map values separately) + let account_delta = AccountDelta::try_from(account.clone()).unwrap(); + let block_update = 
BlockAccountUpdate::new( + account_id, + account.commitment(), + AccountUpdateDetails::Delta(account_delta), + ); + queries::upsert_accounts(&mut conn, &[block_update], block_num).unwrap(); + + // Retrieve the storage using select_latest_account_storage (reconstructs from header + map + // values) + let retrieved_storage = queries::select_latest_account_storage(&mut conn, account_id).unwrap(); + let retrieved_commitment = retrieved_storage.to_commitment(); + + // Verify the commitment matches (this proves the reconstruction is correct) + assert_eq!( + original_commitment, retrieved_commitment, + "Storage commitment must match after DB roundtrip" + ); + + // Verify slot count matches + assert_eq!( + original_storage.slots().len(), + retrieved_storage.slots().len(), + "Number of slots must match" + ); + + // Verify each slot + for (original_slot, retrieved_slot) in + original_storage.slots().iter().zip(retrieved_storage.slots().iter()) + { + assert_eq!(original_slot.name(), retrieved_slot.name(), "Slot names must match"); + assert_eq!(original_slot.slot_type(), retrieved_slot.slot_type(), "Slot types must match"); + + match (original_slot.content(), retrieved_slot.content()) { + (StorageSlotContent::Value(orig), StorageSlotContent::Value(retr)) => { + assert_eq!(orig, retr, "Value slot contents must match"); + }, + (StorageSlotContent::Map(orig_map), StorageSlotContent::Map(retr_map)) => { + assert_eq!(orig_map.root(), retr_map.root(), "Map slot roots must match"); + for (key, value) in orig_map.entries() { + let retrieved_value = retr_map.get(key); + assert_eq!(*value, retrieved_value, "Map entry for key {:?} must match", key); + } + }, + // The slot_type assertion above guarantees matching variants, so this is unreachable + _ => unreachable!(), + } + } + + // Also verify full account reconstruction via select_account (which calls select_full_account) + let account_info = queries::select_account(&mut conn, account_id).unwrap(); + assert!(account_info.details.is_some(), "Public account should have details"); + let retrieved_account = account_info.details.unwrap(); + assert_eq!( + account.commitment(), + retrieved_account.commitment(), + "Full account commitment must match after DB roundtrip" + ); } diff --git a/crates/store/src/errors.rs b/crates/store/src/errors.rs index 6a17c60c5..7ac836ed3 100644 --- a/crates/store/src/errors.rs +++ b/crates/store/src/errors.rs @@ -6,13 +6,14 @@ use miden_node_proto::domain::account::NetworkAccountError; use miden_node_proto::domain::block::InvalidBlockRange; use miden_node_proto::errors::{ConversionError, GrpcError}; use miden_node_utils::limiter::QueryLimitError; -use miden_objects::account::AccountId; -use miden_objects::block::BlockNumber; -use miden_objects::crypto::merkle::MmrError; -use miden_objects::crypto::utils::DeserializationError; -use miden_objects::note::Nullifier; -use miden_objects::transaction::OutputNote; -use miden_objects::{ +use miden_protocol::account::AccountId; +use miden_protocol::block::BlockNumber; +use miden_protocol::crypto::merkle::MerkleError; +use miden_protocol::crypto::merkle::mmr::MmrError; +use miden_protocol::crypto::utils::DeserializationError; +use miden_protocol::note::{NoteId, Nullifier}; +use miden_protocol::transaction::OutputNote; +use miden_protocol::{ AccountDeltaError, AccountError, AccountTreeError, @@ -21,6 +22,7 @@ use miden_objects::{ FeeError, NoteError, NullifierTreeError, + StorageMapError, Word, }; use thiserror::Error; @@ -56,11 +58,13 @@ pub enum DatabaseError { #[error("I/O error")] 
 IoError(#[from] io::Error),
     #[error("merkle error")]
-    MerkleError(#[from] miden_objects::crypto::merkle::MerkleError),
+    MerkleError(#[from] MerkleError),
     #[error("network account error")]
     NetworkAccountError(#[from] NetworkAccountError),
     #[error("note error")]
     NoteError(#[from] NoteError),
+    #[error("storage map error")]
+    StorageMapError(#[from] StorageMapError),
     #[error("setup deadpool connection pool failed")]
     Deadpool(#[from] deadpool::managed::PoolError),
     #[error("setup deadpool connection pool failed")]
@@ -98,12 +102,18 @@ pub enum DatabaseError {
     AccountNotFoundInDb(AccountId),
     #[error("account {0} state at block height {1} not found")]
     AccountAtBlockHeightNotFoundInDb(AccountId, BlockNumber),
+    #[error("block {0} not found in database")]
+    BlockNotFound(BlockNumber),
+    #[error("historical block {block_num} not available: {reason}")]
+    HistoricalBlockNotAvailable { block_num: BlockNumber, reason: String },
     #[error("accounts {0:?} not found")]
     AccountsNotFoundInDb(Vec<AccountId>),
     #[error("account {0} is not on the chain")]
     AccountNotPublic(AccountId),
     #[error("invalid block parameters: block_from ({from}) > block_to ({to})")]
     InvalidBlockRange { from: BlockNumber, to: BlockNumber },
+    #[error("invalid storage slot type: {0}")]
+    InvalidStorageSlotType(i32),
     #[error("data corrupted: {0}")]
     DataCorrupted(String),
     #[error("SQLite pool interaction failed: {0}")]
@@ -115,6 +125,8 @@
         Remove all database files and try again."
     )]
     UnsupportedDatabaseVersion,
+    #[error("schema verification failed")]
+    SchemaVerification(#[from] SchemaVerificationError),
     #[error(transparent)]
     ConnectionManager(#[from] ConnectionManagerError),
     #[error(transparent)]
@@ -169,6 +181,10 @@ impl From<DatabaseError> for Status {
 
 #[derive(Error, Debug)]
 pub enum StateInitializationError {
+    #[error("account tree IO error: {0}")]
+    AccountTreeIoError(String),
+    #[error("nullifier tree IO error: {0}")]
+    NullifierTreeIoError(String),
     #[error("database error")]
     DatabaseError(#[from] DatabaseError),
     #[error("failed to create nullifier tree")]
@@ -242,6 +258,8 @@ pub enum InvalidBlockError {
     NewBlockNullifierAlreadySpent(#[source] NullifierTreeError),
     #[error("duplicate account ID prefix in new block")]
     NewBlockDuplicateAccountIdPrefix(#[source] AccountTreeError),
+    #[error("failed to build note tree: {0}")]
+    FailedToBuildNoteTree(String),
 }
 
 #[derive(Error, Debug)]
@@ -333,8 +351,6 @@ pub enum NoteSyncError {
     MmrError(#[from] MmrError),
     #[error("invalid block range")]
     InvalidBlockRange(#[from] InvalidBlockRange),
-    #[error("too many note tags: received {0}, max {1}")]
-    TooManyNoteTags(usize, usize),
     #[error("malformed note tags")]
     DeserializationFailed(#[from] ConversionError),
 }
@@ -420,6 +436,20 @@ pub enum SyncStorageMapsError {
     AccountNotPublic(AccountId),
 }
 
+// GET NETWORK ACCOUNT IDS
+// ================================================================================================
+
+#[derive(Debug, Error, GrpcError)]
+pub enum GetNetworkAccountIdsError {
+    #[error("database error")]
+    #[grpc(internal)]
+    DatabaseError(#[from] DatabaseError),
+    #[error("invalid block range")]
+    InvalidBlockRange(#[from] InvalidBlockRange),
+    #[error("malformed nullifier prefix")]
+    DeserializationFailed(#[from] ConversionError),
+}
+
 // GET BLOCK BY NUMBER ERRORS
 // ================================================================================================
 
@@ -443,11 +473,9 @@ pub enum GetNotesByIdError {
     #[error("malformed note ID")]
     DeserializationFailed(#[from] ConversionError),
     #[error("note {0} not found")]
-    NoteNotFound(miden_objects::note::NoteId),
-    #[error("too many note IDs: received {0}, max {1}")]
-    TooManyNoteIds(usize, usize),
+    NoteNotFound(NoteId),
     #[error("note {0} is not public")]
-    NoteNotPublic(miden_objects::note::NoteId),
+    NoteNotPublic(NoteId),
 }
 
 // GET NOTE SCRIPT BY ROOT ERRORS
@@ -474,8 +502,6 @@ pub enum CheckNullifiersError {
     DatabaseError(#[from] DatabaseError),
     #[error("malformed nullifier")]
     DeserializationFailed(#[from] ConversionError),
-    #[error("too many nullifiers: received {0}, maximum {1}")]
-    TooManyNullifiers(usize, usize),
 }
 
 // SYNC TRANSACTIONS ERRORS
@@ -492,8 +518,30 @@ pub enum SyncTransactionsError {
     DeserializationFailed(#[from] ConversionError),
     #[error("account {0} not found")]
     AccountNotFound(AccountId),
-    #[error("too many account IDs: received {0}, max {1}")]
-    TooManyAccountIds(usize, usize),
+}
+
+// SCHEMA VERIFICATION ERRORS
+// =================================================================================================
+
+/// Errors that can occur during schema verification.
+#[derive(Debug, Error)]
+pub enum SchemaVerificationError {
+    #[error("failed to create in-memory reference database")]
+    InMemoryDbCreation(#[source] diesel::ConnectionError),
+    #[error("failed to apply migrations to reference database")]
+    MigrationApplication(#[source] Box<dyn std::error::Error + Send + Sync>),
+    #[error("failed to extract schema from database")]
+    SchemaExtraction(#[source] diesel::result::Error),
+    #[error(
+        "schema mismatch: expected {expected_count} objects, found {actual_count} \
+        ({missing_count} missing, {extra_count} unexpected)"
+    )]
+    Mismatch {
+        expected_count: usize,
+        actual_count: usize,
+        missing_count: usize,
+        extra_count: usize,
+    },
 }
 
 // Do not scope for `cfg(test)` - if the trait bounds don't suffice the issue will already appear
diff --git a/crates/store/src/genesis/config/errors.rs b/crates/store/src/genesis/config/errors.rs
index 313d390cd..e4eb4810e 100644
--- a/crates/store/src/genesis/config/errors.rs
+++ b/crates/store/src/genesis/config/errors.rs
@@ -1,7 +1,8 @@
-use miden_lib::account::faucets::FungibleFaucetError;
-use miden_lib::account::wallets::BasicWalletError;
-use miden_objects::account::AccountId;
-use miden_objects::{AccountError, AssetError, FeeError, TokenSymbolError};
+use miden_protocol::account::AccountId;
+use miden_protocol::utils::DeserializationError;
+use miden_protocol::{AccountError, AssetError, FeeError, TokenSymbolError};
+use miden_standards::account::faucets::FungibleFaucetError;
+use miden_standards::account::wallets::BasicWalletError;
 
 use crate::genesis::config::TokenSymbolStr;
 
@@ -15,7 +16,7 @@ pub enum GenesisConfigError {
     #[error("asset translation from config to state failed")]
     Asset(#[from] AssetError),
     #[error("adding assets to account failed")]
-    AccountDelta(#[from] miden_objects::AccountDeltaError),
+    AccountDelta(#[from] miden_protocol::AccountDeltaError),
     #[error("the defined asset {symbol:?} has no corresponding faucet")]
     MissingFaucetDefinition { symbol: TokenSymbolStr },
     #[error("account with id {account_id} was referenced but is not part of given genesis state")]
@@ -54,4 +55,8 @@ pub enum GenesisConfigError {
     NativeAssetFaucetIsNotPublic(TokenSymbolStr),
     #[error("faucet account of {0} is not public")]
     NativeAssetFaucitIsNotAFungibleFaucet(TokenSymbolStr),
+    #[error("invalid secret key")]
+    InvalidSecretKey(#[from] DeserializationError),
+    #[error("provided signer config is not supported")]
+    UnsupportedSignerConfig,
 }
diff --git a/crates/store/src/genesis/config/mod.rs b/crates/store/src/genesis/config/mod.rs
index 193d2f105..ed0c0077c 100644
--- a/crates/store/src/genesis/config/mod.rs
+++ b/crates/store/src/genesis/config/mod.rs
@@ -4,19 +4,15 @@ use std::cmp::Ordering;
 use std::str::FromStr;
 
 use indexmap::IndexMap;
-use miden_lib::AuthScheme;
-use miden_lib::account::auth::AuthRpoFalcon512;
-use miden_lib::account::faucets::BasicFungibleFaucet;
-use miden_lib::account::wallets::create_basic_wallet;
-use miden_lib::transaction::memory;
 use miden_node_utils::crypto::get_rpo_random_coin;
-use miden_objects::account::auth::AuthSecretKey;
-use miden_objects::account::{
+use miden_protocol::account::auth::AuthSecretKey;
+use miden_protocol::account::{
     Account,
     AccountBuilder,
     AccountDelta,
     AccountFile,
     AccountId,
+    AccountStorage,
     AccountStorageDelta,
     AccountStorageMode,
     AccountType,
@@ -24,10 +20,14 @@ use miden_objects::account::{
     FungibleAssetDelta,
     NonFungibleAssetDelta,
 };
-use miden_objects::asset::{FungibleAsset, TokenSymbol};
-use miden_objects::block::FeeParameters;
-use miden_objects::crypto::dsa::rpo_falcon512::SecretKey;
-use miden_objects::{Felt, FieldElement, ONE, TokenSymbolError, ZERO};
+use miden_protocol::asset::{FungibleAsset, TokenSymbol};
+use miden_protocol::block::FeeParameters;
+use miden_protocol::crypto::dsa::falcon512_rpo::SecretKey as RpoSecretKey;
+use miden_protocol::{Felt, FieldElement, ONE, TokenSymbolError, ZERO};
+use miden_standards::AuthScheme;
+use miden_standards::account::auth::AuthRpoFalcon512;
+use miden_standards::account::faucets::BasicFungibleFaucet;
+use miden_standards::account::wallets::create_basic_wallet;
 use rand::distr::weighted::Weight;
 use rand::{Rng, SeedableRng};
 use rand_chacha::ChaCha20Rng;
@@ -94,7 +94,10 @@ impl GenesisConfig {
     ///
     /// Also returns the set of secrets for the generated accounts.
     #[allow(clippy::too_many_lines)]
-    pub fn into_state(self) -> Result<(GenesisState, AccountSecrets), GenesisConfigError> {
+    pub fn into_state<S>(
+        self,
+        signer: S,
+    ) -> Result<(GenesisState<S>, AccountSecrets), GenesisConfigError> {
         let GenesisConfig {
             version,
             timestamp,
@@ -102,6 +105,7 @@ impl GenesisConfig {
             fee_parameters,
             fungible_faucet: fungible_faucet_configs,
             wallet: wallet_configs,
+            ..
 } = self;
 
         let symbol = native_faucet.symbol.clone();
@@ -154,7 +158,7 @@ impl GenesisConfig {
             tracing::debug!("Adding wallet account {index} with {assets:?}");
 
             let mut rng = ChaCha20Rng::from_seed(rand::random());
-            let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng));
+            let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng));
             let auth = AuthScheme::RpoFalcon512 { pub_key: secret_key.public_key().into() };
 
             let init_seed: [u8; 32] = rng.random();
@@ -215,9 +219,9 @@ impl GenesisConfig {
         if total_issuance != 0 {
             // slot 0
             storage_delta.set_item(
-                memory::FAUCET_STORAGE_DATA_SLOT,
+                AccountStorage::faucet_sysdata_slot().clone(),
                 [ZERO, ZERO, ZERO, Felt::new(total_issuance)].into(),
-            );
+            )?;
             tracing::debug!(
                 "Reducing faucet account {faucet} for {symbol} by {amount}",
                 faucet = faucet_id.to_hex(),
@@ -263,6 +267,7 @@ impl GenesisConfig {
                 accounts: all_accounts,
                 version,
                 timestamp,
+                block_signer: signer,
             },
             AccountSecrets { secrets },
         ))
@@ -332,7 +337,7 @@ pub struct FungibleFaucetConfig {
 
 impl FungibleFaucetConfig {
     /// Create a fungible faucet from a config entry
-    fn build_account(self) -> Result<(Account, SecretKey), GenesisConfigError> {
+    fn build_account(self) -> Result<(Account, RpoSecretKey), GenesisConfigError> {
         let FungibleFaucetConfig {
             symbol,
             decimals,
@@ -340,7 +345,7 @@ impl FungibleFaucetConfig {
             storage_mode,
         } = self;
         let mut rng = ChaCha20Rng::from_seed(rand::random());
-        let secret_key = SecretKey::with_rng(&mut get_rpo_random_coin(&mut rng));
+        let secret_key = RpoSecretKey::with_rng(&mut get_rpo_random_coin(&mut rng));
         let auth = AuthRpoFalcon512::new(secret_key.public_key().into());
 
         let init_seed: [u8; 32] = rng.random();
@@ -426,7 +431,7 @@ pub struct AccountFileWithName {
 
 #[derive(Debug, Clone)]
 pub struct AccountSecrets {
     // name, account, private key, account seed
-    pub secrets: Vec<(String, AccountId, SecretKey)>,
+    pub secrets: Vec<(String, AccountId, RpoSecretKey)>,
 }
 
 impl AccountSecrets {
@@ -434,10 +439,10 @@ impl AccountSecrets {
     ///
     /// If no name is present, a new one is generated based on the current time
     /// and the index in
-    pub fn as_account_files(
+    pub fn as_account_files<S>(
         &self,
-        genesis_state: &GenesisState,
-    ) -> impl Iterator<Item = Result<AccountFileWithName, GenesisConfigError>> + use<'_> {
+        genesis_state: &GenesisState<S>,
+    ) -> impl Iterator<Item = Result<AccountFileWithName, GenesisConfigError>> + use<'_, S> {
         let account_lut = IndexMap::<AccountId, Account>::from_iter(
             genesis_state.accounts.iter().map(|account| (account.id(), account.clone())),
         );
diff --git a/crates/store/src/genesis/config/tests.rs b/crates/store/src/genesis/config/tests.rs
index 2f7fd15e4..23e2daa43 100644
--- a/crates/store/src/genesis/config/tests.rs
+++ b/crates/store/src/genesis/config/tests.rs
@@ -1,6 +1,6 @@
 use assert_matches::assert_matches;
-use miden_lib::transaction::memory;
-use miden_objects::ONE;
+use miden_protocol::ONE;
+use miden_protocol::crypto::dsa::ecdsa_k256_keccak::SecretKey;
 
 use super::*;
 
@@ -11,7 +11,7 @@ type TestResult = Result<(), Box<dyn std::error::Error>>;
 fn parsing_yields_expected_default_values() -> TestResult {
     let s = include_str!("./samples/01-simple.toml");
     let gcfg = GenesisConfig::read_toml(s)?;
-    let (state, _secrets) = gcfg.into_state()?;
+    let (state, _secrets) = gcfg.into_state(SecretKey::new())?;
     let _ = state;
     // faucets always precede wallet accounts
     let native_faucet = state.accounts[0].clone();
@@ -45,7 +45,7 @@ fn parsing_yields_expected_default_values() -> TestResult {
 
     // check total issuance of the faucet
     assert_eq!(
-        native_faucet.storage().get_item(memory::FAUCET_STORAGE_DATA_SLOT).unwrap()[3],
+        native_faucet.storage().get_item(AccountStorage::faucet_sysdata_slot()).unwrap()[3],
         Felt::new(999_777),
         "Issuance mismatch"
     );
@@ -57,7 +57,7 @@
 #[miden_node_test_macro::enable_logging]
 fn genesis_accounts_have_nonce_one() -> TestResult {
     let gcfg = GenesisConfig::default();
-    let (state, secrets) = gcfg.into_state().unwrap();
+    let (state, secrets) = gcfg.into_state(SecretKey::new()).unwrap();
     let mut iter = secrets.as_account_files(&state);
     let AccountFileWithName { account_file: status_quo, .. } = iter.next().unwrap().unwrap();
     assert!(iter.next().is_none());
diff --git a/crates/store/src/genesis/mod.rs b/crates/store/src/genesis/mod.rs
index cad4d51c9..5df1825d6 100644
--- a/crates/store/src/genesis/mod.rs
+++ b/crates/store/src/genesis/mod.rs
@@ -1,20 +1,22 @@
-use miden_lib::transaction::TransactionKernel;
-use miden_objects::Word;
-use miden_objects::account::delta::AccountUpdateDetails;
-use miden_objects::account::{Account, AccountDelta};
-use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key};
-use miden_objects::block::{
+use miden_protocol::Word;
+use miden_protocol::account::delta::AccountUpdateDetails;
+use miden_protocol::account::{Account, AccountDelta};
+use miden_protocol::block::account_tree::{AccountTree, account_id_to_smt_key};
+use miden_protocol::block::{
     BlockAccountUpdate,
+    BlockBody,
     BlockHeader,
     BlockNoteTree,
     BlockNumber,
+    BlockProof,
+    BlockSigner,
     FeeParameters,
     ProvenBlock,
 };
-use miden_objects::crypto::merkle::{Forest, LargeSmt, MemoryStorage, MmrPeaks, Smt};
-use miden_objects::note::Nullifier;
-use miden_objects::transaction::OrderedTransactionHeaders;
-use miden_objects::utils::serde::{ByteReader, Deserializable, DeserializationError};
+use miden_protocol::crypto::merkle::mmr::{Forest, MmrPeaks};
+use miden_protocol::crypto::merkle::smt::{LargeSmt, MemoryStorage, Smt};
+use miden_protocol::note::Nullifier;
+use miden_protocol::transaction::{OrderedTransactionHeaders, TransactionKernel};
 
 use crate::errors::GenesisError;
 
@@ -25,11 +27,12 @@ pub mod config;
 
 /// Represents the state at genesis, which will be used to derive the genesis block.
 #[derive(Clone, Debug, PartialEq, Eq)]
-pub struct GenesisState {
+pub struct GenesisState<S> {
     pub accounts: Vec<Account>,
     pub fee_parameters: FeeParameters,
     pub version: u32,
     pub timestamp: u32,
+    pub block_signer: S,
 }
 
 /// A type-safety wrapper ensuring that genesis block data can only be created from
@@ -46,21 +49,25 @@ impl GenesisBlock {
     }
 }
 
-impl GenesisState {
+impl<S> GenesisState<S> {
     pub fn new(
         accounts: Vec<Account>,
         fee_parameters: FeeParameters,
         version: u32,
         timestamp: u32,
+        signer: S,
     ) -> Self {
         Self {
             accounts,
             fee_parameters,
             version,
             timestamp,
+            block_signer: signer,
         }
     }
+}
 
+impl<S: BlockSigner> GenesisState<S> {
     /// Returns the block header and the account SMT
     pub fn into_block(self) -> Result<GenesisBlock, GenesisError> {
         let accounts: Vec<BlockAccountUpdate> = self
@@ -113,36 +120,24 @@
             empty_block_note_tree.root(),
             Word::empty(),
             TransactionKernel.to_commitment(),
-            Word::empty(),
+            self.block_signer.public_key(),
             self.fee_parameters,
             self.timestamp,
         );
 
-        // SAFETY: Header and accounts should be valid by construction.
-        // No notes or nullifiers are created at genesis, which is consistent with the above empty
-        // block note tree root and empty nullifier tree root.
-        Ok(GenesisBlock(ProvenBlock::new_unchecked(
-            header,
+        let body = BlockBody::new_unchecked(
             accounts,
             empty_output_notes,
             empty_nullifiers,
             empty_transactions,
-        )))
-    }
-}
-
-// SERIALIZATION
-// ================================================================================================
-
-impl Deserializable for GenesisState {
-    fn read_from<R: ByteReader>(source: &mut R) -> Result<Self, DeserializationError> {
-        let num_accounts = source.read_usize()?;
-        let accounts = source.read_many::<Account>(num_accounts)?;
+        );
 
-        let version = source.read_u32()?;
-        let timestamp = source.read_u32()?;
-        let fee_parameters = source.read::<FeeParameters>()?;
+        let block_proof = BlockProof::new_dummy();
 
-        Ok(Self::new(accounts, fee_parameters, version, timestamp))
+        let signature = self.block_signer.sign(&header);
+        // SAFETY: Header and accounts should be valid by construction.
+        // No notes or nullifiers are created at genesis, which is consistent with the above empty
+        // block note tree root and empty nullifier tree root.
+        Ok(GenesisBlock(ProvenBlock::new_unchecked(header, body, signature, block_proof)))
     }
 }
diff --git a/crates/store/src/lib.rs b/crates/store/src/lib.rs
index d50f124f7..636225da1 100644
--- a/crates/store/src/lib.rs
+++ b/crates/store/src/lib.rs
@@ -1,5 +1,3 @@
-use std::time::Duration;
-
 mod accounts;
 mod blocks;
 mod db;
@@ -8,18 +6,10 @@ pub mod genesis;
 mod server;
 pub mod state;
 
-pub use accounts::{
-    AccountTreeStorage,
-    AccountTreeWithHistory,
-    HistoricalError,
-    InMemoryAccountTree,
-};
+pub use accounts::{AccountTreeWithHistory, HistoricalError, InMemoryAccountTree};
 pub use genesis::GenesisState;
 pub use server::{DataDirectory, Store};
 
 // CONSTANTS
 // =================================================================================================
 
 const COMPONENT: &str = "miden-store";
-
-/// How often to run the database maintenance routine.
-const DATABASE_MAINTENANCE_INTERVAL: Duration = Duration::from_secs(24 * 60 * 60);
diff --git a/crates/store/src/server/api.rs b/crates/store/src/server/api.rs
index cd7d27cef..6974b8dcb 100644
--- a/crates/store/src/server/api.rs
+++ b/crates/store/src/server/api.rs
@@ -4,10 +4,10 @@ use std::sync::Arc;
 use miden_node_proto::errors::ConversionError;
 use miden_node_proto::generated as proto;
 use miden_node_utils::ErrorReport;
-use miden_objects::Word;
-use miden_objects::account::AccountId;
-use miden_objects::block::BlockNumber;
-use miden_objects::note::Nullifier;
+use miden_protocol::Word;
+use miden_protocol::account::AccountId;
+use miden_protocol::block::BlockNumber;
+use miden_protocol::note::Nullifier;
 use tonic::{Request, Response, Status};
 use tracing::{info, instrument};
 
@@ -25,8 +25,8 @@ impl StoreApi {
     /// Shared implementation for all `get_block_header_by_number` endpoints.
     pub async fn get_block_header_by_number_inner(
         &self,
-        request: Request<proto::shared::BlockHeaderByNumberRequest>,
-    ) -> Result<Response<proto::shared::BlockHeaderByNumberResponse>, Status> {
+        request: Request<proto::rpc::BlockHeaderByNumberRequest>,
+    ) -> Result<Response<proto::rpc::BlockHeaderByNumberResponse>, Status> {
         info!(target: COMPONENT, ?request);
 
         let request = request.into_inner();
@@ -36,7 +36,7 @@ impl StoreApi {
             .get_block_header(block_num, request.include_mmr_proof.unwrap_or(false))
             .await?;
 
-        Ok(Response::new(proto::shared::BlockHeaderByNumberResponse {
+        Ok(Response::new(proto::rpc::BlockHeaderByNumberResponse {
             block_header: block_header.map(Into::into),
             chain_length: mmr_proof.as_ref().map(|p| p.forest.num_leaves() as u32),
             mmr_path: mmr_proof.map(|p| Into::into(&p.merkle_path)),
@@ -64,9 +64,9 @@ pub fn conversion_error_to_status(value: &ConversionError) -> Status {
 
 /// Reads a block range from a request, returning a specific error type if the field is missing
 pub fn read_block_range(
-    block_range: Option<proto::shared::BlockRange>,
+    block_range: Option<proto::rpc::BlockRange>,
     entity: &'static str,
-) -> Result
+) -> Result
 where
     E: From,
 {
@@ -128,7 +128,7 @@ where
     id.ok_or_else(|| {
         ConversionError::deserialization_error(
             "AccountId",
-            miden_objects::crypto::utils::DeserializationError::InvalidValue(
+            miden_protocol::crypto::utils::DeserializationError::InvalidValue(
                 "Missing account ID".to_string(),
             ),
         )
diff --git a/crates/store/src/server/block_producer.rs b/crates/store/src/server/block_producer.rs
index 57cad0a7a..73b83de22 100644
--- a/crates/store/src/server/block_producer.rs
+++ b/crates/store/src/server/block_producer.rs
@@ -1,12 +1,12 @@
 use std::convert::Infallible;
 
-use miden_node_proto::generated::block_producer_store::block_producer_server;
+use miden_node_proto::generated::store::block_producer_server;
 use miden_node_proto::generated::{self as proto};
 use miden_node_proto::try_convert;
 use miden_node_utils::ErrorReport;
-use miden_objects::Word;
-use miden_objects::block::{BlockNumber, ProvenBlock};
-use miden_objects::utils::Deserializable;
+use miden_protocol::Word;
+use miden_protocol::block::{BlockNumber, ProvenBlock};
+use miden_protocol::utils::Deserializable;
 use tonic::{Request, Response, Status};
 use tracing::{debug, info, instrument};
 
@@ -40,8 +40,8 @@ impl block_producer_server::BlockProducer for StoreApi {
     )]
     async fn get_block_header_by_number(
         &self,
-        request: Request<proto::shared::BlockHeaderByNumberRequest>,
-    ) -> Result<Response<proto::shared::BlockHeaderByNumberResponse>, Status> {
+        request: Request<proto::rpc::BlockHeaderByNumberRequest>,
+    ) -> Result<Response<proto::rpc::BlockHeaderByNumberResponse>, Status> {
         self.get_block_header_by_number_inner(request).await
     }
 
@@ -71,10 +71,10 @@ impl block_producer_server::BlockProducer for StoreApi {
         info!(
             target: COMPONENT,
             block_num,
-            block_commitment = %block.commitment(),
-            account_count = block.updated_accounts().len(),
-            note_count = block.output_notes().count(),
-            nullifier_count = block.created_nullifiers().len(),
+            block_commitment = %block.header().commitment(),
+            account_count = block.body().updated_accounts().len(),
+            note_count = block.body().output_notes().count(),
+            nullifier_count = block.body().created_nullifiers().len(),
         );
 
         self.state.apply_block(block).await?;
@@ -93,8 +93,8 @@ impl block_producer_server::BlockProducer for StoreApi {
     )]
     async fn get_block_inputs(
         &self,
-        request: Request<proto::block_producer_store::BlockInputsRequest>,
-    ) -> Result<Response<proto::block_producer_store::BlockInputs>, Status> {
+        request: Request<proto::store::BlockInputsRequest>,
+    ) -> Result<Response<proto::store::BlockInputs>, Status> {
         let request = request.into_inner();
 
         let account_ids = read_account_ids::(&request.account_ids)?;
@@ -114,7 +114,7 @@ impl block_producer_server::BlockProducer for StoreApi {
             reference_blocks,
         )
         .await
-        .map(proto::block_producer_store::BlockInputs::from)
+        .map(proto::store::BlockInputs::from)
         .map(Response::new)
         .map_err(internal_error)
     }
@@ -132,8 +132,8 @@
     )]
async fn get_batch_inputs( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let note_commitments: Vec = try_convert(request.note_commitments) @@ -164,8 +164,8 @@ impl block_producer_server::BlockProducer for StoreApi { )] async fn get_transaction_inputs( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); debug!(target: COMPONENT, ?request); @@ -183,17 +183,19 @@ impl block_producer_server::BlockProducer for StoreApi { let block_height = self.state.latest_block_num().await.as_u32(); - Ok(Response::new(proto::block_producer_store::TransactionInputs { - account_state: Some(proto::block_producer_store::transaction_inputs::AccountTransactionInputRecord { + Ok(Response::new(proto::store::TransactionInputs { + account_state: Some(proto::store::transaction_inputs::AccountTransactionInputRecord { account_id: Some(account_id.into()), account_commitment: Some(tx_inputs.account_commitment.into()), }), nullifiers: tx_inputs .nullifiers .into_iter() - .map(|nullifier| proto::block_producer_store::transaction_inputs::NullifierTransactionInputRecord { - nullifier: Some(nullifier.nullifier.into()), - block_num: nullifier.block_num.as_u32(), + .map(|nullifier| { + proto::store::transaction_inputs::NullifierTransactionInputRecord { + nullifier: Some(nullifier.nullifier.into()), + block_num: nullifier.block_num.as_u32(), + } }) .collect(), found_unauthenticated_notes: tx_inputs diff --git a/crates/store/src/server/db_maintenance.rs b/crates/store/src/server/db_maintenance.rs deleted file mode 100644 index fce267677..000000000 --- a/crates/store/src/server/db_maintenance.rs +++ /dev/null @@ -1,35 +0,0 @@ -use std::sync::Arc; -use std::time::Duration; - -use miden_node_utils::tracing::OpenTelemetrySpanExt; -use tracing::{Instrument, Span}; - -use crate::state::State; - -pub struct DbMaintenance { - state: Arc, - optimization_interval: Duration, -} - -impl DbMaintenance { - pub fn new(state: Arc, optimization_interval: Duration) -> Self { - Self { state, optimization_interval } - } - - /// Runs infinite maintenance loop. 
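For context on the `db_maintenance.rs` deletion in this hunk: the service was a periodic background task that slept for a fixed interval, ran a fallible optimization pass, and recorded failures instead of crashing. Below is a minimal sketch of that pattern, assuming a tokio runtime; `optimize` is a hypothetical stand-in for the removed `State::optimize_db`, and error reporting is simplified to stderr rather than the span-based recording the real code used:

```rust
use std::time::Duration;

/// Hypothetical stand-in for the removed database optimization pass.
async fn optimize() -> Result<(), String> {
    Ok(()) // stub
}

/// Sleep-then-work loop: failures are reported and the loop keeps running.
async fn maintenance_loop(interval: Duration) {
    loop {
        tokio::time::sleep(interval).await;
        if let Err(err) = optimize().await {
            eprintln!("optimization failed: {err}");
        }
    }
}

#[tokio::main]
async fn main() {
    // Spawn the loop as a background task (aborted immediately in this sketch).
    let handle = tokio::spawn(maintenance_loop(Duration::from_secs(1)));
    handle.abort();
}
```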
- pub async fn run(self) { - loop { - tokio::time::sleep(self.optimization_interval).await; - - let root_span = tracing::info_span!( - "optimize_database", - interval = self.optimization_interval.as_secs_f32() - ); - self.state - .optimize_db() - .instrument(root_span) - .await - .unwrap_or_else(|err| Span::current().set_error(&err)); - } - } -} diff --git a/crates/store/src/server/mod.rs b/crates/store/src/server/mod.rs index 2dd41fca0..de51256ad 100644 --- a/crates/store/src/server/mod.rs +++ b/crates/store/src/server/mod.rs @@ -4,15 +4,15 @@ use std::sync::Arc; use std::time::Duration; use anyhow::Context; -use miden_node_proto::generated::{block_producer_store, ntx_builder_store, rpc_store}; +use miden_node_proto::generated::store; use miden_node_proto_build::{ store_block_producer_api_descriptor, store_ntx_builder_api_descriptor, store_rpc_api_descriptor, - store_shared_api_descriptor, }; use miden_node_utils::panic::{CatchPanicLayer, catch_panic_layer_fn}; use miden_node_utils::tracing::grpc::grpc_trace_fn; +use miden_protocol::block::BlockSigner; use tokio::net::TcpListener; use tokio::task::JoinSet; use tokio_stream::wrappers::TcpListenerStream; @@ -21,13 +21,11 @@ use tracing::{info, instrument}; use crate::blocks::BlockStore; use crate::db::Db; -use crate::server::db_maintenance::DbMaintenance; use crate::state::State; -use crate::{COMPONENT, DATABASE_MAINTENANCE_INTERVAL, GenesisState}; +use crate::{COMPONENT, GenesisState}; mod api; mod block_producer; -mod db_maintenance; mod ntx_builder; mod rpc_api; @@ -51,7 +49,10 @@ impl Store { skip_all, err, )] - pub fn bootstrap(genesis: GenesisState, data_directory: &Path) -> anyhow::Result<()> { + pub fn bootstrap( + genesis: GenesisState, + data_directory: &Path, + ) -> anyhow::Result<()> { let genesis = genesis .into_block() .context("failed to convert genesis configuration into the genesis block")?; @@ -92,24 +93,19 @@ impl Store { let state = Arc::new(State::load(&self.data_directory).await.context("failed to load state")?); - let db_maintenance_service = - DbMaintenance::new(Arc::clone(&state), DATABASE_MAINTENANCE_INTERVAL); - let rpc_service = - rpc_store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); - let ntx_builder_service = - ntx_builder_store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { - state: Arc::clone(&state), - }); + store::rpc_server::RpcServer::new(api::StoreApi { state: Arc::clone(&state) }); + let ntx_builder_service = store::ntx_builder_server::NtxBuilderServer::new(api::StoreApi { + state: Arc::clone(&state), + }); let block_producer_service = - block_producer_store::block_producer_server::BlockProducerServer::new(api::StoreApi { + store::block_producer_server::BlockProducerServer::new(api::StoreApi { state: Arc::clone(&state), }); let reflection_service = tonic_reflection::server::Builder::configure() .register_file_descriptor_set(store_rpc_api_descriptor()) .register_file_descriptor_set(store_ntx_builder_api_descriptor()) .register_file_descriptor_set(store_block_producer_api_descriptor()) - .register_file_descriptor_set(store_shared_api_descriptor()) .build_v1() .context("failed to build reflection service")?; @@ -121,7 +117,6 @@ impl Store { .register_file_descriptor_set(store_rpc_api_descriptor()) .register_file_descriptor_set(store_ntx_builder_api_descriptor()) .register_file_descriptor_set(store_block_producer_api_descriptor()) - .register_file_descriptor_set(store_shared_api_descriptor()) .build_v1alpha() .context("failed to build reflection service")?; @@ 
-129,11 +124,6 @@ impl Store { let mut join_set = JoinSet::new(); - join_set.spawn(async move { - db_maintenance_service.run().await; - Ok(()) - }); - // Build the gRPC server with the API services and trace layer. join_set.spawn( tonic::transport::Server::builder() @@ -183,7 +173,7 @@ impl DataDirectory { /// Creates a new [`DataDirectory`], ensuring that the directory exists and is accessible /// insofar as is possible. pub fn load(path: PathBuf) -> std::io::Result { - let meta = std::fs::metadata(&path)?; + let meta = fs_err::metadata(&path)?; if meta.is_dir().not() { return Err(std::io::ErrorKind::NotConnected.into()); } diff --git a/crates/store/src/server/ntx_builder.rs b/crates/store/src/server/ntx_builder.rs index 1f2dd1595..ba8e82b4f 100644 --- a/crates/store/src/server/ntx_builder.rs +++ b/crates/store/src/server/ntx_builder.rs @@ -1,18 +1,19 @@ use std::num::{NonZero, TryFromIntError}; use miden_node_proto::domain::account::{AccountInfo, NetworkAccountPrefix}; -use miden_node_proto::generated::ntx_builder_store::ntx_builder_server; +use miden_node_proto::generated::rpc::BlockRange; +use miden_node_proto::generated::store::ntx_builder_server; use miden_node_proto::generated::{self as proto}; use miden_node_utils::ErrorReport; -use miden_objects::block::BlockNumber; -use miden_objects::note::Note; +use miden_protocol::block::BlockNumber; +use miden_protocol::note::Note; use tonic::{Request, Response, Status}; use tracing::{debug, instrument}; use crate::COMPONENT; use crate::db::models::Page; -use crate::errors::GetNoteScriptByRootError; -use crate::server::api::{StoreApi, internal_error, invalid_argument, read_root}; +use crate::errors::{GetNetworkAccountIdsError, GetNoteScriptByRootError}; +use crate::server::api::{StoreApi, internal_error, invalid_argument, read_block_range, read_root}; // NTX BUILDER ENDPOINTS // ================================================================================================ @@ -32,8 +33,8 @@ impl ntx_builder_server::NtxBuilder for StoreApi { )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { self.get_block_header_by_number_inner(request).await } @@ -53,7 +54,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { async fn get_current_blockchain_data( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { let block_num = request.into_inner().block_num.map(BlockNumber::from); let response = match self @@ -62,11 +63,11 @@ impl ntx_builder_server::NtxBuilder for StoreApi { .await .map_err(internal_error)? 
{ - Some((header, peaks)) => proto::ntx_builder_store::CurrentBlockchainData { + Some((header, peaks)) => proto::store::CurrentBlockchainData { current_peaks: peaks.peaks().iter().map(Into::into).collect(), current_block_header: Some(header.into()), }, - None => proto::ntx_builder_store::CurrentBlockchainData { + None => proto::store::CurrentBlockchainData { current_peaks: vec![], current_block_header: None, }, @@ -85,8 +86,8 @@ impl ntx_builder_server::NtxBuilder for StoreApi { )] async fn get_network_account_details_by_prefix( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); // Validate that the call is for a valid network account prefix @@ -98,7 +99,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { let account_info: Option = self.state.get_network_account_details_by_prefix(prefix.inner()).await?; - Ok(Response::new(proto::ntx_builder_store::MaybeAccountDetails { + Ok(Response::new(proto::store::MaybeAccountDetails { details: account_info.map(|acc| (&acc).into()), })) } @@ -112,9 +113,14 @@ impl ntx_builder_server::NtxBuilder for StoreApi { )] async fn get_unconsumed_network_notes( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); + let block_num = BlockNumber::from(request.block_num); + let network_account_id_prefix = + NetworkAccountPrefix::try_from(request.network_account_id_prefix).map_err(|err| { + invalid_argument(err.as_report_context("invalid network_account_id_prefix")) + })?; let state = self.state.clone(); @@ -125,8 +131,10 @@ impl ntx_builder_server::NtxBuilder for StoreApi { let page = Page { token: request.page_token, size }; // TODO: no need to get the whole NoteRecord here, a NetworkNote wrapper should be created // instead - let (notes, next_page) = - state.get_unconsumed_network_notes(page).await.map_err(internal_error)?; + let (notes, next_page) = state + .get_unconsumed_network_notes_for_account(network_account_id_prefix, block_num, page) + .await + .map_err(internal_error)?; let mut network_notes = Vec::with_capacity(notes.len()); for note in notes { @@ -137,71 +145,72 @@ impl ntx_builder_server::NtxBuilder for StoreApi { network_notes.push(note.into()); } - Ok(Response::new(proto::ntx_builder_store::UnconsumedNetworkNotes { + Ok(Response::new(proto::store::UnconsumedNetworkNotes { notes: network_notes, next_token: next_page.token, })) } + /// Returns network account IDs within the specified block range (based on account creation + /// block). + /// + /// The function may return fewer accounts than exist in the range if the result would exceed + /// `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows. In this case, the result is + /// truncated at a block boundary to ensure all accounts from included blocks are returned. + /// + /// The response includes pagination info with the last block number that was fully included. 
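Given the truncation rule in the doc comment above, a caller presumably pages by resuming from `block_num + 1` until `block_num` reaches `chain_tip`. A self-contained sketch of that loop follows; this is not the node's real client API — `fetch_ids` and the struct below are simplified stand-ins for the `GetNetworkAccountIds` call and `proto::rpc::PaginationInfo`:

```rust
/// Simplified mirror of the pagination fields used above.
struct PaginationInfo {
    chain_tip: u32,
    /// Last block whose accounts were fully included in the response.
    block_num: u32,
}

/// Hypothetical stand-in for the gRPC call: returns account IDs created in
/// blocks starting at `from_block`, truncated at a block boundary.
fn fetch_ids(from_block: u32) -> (Vec<u64>, PaginationInfo) {
    const CHAIN_TIP: u32 = 10;
    // Pretend each block created one account and the server truncates after
    // four blocks' worth of rows.
    let to = (from_block + 3).min(CHAIN_TIP);
    let ids = (from_block..=to).map(u64::from).collect();
    (ids, PaginationInfo { chain_tip: CHAIN_TIP, block_num: to })
}

fn main() {
    let mut from = 0u32;
    let mut all_ids = Vec::new();
    loop {
        let (ids, info) = fetch_ids(from);
        all_ids.extend(ids);
        if info.block_num >= info.chain_tip {
            break; // caught up with the chain tip
        }
        from = info.block_num + 1; // resume after the last fully included block
    }
    // Blocks 0..=10, one account each.
    assert_eq!(all_ids.len(), 11);
}
```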
#[instrument( parent = None, target = COMPONENT, - name = "store.ntx_builder_server.get_unconsumed_network_notes_for_account", + name = "store.ntx_builder_server.get_network_account_ids", skip_all, + ret(level = "debug"), err )] - async fn get_unconsumed_network_notes_for_account( + async fn get_network_account_ids( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); - let block_num = BlockNumber::from(request.block_num); - let network_account_id_prefix = - NetworkAccountPrefix::try_from(request.network_account_id_prefix).map_err(|err| { - invalid_argument(err.as_report_context("invalid network_account_id_prefix")) - })?; - let state = self.state.clone(); + let mut chain_tip = self.state.latest_block_num().await; + let block_range = + read_block_range::(Some(request), "GetNetworkAccountIds")? + .into_inclusive_range::(&chain_tip)?; - let size = - NonZero::try_from(request.page_size as usize).map_err(|err: TryFromIntError| { - invalid_argument(err.as_report_context("invalid page_size")) - })?; - let page = Page { token: request.page_token, size }; - // TODO: no need to get the whole NoteRecord here, a NetworkNote wrapper should be created - // instead - let (notes, next_page) = state - .get_unconsumed_network_notes_for_account(network_account_id_prefix, block_num, page) - .await - .map_err(internal_error)?; + let (account_ids, mut last_block_included) = + self.state.get_all_network_accounts(block_range).await.map_err(internal_error)?; - let mut network_notes = Vec::with_capacity(notes.len()); - for note in notes { - // SAFETY: Network notes are filtered in the database, so they should have details; - // otherwise the state would be corrupted - let (assets, recipient) = note.details.unwrap().into_parts(); - let note = Note::new(assets, note.metadata, recipient); - network_notes.push(note.into()); + let account_ids: Vec = + account_ids.into_iter().map(Into::into).collect(); + + if last_block_included > chain_tip { + last_block_included = chain_tip; } - Ok(Response::new(proto::ntx_builder_store::UnconsumedNetworkNotes { - notes: network_notes, - next_token: next_page.token, + chain_tip = self.state.latest_block_num().await; + + Ok(Response::new(proto::store::NetworkAccountIdList { + account_ids, + pagination_info: Some(proto::rpc::PaginationInfo { + chain_tip: chain_tip.as_u32(), + block_num: last_block_included.as_u32(), + }), })) } #[instrument( - parent = None, - target = COMPONENT, - name = "store.ntx_builder_server.get_note_script_by_root", - skip_all, - ret(level = "debug"), - err + parent = None, + target = COMPONENT, + name = "store.ntx_builder_server.get_note_script_by_root", + skip_all, + ret(level = "debug"), + err )] async fn get_note_script_by_root( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); let root = read_root::(request.into_inner().root, "NoteRoot")?; @@ -212,7 +221,7 @@ impl ntx_builder_server::NtxBuilder for StoreApi { .await .map_err(GetNoteScriptByRootError::from)?; - Ok(Response::new(proto::shared::MaybeNoteScript { + Ok(Response::new(proto::rpc::MaybeNoteScript { script: note_script.map(Into::into), })) } diff --git a/crates/store/src/server/rpc_api.rs b/crates/store/src/server/rpc_api.rs index e1c923e27..5ac014868 100644 --- a/crates/store/src/server/rpc_api.rs +++ b/crates/store/src/server/rpc_api.rs @@ -1,10 +1,17 @@ use miden_node_proto::convert; use miden_node_proto::domain::account::AccountInfo; -use 
miden_node_proto::generated::rpc_store::rpc_server; +use miden_node_proto::generated::store::rpc_server; use miden_node_proto::generated::{self as proto}; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::note::NoteId; +use miden_node_utils::limiter::{ + QueryParamAccountIdLimit, + QueryParamLimiter, + QueryParamNoteIdLimit, + QueryParamNoteTagLimit, + QueryParamNullifierLimit, +}; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::note::NoteId; use tonic::{Request, Response, Status}; use tracing::{debug, info, instrument}; @@ -31,14 +38,6 @@ use crate::server::api::{ validate_nullifiers, }; -// CONSTANTS -// ================================================================================================ - -const MAX_ACCOUNT_IDS: usize = 100; -const MAX_NULLIFIERS: usize = 100; -const MAX_NOTE_TAGS: usize = 100; -const MAX_NOTE_IDS: usize = 100; - // CLIENT ENDPOINTS // ================================================================================================ @@ -58,8 +57,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn get_block_header_by_number( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { self.get_block_header_by_number_inner(request).await } @@ -78,26 +77,20 @@ impl rpc_server::Rpc for StoreApi { )] async fn check_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { // Validate the nullifiers and convert them to Word values. Stop on first error. let request = request.into_inner(); // Validate nullifiers count - if request.nullifiers.len() > MAX_NULLIFIERS { - return Err(CheckNullifiersError::TooManyNullifiers( - request.nullifiers.len(), - MAX_NULLIFIERS, - ) - .into()); - } + check::(request.nullifiers.len())?; let nullifiers = validate_nullifiers::(&request.nullifiers)?; // Query the state for the request's nullifiers let proofs = self.state.check_nullifiers(&nullifiers).await; - Ok(Response::new(proto::rpc_store::CheckNullifiersResponse { + Ok(Response::new(proto::rpc::CheckNullifiersResponse { proofs: convert(proofs).collect(), })) } @@ -116,8 +109,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_nullifiers( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); if request.prefix_len != 16 { @@ -137,14 +130,14 @@ impl rpc_server::Rpc for StoreApi { let nullifiers = nullifiers .into_iter() - .map(|nullifier_info| proto::rpc_store::sync_nullifiers_response::NullifierUpdate { + .map(|nullifier_info| proto::rpc::sync_nullifiers_response::NullifierUpdate { nullifier: Some(nullifier_info.nullifier.into()), block_num: nullifier_info.block_num.as_u32(), }) .collect(); - Ok(Response::new(proto::rpc_store::SyncNullifiersResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncNullifiersResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: block_num.as_u32(), }), @@ -165,8 +158,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_state( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let account_ids: Vec = read_account_ids::(&request.account_ids)?; @@ -199,7 +192,7 @@ impl rpc_server::Rpc for StoreApi { let notes = state.notes.into_iter().map(Into::into).collect(); - Ok(Response::new(proto::rpc_store::SyncStateResponse { + 
Ok(Response::new(proto::rpc::SyncStateResponse { chain_tip: self.state.latest_block_num().await.as_u32(), block_header: Some(state.block_header.into()), mmr_delta: Some(delta.into()), @@ -221,8 +214,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_notes( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let chain_tip = self.state.latest_block_num().await; @@ -231,19 +224,15 @@ impl rpc_server::Rpc for StoreApi { .into_inclusive_range::(&chain_tip)?; // Validate note tags count - if request.note_tags.len() > MAX_NOTE_TAGS { - return Err( - NoteSyncError::TooManyNoteTags(request.note_tags.len(), MAX_NOTE_TAGS).into() - ); - } + check::(request.note_tags.len())?; let (state, mmr_proof, last_block_included) = self.state.sync_notes(request.note_tags, block_range).await?; let notes = state.notes.into_iter().map(Into::into).collect(); - Ok(Response::new(proto::rpc_store::SyncNotesResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncNotesResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: last_block_included.as_u32(), }), @@ -275,13 +264,11 @@ impl rpc_server::Rpc for StoreApi { let note_ids = request.into_inner().ids; // Validate note IDs count - if note_ids.len() > MAX_NOTE_IDS { - return Err(GetNotesByIdError::TooManyNoteIds(note_ids.len(), MAX_NOTE_IDS).into()); - } + check::(note_ids.len())?; let note_ids: Vec = convert_digests_to_words::(note_ids)?; - let note_ids: Vec = note_ids.into_iter().map(From::from).collect(); + let note_ids: Vec = note_ids.into_iter().map(NoteId::from_raw).collect(); let notes = self .state @@ -355,8 +342,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn get_account_proof( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, ?request); let request = request.into_inner(); let account_proof_request = request.try_into()?; @@ -377,8 +364,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_account_vault( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let chain_tip = self.state.latest_block_num().await; @@ -404,7 +391,7 @@ impl rpc_server::Rpc for StoreApi { .into_iter() .map(|update| { let vault_key: Word = update.vault_key.into(); - proto::rpc_store::AccountVaultUpdate { + proto::rpc::AccountVaultUpdate { vault_key: Some(vault_key.into()), asset: update.asset.map(Into::into), block_num: update.block_num.as_u32(), @@ -412,8 +399,8 @@ impl rpc_server::Rpc for StoreApi { }) .collect(); - Ok(Response::new(proto::rpc_store::SyncAccountVaultResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncAccountVaultResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: last_included_block.as_u32(), }), @@ -435,8 +422,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_storage_maps( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { let request = request.into_inner(); let account_id = read_account_id::(request.account_id)?; @@ -461,16 +448,16 @@ impl rpc_server::Rpc for StoreApi { let updates = storage_maps_page .values .into_iter() - .map(|map_value| proto::rpc_store::StorageMapUpdate { - slot_index: u32::from(map_value.slot_index), + .map(|map_value| 
proto::rpc::StorageMapUpdate { + slot_name: map_value.slot_name.to_string(), key: Some(map_value.key.into()), value: Some(map_value.value.into()), block_num: map_value.block_num.as_u32(), }) .collect(); - Ok(Response::new(proto::rpc_store::SyncStorageMapsResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncStorageMapsResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: storage_maps_page.last_block_included.as_u32(), }), @@ -490,8 +477,8 @@ impl rpc_server::Rpc for StoreApi { async fn status( &self, _request: Request<()>, - ) -> Result, Status> { - Ok(Response::new(proto::rpc_store::StoreStatus { + ) -> Result, Status> { + Ok(Response::new(proto::rpc::StoreStatus { version: env!("CARGO_PKG_VERSION").to_string(), status: "connected".to_string(), chain_tip: self.state.latest_block_num().await.as_u32(), @@ -509,7 +496,7 @@ impl rpc_server::Rpc for StoreApi { async fn get_note_script_by_root( &self, request: Request, - ) -> Result, Status> { + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); let root = read_root::(request.into_inner().root, "NoteRoot")?; @@ -520,7 +507,7 @@ impl rpc_server::Rpc for StoreApi { .await .map_err(GetNoteScriptByRootError::from)?; - Ok(Response::new(proto::shared::MaybeNoteScript { + Ok(Response::new(proto::rpc::MaybeNoteScript { script: note_script.map(Into::into), })) } @@ -535,8 +522,8 @@ impl rpc_server::Rpc for StoreApi { )] async fn sync_transactions( &self, - request: Request, - ) -> Result, Status> { + request: Request, + ) -> Result, Status> { debug!(target: COMPONENT, request = ?request); let request = request.into_inner(); @@ -552,13 +539,7 @@ impl rpc_server::Rpc for StoreApi { read_account_ids::(&request.account_ids)?; // Validate account IDs count - if account_ids.len() > MAX_ACCOUNT_IDS { - return Err(SyncTransactionsError::TooManyAccountIds( - account_ids.len(), - MAX_ACCOUNT_IDS, - ) - .into()); - } + check::(account_ids.len())?; let (last_block_included, transaction_records_db) = self .state @@ -566,28 +547,61 @@ impl rpc_server::Rpc for StoreApi { .await .map_err(SyncTransactionsError::from)?; + // Collect all note IDs from all transactions to make a single query + let all_notes_ids = transaction_records_db + .iter() + .flat_map(|tx| tx.output_notes.iter()) + .copied() + .collect::>(); + + // Retrieve all note data in a single query + let all_note_records = self + .state + .get_notes_by_id(all_notes_ids) + .await + .map_err(SyncTransactionsError::from)?; + + // Create a map from note ID to note record for efficient lookup + let note_map: std::collections::HashMap<_, _> = all_note_records + .into_iter() + .map(|note_record| (note_record.note_id, note_record)) + .collect(); + // Convert database TransactionRecord to proto TransactionRecord - let mut transaction_records = Vec::with_capacity(transaction_records_db.len()); + let mut transactions = Vec::with_capacity(transaction_records_db.len()); for tx_header in transaction_records_db { - // Retrieve full note data for output notes from the database - let note_records = self - .state - .get_notes_by_id(tx_header.output_notes.clone()) - .await - .map_err(SyncTransactionsError::from)?; + // Get note records for this transaction's output notes + let note_records: Vec<_> = tx_header + .output_notes + .iter() + .filter_map(|note_id| note_map.get(¬e_id.as_word()).cloned()) + .collect(); // Convert to proto using the helper method let proto_record = 
tx_header.into_proto_with_note_records(note_records); - transaction_records.push(proto_record); + transactions.push(proto_record); } - Ok(Response::new(proto::rpc_store::SyncTransactionsResponse { - pagination_info: Some(proto::rpc_store::PaginationInfo { + Ok(Response::new(proto::rpc::SyncTransactionsResponse { + pagination_info: Some(proto::rpc::PaginationInfo { chain_tip: chain_tip.as_u32(), block_num: last_block_included.as_u32(), }), - transaction_records, + transactions, })) } } + +// LIMIT HELPERS +// ================================================================================================ + +/// Formats an error as an "out of range" gRPC `Status`. +fn out_of_range_error(err: E) -> Status { + Status::out_of_range(err.to_string()) +} + +/// Checks a parameter count against a limiter's limit, mapping the error to a `Status` in one place. +fn check(n: usize) -> Result<(), Status> { + ::check(n).map_err(out_of_range_error) +} diff --git a/crates/store/src/state.rs b/crates/store/src/state.rs index b94e57bdb..7c72ae818 100644 --- a/crates/store/src/state.rs +++ b/crates/store/src/state.rs @@ -23,34 +23,22 @@ use miden_node_proto::domain::account::{ use miden_node_proto::domain::batch::BatchInputs; use miden_node_utils::ErrorReport; use miden_node_utils::formatting::format_array; -use miden_objects::account::{AccountHeader, AccountId, StorageSlot}; -use miden_objects::block::account_tree::{AccountTree, account_id_to_smt_key}; -use miden_objects::block::{ - AccountWitness, - BlockHeader, - BlockInputs, - BlockNumber, - Blockchain, - NullifierTree, - NullifierWitness, - ProvenBlock, -}; -use miden_objects::crypto::merkle::{ - Forest, +use miden_protocol::account::{AccountId, StorageSlotContent}; +use miden_protocol::block::account_tree::{AccountTree, AccountWitness, account_id_to_smt_key}; +use miden_protocol::block::nullifier_tree::{NullifierTree, NullifierWitness}; +use miden_protocol::block::{BlockHeader, BlockInputs, BlockNumber, Blockchain, ProvenBlock}; +use miden_protocol::crypto::merkle::mmr::{Forest, MmrDelta, MmrPeaks, MmrProof, PartialMmr}; +use miden_protocol::crypto::merkle::smt::{ LargeSmt, + LargeSmtError, MemoryStorage, - Mmr, - MmrDelta, - MmrPeaks, - MmrProof, - PartialMmr, SmtProof, SmtStorage, }; -use miden_objects::note::{NoteDetails, NoteId, NoteScript, Nullifier}; -use miden_objects::transaction::{OutputNote, PartialBlockchain}; -use miden_objects::utils::Serializable; -use miden_objects::{AccountError, Word}; +use miden_protocol::note::{NoteDetails, NoteId, NoteScript, Nullifier}; +use miden_protocol::transaction::{OutputNote, PartialBlockchain}; +use miden_protocol::utils::Serializable; +use miden_protocol::{AccountError, Word}; use tokio::sync::{Mutex, RwLock, oneshot}; use tracing::{info, info_span, instrument}; @@ -77,7 +65,7 @@ use crate::errors::{ StateInitializationError, StateSyncError, }; -use crate::{AccountTreeWithHistory, COMPONENT, DataDirectory, InMemoryAccountTree}; +use crate::{AccountTreeWithHistory, COMPONENT, DataDirectory}; // STRUCTURES // ================================================================================================ @@ -95,9 +83,9 @@ struct InnerState where S: SmtStorage, { - nullifier_tree: NullifierTree, + nullifier_tree: NullifierTree>, blockchain: Blockchain, - account_tree: AccountTreeWithHistory>>, + account_tree: AccountTreeWithHistory, } impl InnerState where @@ -112,7 +100,10 @@ where } } -/// The rollup state +// CHAIN STATE +// ================================================================================================ + +/// The rollup state.
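The `sync_transactions` change above swaps a per-transaction `get_notes_by_id` query for one batched query plus a `HashMap` lookup, removing an N+1 access pattern. A minimal sketch of the idea with simplified types (`u64` stands in for the real note ID `Word`):

```rust
use std::collections::HashMap;

/// Simplified stand-ins: a transaction lists its output note IDs, and note
/// records are fetched from storage by ID.
struct Tx { output_notes: Vec<u64> }
#[derive(Clone)]
struct NoteRecord { note_id: u64 }

/// Hypothetical stand-in for the single batched storage query.
fn get_notes_by_id(ids: Vec<u64>) -> Vec<NoteRecord> {
    ids.into_iter().map(|note_id| NoteRecord { note_id }).collect()
}

fn main() {
    let txs = vec![Tx { output_notes: vec![1, 2] }, Tx { output_notes: vec![3] }];

    // One query for all note IDs across all transactions...
    let all_ids: Vec<u64> = txs.iter().flat_map(|tx| tx.output_notes.iter()).copied().collect();
    let note_map: HashMap<u64, NoteRecord> =
        get_notes_by_id(all_ids).into_iter().map(|r| (r.note_id, r)).collect();

    // ...then per-transaction lookups become cheap map hits.
    for tx in &txs {
        let records: Vec<_> =
            tx.output_notes.iter().filter_map(|id| note_map.get(id).cloned()).collect();
        assert_eq!(records.len(), tx.output_notes.len());
    }
}
```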
pub struct State { /// The database which stores block headers, nullifiers, notes, and the latest states of /// accounts. @@ -132,6 +123,9 @@ pub struct State { } impl State { + // CONSTRUCTOR + // -------------------------------------------------------------------------------------------- + /// Loads the state from the `db`. #[instrument(target = COMPONENT, skip_all)] pub async fn load(data_path: &Path) -> Result { @@ -148,26 +142,12 @@ impl State { .await .map_err(StateInitializationError::DatabaseLoadError)?; - let chain_mmr = load_mmr(&mut db).await?; - let block_headers = db.select_all_block_headers().await?; - // TODO: Account tree loading synchronization - // Currently `load_account_tree` loads all account commitments from the DB. This could - // potentially lead to inconsistency if the DB contains account states from blocks beyond - // `latest_block_num`, though in practice the DB writes are transactional and this - // should not occur. - let latest_block_num = block_headers - .last() - .map_or(BlockNumber::GENESIS, miden_objects::block::BlockHeader::block_num); + let blockchain = load_mmr(&mut db).await?; + let latest_block_num = blockchain.chain_tip().unwrap_or(BlockNumber::GENESIS); let account_tree = load_account_tree(&mut db, latest_block_num).await?; let nullifier_tree = load_nullifier_tree(&mut db).await?; - let inner = RwLock::new(InnerState { - nullifier_tree, - // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX - // entries. - blockchain: Blockchain::from_mmr_unchecked(chain_mmr), - account_tree, - }); + let inner = RwLock::new(InnerState { nullifier_tree, blockchain, account_tree }); let writer = Mutex::new(()); let db = Arc::new(db); @@ -175,6 +155,9 @@ impl State { Ok(Self { db, block_store, inner, writer }) } + // STATE MUTATOR + // -------------------------------------------------------------------------------------------- + /// Apply changes of a new block to the DB and in-memory data structures. 
/// /// ## Note on state consistency @@ -206,7 +189,7 @@ impl State { let header = block.header(); - let tx_commitment = block.transactions().commitment(); + let tx_commitment = block.body().transactions().commitment(); if header.tx_commitment() != tx_commitment { return Err(InvalidBlockError::InvalidBlockTxCommitment { @@ -217,7 +200,7 @@ impl State { } let block_num = header.block_num(); - let block_commitment = block.commitment(); + let block_commitment = header.commitment(); // ensures the right block header is being processed let prev_block = self @@ -263,9 +246,10 @@ impl State { // nullifiers can be produced only once let duplicate_nullifiers: Vec<_> = block + .body() .created_nullifiers() .iter() - .filter(|&n| inner.nullifier_tree.get_block_num(n).is_some()) + .filter(|&nullifier| inner.nullifier_tree.get_block_num(nullifier).is_some()) .copied() .collect(); if !duplicate_nullifiers.is_empty() { @@ -284,7 +268,11 @@ impl State { let nullifier_tree_update = inner .nullifier_tree .compute_mutations( - block.created_nullifiers().iter().map(|nullifier| (*nullifier, block_num)), + block + .body() + .created_nullifiers() + .iter() + .map(|nullifier| (*nullifier, block_num)), ) .map_err(InvalidBlockError::NewBlockNullifierAlreadySpent)?; @@ -297,6 +285,7 @@ impl State { .account_tree .compute_mutations( block + .body() .updated_accounts() .iter() .map(|update| (update.account_id(), update.final_state_commitment())), @@ -323,12 +312,13 @@ impl State { }; // build note tree - let note_tree = block.build_output_note_tree(); + let note_tree = block.body().compute_block_note_tree(); if note_tree.root() != header.note_root() { return Err(InvalidBlockError::NewBlockInvalidNoteRoot.into()); } let notes = block + .body() .output_notes() .map(|(note_index, note)| { let (details, nullifier) = match note { @@ -348,7 +338,7 @@ impl State { let note_record = NoteRecord { block_num, note_index, - note_id: note.id().into(), + note_id: note.id().as_word(), note_commitment: note.commitment(), metadata: *note.metadata(), details, @@ -428,6 +418,9 @@ impl State { Ok(()) } + // STATE ACCESSORS + // -------------------------------------------------------------------------------------------- + /// Queries a [BlockHeader] from the database, and returns it alongside its inclusion proof. /// /// If [None] is given as the value of `block_num`, the data for the latest [BlockHeader] is @@ -892,11 +885,8 @@ impl State { let found_unauthenticated_notes = self .db - .select_notes_by_commitment(unauthenticated_note_commitments) - .await? - .into_iter() - .map(|note| note.note_commitment) - .collect(); + .select_existing_note_commitments(unauthenticated_note_commitments) + .await?; Ok(TransactionInputs { account_commitment, @@ -919,109 +909,163 @@ impl State { self.db.select_network_account_by_prefix(id_prefix).await } + /// Returns network account IDs within the specified block range (based on account creation + /// block). + /// + /// The function may return fewer accounts than exist in the range if the result would exceed + /// `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows. In this case, the result is + /// truncated at a block boundary to ensure all accounts from included blocks are returned. + /// + /// The response includes the last block number that was fully included in the result. 
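One way to picture the truncation rule in the doc comment above: rows are grouped by creation block, and a block is included only if all of its rows still fit the remaining budget (in the real code, `MAX_RESPONSE_PAYLOAD_BYTES / AccountId::SERIALIZED_SIZE` rows). A self-contained sketch with simplified types (`u32` block numbers, `u64` IDs):

```rust
/// Keeps whole blocks only, never exceeding `max_rows`; returns the kept IDs
/// and the last block that was fully included. Assumes `rows` is sorted by
/// block number, as a database query ordered by creation block would be.
fn truncate_at_block_boundary(rows: &[(u32, u64)], max_rows: usize) -> (Vec<u64>, u32) {
    let mut ids = Vec::new();
    let mut last_block_included = 0u32;
    let mut i = 0;
    while i < rows.len() {
        let block = rows[i].0;
        // Count how many rows belong to this block.
        let block_rows = rows[i..].iter().take_while(|(b, _)| *b == block).count();
        if ids.len() + block_rows > max_rows {
            break; // including this block would exceed the budget; cut here
        }
        ids.extend(rows[i..i + block_rows].iter().map(|(_, id)| *id));
        last_block_included = block;
        i += block_rows;
    }
    (ids, last_block_included)
}

fn main() {
    let rows = [(1, 10), (1, 11), (2, 20), (3, 30), (3, 31)];
    // Budget of 3 rows: block 2 still fits, block 3 (2 rows) would overflow.
    let (ids, last) = truncate_at_block_boundary(&rows, 3);
    assert_eq!(ids, vec![10, 11, 20]);
    assert_eq!(last, 2);
}
```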
+ pub async fn get_all_network_accounts( + &self, + block_range: RangeInclusive, + ) -> Result<(Vec, BlockNumber), DatabaseError> { + self.db.select_all_network_account_ids(block_range).await + } + /// Returns the respective account proof with optional details, such as asset and storage /// entries. /// - /// Note: The `block_num` parameter in the request is currently ignored and will always - /// return the current state. Historical block support will be implemented in a future update. - #[allow(clippy::too_many_lines)] + /// When `block_num` is provided, this method will return the account state at that specific + /// block using both the historical account tree witness and historical database state. pub async fn get_account_proof( &self, account_request: AccountProofRequest, ) -> Result { let AccountProofRequest { block_num, account_id, details } = account_request; - let _ = block_num.ok_or_else(|| { - DatabaseError::NotImplemented( - "Handling of historical/past block numbers is not implemented yet".to_owned(), - ) - }); - // Lock inner state for the whole operation. We need to hold this lock to prevent the - // database, account tree and latest block number from changing during the operation, - // because changing one of them would lead to inconsistent state. + if details.is_some() && !account_id.has_public_state() { + return Err(DatabaseError::AccountNotPublic(account_id)); + } + + let (block_num, witness) = self.get_account_witness(block_num, account_id).await?; + + let details = if let Some(request) = details { + Some(self.fetch_public_account_details(account_id, block_num, request).await?) + } else { + None + }; + + Ok(AccountProofResponse { block_num, witness, details }) + } + + /// Gets the block witness (account tree proof) for the specified account. + /// + /// If `block_num` is provided, returns the witness at that historical block; + /// otherwise, returns the witness at the latest block. + async fn get_account_witness( + &self, + block_num: Option, + account_id: AccountId, + ) -> Result<(BlockNumber, AccountWitness), DatabaseError> { let inner_state = self.inner.read().await; - let block_num = inner_state.account_tree.block_number_latest(); - let witness = inner_state.account_tree.open_latest(account_id); + // Determine which block to query + let (block_num, witness) = if let Some(requested_block) = block_num { + // Historical query: use the account tree with history + let witness = inner_state + .account_tree + .open_at(account_id, requested_block) + .ok_or_else(|| DatabaseError::HistoricalBlockNotAvailable { + block_num: requested_block, + reason: "Block is either in the future or has been pruned from history" + .to_string(), + })?; + (requested_block, witness) + } else { + // Latest query: use the latest state + let block_num = inner_state.account_tree.block_number_latest(); + let witness = inner_state.account_tree.open_latest(account_id); + (block_num, witness) + }; - let account_details = if let Some(AccountDetailRequest { + Ok((block_num, witness)) + } + + /// Fetches the account details (code, vault, storage) for a public account at the specified + /// block. + /// + /// This method queries the database to fetch the account state and processes the detail + /// request to return only the requested information.
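The detail-fetching code that follows elides data the caller already has: if the supplied commitment matches the current one, nothing is returned for that part; if it differs, the data is loaded; if no commitment was supplied, that part was not requested. A minimal sketch of the rule (simplified types; `[u8; 32]` stands in for `Word` and the closure for the database load):

```rust
/// Returns the code blob only when the caller supplied a stale commitment.
fn code_to_return(
    client_commitment: Option<[u8; 32]>,
    current_commitment: [u8; 32],
    load_code: impl FnOnce() -> Vec<u8>,
) -> Option<Vec<u8>> {
    match client_commitment {
        Some(c) if c == current_commitment => None, // client is up to date
        Some(_) => Some(load_code()),               // stale: ship the data
        None => None,                               // details not requested
    }
}

fn main() {
    let current = [1u8; 32];
    assert_eq!(code_to_return(Some(current), current, || vec![0xAA]), None);
    assert_eq!(code_to_return(Some([0u8; 32]), current, || vec![0xAA]), Some(vec![0xAA]));
    assert_eq!(code_to_return(None, current, || vec![0xAA]), None);
}
```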
+ async fn fetch_public_account_details( + &self, + account_id: AccountId, + block_num: BlockNumber, + detail_request: AccountDetailRequest, + ) -> Result { + let AccountDetailRequest { code_commitment, asset_vault_commitment, storage_requests, - }) = details - { - let account_info = self.db.select_account(account_id).await?; + } = detail_request; - // if we get a query for a _private_ account _with_ details requested, we'll error out - let Some(account) = account_info.details else { - return Err(DatabaseError::AccountNotPublic(account_id)); - }; + if !account_id.has_public_state() { + return Err(DatabaseError::AccountNotPublic(account_id)); + } - let storage_header = account.storage().to_header(); + // Validate block exists in the blockchain before querying the database + self.validate_block_exists(block_num).await?; - let mut storage_map_details = - Vec::::with_capacity(storage_requests.len()); + let account_header = self + .db + .select_account_header_at_block(account_id, block_num) + .await? + .ok_or_else(|| DatabaseError::AccountNotPublic(account_id))?; + + let account_code = match code_commitment { + Some(commitment) if commitment == account_header.code_commitment() => None, + Some(_) => { + self.db + .select_account_code_by_commitment(account_header.code_commitment()) + .await? + }, + None => None, + }; - for StorageMapRequest { slot_index, slot_data } in storage_requests { - let Some(StorageSlot::Map(storage_map)) = - account.storage().slots().get(slot_index as usize) - else { - return Err(AccountError::StorageSlotNotMap(slot_index).into()); - }; - let details = AccountStorageMapDetails::new(slot_index, slot_data, storage_map); - storage_map_details.push(details); - } + let vault_details = match asset_vault_commitment { + Some(commitment) if commitment == account_header.vault_root() => { + AccountVaultDetails::empty() + }, + Some(_) => { + let vault_assets = + self.db.select_account_vault_at_block(account_id, block_num).await?; + AccountVaultDetails::from_assets(vault_assets) + }, + None => AccountVaultDetails::empty(), + }; - // Only include unknown account code blobs, which is equal to a account code digest - // mismatch. If `None` was requested, don't return any. - let account_code = code_commitment - .is_some_and(|code_commitment| code_commitment != account.code().commitment()) - .then(|| account.code().to_bytes()); + // TODO: don't load the entire storage at once, load what is required + let storage = self.db.select_account_storage_at_block(account_id, block_num).await?; + let storage_header = storage.to_header(); + let mut storage_map_details = + Vec::::with_capacity(storage_requests.len()); - // storage details - let storage_details = AccountStorageDetails { - header: storage_header, - map_details: storage_map_details, + for StorageMapRequest { slot_name, slot_data } in storage_requests { + let Some(slot) = storage.slots().iter().find(|s| s.name() == &slot_name) else { + continue; }; - // Handle vault details based on the `asset_vault_commitment`. - // Similar to `code_commitment`, if the provided commitment matches, we don't return - // vault data. If no commitment is provided or it doesn't match, we return - // the vault data. If the number of vault contained assets are exceeding a - // limit, we signal this back in the response and the user must handle that - // in follow-up request. 
- let vault_details = match asset_vault_commitment { - Some(commitment) if commitment == account.vault().root() => { - // The client already has the correct vault data - AccountVaultDetails::empty() - }, - Some(_) => { - // The commitment doesn't match, so return vault data - AccountVaultDetails::new(account.vault()) - }, - None => { - // No commitment provided, so don't return vault data - AccountVaultDetails::empty() + let storage_map = match slot.content() { + StorageSlotContent::Map(map) => map, + StorageSlotContent::Value(_) => { + return Err(AccountError::StorageSlotNotMap(slot_name).into()); }, }; - Some(AccountDetails { - account_header: AccountHeader::from(account), - account_code, - vault_details, - storage_details, - }) - } else { - None - }; - - let response = AccountProofResponse { - block_num, - witness, - details: account_details, - }; + let details = AccountStorageMapDetails::new(slot_name, slot_data, storage_map); + storage_map_details.push(details); + } - Ok(response) + Ok(AccountDetails { + account_header, + account_code, + vault_details, + storage_details: AccountStorageDetails { + header: storage_header, + map_details: storage_map_details, + }, + }) } /// Returns storage map values for syncing within a block range. @@ -1049,9 +1093,24 @@ impl State { self.inner.read().await.latest_block_num() } - /// Runs database optimization. - pub async fn optimize_db(&self) -> Result<(), DatabaseError> { - self.db.optimize().await + /// Validates that a block exists in the blockchain. + /// + /// # Attention + /// + /// Acquires a **read lock** on `self.inner`. + /// + /// # Errors + /// + /// Returns `DatabaseError::BlockNotFound` if the block doesn't exist in the blockchain. + async fn validate_block_exists(&self, block_num: BlockNumber) -> Result<(), DatabaseError> { + let inner = self.inner.read().await; + let latest_block_num = inner.latest_block_num(); + + if block_num > latest_block_num { + return Err(DatabaseError::BlockNotFound(block_num)); + } + + Ok(()) } /// Returns account vault updates for the specified account within a block range. @@ -1063,14 +1122,6 @@ impl State { self.db.get_account_vault_sync(account_id, block_range).await } - /// Returns the unprocessed network notes, along with the next pagination token. - pub async fn get_unconsumed_network_notes( - &self, - page: Page, - ) -> Result<(Vec, Page), DatabaseError> { - self.db.select_unconsumed_network_notes(page).await - } - /// Returns the network notes for an account that are unconsumed by a specified block number, /// along with the next pagination token.
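The unconsumed-note queries above page with an opaque token: callers pass `Page { token, size }` and get back the next page's token, which is `None` once the result set is exhausted. A self-contained sketch of that contract over an in-memory slice (the real token is produced by the database layer; a `u64` offset stands in here):

```rust
#[derive(Clone, Copy)]
struct Page {
    /// Opaque resume token; `None` means "start from the beginning".
    token: Option<u64>,
    size: usize,
}

/// Hypothetical stand-in for the store query: returns one page of items plus
/// the `Page` to pass next time (`token == None` once exhausted).
fn select_page(all: &[u64], page: Page) -> (Vec<u64>, Page) {
    let start = page.token.unwrap_or(0) as usize;
    let end = (start + page.size).min(all.len());
    let next_token = (end < all.len()).then(|| end as u64);
    (all[start..end].to_vec(), Page { token: next_token, size: page.size })
}

fn main() {
    let notes: Vec<u64> = (0..7).collect();
    let mut page = Page { token: None, size: 3 };
    let mut fetched = Vec::new();
    loop {
        let (chunk, next) = select_page(&notes, page);
        fetched.extend(chunk);
        if next.token.is_none() {
            break; // no more pages
        }
        page = next;
    }
    assert_eq!(fetched, notes);
}
```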
pub async fn get_unconsumed_network_notes_for_account( @@ -1080,7 +1131,7 @@ impl State { page: Page, ) -> Result<(Vec, Page), DatabaseError> { self.db - .select_unconsumed_network_notes_for_account(network_account_id_prefix, block_num, page) + .select_unconsumed_network_notes(network_account_id_prefix, block_num, page) .await } @@ -1103,19 +1154,11 @@ impl State { } } -// UTILITIES +// INNER STATE LOADING // ================================================================================================ #[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_nullifier_tree(db: &mut Db) -> Result { - let nullifiers = db.select_all_nullifiers().await?; - - NullifierTree::with_entries(nullifiers.into_iter().map(|info| (info.nullifier, info.block_num))) - .map_err(StateInitializationError::FailedToCreateNullifierTree) -} - -#[instrument(level = "info", target = COMPONENT, skip_all)] -async fn load_mmr(db: &mut Db) -> Result { +async fn load_mmr(db: &mut Db) -> Result { let block_commitments: Vec = db .select_all_block_headers() .await? @@ -1123,24 +1166,52 @@ async fn load_mmr(db: &mut Db) -> Result { .map(BlockHeader::commitment) .collect(); - Ok(block_commitments.into()) + // SAFETY: We assume the loaded MMR is valid and does not have more than u32::MAX + // entries. + let chain_mmr = Blockchain::from_mmr_unchecked(block_commitments.into()); + + Ok(chain_mmr) +} + +#[instrument(level = "info", target = COMPONENT, skip_all)] +async fn load_nullifier_tree( + db: &mut Db, +) -> Result>, StateInitializationError> { + let nullifiers = db.select_all_nullifiers().await?; + + // Convert the nullifier data into entries for the NullifierTree: each entry + // maps a nullifier to the block number in which it was consumed. + let entries = nullifiers.into_iter().map(|info| (info.nullifier, info.block_num)); + + NullifierTree::with_storage_from_entries(MemoryStorage::default(), entries) .map_err(StateInitializationError::FailedToCreateNullifierTree) } #[instrument(level = "info", target = COMPONENT, skip_all)] async fn load_account_tree( db: &mut Db, block_number: BlockNumber, -) -> Result, StateInitializationError> { - let account_data = db.select_all_account_commitments().await?.into_iter().collect::>(); +) -> Result, StateInitializationError> { + let account_data = Vec::from_iter(db.select_all_account_commitments().await?); - // Convert account_data to use account_id_to_smt_key let smt_entries = account_data .into_iter() .map(|(id, commitment)| (account_id_to_smt_key(id), commitment)); - let smt = LargeSmt::with_entries(MemoryStorage::default(), smt_entries) - .expect("Failed to create LargeSmt from database account data"); + let smt = + LargeSmt::with_entries(MemoryStorage::default(), smt_entries).map_err(|e| match e { + LargeSmtError::Merkle(merkle_error) => { + StateInitializationError::DatabaseError(DatabaseError::MerkleError(merkle_error)) + }, + LargeSmtError::Storage(err) => { + // large_smt::StorageError is not `Sync`, so `context` cannot be called + // on it; report the error as a string instead. + StateInitializationError::AccountTreeIoError(err.as_report()) + }, + })?; + + let account_tree = + AccountTree::new(smt).map_err(StateInitializationError::FailedToCreateAccountsTree)?; - let account_tree = AccountTree::new(smt).expect("Failed to create AccountTree"); Ok(AccountTreeWithHistory::new(account_tree, block_number)) } diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 1c28ce8fb..d944c1099 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -18,23 +18,26 @@ workspace = true # Enables
dependencies intended for build script generation of version metadata. vergen = ["dep:vergen", "dep:vergen-gitcl"] # Enables utility functions for testing traces created by some other crate's stack. -testing = ["dep:tokio", "miden-objects/testing"] +testing = ["miden-protocol/testing"] [dependencies] anyhow = { workspace = true } bytes = { version = "1.10" } figment = { features = ["env", "toml"], version = "0.10" } +fs-err = { workspace = true } http = { workspace = true } http-body-util = { version = "0.1" } itertools = { workspace = true } -miden-objects = { workspace = true } +lru = { workspace = true } +miden-protocol = { workspace = true } opentelemetry = { version = "0.31" } opentelemetry-otlp = { default-features = false, features = ["grpc-tonic", "tls-roots", "trace"], version = "0.31" } opentelemetry_sdk = { features = ["rt-tokio", "testing"], version = "0.31" } rand = { workspace = true } serde = { features = ["derive"], version = "1.0" } thiserror = { workspace = true } -tonic = { workspace = true } +tokio = { workspace = true } +tonic = { default-features = true, workspace = true } tower-http = { features = ["catch-panic"], workspace = true } tracing = { workspace = true } tracing-forest = { features = ["chrono"], optional = true, version = "0.2" } @@ -46,8 +49,6 @@ url = { workspace = true } # This must match the version expected by `vergen-gitcl`. vergen = { "version" = "9.0", optional = true } vergen-gitcl = { features = ["cargo", "rustc"], optional = true, version = "1.0" } -# Optional dependencies enabled by `testing` feature. -tokio = { optional = true, workspace = true } [dev-dependencies] thiserror = { workspace = true } diff --git a/crates/utils/src/crypto.rs b/crates/utils/src/crypto.rs index f17b88580..44eac3f87 100644 --- a/crates/utils/src/crypto.rs +++ b/crates/utils/src/crypto.rs @@ -1,5 +1,5 @@ -use miden_objects::crypto::rand::RpoRandomCoin; -use miden_objects::{Felt, Word}; +use miden_protocol::crypto::rand::RpoRandomCoin; +use miden_protocol::{Felt, Word}; use rand::Rng; /// Creates a new RPO Random Coin with random seed diff --git a/crates/utils/src/fee.rs b/crates/utils/src/fee.rs index d7d167f24..5bde43284 100644 --- a/crates/utils/src/fee.rs +++ b/crates/utils/src/fee.rs @@ -1,9 +1,9 @@ -use miden_objects::asset::FungibleAsset; -use miden_objects::block::FeeParameters; -use miden_objects::testing::account_id::ACCOUNT_ID_NATIVE_ASSET_FAUCET; +use miden_protocol::asset::FungibleAsset; +use miden_protocol::block::FeeParameters; +use miden_protocol::testing::account_id::ACCOUNT_ID_NATIVE_ASSET_FAUCET; /// Derive a default, zero valued fee, payable to -/// [`miden_objects::testing::account_id::ACCOUNT_ID_NATIVE_ASSET_FAUCET`]. +/// [`miden_protocol::testing::account_id::ACCOUNT_ID_NATIVE_ASSET_FAUCET`].
pub fn test_fee() -> FungibleAsset { let faucet = ACCOUNT_ID_NATIVE_ASSET_FAUCET.try_into().unwrap(); FungibleAsset::new(faucet, 0).unwrap() } diff --git a/crates/utils/src/formatting.rs b/crates/utils/src/formatting.rs index 1c132b6d5..5845e09b1 100644 --- a/crates/utils/src/formatting.rs +++ b/crates/utils/src/formatting.rs @@ -1,7 +1,7 @@ use std::fmt::Display; use itertools::Itertools; -use miden_objects::transaction::{InputNoteCommitment, InputNotes, OutputNotes}; +use miden_protocol::transaction::{InputNoteCommitment, InputNotes, OutputNotes}; pub fn format_opt(opt: Option<&T>) -> String { opt.map_or("None".to_owned(), ToString::to_string) } diff --git a/crates/utils/src/lib.rs b/crates/utils/src/lib.rs index 4ff02f939..c894e31fb 100644 --- a/crates/utils/src/lib.rs +++ b/crates/utils/src/lib.rs @@ -7,6 +7,7 @@ pub mod formatting; pub mod grpc; pub mod limiter; pub mod logging; +pub mod lru_cache; pub mod panic; pub mod tracing; pub mod version; diff --git a/crates/utils/src/limiter.rs b/crates/utils/src/limiter.rs index d02499841..4e580d302 100644 --- a/crates/utils/src/limiter.rs +++ b/crates/utils/src/limiter.rs @@ -1,12 +1,16 @@ -//! Limit the size of a parameter list for a specific parameter +//! Limits for RPC and store parameters and payload sizes. //! -//! Used for: -//! 1. the external facing RPC -//! 2. limiting SQL statements not exceeding parameter limits +//! # Rationale +//! - Parameter limits apply across all multi-value RPC parameters. This caps worst-case SQL `IN` +//! clauses and keeps responses comfortably under the 4 MiB payload budget enforced in the store. +//! - Limits are enforced both at the RPC boundary and inside the store to prevent bypasses and to +//! avoid expensive queries even if validation is skipped earlier in the stack. +//! - `MAX_RESPONSE_PAYLOAD_BYTES` is set to 4 MiB (e.g. 1000 nullifier rows at ~36 B each, 1000 +//! transaction summaries streamed in chunks). //! -//! The 1st is good to terminate invalid requests as early as possible, -//! where the second is both a fallback and a safeguard not benching -//! pointless parameter combinations. +//! Add new limits here so callers share the same values and rationale. + +pub const GENERAL_REQUEST_LIMIT: usize = 1000; #[allow(missing_docs)] #[derive(Debug, thiserror::Error)] @@ -37,58 +41,81 @@ pub trait QueryParamLimiter { } } +/// Maximum payload size (in bytes) for paginated responses returned by the +/// store. +pub const MAX_RESPONSE_PAYLOAD_BYTES: usize = 4 * 1024 * 1024; + /// Used for the following RPC endpoints /// * `state_sync` +/// +/// Capped at 1000 account IDs to keep SQL `IN` clauses bounded and response payloads under the +/// 4 MiB budget. pub struct QueryParamAccountIdLimit; impl QueryParamLimiter for QueryParamAccountIdLimit { const PARAM_NAME: &str = "account_id"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints /// * `select_nullifiers_by_prefix` +/// +/// Capped at 1000 prefixes to keep queries and responses comfortably within the 4 MiB payload +/// budget and to avoid unbounded prefix scans.
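The limiter types below all share one shape: a unit struct implementing `QueryParamLimiter` with a `PARAM_NAME` and a `LIMIT`, funneled through the single generic `check` helper added to `rpc_api.rs` earlier in this diff. A self-contained sketch of the pattern, with `String` standing in for the real error and gRPC `Status` types:

```rust
/// Simplified version of the limiter trait: a name and a cap per parameter
/// kind, with a default check implementation.
trait QueryParamLimiter {
    const PARAM_NAME: &'static str;
    const LIMIT: usize;

    fn check(n: usize) -> Result<(), String> {
        if n > Self::LIMIT {
            Err(format!("too many `{}` values: {} > {}", Self::PARAM_NAME, n, Self::LIMIT))
        } else {
            Ok(())
        }
    }
}

/// One unit struct per parameter kind, mirroring the limiters below.
struct NoteIdLimit;
impl QueryParamLimiter for NoteIdLimit {
    const PARAM_NAME: &'static str = "note_id";
    const LIMIT: usize = 100;
}

/// One generic entry point so the error mapping lives in a single place.
fn check<L: QueryParamLimiter>(n: usize) -> Result<(), String> {
    L::check(n)
}

fn main() {
    assert!(check::<NoteIdLimit>(100).is_ok());
    assert!(check::<NoteIdLimit>(101).is_err());
}
```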
pub struct QueryParamNullifierPrefixLimit; impl QueryParamLimiter for QueryParamNullifierPrefixLimit { const PARAM_NAME: &str = "nullifier_prefix"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints /// * `select_nullifiers_by_prefix` /// * `sync_nullifiers` /// * `sync_state` +/// +/// Capped at 1000 nullifiers to bound `IN` clauses and keep response sizes under the 4 MiB budget. pub struct QueryParamNullifierLimit; impl QueryParamLimiter for QueryParamNullifierLimit { const PARAM_NAME: &str = "nullifier"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints /// * `get_note_sync` +/// +/// Capped at 1000 tags so note sync responses remain within the 4 MiB payload budget. pub struct QueryParamNoteTagLimit; impl QueryParamLimiter for QueryParamNoteTagLimit { const PARAM_NAME: &str = "note_tag"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Used for the following RPC endpoints /// `select_notes_by_id` +/// +/// The limit is set to 100 notes to keep responses within the 4 MiB payload cap because individual +/// notes are bounded to roughly 32 KiB. pub struct QueryParamNoteIdLimit; impl QueryParamLimiter for QueryParamNoteIdLimit { const PARAM_NAME: &str = "note_id"; - const LIMIT: usize = 1000; + const LIMIT: usize = 100; } /// Used for internal queries retrieving note inclusion proofs by commitment. +/// +/// Capped at 1000 commitments to keep internal proof lookups bounded and responses under the 4 MiB +/// payload cap. pub struct QueryParamNoteCommitmentLimit; impl QueryParamLimiter for QueryParamNoteCommitmentLimit { const PARAM_NAME: &str = "note_commitment"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } /// Only used internally, not exposed via public RPC. +/// +/// Capped at 1000 block headers to bound internal batch operations and keep payloads below the +/// 4 MiB limit. pub struct QueryParamBlockLimit; impl QueryParamLimiter for QueryParamBlockLimit { const PARAM_NAME: &str = "block_header"; - const LIMIT: usize = 1000; + const LIMIT: usize = GENERAL_REQUEST_LIMIT; } diff --git a/crates/utils/src/lru_cache.rs b/crates/utils/src/lru_cache.rs new file mode 100644 index 000000000..7e6751529 --- /dev/null +++ b/crates/utils/src/lru_cache.rs @@ -0,0 +1,32 @@ +use std::hash::Hash; +use std::num::NonZeroUsize; +use std::sync::Arc; + +use lru::LruCache as InnerCache; +use tokio::sync::Mutex; + +/// A newtype wrapper around an LRU cache. Ensures that the cache lock is not held across +/// await points. +#[derive(Clone)] +pub struct LruCache(Arc>>); + +impl LruCache +where + K: Hash + Eq, + V: Clone, +{ + /// Creates a new cache with the given capacity. + pub fn new(capacity: NonZeroUsize) -> Self { + Self(Arc::new(Mutex::new(InnerCache::new(capacity)))) + } + + /// Retrieves a value from the cache. + pub async fn get(&self, key: &K) -> Option { + self.0.lock().await.get(key).cloned() + } + + /// Puts a value into the cache.
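Usage of the `LruCache` wrapper above might look like the sketch below (assuming the `lru` and `tokio` dependencies declared in this crate's `Cargo.toml`; the key and value types here are arbitrary). Since each method locks, copies out, and releases internally, the `tokio::sync::Mutex` guard is never held across a caller's await point:

```rust
use std::num::NonZeroUsize;

use miden_node_utils::lru_cache::LruCache;

#[tokio::main]
async fn main() {
    // Capacity of two entries; the least recently used entry is evicted first.
    let cache: LruCache<u64, String> = LruCache::new(NonZeroUsize::new(2).unwrap());

    cache.put(1, "a".to_string()).await;
    cache.put(2, "b".to_string()).await;
    cache.put(3, "c".to_string()).await; // evicts key 1

    assert_eq!(cache.get(&1).await, None);
    assert_eq!(cache.get(&3).await, Some("c".to_string()));

    // The wrapper is `Clone`: clones share the same underlying cache.
    let shared = cache.clone();
    assert_eq!(shared.get(&2).await, Some("b".to_string()));
}
```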
+ pub async fn put(&self, key: K, value: V) { + self.0.lock().await.put(key, value); + } +} diff --git a/crates/utils/src/tracing/span_ext.rs b/crates/utils/src/tracing/span_ext.rs index c887346b2..07ac008fe 100644 --- a/crates/utils/src/tracing/span_ext.rs +++ b/crates/utils/src/tracing/span_ext.rs @@ -1,11 +1,11 @@ use core::time::Duration; use std::net::IpAddr; -use miden_objects::Word; -use miden_objects::account::AccountId; -use miden_objects::batch::BatchId; -use miden_objects::block::BlockNumber; -use miden_objects::transaction::TransactionId; +use miden_protocol::Word; +use miden_protocol::account::AccountId; +use miden_protocol::batch::BatchId; +use miden_protocol::block::BlockNumber; +use miden_protocol::transaction::TransactionId; use opentelemetry::trace::Status; use opentelemetry::{Key, StringValue, Value}; diff --git a/crates/utils/src/version/mod.rs b/crates/utils/src/version/mod.rs index 03ff66249..7d378558c 100644 --- a/crates/utils/src/version/mod.rs +++ b/crates/utils/src/version/mod.rs @@ -123,7 +123,7 @@ mod vergen { let cargo_vcs_info = PathBuf::from(env!("CARGO_MANIFEST_DIR")).join(".cargo_vcs_info.json"); if cargo_vcs_info.exists() { // The file is small so reading to string is acceptable. - let contents = std::fs::read_to_string(cargo_vcs_info).context("Reading vcs info")?; + let contents = fs_err::read_to_string(cargo_vcs_info).context("Reading vcs info")?; // File format: // { diff --git a/crates/validator/Cargo.toml b/crates/validator/Cargo.toml index ebb6145b7..6115e7cff 100644 --- a/crates/validator/Cargo.toml +++ b/crates/validator/Cargo.toml @@ -21,9 +21,12 @@ anyhow = { workspace = true } miden-node-proto = { workspace = true } miden-node-proto-build = { features = ["internal"], workspace = true } miden-node-utils = { features = ["testing"], workspace = true } +miden-protocol = { workspace = true } +miden-tx = { workspace = true } +thiserror = { workspace = true } tokio = { features = ["macros", "net", "rt-multi-thread"], workspace = true } tokio-stream = { features = ["net"], workspace = true } -tonic = { features = ["transport"], workspace = true } +tonic = { default-features = true, features = ["transport"], workspace = true } tonic-reflection = { workspace = true } tower-http = { features = ["util"], workspace = true } tracing = { workspace = true } diff --git a/crates/validator/src/block_validation/mod.rs b/crates/validator/src/block_validation/mod.rs new file mode 100644 index 000000000..68744010f --- /dev/null +++ b/crates/validator/src/block_validation/mod.rs @@ -0,0 +1,51 @@ +use std::sync::Arc; + +use miden_protocol::ProposedBlockError; +use miden_protocol::block::{BlockNumber, BlockSigner, ProposedBlock}; +use miden_protocol::crypto::dsa::ecdsa_k256_keccak::Signature; +use miden_protocol::transaction::TransactionId; + +use crate::server::ValidatedTransactions; + +// BLOCK VALIDATION ERROR +// ================================================================================================ + +#[derive(thiserror::Error, Debug)] +pub enum BlockValidationError { + #[error("transaction {0} in block {1} has not been validated")] + TransactionNotValidated(TransactionId, BlockNumber), + #[error("failed to build block")] + BlockBuildingFailed(#[from] ProposedBlockError), +} + +// BLOCK VALIDATION +// ================================================================================================ + +/// Validates a block by checking that all transactions in the proposed block have been processed by +/// the validator in the past. 
+///
+/// Removes the validated transactions from the cache upon success.
+pub async fn validate_block<S: BlockSigner>(
+    proposed_block: ProposedBlock,
+    signer: &S,
+    validated_transactions: Arc<ValidatedTransactions>,
+) -> Result<Signature, BlockValidationError> {
+    // Check that all transactions in the proposed block have been validated
+    for tx_header in proposed_block.transactions() {
+        let tx_id = tx_header.id();
+        if validated_transactions.get(&tx_id).await.is_none() {
+            return Err(BlockValidationError::TransactionNotValidated(
+                tx_id,
+                proposed_block.block_num(),
+            ));
+        }
+    }
+
+    // Build the block header.
+    let (header, _) = proposed_block.into_header_and_body()?;
+
+    // Sign the header.
+    let signature = signer.sign(&header);
+
+    Ok(signature)
+}
diff --git a/crates/validator/src/lib.rs b/crates/validator/src/lib.rs
index d467b33fb..a45112d27 100644
--- a/crates/validator/src/lib.rs
+++ b/crates/validator/src/lib.rs
@@ -1,4 +1,6 @@
+mod block_validation;
 mod server;
+mod tx_validation;
 
 pub use server::Validator;
diff --git a/crates/validator/src/server/mod.rs b/crates/validator/src/server/mod.rs
index 2ce321290..bab8b5d62 100644
--- a/crates/validator/src/server/mod.rs
+++ b/crates/validator/src/server/mod.rs
@@ -1,18 +1,39 @@
 use std::net::SocketAddr;
+use std::num::NonZeroUsize;
+use std::sync::Arc;
 use std::time::Duration;
 
 use anyhow::Context;
 use miden_node_proto::generated::validator::api_server;
 use miden_node_proto::generated::{self as proto};
 use miden_node_proto_build::validator_api_descriptor;
+use miden_node_utils::ErrorReport;
+use miden_node_utils::lru_cache::LruCache;
 use miden_node_utils::panic::catch_panic_layer_fn;
 use miden_node_utils::tracing::grpc::grpc_trace_fn;
+use miden_protocol::block::{BlockSigner, ProposedBlock};
+use miden_protocol::transaction::{
+    ProvenTransaction,
+    TransactionHeader,
+    TransactionId,
+    TransactionInputs,
+};
+use miden_tx::utils::{Deserializable, Serializable};
 use tokio::net::TcpListener;
 use tokio_stream::wrappers::TcpListenerStream;
+use tonic::Status;
 use tower_http::catch_panic::CatchPanicLayer;
 use tower_http::trace::TraceLayer;
 
 use crate::COMPONENT;
+use crate::block_validation::validate_block;
+use crate::tx_validation::validate_transaction;
+
+/// Number of transactions to keep in the validated transactions cache.
+const NUM_VALIDATED_TRANSACTIONS: NonZeroUsize = NonZeroUsize::new(10000).unwrap();
+
+/// A type alias for an LRU cache that stores validated transactions.
+pub type ValidatedTransactions = LruCache<TransactionId, TransactionHeader>;
 
 // VALIDATOR
 // ================================================================================
@@ -20,16 +41,19 @@ use crate::COMPONENT;
 /// The handle into running the gRPC validator server.
 ///
 /// Facilitates the running of the gRPC server which implements the validator API.
-pub struct Validator {
+pub struct Validator<S> {
     /// The address of the validator component.
     pub address: SocketAddr,
 
     /// Server-side timeout for an individual gRPC request.
     ///
     /// If the handler takes longer than this duration, the server cancels the call.
     pub grpc_timeout: Duration,
+
+    /// The signer used to sign blocks.
+    pub signer: S,
 }
 
-impl Validator {
+impl<S: BlockSigner + Send + Sync + 'static> Validator<S> {
     /// Serves the validator RPC API.
     ///
    /// Executes in place (i.e. not spawned) and will run indefinitely until a fatal error is
@@ -60,7 +84,7 @@ impl Validator {
             .layer(CatchPanicLayer::custom(catch_panic_layer_fn))
             .layer(TraceLayer::new_for_grpc().make_span_with(grpc_trace_fn))
             .timeout(self.grpc_timeout)
-            .add_service(api_server::ApiServer::new(ValidatorServer {}))
+            .add_service(api_server::ApiServer::new(ValidatorServer::new(self.signer)))
             .add_service(reflection_service)
             .add_service(reflection_service_alpha)
             .serve_with_incoming(TcpListenerStream::new(listener))
@@ -75,10 +99,21 @@ impl Validator {
 /// The underlying implementation of the gRPC validator server.
 ///
 /// Implements the gRPC API for the validator.
-struct ValidatorServer {}
+struct ValidatorServer<S> {
+    signer: S,
+    validated_transactions: Arc<ValidatedTransactions>,
+}
+
+impl<S> ValidatorServer<S> {
+    fn new(signer: S) -> Self {
+        let validated_transactions =
+            Arc::new(ValidatedTransactions::new(NUM_VALIDATED_TRANSACTIONS));
+        Self { signer, validated_transactions }
+    }
+}
 
 #[tonic::async_trait]
-impl api_server::Api for ValidatorServer {
+impl<S: BlockSigner + Send + Sync + 'static> api_server::Api for ValidatorServer<S> {
     /// Returns the status of the validator.
     async fn status(
         &self,
@@ -93,8 +128,61 @@ impl api_server::Api for ValidatorServer {
     /// Receives a proven transaction, then validates and stores it.
     async fn submit_proven_transaction(
         &self,
-        _request: tonic::Request<proto::transaction::ProvenTransaction>,
+        request: tonic::Request<proto::transaction::ProvenTransaction>,
     ) -> Result<tonic::Response<()>, tonic::Status> {
-        todo!()
+        let request = request.into_inner();
+        // Deserialize the transaction.
+        let proven_tx =
+            ProvenTransaction::read_from_bytes(&request.transaction).map_err(|err| {
+                Status::invalid_argument(err.as_report_context("Invalid proven transaction"))
+            })?;
+
+        // Deserialize the transaction inputs.
+        let Some(tx_inputs) = request.transaction_inputs else {
+            return Err(Status::invalid_argument("Missing transaction inputs"));
+        };
+        let tx_inputs = TransactionInputs::read_from_bytes(&tx_inputs).map_err(|err| {
+            Status::invalid_argument(err.as_report_context("Invalid transaction inputs"))
+        })?;
+
+        // Validate the transaction.
+        let validated_tx_header =
+            validate_transaction(proven_tx, tx_inputs).await.map_err(|err| {
+                Status::invalid_argument(err.as_report_context("Invalid transaction"))
+            })?;
+
+        // Register the validated transaction.
+        let tx_id = validated_tx_header.id();
+        self.validated_transactions.put(tx_id, validated_tx_header).await;
+
+        Ok(tonic::Response::new(()))
+    }
+
+    /// Validates a proposed block and, on success, returns a signature over its header.
+    async fn sign_block(
+        &self,
+        request: tonic::Request<proto::blockchain::ProposedBlock>,
+    ) -> Result<tonic::Response<proto::blockchain::BlockSignature>, tonic::Status> {
+        let proposed_block_bytes = request.into_inner().proposed_block;
+
+        // Deserialize the proposed block.
+        let proposed_block =
+            ProposedBlock::read_from_bytes(&proposed_block_bytes).map_err(|err| {
+                tonic::Status::invalid_argument(format!(
+                    "Failed to deserialize proposed block: {err}",
+                ))
+            })?;
+
+        // Validate the block.
+        let signature =
+            validate_block(proposed_block, &self.signer, self.validated_transactions.clone())
+                .await
+                .map_err(|err| {
+                    tonic::Status::invalid_argument(format!("Failed to validate block: {err}"))
+                })?;
+
+        // Send the signature.
+        let response = proto::blockchain::BlockSignature { signature: signature.to_bytes() };
+        Ok(tonic::Response::new(response))
     }
 }
diff --git a/crates/rpc/src/server/validator.rs b/crates/validator/src/tx_validation/data_store.rs
similarity index 56%
rename from crates/rpc/src/server/validator.rs
rename to crates/validator/src/tx_validation/data_store.rs
index 2b6719c32..a48c2e8e6 100644
--- a/crates/rpc/src/server/validator.rs
+++ b/crates/validator/src/tx_validation/data_store.rs
@@ -2,52 +2,26 @@
 /// when it is added to this repository.
 use std::collections::BTreeSet;
 
-use miden_objects::Word;
-use miden_objects::account::{AccountId, PartialAccount, StorageMapWitness};
-use miden_objects::asset::{AssetVaultKey, AssetWitness};
-use miden_objects::block::{BlockHeader, BlockNumber};
-use miden_objects::note::NoteScript;
-use miden_objects::transaction::{
-    AccountInputs,
-    ExecutedTransaction,
-    PartialBlockchain,
-    TransactionInputs,
-};
-use miden_objects::vm::FutureMaybeSend;
-use miden_tx::auth::UnreachableAuth;
-use miden_tx::{
-    DataStore,
-    DataStoreError,
-    MastForestStore,
-    TransactionExecutor,
-    TransactionExecutorError,
-    TransactionMastStore,
-};
+use miden_protocol::Word;
+use miden_protocol::account::{AccountId, PartialAccount, StorageMapWitness};
+use miden_protocol::asset::{AssetVaultKey, AssetWitness};
+use miden_protocol::block::{BlockHeader, BlockNumber};
+use miden_protocol::note::NoteScript;
+use miden_protocol::transaction::{AccountInputs, PartialBlockchain, TransactionInputs};
+use miden_protocol::vm::FutureMaybeSend;
+use miden_tx::{DataStore, DataStoreError, MastForestStore, TransactionMastStore};
 
-/// Executes a transaction using the provided transaction inputs.
-pub async fn re_execute_transaction(
-    tx_inputs: TransactionInputs,
-) -> Result<ExecutedTransaction, TransactionExecutorError> {
-    // Create a DataStore from the transaction inputs.
-    let data_store = TransactionInputsDataStore::new(tx_inputs.clone());
-
-    // Execute the transaction.
-    let (account, block_header, _, input_notes, tx_args) = tx_inputs.into_parts();
-    let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> =
-        TransactionExecutor::new(&data_store);
-    executor
-        .execute_transaction(account.id(), block_header.block_num(), input_notes, tx_args)
-        .await
-}
+// TRANSACTION INPUTS DATA STORE
+// ================================================================================================
 
 /// A [`DataStore`] implementation that wraps [`TransactionInputs`]
-struct TransactionInputsDataStore {
+pub struct TransactionInputsDataStore {
     tx_inputs: TransactionInputs,
     mast_store: TransactionMastStore,
 }
 
 impl TransactionInputsDataStore {
-    fn new(tx_inputs: TransactionInputs) -> Self {
+    pub fn new(tx_inputs: TransactionInputs) -> Self {
         let mast_store = TransactionMastStore::new();
         mast_store.load_account_code(tx_inputs.account().code());
 
         Self { tx_inputs, mast_store }
@@ -82,12 +56,12 @@ impl DataStore for TransactionInputsDataStore {
         async move { Err(DataStoreError::AccountNotFound(foreign_account_id)) }
     }
 
-    fn get_vault_asset_witness(
+    fn get_vault_asset_witnesses(
         &self,
         account_id: AccountId,
         vault_root: Word,
-        vault_key: AssetVaultKey,
-    ) -> impl FutureMaybeSend<Result<AssetWitness, DataStoreError>> {
+        vault_keys: BTreeSet<AssetVaultKey>,
+    ) -> impl FutureMaybeSend<Result<Vec<AssetWitness>, DataStoreError>> {
         async move {
             if self.tx_inputs.account().id() != account_id {
                 return Err(DataStoreError::AccountNotFound(account_id));
             }
@@ -100,18 +74,20 @@ impl DataStore for TransactionInputsDataStore {
                 });
             }
 
-            match self.tx_inputs.account().vault().open(vault_key) {
-                Ok(vault_proof) => {
-                    AssetWitness::new(vault_proof.into()).map_err(|err| DataStoreError::Other {
-                        error_msg: "failed to open vault asset tree".into(),
+            Result::<Vec<_>, _>::from_iter(vault_keys.into_iter().map(|vault_key| {
+                match self.tx_inputs.account().vault().open(vault_key) {
+                    Ok(vault_proof) => {
+                        AssetWitness::new(vault_proof.into()).map_err(|err| DataStoreError::Other {
+                            error_msg: "failed to open vault asset tree".into(),
+                            source: Some(err.into()),
+                        })
+                    },
+                    Err(err) => Err(DataStoreError::Other {
+                        error_msg: "failed to open vault".into(),
                         source: Some(err.into()),
-                    })
-                },
-                Err(err) => Err(DataStoreError::Other {
-                    error_msg: "failed to open vault".into(),
-                    source: Some(err.into()),
-                }),
-            }
+                    }),
+                }
+            }))
         }
     }
 
@@ -136,14 +112,14 @@ impl DataStore for TransactionInputsDataStore {
 
     fn get_note_script(
         &self,
-        script_root: Word,
-    ) -> impl FutureMaybeSend<Result<NoteScript, DataStoreError>> {
-        async move { Err(DataStoreError::NoteScriptNotFound(script_root)) }
+        _script_root: Word,
+    ) -> impl FutureMaybeSend<Result<Option<NoteScript>, DataStoreError>> {
+        async move { Ok(None) }
     }
 }
 
 impl MastForestStore for TransactionInputsDataStore {
-    fn get(&self, procedure_hash: &Word) -> Option<Arc<MastForest>> {
+    fn get(&self, procedure_hash: &Word) -> Option<Arc<MastForest>> {
         self.mast_store.get(procedure_hash)
     }
 }
diff --git a/crates/validator/src/tx_validation/mod.rs b/crates/validator/src/tx_validation/mod.rs
new file mode 100644
index 000000000..95419c392
--- /dev/null
+++ b/crates/validator/src/tx_validation/mod.rs
@@ -0,0 +1,62 @@
+mod data_store;
+
+pub use data_store::TransactionInputsDataStore;
+use miden_protocol::MIN_PROOF_SECURITY_LEVEL;
+use miden_protocol::transaction::{ProvenTransaction, TransactionHeader, TransactionInputs};
+use miden_tx::auth::UnreachableAuth;
+use miden_tx::{TransactionExecutor, TransactionExecutorError, TransactionVerifier};
+
+// TRANSACTION VALIDATION ERROR
+// ================================================================================================
+
+#[derive(thiserror::Error, Debug)]
+pub enum TransactionValidationError {
+    #[error("failed to re-execute the transaction")]
+    ExecutionError(#[from] TransactionExecutorError),
+    #[error("re-executed transaction did not match the provided proven transaction")]
+    Mismatch {
+        proven_tx_header: Box<TransactionHeader>,
+        executed_tx_header: Box<TransactionHeader>,
+    },
+    #[error("transaction proof verification failed")]
+    ProofVerificationFailed(#[from] miden_tx::TransactionVerifierError),
+}
+
+// TRANSACTION VALIDATION
+// ================================================================================================
+
+/// Validates a transaction by verifying its proof, executing it and comparing its header with the
+/// provided proven transaction.
+///
+/// Returns the header of the executed transaction if successful.
+pub async fn validate_transaction(
+    proven_tx: ProvenTransaction,
+    tx_inputs: TransactionInputs,
+) -> Result<TransactionHeader, TransactionValidationError> {
+    // First, verify the transaction proof.
+    let tx_verifier = TransactionVerifier::new(MIN_PROOF_SECURITY_LEVEL);
+    tx_verifier.verify(&proven_tx)?;
+
+    // Create a DataStore from the transaction inputs.
+    let data_store = TransactionInputsDataStore::new(tx_inputs.clone());
+
+    // Execute the transaction.
+    let (account, block_header, _, input_notes, tx_args) = tx_inputs.into_parts();
+    let executor: TransactionExecutor<'_, '_, _, UnreachableAuth> =
+        TransactionExecutor::new(&data_store);
+    let executed_tx = executor
+        .execute_transaction(account.id(), block_header.block_num(), input_notes, tx_args)
+        .await?;
+
+    // Validate that the executed transaction matches the submitted transaction.
+    let executed_tx_header: TransactionHeader = (&executed_tx).into();
+    let proven_tx_header: TransactionHeader = (&proven_tx).into();
+    if executed_tx_header == proven_tx_header {
+        Ok(executed_tx_header)
+    } else {
+        Err(TransactionValidationError::Mismatch {
+            proven_tx_header: proven_tx_header.into(),
+            executed_tx_header: executed_tx_header.into(),
+        })
+    }
+}
diff --git a/docs/external/src/rpc.md b/docs/external/src/rpc.md
index 606f5cf44..7aeb9a81b 100644
--- a/docs/external/src/rpc.md
+++ b/docs/external/src/rpc.md
@@ -16,6 +16,7 @@ The gRPC service definition can be found in the Miden node's `proto` [directory]
 - [GetAccountProofs](#getaccountproofs)
 - [GetBlockByNumber](#getblockbynumber)
 - [GetBlockHeaderByNumber](#getblockheaderbynumber)
+- [GetLimits](#getlimits)
 - [GetNotesById](#getnotesbyid)
 - [GetNoteScriptByRoot](#getnotescriptbyroot)
 - [SubmitProvenTransaction](#submitproventransaction)
@@ -33,7 +34,71 @@ The gRPC service definition can be found in the Miden node's `proto` [directory]
 
 ### CheckNullifiers
 
-Request proofs for a set of nullifiers.
+Request Sparse Merkle Tree opening proofs to verify whether nullifiers have been consumed.
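+
+As a quick orientation (a sketch, not a prescribed client API), a tonic-generated Rust client for
+the `rpc.Api` service could request proofs as follows; `digest_of` is a stand-in for converting a
+nullifier into the protobuf `Digest` type:
+
+```rust
+let request = NullifierList { nullifiers: vec![digest_of(nullifier)] };
+// The response carries one proof per requested nullifier, in request order.
+let proofs = client.check_nullifiers(request).await?.into_inner().proofs;
+```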
+ +#### Request + +```protobuf +message NullifierList { + repeated Digest nullifiers = 1; // List of nullifiers to check +} +``` + +#### Response + +```protobuf +message CheckNullifiersResponse { + repeated SmtOpening proofs = 1; // One proof per requested nullifier +} + +message SmtOpening { + SparseMerklePath path = 1; // Merkle authentication path + SmtLeaf leaf = 2; // Leaf at this position +} + +message SmtLeaf { + oneof leaf { + uint64 empty_leaf_index = 1; + SmtLeafEntry single = 2; + SmtLeafEntryList multiple = 3; + } +} +``` + +#### Understanding Proofs + +**Non-Inclusion (Nullifier NOT consumed):** +- `leaf` contains `empty_leaf_index` +- Note can still be consumed + +**Inclusion (Nullifier IS consumed):** +- `leaf` contains `single` or `multiple` with key-value pairs, including the `nullifier` key +- Note has been spent + +#### Verification + +```rust +use miden_crypto::merkle::{SmtProof, SmtProofError}; + +let block_header = get_latest_block_header(); +let nullifier_tree_root = block_header.state_commitment().nullifier_root(); + +let proof: SmtProof = smt_opening.try_into()?; + +match proof.verify_unset(&nullifier, &nullifier_tree_root) { + Ok(()) => { + // Nullifier is NOT in the tree - note can be consumed + } + Err(SmtProofError::ValueMismatch { .. }) => { + // Proof is valid, but nullifier has a value (not empty) - note already consumed + } + Err(_) => { + // Proof is invalid (wrong root, wrong key, etc.) + } +} +``` + +**Limits:** `nullifier` (1000) ### GetAccountDetails @@ -51,10 +116,32 @@ Request the raw data for a specific block. Request a specific block header and its inclusion proof. +### GetLimits + +Returns the query parameter limits configured for RPC endpoints. + +This endpoint allows clients to discover the maximum number of items that can be requested in a single call for various endpoints. The response contains a map of endpoint names to their parameter limits. + +**Example response structure:** + +```json +{ + "endpoints": { + "CheckNullifiers": { "parameters": { "nullifier": 1000 } }, + "SyncNullifiers": { "parameters": { "nullifier": 1000 } }, + "SyncState": { "parameters": { "account_id": 1000, "note_tag": 1000 } }, + "SyncNotes": { "parameters": { "note_tag": 1000 } }, + "GetNotesById": { "parameters": { "note_id": 100 } } + } +} +``` + ### GetNotesById Request a set of notes. +**Limits:** `note_id` (100) + ### GetNoteScriptByRoot Request the script for a note by its root. @@ -88,6 +175,8 @@ Caller specifies the `prefix_len` (currently only 16), the list of prefix values If the response is chunked (i.e., `block_num < block_to`), continue by issuing another request with `block_from = block_num + 1` to retrieve subsequent updates. +**Limits:** `nullifier` (1000) + ### SyncAccountVault Returns information that allows clients to sync asset values for specific public accounts within a block range. @@ -104,6 +193,8 @@ The response includes each note's metadata and inclusion proof. A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the tip of the chain. +**Limits:** `note_tag` (1000) + ### SyncState Iteratively sync data for specific notes and accounts. @@ -114,6 +205,8 @@ Each update response also contains info about new notes, accounts etc. created. The low part of note tags are redacted to preserve some degree of privacy. Returned data therefore contains additional notes which should be filtered out by the client. 
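+
+A minimal sync-state loop might look as follows (a sketch: `client` is assumed to be a
+tonic-generated client for the `rpc.Api` service, the `BlockHeader` field names are illustrative,
+and error handling is elided):
+
+```rust
+let mut block_num = last_known_block;
+loop {
+    let request = SyncStateRequest {
+        block_num,
+        account_ids: account_ids.clone(),
+        note_tags: note_tags.clone(),
+    };
+    let response = client.sync_state(request).await?.into_inner();
+    // Apply returned accounts, transactions, and notes here, filtering out
+    // notes that match a redacted tag but are not relevant to this client.
+    let header_block_num = response.block_header.expect("header is always set").block_num;
+    if header_block_num == response.chain_tip {
+        break; // fully synced with the chain tip
+    }
+    block_num = header_block_num;
+}
+```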
+**Limits:** `account_id` (1000), `note_tag` (1000) + ### SyncStorageMaps Returns storage map synchronization data for a specified public account within a given block range. This method allows clients to efficiently sync the storage map state of an account by retrieving only the changes that occurred between two blocks. diff --git a/docs/internal/src/rpc.md b/docs/internal/src/rpc.md index dcc9c379a..c477b940d 100644 --- a/docs/internal/src/rpc.md +++ b/docs/internal/src/rpc.md @@ -19,6 +19,14 @@ If there is a mismatch in version, clients will encounter an error while executi The server will reject any version that does not have the same major and minor version to it. This behaviour will change after v1.0.0., at which point only the major version will be taken into account. +## Query limits (`GetLimits`) + +The RPC service exposes a `GetLimits` endpoint which returns the query parameter limits enforced by the server for +multi-value parameters (e.g. number of nullifiers, note tags, note IDs, account IDs). + +These limits are defined centrally in `miden_node_utils::limiter` and are enforced at the RPC boundary (and also inside +the store) to keep database queries bounded and to keep response payloads within the ~4 MB budget. + ## Error Handling The RPC component uses domain-specific error enums for structured error reporting instead of proto-generated error types. This provides better control over error codes and makes error handling more maintainable. diff --git a/docs/internal/src/store.md b/docs/internal/src/store.md index 5f6f5b036..1929b7c49 100644 --- a/docs/internal/src/store.md +++ b/docs/internal/src/store.md @@ -18,5 +18,3 @@ startup its likely that you created the database _before_ making schema changes The store consists mainly of a gRPC server which answers requests from the RPC and block-producer components, as well as new block submissions from the block-producer. - -A lightweight background process performs database query optimisation by analysing database queries and statistics. diff --git a/proto/build.rs b/proto/build.rs index 87eb57e55..3d4047e24 100644 --- a/proto/build.rs +++ b/proto/build.rs @@ -6,19 +6,19 @@ use miette::{Context, IntoDiagnostic}; use protox::prost::Message; const RPC_PROTO: &str = "rpc.proto"; -const STORE_RPC_PROTO: &str = "store/rpc.proto"; -const STORE_NTX_BUILDER_PROTO: &str = "store/ntx_builder.proto"; -const STORE_BLOCK_PRODUCER_PROTO: &str = "store/block_producer.proto"; -const STORE_SHARED_PROTO: &str = "store/shared.proto"; -const BLOCK_PRODUCER_PROTO: &str = "block_producer.proto"; +// Unified internal store API (store.Rpc, store.BlockProducer, store.NtxBuilder). +// We compile the same file three times to preserve existing descriptor names. 
+const STORE_RPC_PROTO: &str = "internal/store.proto";
+const STORE_NTX_BUILDER_PROTO: &str = "internal/store.proto";
+const STORE_BLOCK_PRODUCER_PROTO: &str = "internal/store.proto";
+const BLOCK_PRODUCER_PROTO: &str = "internal/block_producer.proto";
 const REMOTE_PROVER_PROTO: &str = "remote_prover.proto";
-const VALIDATOR_PROTO: &str = "validator.proto";
+const VALIDATOR_PROTO: &str = "internal/validator.proto";
 
 const RPC_DESCRIPTOR: &str = "rpc_file_descriptor.bin";
 const STORE_RPC_DESCRIPTOR: &str = "store_rpc_file_descriptor.bin";
 const STORE_NTX_BUILDER_DESCRIPTOR: &str = "store_ntx_builder_file_descriptor.bin";
 const STORE_BLOCK_PRODUCER_DESCRIPTOR: &str = "store_block_producer_file_descriptor.bin";
-const STORE_SHARED_DESCRIPTOR: &str = "store_shared_file_descriptor.bin";
 const BLOCK_PRODUCER_DESCRIPTOR: &str = "block_producer_file_descriptor.bin";
 const REMOTE_PROVER_DESCRIPTOR: &str = "remote_prover_file_descriptor.bin";
 const VALIDATOR_DESCRIPTOR: &str = "validator_file_descriptor.bin";
@@ -69,12 +69,6 @@ fn main() -> miette::Result<()> {
         .into_diagnostic()
         .wrap_err("writing store block producer file descriptor")?;
 
-    let store_shared_file_descriptor = protox::compile([STORE_SHARED_PROTO], includes)?;
-    let store_shared_path = PathBuf::from(&out).join(STORE_SHARED_DESCRIPTOR);
-    fs::write(&store_shared_path, store_shared_file_descriptor.encode_to_vec())
-        .into_diagnostic()
-        .wrap_err("writing store shared file descriptor")?;
-
     let block_producer_file_descriptor = protox::compile([BLOCK_PRODUCER_PROTO], includes)?;
     let block_producer_path = PathBuf::from(&out).join(BLOCK_PRODUCER_DESCRIPTOR);
     fs::write(&block_producer_path, block_producer_file_descriptor.encode_to_vec())
diff --git a/proto/proto/README.md b/proto/proto/README.md
new file mode 100644
index 000000000..5a3a9e321
--- /dev/null
+++ b/proto/proto/README.md
@@ -0,0 +1,19 @@
+# Proto Files Organization
+
+The files are organized in a visibility hierarchy: the root directory contains the public-facing RPC and remote prover protocols, while the `types` directory contains the data types used by these protocols. The `internal` directory contains the protocols used internally by the node, such as the store, network transaction builder, and block producer protocols.
+
+The organization of the files is as follows:
+
+```
+rpc.proto
+remote_prover.proto
+types/
+├── primitives.proto
+└── xxx.proto
+internal/
+├── store.proto
+├── ntx.proto
+└── block_producer.proto
+```
+
+The public-facing files should only import from the `types` directory, so that service reflection does not expose internal protocols.
diff --git a/proto/proto/block_producer.proto b/proto/proto/internal/block_producer.proto
similarity index 75%
rename from proto/proto/block_producer.proto
rename to proto/proto/internal/block_producer.proto
index dae8293fe..acd97151a 100644
--- a/proto/proto/block_producer.proto
+++ b/proto/proto/internal/block_producer.proto
@@ -2,6 +2,7 @@ syntax = "proto3";
 
 package block_producer;
 
+import "rpc.proto";
 import "types/note.proto";
 import "types/blockchain.proto";
 import "types/primitives.proto";
@@ -13,10 +14,10 @@ import "google/protobuf/empty.proto";
 service Api {
     // Returns the status info.
- rpc Status(google.protobuf.Empty) returns (BlockProducerStatus) {} + rpc Status(google.protobuf.Empty) returns (rpc.BlockProducerStatus) {} - // Submits proven transaction to the Miden network - rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (SubmitProvenTransactionResponse) {} + // Submits proven transaction to the Miden network. Returns the node's current block height. + rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} // Submits a proven batch to the Miden network. // @@ -28,7 +29,9 @@ service Api { // // All transactions in the batch but not in the mempool must build on the current mempool // state following normal transaction submission rules. - rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (SubmitProvenBatchResponse) {} + // + // Returns the node's current block height. + rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} // Subscribe to mempool events. // @@ -43,35 +46,6 @@ service Api { rpc MempoolSubscription(MempoolSubscriptionRequest) returns (stream MempoolEvent) {} } -// STATUS -// ================================================================================================ - -// Represents the status of the block producer. -message BlockProducerStatus { - // The block producer's running version. - string version = 1; - - // The block producer's status. - string status = 2; -} - -// SUBMIT PROVEN TRANSACTION -// ================================================================================================ - -// Represents the result of submitting proven transaction. -message SubmitProvenTransactionResponse { - // The node's current block height. - fixed32 block_height = 1; -} - -// SUBMIT PROVEN TRANSACTION -// ================================================================================================ - -message SubmitProvenBatchResponse { - // The node's current block height. - fixed32 block_height = 1; -} - // MEMPOOL SUBSCRIPTION // ================================================================================================ @@ -106,7 +80,7 @@ message MempoolEvent { // Changes to a network account, if any. This includes creation of new network accounts. // // The account delta is encoded using [winter_utils::Serializable] implementation - // for [miden_objects::account::delta::AccountDelta]. + // for [miden_protocol::account::delta::AccountDelta]. optional bytes network_account_delta = 4; } diff --git a/proto/proto/internal/store.proto b/proto/proto/internal/store.proto new file mode 100644 index 000000000..7fef64b13 --- /dev/null +++ b/proto/proto/internal/store.proto @@ -0,0 +1,352 @@ +// Unified specification of the internal store gRPC APIs. +syntax = "proto3"; +package store; + +import "google/protobuf/empty.proto"; +import "types/account.proto"; +import "types/blockchain.proto"; +import "types/transaction.proto"; +import "types/note.proto"; +import "types/primitives.proto"; +import "rpc.proto"; + +// RPC STORE API +// ================================================================================================ + +// Store API for the RPC component +service Rpc { + // Returns the status info. 
+    rpc Status(google.protobuf.Empty) returns (rpc.StoreStatus) {}
+
+    // Returns a Sparse Merkle Tree opening proof for each requested nullifier
+    //
+    // Each proof demonstrates either:
+    // - **Inclusion**: Nullifier exists in the tree (note was consumed)
+    // - **Non-inclusion**: Nullifier does not exist (note was not consumed)
+    //
+    // The `leaf` field indicates the status:
+    // - `empty_leaf_index`: Non-inclusion proof
+    // - `single` or `multiple`: Inclusion proof if the nullifier key is present
+    //
+    // Verify proofs against the nullifier tree root in the latest block header.
+    rpc CheckNullifiers(rpc.NullifierList) returns (rpc.CheckNullifiersResponse) {}
+
+    // Returns the latest state of an account with the specified ID.
+    rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {}
+
+    // Returns the latest state proof of the specified account.
+    rpc GetAccountProof(rpc.AccountProofRequest) returns (rpc.AccountProofResponse) {}
+
+    // Returns raw block data for the specified block number.
+    rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {}
+
+    // Retrieves block header by given block number. Optionally, it also returns the MMR path
+    // and current chain length to authenticate the block's inclusion.
+    rpc GetBlockHeaderByNumber(rpc.BlockHeaderByNumberRequest) returns (rpc.BlockHeaderByNumberResponse) {}
+
+    // Returns a list of committed notes matching the provided note IDs.
+    rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {}
+
+    // Returns the script for a note by its root.
+    rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {}
+
+    // Returns a list of nullifiers that match the specified prefixes and are recorded in the node.
+    //
+    // Note that only 16-bit prefixes are supported at this time.
+    rpc SyncNullifiers(rpc.SyncNullifiersRequest) returns (rpc.SyncNullifiersResponse) {}
+
+    // Returns info which can be used by the requester to sync up to the tip of the chain for the notes they are interested in.
+    //
+    // The requester specifies the `note_tags` they are interested in and the block height from which
+    // to search for matching notes. The request will then return the next block containing any note
+    // matching the provided tags.
+    //
+    // The response includes each note's metadata and inclusion proof.
+    //
+    // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the
+    // tip of the chain.
+    rpc SyncNotes(rpc.SyncNotesRequest) returns (rpc.SyncNotesResponse) {}
+
+    // Returns info which can be used by the requester to sync up to the latest state of the chain
+    // for the objects (accounts, notes, nullifiers) the requester is interested in.
+    //
+    // This request returns the next block containing requested data. It also returns `chain_tip`
+    // which is the latest block number in the chain. The requester is expected to repeat these
+    // requests in a loop until `response.block_header.block_num == response.chain_tip`, at which
+    // point the requester is fully synchronized with the chain.
+    //
+    // Each request also returns info about new notes, nullifiers, etc. created. It also returns a
+    // chain MMR delta that can be used to update the state of the chain MMR; this includes both
+    // chain MMR peaks and chain MMR nodes.
+    //
+    // To preserve some degree of privacy, note tag and nullifier filters contain only the high
Thus, returned data contains excessive notes and nullifiers, requester can make + // additional filtering of that data on its side. + rpc SyncState(rpc.SyncStateRequest) returns (rpc.SyncStateResponse) {} + + // Returns account vault updates for specified account within a block range. + rpc SyncAccountVault(rpc.SyncAccountVaultRequest) returns (rpc.SyncAccountVaultResponse) {} + + // Returns storage map updates for specified account and storage slots within a block range. + rpc SyncStorageMaps(rpc.SyncStorageMapsRequest) returns (rpc.SyncStorageMapsResponse) {} + + // Returns transactions records for specific accounts within a block range. + rpc SyncTransactions(rpc.SyncTransactionsRequest) returns (rpc.SyncTransactionsResponse) {} +} + +// BLOCK PRODUCER STORE API +// ================================================================================================ + +// Store API for the BlockProducer component +service BlockProducer { + // Applies changes of a new block to the DB and in-memory data structures. + rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {} + + // Retrieves block header by given block number. Optionally, it also returns the MMR path + // and current chain length to authenticate the block's inclusion. + rpc GetBlockHeaderByNumber(rpc.BlockHeaderByNumberRequest) returns (rpc.BlockHeaderByNumberResponse) {} + + // Returns data required to prove the next block. + rpc GetBlockInputs(BlockInputsRequest) returns (BlockInputs) {} + + // Returns the inputs for a transaction batch. + rpc GetBatchInputs(BatchInputsRequest) returns (BatchInputs) {} + + // Returns data required to validate a new transaction. + rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} +} + +// GET BLOCK INPUTS +// ================================================================================================ + +// Returns data required to prove the next block. +message BlockInputsRequest { + // IDs of all accounts updated in the proposed block for which to retrieve account witnesses. + repeated account.AccountId account_ids = 1; + + // Nullifiers of all notes consumed by the block for which to retrieve witnesses. + // + // Due to note erasure it will generally not be possible to know the exact set of nullifiers + // a block will create, unless we pre-execute note erasure. So in practice, this set of + // nullifiers will be the set of nullifiers of all proven batches in the block, which is a + // superset of the nullifiers the block may create. + // + // However, if it is known that a certain note will be erased, it would not be necessary to + // provide a nullifier witness for it. + repeated primitives.Digest nullifiers = 2; + + // Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. + repeated primitives.Digest unauthenticated_notes = 3; + + // Array of block numbers referenced by all batches in the block. + repeated fixed32 reference_blocks = 4; +} + +// Represents the result of getting block inputs. +message BlockInputs { + // A nullifier returned as a response to the `GetBlockInputs`. + message NullifierWitness { + // The nullifier. + primitives.Digest nullifier = 1; + + // The SMT proof to verify the nullifier's inclusion in the nullifier tree. + primitives.SmtOpening opening = 2; + } + // The latest block header. + blockchain.BlockHeader latest_block_header = 1; + + // Proof of each requested unauthenticated note's inclusion in a block, **if it existed in + // the store**. 
+    repeated note.NoteInclusionInBlockProof unauthenticated_note_proofs = 2;
+
+    // The serialized chain MMR which includes proofs for all blocks referenced by the
+    // above note inclusion proofs as well as proofs for inclusion of the requested blocks
+    // referenced by the batches in the block.
+    bytes partial_block_chain = 3;
+
+    // The state commitments of the requested accounts and their authentication paths.
+    repeated account.AccountWitness account_witnesses = 4;
+
+    // The requested nullifiers and their authentication paths.
+    repeated NullifierWitness nullifier_witnesses = 5;
+}
+
+// GET BATCH INPUTS
+// ================================================================================================
+
+// Returns the inputs for a transaction batch.
+message BatchInputsRequest {
+    // List of unauthenticated note commitments to be queried from the database.
+    repeated primitives.Digest note_commitments = 1;
+    // Set of block numbers referenced by transactions.
+    repeated fixed32 reference_blocks = 2;
+}
+
+// Represents the result of getting batch inputs.
+message BatchInputs {
+    // The block header that the transaction batch should reference.
+    blockchain.BlockHeader batch_reference_block_header = 1;
+
+    // Proof of each _found_ unauthenticated note's inclusion in a block.
+    repeated note.NoteInclusionInBlockProof note_proofs = 2;
+
+    // The serialized chain MMR which includes proofs for all blocks referenced by the
+    // above note inclusion proofs as well as proofs for inclusion of the blocks referenced
+    // by the transactions in the batch.
+    bytes partial_block_chain = 3;
+}
+
+// GET TRANSACTION INPUTS
+// ================================================================================================
+
+// Returns data required to validate a new transaction.
+message TransactionInputsRequest {
+    // ID of the account against which a transaction is executed.
+    account.AccountId account_id = 1;
+    // Set of nullifiers consumed by this transaction.
+    repeated primitives.Digest nullifiers = 2;
+    // Set of unauthenticated note commitments to check for existence on-chain.
+    //
+    // These are notes which were not on-chain at the time the transaction was proven,
+    // but may be present by now.
+    repeated primitives.Digest unauthenticated_notes = 3;
+}
+
+// Represents the result of getting transaction inputs.
+message TransactionInputs {
+    // An account returned as a response to the `GetTransactionInputs`.
+    message AccountTransactionInputRecord {
+        // The account ID.
+        account.AccountId account_id = 1;
+
+        // The latest account commitment, zero commitment if the account doesn't exist.
+        primitives.Digest account_commitment = 2;
+    }
+
+    // A nullifier returned as a response to the `GetTransactionInputs`.
+    message NullifierTransactionInputRecord {
+        // The nullifier ID.
+        primitives.Digest nullifier = 1;
+
+        // The block at which the nullifier has been consumed, zero if not consumed.
+        fixed32 block_num = 2;
+    }
+
+    // Account state proof.
+    AccountTransactionInputRecord account_state = 1;
+
+    // List of nullifiers that have been consumed.
+    repeated NullifierTransactionInputRecord nullifiers = 2;
+
+    // List of unauthenticated notes that were found in the database.
+    repeated primitives.Digest found_unauthenticated_notes = 3;
+
+    // The node's current block height.
+    fixed32 block_height = 4;
+
+    // Whether the account ID prefix is unique. Only relevant for account creation requests.
+    optional bool new_account_id_prefix_is_unique = 5; // TODO: Replace this with an error once a general error message exists.
+}
+
+// NTX BUILDER STORE API
+// ================================================================================================
+
+// Store API for the network transaction builder component
+service NtxBuilder {
+    // Retrieves block header by given block number. Optionally, it also returns the MMR path
+    // and current chain length to authenticate the block's inclusion.
+    rpc GetBlockHeaderByNumber(rpc.BlockHeaderByNumberRequest) returns (rpc.BlockHeaderByNumberResponse) {}
+
+    // Returns a paginated list of unconsumed network notes.
+    rpc GetUnconsumedNetworkNotes(UnconsumedNetworkNotesRequest) returns (UnconsumedNetworkNotes) {}
+
+    // Returns the block header at the chain tip, as well as the MMR peaks corresponding to this
+    // header for executing network transactions. If the block number is not provided, the latest
+    // header and peaks will be retrieved.
+    rpc GetCurrentBlockchainData(blockchain.MaybeBlockNumber) returns (CurrentBlockchainData) {}
+
+    // Returns the latest state of a network account with the specified account prefix.
+    rpc GetNetworkAccountDetailsByPrefix(AccountIdPrefix) returns (MaybeAccountDetails) {}
+
+    // Returns a list of all network account ids.
+    rpc GetNetworkAccountIds(rpc.BlockRange) returns (NetworkAccountIdList) {}
+
+    // Returns the script for a note by its root.
+    rpc GetNoteScriptByRoot(note.NoteRoot) returns (rpc.MaybeNoteScript) {}
+}
+
+// GET NETWORK ACCOUNT DETAILS BY PREFIX
+// ================================================================================================
+
+// Account ID prefix.
+message AccountIdPrefix {
+    // Account ID prefix.
+    fixed32 account_id_prefix = 1;
+}
+
+// Represents the result of getting network account details by prefix.
+message MaybeAccountDetails {
+    // Account details.
+    optional account.AccountDetails details = 1;
+}
+
+// GET UNCONSUMED NETWORK NOTES
+// ================================================================================================
+
+// Returns a paginated list of unconsumed network notes for an account.
+//
+// Notes created or consumed after the specified block are excluded from the result.
+message UnconsumedNetworkNotesRequest {
+    // This should be null on the first call, and set to the response token until the response token
+    // is null, at which point all data has been fetched.
+    //
+    // Note that this token is only valid if used with the same parameters.
+    optional uint64 page_token = 1;
+
+    // Number of notes to retrieve per page.
+    uint64 page_size = 2;
+
+    // The network account ID prefix to filter notes by.
+    uint32 network_account_id_prefix = 3;
+
+    // The block number to filter the returned notes by.
+    //
+    // Notes that are created or consumed after this block are excluded from the result.
+    fixed32 block_num = 4;
+}
+
+// Represents the result of getting the unconsumed network notes.
+message UnconsumedNetworkNotes {
+    // An opaque pagination token.
+    //
+    // Use this in your next request to get the next set of data.
+    //
+    // Will be null once there is no more data remaining.
+    optional uint64 next_token = 1;
+
+    // The list of unconsumed network notes.
+    repeated note.NetworkNote notes = 2;
+}
+
+// GET NETWORK ACCOUNTS
+// ================================================================================================
+
+// Represents the result of getting the network account ids.
+message NetworkAccountIdList {
+    // Pagination information.
+    rpc.PaginationInfo pagination_info = 1;
+
+    // The list of network account ids.
+    repeated account.AccountId account_ids = 2;
+}
+
+// GET CURRENT BLOCKCHAIN DATA
+// ================================================================================================
+
+// Current blockchain data based on the requested block number.
+message CurrentBlockchainData {
+    // Commitments that represent the current state according to the MMR.
+    repeated primitives.Digest current_peaks = 1;
+    // Current block header.
+    optional blockchain.BlockHeader current_block_header = 2;
+}
diff --git a/proto/proto/validator.proto b/proto/proto/internal/validator.proto
similarity index 81%
rename from proto/proto/validator.proto
rename to proto/proto/internal/validator.proto
index 6d4e801b4..e3bb02a61 100644
--- a/proto/proto/validator.proto
+++ b/proto/proto/internal/validator.proto
@@ -4,6 +4,7 @@ package validator;
 
 import "types/transaction.proto";
 import "types/blockchain.proto";
+import "types/primitives.proto";
 import "google/protobuf/empty.proto";
 
 // VALIDATOR API
@@ -16,9 +17,12 @@ service Api {
 
     // Submits a transaction to the validator.
     rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (google.protobuf.Empty) {}
+
+    // Validates a proposed block and, on success, returns a signature over its header.
+    rpc SignBlock(blockchain.ProposedBlock) returns (blockchain.BlockSignature) {}
 }
 
-// STATUS
+// VALIDATOR STATUS
 // ================================================================================================
 
 // Represents the status of the validator.
diff --git a/proto/proto/remote_prover.proto b/proto/proto/remote_prover.proto
index 49132fd6f..28a0ad485 100644
--- a/proto/proto/remote_prover.proto
+++ b/proto/proto/remote_prover.proto
@@ -33,7 +33,7 @@ message ProofRequest {
     // type-specific:
     //  - TRANSACTION: TransactionInputs encoded.
     //  - BATCH: ProposedBatch encoded.
-    //  - BLOCK: ProposedBlock encoded.
+    //  - BLOCK: BlockProofRequest encoded.
     bytes payload = 2;
 }
 
@@ -42,7 +42,7 @@ message Proof {
     // Serialized proof bytes.
     //  - TRANSACTION: Returns an encoded ProvenTransaction.
     //  - BATCH: Returns an encoded ProvenBatch.
-    //  - BLOCK: Returns an encoded ProvenBlock.
+    //  - BLOCK: Returns an encoded BlockProof.
     bytes payload = 1;
 }
 
@@ -56,8 +56,8 @@ service ProxyStatusApi {
 
 // Status of an individual worker in the proxy.
 message ProxyWorkerStatus {
-    // The address of the worker.
-    string address = 1;
+    // The name of the worker.
+    string name = 1;
     // The version of the worker.
     string version = 2;
     // The health status of the worker.
diff --git a/proto/proto/rpc.proto b/proto/proto/rpc.proto
index dd3f1d6d6..dccf44020 100644
--- a/proto/proto/rpc.proto
+++ b/proto/proto/rpc.proto
@@ -7,9 +7,6 @@ import "types/blockchain.proto";
 import "types/note.proto";
 import "types/primitives.proto";
 import "types/transaction.proto";
-import "block_producer.proto";
-import "store/rpc.proto";
-import "store/shared.proto";
 import "google/protobuf/empty.proto";
 
 // RPC API
@@ -20,30 +17,40 @@ service Api {
     // Returns the status info of the node.
     rpc Status(google.protobuf.Empty) returns (RpcStatus) {}
 
-    // Returns a nullifier proof for each of the requested nullifiers.
- rpc CheckNullifiers(rpc_store.NullifierList) returns (rpc_store.CheckNullifiersResponse) {} + // Returns a Sparse Merkle Tree opening proof for each requested nullifier + // + // Each proof demonstrates either: + // - **Inclusion**: Nullifier exists in the tree (note was consumed) + // - **Non-inclusion**: Nullifier does not exist (note was not consumed) + // + // The `leaf` field indicates the status: + // * `empty_leaf_index`: Non-inclusion proof (nullifier not in tree) + // * `single` or `multiple`: Inclusion proof only if the requested nullifier appears as a key. + // + // Verify proofs against the nullifier tree root in the latest block header. + rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} // Returns the latest state of an account with the specified ID. rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {} // Returns the latest state proof of the specified account. - rpc GetAccountProof(rpc_store.AccountProofRequest) returns (rpc_store.AccountProofResponse) {} + rpc GetAccountProof(AccountProofRequest) returns (AccountProofResponse) {} // Returns raw block data for the specified block number. rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} // Retrieves block header by given block number. Optionally, it also returns the MMR path // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} + rpc GetBlockHeaderByNumber(BlockHeaderByNumberRequest) returns (BlockHeaderByNumberResponse) {} // Returns a list of notes matching the provided note IDs. rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (shared.MaybeNoteScript) {} + rpc GetNoteScriptByRoot(note.NoteRoot) returns (MaybeNoteScript) {} - // Submits proven transaction to the Miden network. - rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (block_producer.SubmitProvenTransactionResponse) {} + // Submits proven transaction to the Miden network. Returns the node's current block height. + rpc SubmitProvenTransaction(transaction.ProvenTransaction) returns (blockchain.BlockNumber) {} // Submits a proven batch of transactions to the Miden network. // @@ -55,15 +62,17 @@ service Api { // // All transactions in the batch but not in the mempool must build on the current mempool // state following normal transaction submission rules. - rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (block_producer.SubmitProvenBatchResponse) {} + // + // Returns the node's current block height. + rpc SubmitProvenBatch(transaction.ProvenTransactionBatch) returns (blockchain.BlockNumber) {} // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. // // Note that only 16-bit prefixes are supported at this time. - rpc SyncNullifiers(rpc_store.SyncNullifiersRequest) returns (rpc_store.SyncNullifiersResponse) {} + rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(rpc_store.SyncAccountVaultRequest) returns (rpc_store.SyncAccountVaultResponse) {} + rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} // Returns info which can be used by the client to sync up to the tip of chain for the notes they are interested in. 
// @@ -74,7 +83,7 @@ service Api { // // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the // tip of the chain. - rpc SyncNotes(rpc_store.SyncNotesRequest) returns (rpc_store.SyncNotesResponse) {} + rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} // Returns info which can be used by the client to sync up to the latest state of the chain // for the objects (accounts and notes) the client is interested in. @@ -91,13 +100,20 @@ service Api { // For preserving some degree of privacy, note tags contain only high // part of hashes. Thus, returned data contains excessive notes, client can make // additional filtering of that data on its side. - rpc SyncState(rpc_store.SyncStateRequest) returns (rpc_store.SyncStateResponse) {} + rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncStorageMaps(rpc_store.SyncStorageMapsRequest) returns (rpc_store.SyncStorageMapsResponse) {} + rpc SyncStorageMaps(SyncStorageMapsRequest) returns (SyncStorageMapsResponse) {} // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(rpc_store.SyncTransactionsRequest) returns (rpc_store.SyncTransactionsResponse) {} + rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} + + // Returns the query parameter limits configured for RPC methods. + // + // These define the maximum number of each parameter a method will accept. + // Exceeding the limit will result in the request being rejected and you should instead send + // multiple smaller requests. + rpc GetLimits(google.protobuf.Empty) returns (RpcLimits) {} } // RPC STATUS @@ -112,8 +128,521 @@ message RpcStatus { primitives.Digest genesis_commitment = 2; // The store status. - rpc_store.StoreStatus store = 3; + StoreStatus store = 3; // The block producer status. - block_producer.BlockProducerStatus block_producer = 4; + BlockProducerStatus block_producer = 4; +} + + +// BLOCK PRODUCER STATUS +// ================================================================================================ + + +// Represents the status of the block producer. +message BlockProducerStatus { + // The block producer's running version. + string version = 1; + + // The block producer's status. + string status = 2; + + // The block producer's current view of the chain tip height. + // + // This is the height of the latest block that the block producer considers + // to be part of the canonical chain. + fixed32 chain_tip = 4; + + // Statistics about the mempool. + MempoolStats mempool_stats = 3; +} + +// Statistics about the mempool. +message MempoolStats { + // Number of transactions currently in the mempool waiting to be batched. + uint64 unbatched_transactions = 1; + + // Number of batches currently being proven. + uint64 proposed_batches = 2; + + // Number of proven batches waiting for block inclusion. + uint64 proven_batches = 3; +} + +// STORE STATUS +// ================================================================================================ + +// Represents the status of the store. +message StoreStatus { + // The store's running version. + string version = 1; + + // The store's status. + string status = 2; + + // Number of the latest block in the chain. 
+    fixed32 chain_tip = 3;
+}
+
+// GET BLOCK HEADER BY NUMBER
+// ================================================================================================
+
+// Returns the block header corresponding to the requested block number, as well as the merkle
+// path and current forest which validate the block's inclusion in the chain.
+//
+// The Merkle path is an MMR proof for the block's leaf, based on the current chain length.
+message BlockHeaderByNumberRequest {
+    // The target block height, defaults to latest if not provided.
+    optional uint32 block_num = 1;
+    // Whether or not to return authentication data for the block header.
+    optional bool include_mmr_proof = 2;
+}
+
+// Represents the result of getting a block header by block number.
+message BlockHeaderByNumberResponse {
+    // The requested block header.
+    blockchain.BlockHeader block_header = 1;
+
+    // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`.
+    optional primitives.MerklePath mmr_path = 2;
+
+    // Current chain length.
+    optional fixed32 chain_length = 3;
+}
+
+// GET NOTE SCRIPT BY ROOT
+// ================================================================================================
+
+// Represents a note script or nothing.
+message MaybeNoteScript {
+    // The script for a note by its root.
+    optional note.NoteScript script = 1;
+}
+
+// GET ACCOUNT PROOF
+// ================================================================================================
+
+// Returns the latest state proof of the specified account.
+message AccountProofRequest {
+    // Request the details for a public account.
+    message AccountDetailRequest {
+        // Represents a storage slot index and the associated map keys.
+        message StorageMapDetailRequest {
+            // Indirection required for use in `oneof {..}` block.
+            message MapKeys {
+                // A list of map keys associated with this storage slot.
+                repeated primitives.Digest map_keys = 1;
+            }
+            // Storage slot name.
+            string slot_name = 1;
+
+            oneof slot_data {
+                // Request to return all storage map data. If the number exceeds a threshold of 1000 entries,
+                // the response will not contain them and they must be requested separately.
+                bool all_entries = 2;
+
+                // A list of map keys associated with the given storage slot identified by `slot_name`.
+                MapKeys map_keys = 3;
+            }
+        }
+
+        // Last known code commitment to the requester. The response will include account code
+        // only if its commitment is different from this value.
+        //
+        // If the field is omitted, the response will not include the account code.
+        optional primitives.Digest code_commitment = 1;
+
+        // Last known asset vault commitment to the requester. The response will include asset vault data
+        // only if its commitment is different from this value. If the value is not present in the
+        // request, the response will not contain one either.
+        // If the number of to-be-returned asset entries exceeds a threshold, they have to be requested
+        // separately, which is signaled in the response message with a dedicated flag.
+        optional primitives.Digest asset_vault_commitment = 2;
+
+        // Additional request per storage map.
+        repeated StorageMapDetailRequest storage_maps = 3;
+    }
+
+    // ID of the account for which we want to get data.
+    account.AccountId account_id = 1;
+
+    // Optional block height at which to return the proof.
+    //
+    // Defaults to current chain tip if unspecified.
+    optional blockchain.BlockNumber block_num = 2;
+
+    // Request for additional account details; valid only for public accounts.
+    optional AccountDetailRequest details = 3;
+}
+
+// Represents the result of getting account proof.
+message AccountProofResponse {
+
+    message AccountDetails {
+        // Account header.
+        account.AccountHeader header = 1;
+
+        // Account storage data.
+        AccountStorageDetails storage_details = 2;
+
+        // Account code; empty if code commitments matched or none was requested.
+        optional bytes code = 3;
+
+        // Account asset vault data; empty if vault commitments matched or the requester
+        // omitted it in the request.
+        optional AccountVaultDetails vault_details = 4;
+    }
+
+    // The block number at which the account witness was created and the account details were observed.
+    blockchain.BlockNumber block_num = 1;
+
+    // Account ID, current state commitment, and SMT path.
+    account.AccountWitness witness = 2;
+
+    // Additional details for public accounts.
+    optional AccountDetails details = 3;
+}
+
+// Account vault details for AccountProofResponse
+message AccountVaultDetails {
+    // A flag that is set to true if the account contains too many assets. This indicates
+    // to the user that the `SyncAccountVault` endpoint should be used to retrieve the
+    // account's assets.
+    bool too_many_assets = 1;
+
+    // When too_many_assets == false, this will contain the list of assets in the
+    // account's vault.
+    repeated primitives.Asset assets = 2;
+}
+
+// Account storage details for AccountProofResponse
+message AccountStorageDetails {
+    message AccountStorageMapDetails {
+        // Wrapper for repeated storage map entries
+        message MapEntries {
+            // Definition of individual storage entries.
+            message StorageMapEntry {
+                primitives.Digest key = 1;
+                primitives.Digest value = 2;
+            }
+
+            repeated StorageMapEntry entries = 1;
+        }
+
+        // Storage slot name.
+        string slot_name = 1;
+
+        // A flag that is set to `true` if the number of to-be-returned entries in the
+        // storage map would exceed a threshold. This indicates to the user that the `SyncStorageMaps`
+        // endpoint should be used to get all storage map data.
+        bool too_many_entries = 2;
+
+        // By default we provide all storage entries.
+        MapEntries entries = 3;
+    }
+
+    // Account storage header (storage slot info for up to 256 slots)
+    account.AccountStorageHeader header = 1;
+
+    // Additional data for the requested storage maps
+    repeated AccountStorageMapDetails map_details = 2;
+}
+
+// CHECK NULLIFIERS
+// ================================================================================================
+
+// List of nullifiers to return proofs for.
+message NullifierList {
+    // List of nullifiers to return proofs for.
+    repeated primitives.Digest nullifiers = 1;
+}
+
+// Represents the result of checking nullifiers.
+message CheckNullifiersResponse {
+    // Each requested nullifier has its corresponding nullifier proof at the same position.
+    repeated primitives.SmtOpening proofs = 1;
+}
+
+// SYNC NULLIFIERS
+// ================================================================================================
+
+// Returns a list of nullifiers that match the specified prefixes and are recorded in the node.
+message SyncNullifiersRequest {
+    // Block range for which the nullifiers are requested (bounds inclusive).
+    BlockRange block_range = 1;
+
+    // Number of bits used for nullifier prefix. Currently the only supported value is 16.
+    uint32 prefix_len = 2;
+
+    // List of nullifiers to check. Each nullifier is specified by its prefix with length equal
+    // to `prefix_len`.
+    repeated uint32 nullifiers = 3;
+}
+
+// Represents the result of syncing nullifiers.
+message SyncNullifiersResponse {
+    // Represents a single nullifier update.
+    message NullifierUpdate {
+        // Nullifier ID.
+        primitives.Digest nullifier = 1;
+
+        // Block number at which the nullifier was recorded.
+        fixed32 block_num = 2;
+    }
+
+    // Pagination information.
+    PaginationInfo pagination_info = 1;
+
+    // List of nullifiers matching the prefixes specified in the request.
+    repeated NullifierUpdate nullifiers = 2;
+}
+
+// SYNC ACCOUNT VAULT
+// ================================================================================================
+
+// Account vault synchronization request.
+//
+// Allows requesters to sync asset values for specific public accounts within a block range.
+message SyncAccountVaultRequest {
+    // Block range from which to start synchronizing.
+    //
+    // If `block_to` is specified, it must be close to the chain tip (i.e., within 30 blocks);
+    // otherwise an error is returned.
+    BlockRange block_range = 1;
+
+    // Account for which we want to sync the asset vault.
+    account.AccountId account_id = 2;
+}
+
+message SyncAccountVaultResponse {
+    // Pagination information.
+    PaginationInfo pagination_info = 1;
+
+    // List of asset updates for the account.
+    //
+    // Multiple updates can be returned for a single asset; the one with the highest `block_num`
+    // is expected to be retained by the caller.
+    repeated AccountVaultUpdate updates = 2;
+}
+
+message AccountVaultUpdate {
+    // Vault key associated with the asset.
+    primitives.Digest vault_key = 1;
+
+    // Asset value related to the vault key.
+    // If not present, the asset was removed from the vault.
+    optional primitives.Asset asset = 2;
+
+    // Block number at which the above asset was updated in the account vault.
+    fixed32 block_num = 3;
+}
+
+// SYNC NOTES
+// ================================================================================================
+
+// Note synchronization request.
+//
+// Specifies the note tags that the requester is interested in. The server returns the first
+// block containing a note matching `note_tags`, or the chain tip if no such block exists.
+message SyncNotesRequest {
+    // Block range from which to start synchronizing.
+    BlockRange block_range = 1;
+
+    // Specifies the tags which the requester is interested in.
+    repeated fixed32 note_tags = 2;
+}
+
+// Represents the result of a sync notes request.
+message SyncNotesResponse {
+    // Pagination information.
+    PaginationInfo pagination_info = 1;
+
+    // Block header of the block with the first note matching the specified criteria.
+    blockchain.BlockHeader block_header = 2;
+
+    // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`.
+    //
+    // An MMR proof can be constructed for the leaf of index `block_header.block_num` of
+    // an MMR of forest `chain_tip` with this path.
+    primitives.MerklePath mmr_path = 3;
+
+    // List of all notes together with the Merkle paths from `response.block_header.note_root`.
+    repeated note.NoteSyncRecord notes = 4;
+}
+
+// SYNC STATE
+// ================================================================================================
+
+// State synchronization request.
+//
+// Specifies the state updates the requester is interested in. The server will return the first
+// block which contains a note matching `note_tags`, or the chain tip, along with the
+// corresponding updates to `account_ids` for that block range.
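+//
+// A basic state sync can be implemented by repeatedly issuing this request, starting the next
+// request from `response.block_header.block_num + 1`, until
+// `response.block_header.block_num == response.chain_tip`.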
+message SyncStateRequest {
+    // Last block known by the requester. The response will contain data starting from the next
+    // block, until the first block containing a note matching the requested tags, or the chain
+    // tip if there are no such notes.
+    fixed32 block_num = 1;
+
+    // Accounts whose commitments should be included in the response.
+    //
+    // An account commitment will be included if and only if it is the latest update. That is,
+    // the account may have been updated within the given range, but if that update is not the
+    // latest one, it won't be included in the response.
+    repeated account.AccountId account_ids = 2;
+
+    // Specifies the tags which the requester is interested in.
+    repeated fixed32 note_tags = 3;
+}
+
+// Represents the result of a sync state request.
+message SyncStateResponse {
+    // Number of the latest block in the chain.
+    fixed32 chain_tip = 1;
+
+    // Block header of the block with the first note matching the specified criteria.
+    blockchain.BlockHeader block_header = 2;
+
+    // Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`.
+    primitives.MmrDelta mmr_delta = 3;
+
+    // List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`.
+    repeated account.AccountSummary accounts = 5;
+
+    // List of transactions executed against requested accounts between `request.block_num + 1` and
+    // `response.block_header.block_num`.
+    repeated transaction.TransactionSummary transactions = 6;
+
+    // List of all notes together with the Merkle paths from `response.block_header.note_root`.
+    repeated note.NoteSyncRecord notes = 7;
+}
+
+// SYNC STORAGE MAPS
+// ================================================================================================
+
+// Storage map synchronization request.
+//
+// Allows requesters to sync storage map values for specific public accounts within a block range,
+// with support for cursor-based pagination to handle large storage maps.
+message SyncStorageMapsRequest {
+    // Block range from which to start synchronizing.
+    //
+    // If `block_to` is specified, it must be close to the chain tip (i.e., within 30 blocks);
+    // otherwise an error is returned.
+    BlockRange block_range = 1;
+
+    // Account for which we want to sync storage maps.
+    account.AccountId account_id = 3;
+}
+
+message SyncStorageMapsResponse {
+    // Pagination information.
+    PaginationInfo pagination_info = 1;
+
+    // The list of storage map updates.
+    //
+    // Multiple updates can be returned for a single slot name and key combination; the one
+    // with the highest `block_num` is expected to be retained by the caller.
+    repeated StorageMapUpdate updates = 2;
+}
+
+// Represents a single storage map update.
+message StorageMapUpdate {
+    // Block number in which the slot was updated.
+    fixed32 block_num = 1;
+
+    // Storage slot name.
+    string slot_name = 2;
+
+    // The storage map key.
+    primitives.Digest key = 3;
+
+    // The storage map value.
+    primitives.Digest value = 4;
+}
+
+// BLOCK RANGE
+// ================================================================================================
+
+// Represents a block range.
+message BlockRange {
+    // Block number from which to start (inclusive).
+    fixed32 block_from = 1;
+
+    // Block number up to which to check (inclusive). If not specified, checks up to the latest block.
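+    //
+    // Endpoints that return `PaginationInfo` may serve less than the requested range; in that
+    // case `PaginationInfo.block_num` reports how far the returned data actually reaches.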
+    optional fixed32 block_to = 2;
+}
+
+// PAGINATION INFO
+// ================================================================================================
+
+// Represents pagination information for chunked responses.
+//
+// Pagination is done using block numbers as the axis, allowing requesters to request
+// data in chunks by specifying block ranges and continuing from where the previous
+// response left off.
+//
+// To request the next chunk, the requester should use `block_num + 1` from the previous response
+// as the `block_from` for the next request.
+message PaginationInfo {
+    // Current chain tip.
+    fixed32 chain_tip = 1;
+
+    // The block number of the last block checked for this response.
+    //
+    // For chunked responses, this may be less than `request.block_range.block_to`. If it is, the
+    // requester is expected to make a subsequent request starting from the block after this one
+    // (i.e., `request.block_range.block_from = block_num + 1`).
+    fixed32 block_num = 2;
+}
+
+// SYNC TRANSACTIONS
+// ================================================================================================
+
+// Transaction synchronization request.
+//
+// Allows requesters to sync transactions for specific accounts within a block range.
+message SyncTransactionsRequest {
+    // Block range from which to start synchronizing.
+    BlockRange block_range = 1;
+
+    // Accounts to sync transactions for.
+    repeated account.AccountId account_ids = 2;
+}
+
+// Represents the result of a sync transactions request.
+message SyncTransactionsResponse {
+    // Pagination information.
+    PaginationInfo pagination_info = 1;
+
+    // List of transaction records.
+    repeated TransactionRecord transactions = 2;
+}
+
+// Represents a transaction record.
+message TransactionRecord {
+    // Block number in which the transaction was included.
+    fixed32 block_num = 1;
+
+    // A transaction header.
+    transaction.TransactionHeader header = 2;
+}
+
+// RPC LIMITS
+// ================================================================================================
+
+// Represents the query parameter limits for RPC endpoints.
+message RpcLimits {
+    // Maps RPC endpoint names to their parameter limits.
+    // Key: endpoint name (e.g., "CheckNullifiers", "SyncState")
+    // Value: map of parameter names to their limit values
+    map<string, EndpointLimits> endpoints = 1;
+}
+
+// Represents the parameter limits for a single endpoint.
+message EndpointLimits {
+    // Maps parameter names to their limit values.
+    // Key: parameter name (e.g., "nullifier", "account_id")
+    // Value: limit value
+    map<string, uint64> parameters = 1;
+}
diff --git a/proto/proto/store/block_producer.proto b/proto/proto/store/block_producer.proto
deleted file mode 100644
index e0218bd0f..000000000
--- a/proto/proto/store/block_producer.proto
+++ /dev/null
@@ -1,164 +0,0 @@
-// Specification of the Block Producer store RPC.
-syntax = "proto3";
-package block_producer_store;
-
-import "google/protobuf/empty.proto";
-import "types/account.proto";
-import "types/blockchain.proto";
-import "types/note.proto";
-import "types/primitives.proto";
-import "store/shared.proto";
-
-// BLOCK PRODUCER STORE API
-// ================================================================================================
-
-// Store API for the BlockProducer component
-service BlockProducer {
-    // Applies changes of a new block to the DB and in-memory data structures.
-    rpc ApplyBlock(blockchain.Block) returns (google.protobuf.Empty) {}
-
-    // Retrieves block header by given block number.
Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} - - // Returns data required to prove the next block. - rpc GetBlockInputs(BlockInputsRequest) returns (BlockInputs) {} - - // Returns the inputs for a transaction batch. - rpc GetBatchInputs(BatchInputsRequest) returns (BatchInputs) {} - - // Returns data required to validate a new transaction. - rpc GetTransactionInputs(TransactionInputsRequest) returns (TransactionInputs) {} -} - -// GET BLOCK INPUTS -// ================================================================================================ - -// Returns data required to prove the next block. -message BlockInputsRequest { - // IDs of all accounts updated in the proposed block for which to retrieve account witnesses. - repeated account.AccountId account_ids = 1; - - // Nullifiers of all notes consumed by the block for which to retrieve witnesses. - // - // Due to note erasure it will generally not be possible to know the exact set of nullifiers - // a block will create, unless we pre-execute note erasure. So in practice, this set of - // nullifiers will be the set of nullifiers of all proven batches in the block, which is a - // superset of the nullifiers the block may create. - // - // However, if it is known that a certain note will be erased, it would not be necessary to - // provide a nullifier witness for it. - repeated primitives.Digest nullifiers = 2; - - // Array of note IDs for which to retrieve note inclusion proofs, **if they exist in the store**. - repeated primitives.Digest unauthenticated_notes = 3; - - // Array of block numbers referenced by all batches in the block. - repeated fixed32 reference_blocks = 4; -} - -// Represents the result of getting block inputs. -message BlockInputs { - // A nullifier returned as a response to the `GetBlockInputs`. - message NullifierWitness { - // The nullifier. - primitives.Digest nullifier = 1; - - // The SMT proof to verify the nullifier's inclusion in the nullifier tree. - primitives.SmtOpening opening = 2; - } - // The latest block header. - blockchain.BlockHeader latest_block_header = 1; - - // Proof of each requested unauthenticated note's inclusion in a block, **if it existed in - // the store**. - repeated note.NoteInclusionInBlockProof unauthenticated_note_proofs = 2; - - // The serialized chain MMR which includes proofs for all blocks referenced by the - // above note inclusion proofs as well as proofs for inclusion of the requested blocks - // referenced by the batches in the block. - bytes partial_block_chain = 3; - - // The state commitments of the requested accounts and their authentication paths. - repeated account.AccountWitness account_witnesses = 4; - - // The requested nullifiers and their authentication paths. - repeated NullifierWitness nullifier_witnesses = 5; -} - -// GET BATCH INPUTS -// ================================================================================================ - -// Returns the inputs for a transaction batch. -message BatchInputsRequest { - // List of unauthenticated note commitments to be queried from the database. - repeated primitives.Digest note_commitments = 1; - // Set of block numbers referenced by transactions. - repeated fixed32 reference_blocks = 2; -} - -// Represents the result of getting batch inputs. -message BatchInputs { - // The block header that the transaction batch should reference. 
- blockchain.BlockHeader batch_reference_block_header = 1; - - // Proof of each _found_ unauthenticated note's inclusion in a block. - repeated note.NoteInclusionInBlockProof note_proofs = 2; - - // The serialized chain MMR which includes proofs for all blocks referenced by the - // above note inclusion proofs as well as proofs for inclusion of the blocks referenced - // by the transactions in the batch. - bytes partial_block_chain = 3; -} - -// GET TRANSACTION INPUTS -// ================================================================================================ - -// Returns data required to validate a new transaction. -message TransactionInputsRequest { - // ID of the account against which a transaction is executed. - account.AccountId account_id = 1; - // Set of nullifiers consumed by this transaction. - repeated primitives.Digest nullifiers = 2; - // Set of unauthenticated note commitments to check for existence on-chain. - // - // These are notes which were not on-chain at the state the transaction was proven, - // but could by now be present. - repeated primitives.Digest unauthenticated_notes = 3; -} - -// Represents the result of getting transaction inputs. -message TransactionInputs { - // An account returned as a response to the `GetTransactionInputs`. - message AccountTransactionInputRecord { - // The account ID. - account.AccountId account_id = 1; - - // The latest account commitment, zero commitment if the account doesn't exist. - primitives.Digest account_commitment = 2; - } - - // A nullifier returned as a response to the `GetTransactionInputs`. - message NullifierTransactionInputRecord { - // The nullifier ID. - primitives.Digest nullifier = 1; - - // The block at which the nullifier has been consumed, zero if not consumed. - fixed32 block_num = 2; - } - - // Account state proof. - AccountTransactionInputRecord account_state = 1; - - // List of nullifiers that have been consumed. - repeated NullifierTransactionInputRecord nullifiers = 2; - - // List of unauthenticated notes that were not found in the database. - repeated primitives.Digest found_unauthenticated_notes = 3; - - // The node's current block height. - fixed32 block_height = 4; - - // Whether the account ID prefix is unique. Only relevant for account creation requests. - optional bool new_account_id_prefix_is_unique = 5; // TODO: Replace this with an error. When a general error message exists. -} diff --git a/proto/proto/store/ntx_builder.proto b/proto/proto/store/ntx_builder.proto deleted file mode 100644 index 15144447b..000000000 --- a/proto/proto/store/ntx_builder.proto +++ /dev/null @@ -1,113 +0,0 @@ -// Specification of the NTX Builder store RPC. -syntax = "proto3"; -package ntx_builder_store; - -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/note.proto"; -import "types/primitives.proto"; -import "store/shared.proto"; - -// NTX BUILDER STORE API -// ================================================================================================ - -// Store API for the network transaction builder component -service NtxBuilder { - // Retrieves block header by given block number. Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} - - // Returns a paginated list of unconsumed network notes. 
- rpc GetUnconsumedNetworkNotes(UnconsumedNetworkNotesRequest) returns (UnconsumedNetworkNotes) {} - - // Returns a paginated list of a network account's unconsumed notes up to a specified block number. - rpc GetUnconsumedNetworkNotesForAccount(UnconsumedNetworkNotesForAccountRequest) returns (UnconsumedNetworkNotes) {} - - // Returns the block header at the chain tip, as well as the MMR peaks corresponding to this - // header for executing network transactions. If the block number is not provided, the latest - // header and peaks will be retrieved. - rpc GetCurrentBlockchainData(blockchain.MaybeBlockNumber) returns (CurrentBlockchainData) {} - - // Returns the latest state of a network account with the specified account prefix. - rpc GetNetworkAccountDetailsByPrefix(AccountIdPrefix) returns (MaybeAccountDetails) {} - - // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (shared.MaybeNoteScript) {} -} - -// GET NETWORK ACCOUNT DETAILS BY PREFIX -// ================================================================================================ - -// Account ID prefix. -message AccountIdPrefix { - // Account ID prefix. - fixed32 account_id_prefix = 1; -} - -// Represents the result of getting network account details by prefix. -message MaybeAccountDetails { - // Account details. - optional account.AccountDetails details = 1; -} - -// GET UNCONSUMED NETWORK NOTES -// ================================================================================================ - -// Returns a list of unconsumed network notes using pagination. -message UnconsumedNetworkNotesRequest { - // An opaque token used to paginate through the notes. - // - // This should be null on the first call, and set to the response token until the response token - // is null, at which point all data has been fetched. - optional uint64 page_token = 1; - - // Number of notes to retrieve per page. - uint64 page_size = 2; -} - -// Returns a paginated list of unconsumed network notes for an account. -// -// Notes created or consumed after the specified block are excluded from the result. -message UnconsumedNetworkNotesForAccountRequest { - // This should be null on the first call, and set to the response token until the response token - // is null, at which point all data has been fetched. - // - // Note that this token is only valid if used with the same parameters. - optional uint64 page_token = 1; - - // Number of notes to retrieve per page. - uint64 page_size = 2; - - // The network account ID prefix to filter notes by. - uint32 network_account_id_prefix = 3; - - // The block number to filter the returned notes by. - // - // Notes that are created or consumed after this block are excluded from the result. - fixed32 block_num = 4; -} - -// Represents the result of getting the unconsumed network notes. -message UnconsumedNetworkNotes { - // An opaque pagination token. - // - // Use this in your next request to get the next - // set of data. - // - // Will be null once there is no more data remaining. - optional uint64 next_token = 1; - - // The list of unconsumed network notes. - repeated note.NetworkNote notes = 2; -} - -// GET CURRENT BLOCKCHAIN DATA -// ================================================================================================ - -// Current blockchain data based on the requested block number. -message CurrentBlockchainData { - // Commitments that represent the current state according to the MMR. 
- repeated primitives.Digest current_peaks = 1; - // Current block header. - optional blockchain.BlockHeader current_block_header = 2; -} diff --git a/proto/proto/store/rpc.proto b/proto/proto/store/rpc.proto deleted file mode 100644 index 1fc3e1936..000000000 --- a/proto/proto/store/rpc.proto +++ /dev/null @@ -1,509 +0,0 @@ -// Specification of the store RPC. -// -// This provided access to the blockchain data to the other nodes. -syntax = "proto3"; -package rpc_store; - -import "google/protobuf/empty.proto"; -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/transaction.proto"; -import "types/note.proto"; -import "types/primitives.proto"; -import "store/shared.proto"; - -// RPC STORE API -// ================================================================================================ - -// Store API for the RPC component -service Rpc { - // Returns the status info. - rpc Status(google.protobuf.Empty) returns (StoreStatus) {} - - // Returns a nullifier proof for each of the requested nullifiers. - rpc CheckNullifiers(NullifierList) returns (CheckNullifiersResponse) {} - - // Returns the latest state of an account with the specified ID. - rpc GetAccountDetails(account.AccountId) returns (account.AccountDetails) {} - - // Returns the latest state proof of the specified account. - rpc GetAccountProof(AccountProofRequest) returns (AccountProofResponse) {} - - // Returns raw block data for the specified block number. - rpc GetBlockByNumber(blockchain.BlockNumber) returns (blockchain.MaybeBlock) {} - - // Retrieves block header by given block number. Optionally, it also returns the MMR path - // and current chain length to authenticate the block's inclusion. - rpc GetBlockHeaderByNumber(shared.BlockHeaderByNumberRequest) returns (shared.BlockHeaderByNumberResponse) {} - - // Returns a list of committed notes matching the provided note IDs. - rpc GetNotesById(note.NoteIdList) returns (note.CommittedNoteList) {} - - // Returns the script for a note by its root. - rpc GetNoteScriptByRoot(note.NoteRoot) returns (shared.MaybeNoteScript) {} - - // Returns a list of nullifiers that match the specified prefixes and are recorded in the node. - // - // Note that only 16-bit prefixes are supported at this time. - rpc SyncNullifiers(SyncNullifiersRequest) returns (SyncNullifiersResponse) {} - - // Returns info which can be used by the requester to sync up to the tip of chain for the notes they are interested in. - // - // requester specifies the `note_tags` they are interested in, and the block height from which to search for new for - // matching notes for. The request will then return the next block containing any note matching the provided tags. - // - // The response includes each note's metadata and inclusion proof. - // - // A basic note sync can be implemented by repeatedly requesting the previous response's block until reaching the - // tip of the chain. - rpc SyncNotes(SyncNotesRequest) returns (SyncNotesResponse) {} - - // Returns info which can be used by the requester to sync up to the latest state of the chain - // for the objects (accounts, notes, nullifiers) the requester is interested in. - // - // This request returns the next block containing requested data. It also returns `chain_tip` - // which is the latest block number in the chain. requester is expected to repeat these requests - // in a loop until `response.block_header.block_num == response.chain_tip`, at which point - // the requester is fully synchronized with the chain. 
- // - // Each request also returns info about new notes, nullifiers etc. created. It also returns - // Chain MMR delta that can be used to update the state of Chain MMR. This includes both chain - // MMR peaks and chain MMR nodes. - // - // For preserving some degree of privacy, note tags and nullifiers filters contain only high - // part of hashes. Thus, returned data contains excessive notes and nullifiers, requester can make - // additional filtering of that data on its side. - rpc SyncState(SyncStateRequest) returns (SyncStateResponse) {} - - // Returns account vault updates for specified account within a block range. - rpc SyncAccountVault(SyncAccountVaultRequest) returns (SyncAccountVaultResponse) {} - - // Returns storage map updates for specified account and storage slots within a block range. - rpc SyncStorageMaps(SyncStorageMapsRequest) returns (SyncStorageMapsResponse) {} - - // Returns transactions records for specific accounts within a block range. - rpc SyncTransactions(SyncTransactionsRequest) returns (SyncTransactionsResponse) {} -} - -// STORE STATUS -// ================================================================================================ - -// Represents the status of the store. -message StoreStatus { - // The store's running version. - string version = 1; - - // The store's status. - string status = 2; - - // Number of the latest block in the chain. - fixed32 chain_tip = 3; -} - -// GET ACCOUNT PROOF -// ================================================================================================ - -// Returns the latest state proof of the specified account. -message AccountProofRequest { - // Request the details for a public account. - message AccountDetailRequest { - // Represents a storage slot index and the associated map keys. - message StorageMapDetailRequest { - // Indirection required for use in `oneof {..}` block. - message MapKeys { - // A list of map keys associated with this storage slot. - repeated primitives.Digest map_keys = 1; - } - // Storage slot index (`[0..255]`). - uint32 slot_index = 1; - - oneof slot_data { - // Request to return all storage map data. If the number exceeds a threshold of 1000 entries, - // the response will not contain them but must be requested separately. - bool all_entries = 2; - - // A list of map keys associated with the given storage slot identified by `slot_index`. - MapKeys map_keys = 3; - } - } - - // Last known code commitment to the requester. The response will include account code - // only if its commitment is different from this value. - // - // If the field is ommiteed, the response will not include the account code. - optional primitives.Digest code_commitment = 1; - - // Last known asset vault commitment to the requester. The response will include asset vault data - // only if its commitment is different from this value. If the value is not present in the - // request, the response will not contain one either. - // If the number of to-be-returned asset entries exceed a threshold, they have to be requested - // separately, which is signaled in the response message with dedicated flag. - optional primitives.Digest asset_vault_commitment = 2; - - // Additional request per storage map. - repeated StorageMapDetailRequest storage_maps = 3; - } - - // ID of the account for which we want to get data - account.AccountId account_id = 1; - - // Block at which we'd like to get this data. If present, must be close to the chain tip. - // If not present, data from the latest block will be returned. 
- optional blockchain.BlockNumber block_num = 2; - - // Request for additional account details; valid only for public accounts. - optional AccountDetailRequest details = 3; -} - -// Represents the result of getting account proof. -message AccountProofResponse { - - message AccountDetails { - // Account header. - account.AccountHeader header = 1; - - // Account storage data - AccountStorageDetails storage_details = 2; - - // Account code; empty if code commitments matched or none was requested. - optional bytes code = 3; - - // Account asset vault data; empty if vault commitments matched or the requester - // omitted it in the request. - optional AccountVaultDetails vault_details = 4; - } - - // The block number at which the account witness was created and the account details were observed. - blockchain.BlockNumber block_num = 1; - - // Account ID, current state commitment, and SMT path. - account.AccountWitness witness = 2; - - // Additional details for public accounts. - optional AccountDetails details = 3; -} - -// Account vault details for AccountProofResponse -message AccountVaultDetails { - // A flag that is set to true if the account contains too many assets. This indicates - // to the user that `SyncAccountVault` endpoint should be used to retrieve the - // account's assets - bool too_many_assets = 1; - - // When too_many_assets == false, this will contain the list of assets in the - // account's vault - repeated primitives.Asset assets = 2; -} - -// Account storage details for AccountProofResponse -message AccountStorageDetails { - message AccountStorageMapDetails { - // Wrapper for repeated storage map entries - message MapEntries { - // Definition of individual storage entries. - message StorageMapEntry { - primitives.Digest key = 1; - primitives.Digest value = 2; - } - - repeated StorageMapEntry entries = 1; - } - // slot index of the storage map - uint32 slot_index = 1; - - // A flag that is set to `true` if the number of to-be-returned entries in the - // storage map would exceed a threshold. This indicates to the user that `SyncStorageMaps` - // endpoint should be used to get all storage map data. - bool too_many_entries = 2; - - // By default we provide all storage entries. - MapEntries entries = 3; - } - - // Account storage header (storage slot info for up to 256 slots) - account.AccountStorageHeader header = 1; - - // Additional data for the requested storage maps - repeated AccountStorageMapDetails map_details = 2; -} - - -// CHECK NULLIFIERS -// ================================================================================================ - -// List of nullifiers to return proofs for. -message NullifierList { - // List of nullifiers to return proofs for. - repeated primitives.Digest nullifiers = 1; -} - -// Represents the result of checking nullifiers. -message CheckNullifiersResponse { - // Each requested nullifier has its corresponding nullifier proof at the same position. - repeated primitives.SmtOpening proofs = 1; -} - -// SYNC NULLIFIERS -// ================================================================================================ - -// Returns a list of nullifiers that match the specified prefixes and are recorded in the node. -message SyncNullifiersRequest { - // Block number from which the nullifiers are requested (inclusive). - BlockRange block_range = 1; - - // Number of bits used for nullifier prefix. Currently the only supported value is 16. - uint32 prefix_len = 2; - - // List of nullifiers to check. 
Each nullifier is specified by its prefix with length equal - // to `prefix_len`. - repeated uint32 nullifiers = 3; -} - -// Represents the result of syncing nullifiers. -message SyncNullifiersResponse { - // Represents a single nullifier update. - message NullifierUpdate { - // Nullifier ID. - primitives.Digest nullifier = 1; - - // Block number. - fixed32 block_num = 2; - } - - // Pagination information. - PaginationInfo pagination_info = 1; - - // List of nullifiers matching the prefixes specified in the request. - repeated NullifierUpdate nullifiers = 2; -} - -// SYNC STATE -// ================================================================================================ - -// State synchronization request. -// -// Specifies state updates the requester is interested in. The server will return the first block which -// contains a note matching `note_tags` or the chain tip. And the corresponding updates to -// `account_ids` for that block range. -message SyncStateRequest { - // Last block known by the requester. The response will contain data starting from the next block, - // until the first block which contains a note of matching the requested tag, or the chain tip - // if there are no notes. - fixed32 block_num = 1; - - // Accounts' commitment to include in the response. - // - // An account commitment will be included if-and-only-if it is the latest update. Meaning it is - // possible there was an update to the account for the given range, but if it is not the latest, - // it won't be included in the response. - repeated account.AccountId account_ids = 2; - - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 3; -} - -// Represents the result of syncing state request. -message SyncStateResponse { - // Number of the latest block in the chain. - fixed32 chain_tip = 1; - - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; - - // Data needed to update the partial MMR from `request.block_num + 1` to `response.block_header.block_num`. - primitives.MmrDelta mmr_delta = 3; - - // List of account commitments updated after `request.block_num + 1` but not after `response.block_header.block_num`. - repeated account.AccountSummary accounts = 5; - - // List of transactions executed against requested accounts between `request.block_num + 1` and - // `response.block_header.block_num`. - repeated transaction.TransactionSummary transactions = 6; - - // List of all notes together with the Merkle paths from `response.block_header.note_root`. - repeated note.NoteSyncRecord notes = 7; -} - -// SYNC ACCOUNT VAULT -// ================================================================================================ - -// Account vault synchronization request. -// -// Allows requesters to sync asset values for specific public accounts within a block range. -message SyncAccountVaultRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. - BlockRange block_range = 1; - - // Account for which we want to sync asset vault. - account.AccountId account_id = 2; -} - -message SyncAccountVaultResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // List of asset updates for the account. 
- // - // Multiple updates can be returned for a single asset, and the one with a higher `block_num` - // is expected to be retained by the caller. - repeated AccountVaultUpdate updates = 2; -} - -message AccountVaultUpdate { - // Vault key associated with the asset. - primitives.Digest vault_key = 1; - - // Asset value related to the vault key. - // If not present, the asset was removed from the vault. - optional primitives.Asset asset = 2; - - // Block number at which the above asset was updated in the account vault. - fixed32 block_num = 3; -} - -// SYNC NOTES -// ================================================================================================ - -// Note synchronization request. -// -// Specifies note tags that requester is interested in. The server will return the first block which -// contains a note matching `note_tags` or the chain tip. -message SyncNotesRequest { - // Block range from which to start synchronizing. - BlockRange block_range = 1; - - // Specifies the tags which the requester is interested in. - repeated fixed32 note_tags = 2; -} - -// Represents the result of syncing notes request. -message SyncNotesResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // Block header of the block with the first note matching the specified criteria. - blockchain.BlockHeader block_header = 2; - - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_tip`. - // - // An MMR proof can be constructed for the leaf of index `block_header.block_num` of - // an MMR of forest `chain_tip` with this path. - primitives.MerklePath mmr_path = 3; - - // List of all notes together with the Merkle paths from `response.block_header.note_root`. - repeated note.NoteSyncRecord notes = 4; -} - -// SYNC STORAGE MAP -// ================================================================================================ - -// Storage map synchronization request. -// -// Allows requesters to sync storage map values for specific public accounts within a block range, -// with support for cursor-based pagination to handle large storage maps. -message SyncStorageMapsRequest { - // Block range from which to start synchronizing. - // - // If the `block_to` is specified, this block must be close to the chain tip (i.e., within 30 blocks), - // otherwise an error will be returned. - BlockRange block_range = 1; - - // Account for which we want to sync storage maps. - account.AccountId account_id = 3; -} - -message SyncStorageMapsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // The list of storage map updates. - // - // Multiple updates can be returned for a single slot index and key combination, and the one - // with a higher `block_num` is expected to be retained by the caller. - repeated StorageMapUpdate updates = 2; -} - -// Represents a single storage map update. -message StorageMapUpdate { - // Block number in which the slot was updated. - fixed32 block_num = 1; - - // Slot index ([0..255]). - uint32 slot_index = 2; - - // The storage map key. - primitives.Digest key = 3; - - // The storage map value. - primitives.Digest value = 4; -} - -// BLOCK RANGE -// ================================================================================================ - -// Represents a block range. -message BlockRange { - // Block number from which to start (inclusive). - fixed32 block_from = 1; - - // Block number up to which to check (inclusive). If not specified, checks up to the latest block. 
- optional fixed32 block_to = 2; -} - -// PAGINATION INFO -// ================================================================================================ - -// Represents pagination information for chunked responses. -// -// Pagination is done using block numbers as the axis, allowing requesters to request -// data in chunks by specifying block ranges and continuing from where the previous -// response left off. -// -// To request the next chunk, the requester should use `block_num + 1` from the previous response -// as the `block_from` for the next request. -message PaginationInfo { - // Current chain tip - fixed32 chain_tip = 1; - - // The block number of the last check included in this response. - // - // For chunked responses, this may be less than `request.block_range.block_to`. - // If it is less than request.block_range.block_to, the user is expected to make a subsequent request - // starting from the next block to this one (ie, request.block_range.block_from = block_num + 1). - fixed32 block_num = 2; -} - -// SYNC TRANSACTIONS -// ================================================================================================ - -// Transactions synchronization request. -// -// Allows requesters to sync transactions for specific accounts within a block range. -message SyncTransactionsRequest { - // Block range from which to start synchronizing. - BlockRange block_range = 1; - - // Accounts to sync transactions for. - repeated account.AccountId account_ids = 2; -} - -// Represents the result of syncing transactions request. -message SyncTransactionsResponse { - // Pagination information. - PaginationInfo pagination_info = 1; - - // List of transaction records. - repeated TransactionRecord transaction_records = 2; -} - -// Represents a transaction record. -message TransactionRecord { - // Block number in which the transaction was included. - fixed32 block_num = 1; - - // A transaction header. - transaction.TransactionHeader transaction_header = 2; -} diff --git a/proto/proto/store/shared.proto b/proto/proto/store/shared.proto deleted file mode 100644 index 1d162087e..000000000 --- a/proto/proto/store/shared.proto +++ /dev/null @@ -1,45 +0,0 @@ -// Shared messages for the store RPC. -syntax = "proto3"; -package shared; - -import "types/account.proto"; -import "types/blockchain.proto"; -import "types/transaction.proto"; -import "types/primitives.proto"; -import "types/note.proto"; -import "google/protobuf/empty.proto"; - -// GET BLOCK HEADER BY NUMBER -// ================================================================================================ - -// Returns the block header corresponding to the requested block number, as well as the merkle -// path and current forest which validate the block's inclusion in the chain. -// -// The Merkle path is an MMR proof for the block's leaf, based on the current chain length. -message BlockHeaderByNumberRequest { - // The target block height, defaults to latest if not provided. - optional uint32 block_num = 1; - // Whether or not to return authentication data for the block header. - optional bool include_mmr_proof = 2; -} - -// Represents the result of getting a block header by block number. -message BlockHeaderByNumberResponse { - // The requested block header. - blockchain.BlockHeader block_header = 1; - - // Merkle path to verify the block's inclusion in the MMR at the returned `chain_length`. - optional primitives.MerklePath mmr_path = 2; - - // Current chain length. 
-    optional fixed32 chain_length = 3;
-}
-
-// GET NOTE SCRIPT BY ROOT
-// ================================================================================================
-
-// Represents a note script or nothing.
-message MaybeNoteScript {
-    // The script for a note by its root.
-    optional note.NoteScript script = 1;
-}
diff --git a/proto/proto/types/account.proto b/proto/proto/types/account.proto
index 6953c228b..15ae475b3 100644
--- a/proto/proto/types/account.proto
+++ b/proto/proto/types/account.proto
@@ -12,7 +12,7 @@ import "types/primitives.proto";
 // and a random user-provided seed.
 message AccountId {
     // 15 bytes (120 bits) encoded using [winter_utils::Serializable] implementation for
-    // [miden_objects::account::account_id::AccountId].
+    // [miden_protocol::account::account_id::AccountId].
     bytes id = 1;
 }
 
@@ -32,11 +32,14 @@ message AccountSummary {
 message AccountStorageHeader {
     // A single storage slot in the account storage header.
     message StorageSlot {
+        // The name of the storage slot.
+        string slot_name = 1;
+
         // The type of the storage slot.
-        uint32 slot_type = 1;
+        uint32 slot_type = 2;
 
         // The commitment (Word) for this storage slot.
-        primitives.Digest commitment = 2;
+        primitives.Digest commitment = 3;
     }
 
     // Storage slots with their types and commitments.
@@ -49,7 +52,7 @@ message AccountDetails {
     AccountSummary summary = 1;
 
     // Account details encoded using [winter_utils::Serializable] implementation for
-    // [miden_objects::account::Account].
+    // [miden_protocol::account::Account].
     optional bytes details = 2;
 }
diff --git a/proto/proto/types/blockchain.proto b/proto/proto/types/blockchain.proto
index 28a35ae33..6f53cd4f3 100644
--- a/proto/proto/types/blockchain.proto
+++ b/proto/proto/types/blockchain.proto
@@ -10,14 +10,21 @@ import "types/primitives.proto";
 // Represents a block.
 message Block {
     // Block data encoded using [winter_utils::Serializable] implementation for
-    // [miden_objects::block::Block].
+    // [miden_protocol::block::Block].
     bytes block = 1;
 }
 
+// Represents a proposed block.
+message ProposedBlock {
+    // Block data encoded using [winter_utils::Serializable] implementation for
+    // [miden_protocol::block::ProposedBlock].
+    bytes proposed_block = 1;
+}
+
 // Represents a block or nothing.
 message MaybeBlock {
     // The requested block data encoded using [winter_utils::Serializable] implementation for
-    // [miden_objects::block::Block].
+    // [miden_protocol::block::Block].
     optional bytes block = 1;
 }
 
@@ -59,8 +66,8 @@ message BlockHeader {
     // A commitment to a set of IDs of transactions which affected accounts in this block.
     primitives.Digest tx_commitment = 8;
 
-    // A commitment to a STARK proof attesting to the correct state transition.
-    primitives.Digest proof_commitment = 9;
+    // The validator's ECDSA public key.
+    ValidatorPublicKey validator_key = 9;
 
     // A commitment to all transaction kernels supported by this block.
     primitives.Digest tx_kernel_commitment = 10;
@@ -72,6 +79,27 @@
     fixed32 timestamp = 12;
 }
 
+// PUBLIC KEY
+// ================================================================================================
+
+// Validator ECDSA public key.
+message ValidatorPublicKey {
+    // Public key encoded using [winter_utils::Serializable] implementation for
+    // [crypto::dsa::ecdsa_k256_keccak::PublicKey].
+    bytes validator_key = 1;
+}
+
+// BLOCK SIGNATURE
+// ================================================================================================
+
+// Block ECDSA signature.
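+//
+// Expected to verify against the `ValidatorPublicKey` carried in the corresponding block
+// header's `validator_key` field.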
+message BlockSignature { + // Signature encoded using [winter_utils::Serializable] implementation for + // [crypto::dsa::ecdsa_k256_keccak::Signature]. + bytes signature = 1; +} + + // FEE PARAMETERS // ================================================================================================ @@ -82,3 +110,13 @@ message FeeParameters { // The base fee (in base units) capturing the cost for the verification of a transaction. fixed32 verification_base_fee = 2; } + +// BLOCK BODY +// ================================================================================================ + +// Represents a block body. +message BlockBody { + // Block body data encoded using [winter_utils::Serializable] implementation for + // [miden_protocol::block::BlockBody]. + bytes block_body = 1; +} diff --git a/proto/proto/types/note.proto b/proto/proto/types/note.proto index 709c521f8..1177f350c 100644 --- a/proto/proto/types/note.proto +++ b/proto/proto/types/note.proto @@ -29,12 +29,12 @@ message NoteMetadata { // A value which can be used by the recipient(s) to identify notes intended for them. // - // See `miden_objects::note::note_tag` for more info. + // See `miden_protocol::note::note_tag` for more info. fixed32 tag = 3; // Specifies when a note is ready to be consumed. // - // See `miden_objects::note::execution_hint` for more info. + // See `miden_protocol::note::execution_hint` for more info. fixed64 execution_hint = 4; // An arbitrary user-defined value. diff --git a/proto/proto/types/primitives.proto b/proto/proto/types/primitives.proto index aed31cec0..3c8d279b0 100644 --- a/proto/proto/types/primitives.proto +++ b/proto/proto/types/primitives.proto @@ -9,7 +9,7 @@ message Asset { primitives.Digest asset = 1; } -// SMT +// SMT (Sparse Merkle Tree) // ================================================================================================ // Represents a single SMT leaf entry. @@ -21,9 +21,9 @@ message SmtLeafEntry { Digest value = 2; } -// Represents multiple leaf entries in an SMT. +// Multiple leaf entries when hash collisions occur at the same leaf position. message SmtLeafEntryList { - // The entries list. + // The list of entries at this leaf. repeated SmtLeafEntry entries = 1; } diff --git a/proto/proto/types/transaction.proto b/proto/proto/types/transaction.proto index a600e6327..a0e716457 100644 --- a/proto/proto/types/transaction.proto +++ b/proto/proto/types/transaction.proto @@ -11,16 +11,16 @@ import "types/primitives.proto"; // Submits proven transaction to the Miden network. message ProvenTransaction { // Transaction encoded using [winter_utils::Serializable] implementation for - // [miden_objects::transaction::proven_tx::ProvenTransaction]. + // [miden_protocol::transaction::proven_tx::ProvenTransaction]. bytes transaction = 1; // Transaction inputs encoded using [winter_utils::Serializable] implementation for - // [miden_objects::transaction::TransactionInputs]. + // [miden_protocol::transaction::TransactionInputs]. optional bytes transaction_inputs = 2; } message ProvenTransactionBatch { // Encoded using [winter_utils::Serializable] implementation for - // [miden_objects::transaction::proven_tx::ProvenTransaction]. + // [miden_protocol::transaction::proven_tx::ProvenTransaction]. bytes encoded = 1; } @@ -54,7 +54,7 @@ message TransactionHeader { primitives.Digest final_state_commitment = 3; // Nullifiers of the input notes of the transaction. - repeated primitives.Digest input_notes = 4; + repeated primitives.Digest nullifiers = 4; // Output notes of the transaction. 
repeated note.NoteSyncRecord output_notes = 5; diff --git a/proto/src/lib.rs b/proto/src/lib.rs index 14ded322f..8e8440d19 100644 --- a/proto/src/lib.rs +++ b/proto/src/lib.rs @@ -41,14 +41,6 @@ pub fn store_block_producer_api_descriptor() -> FileDescriptorSet { .expect("bytes should be a valid file descriptor created by build.rs") } -/// Returns the Protobuf file descriptor for the store shared API. -#[cfg(feature = "internal")] -pub fn store_shared_api_descriptor() -> FileDescriptorSet { - let bytes = include_bytes!(concat!(env!("OUT_DIR"), "/", "store_shared_file_descriptor.bin")); - FileDescriptorSet::decode(&bytes[..]) - .expect("bytes should be a valid file descriptor created by build.rs") -} - /// Returns the Protobuf file descriptor for the block-producer API. #[cfg(feature = "internal")] pub fn block_producer_api_descriptor() -> FileDescriptorSet { diff --git a/scripts/check-msrv.sh b/scripts/check-msrv.sh index 0bde2955f..6058a0ace 100755 --- a/scripts/check-msrv.sh +++ b/scripts/check-msrv.sh @@ -90,9 +90,9 @@ while IFS=$'\t' read -r pkg_id package_name manifest_path rust_version; do echo "Searching for correct MSRV for $package_name..." - # Determine the currently-installed stable toolchain version (e.g., "1.81.0") + # Determine the currently-installed stable toolchain version (e.g., "1.91.1") latest_stable="$(rustup run stable rustc --version 2>/dev/null | awk '{print $2}')" - if [[ -z "$latest_stable" ]]; then latest_stable="1.81.0"; fi + if [[ -z "$latest_stable" ]]; then latest_stable="1.91.1"; fi # Search for the actual MSRV starting from the current one if actual_msrv=$(cargo msrv find \ @@ -150,4 +150,4 @@ if [[ -n "$failed_packages" ]]; then else echo "ALL WORKSPACE MEMBERS PASSED MSRV CHECKS!" exit 0 -fi \ No newline at end of file +fi
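
For reference, the sketch below illustrates the block-range pagination contract documented on `PaginationInfo` above, as driven from a client. It is an illustration only and not part of this change: the `RpcClient` handle, the `sync_transactions` method name, and the prost-style field layout (message fields as `Option`, `repeated` fields as `Vec`) are assumptions about the tonic bindings generated from these schemas.

```rust
// Hypothetical client loop for `SyncTransactions`; the same shape applies to the other
// `BlockRange`/`PaginationInfo` endpoints (`SyncNullifiers`, `SyncAccountVault`,
// `SyncStorageMaps`). All type and method names are assumed generated bindings.
async fn sync_all_transactions(
    client: &mut RpcClient,
    account_ids: Vec<AccountId>,
    mut block_from: u32,
) -> Result<Vec<TransactionRecord>, tonic::Status> {
    let mut records = Vec::new();
    loop {
        let response = client
            .sync_transactions(SyncTransactionsRequest {
                // Leaving `block_to` unset asks the node to check up to the latest block.
                block_range: Some(BlockRange { block_from, block_to: None }),
                account_ids: account_ids.clone(),
            })
            .await?
            .into_inner();

        records.extend(response.transactions);

        let info = response
            .pagination_info
            .ok_or_else(|| tonic::Status::internal("missing pagination info"))?;

        // Per the `PaginationInfo` docs: once the last checked block reaches the chain
        // tip, the range has been fully served; otherwise continue from the next block.
        if info.block_num >= info.chain_tip {
            return Ok(records);
        }
        block_from = info.block_num + 1;
    }
}
```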