diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 7af83205b..009b4422a 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -136,10 +136,12 @@ jobs: env: DATABASE_URL: postgres://postgres:password@localhost:5432/postgres RISC0_DEV_MODE: true + ETH_MAINNET_RPC_URL: ${{ secrets.ETH_MAINNET_RPC_URL_FREE_TIER }} - name: sccache stats run: sccache --show-stats + rust-pkg-check: runs-on: [ self-hosted, prod, "${{ matrix.os }}", "${{ matrix.device }}" ] strategy: diff --git a/.gitignore b/.gitignore index c11bb7ab7..5c9fe87ff 100644 --- a/.gitignore +++ b/.gitignore @@ -36,8 +36,10 @@ target/ .idea .aider* .claude -CLAUDE.md +**/CLAUDE.md +**/AGENTS.md .cursor +.codex # Ignore generated files diff --git a/.vscode/settings.json b/.vscode/settings.json index 2b6eba61b..99d34553a 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -13,8 +13,8 @@ "./Cargo.toml", "./crates/distributor/Cargo.toml", "./bento/crates/workflow/Cargo.toml", - "./crates/povw/mint-calculator/Cargo.toml", - "./crates/povw/log-updater/Cargo.toml" + "./crates/lambdas/indexer-api/Cargo.toml", + "./crates/indexer/Cargo.toml", // "./crates/guest/assessor/assessor-guest/Cargo.toml", // "./crates/guest/util/echo/Cargo.toml", // "./crates/guest/util/identity/Cargo.toml", diff --git a/Cargo.lock b/Cargo.lock index 670c17025..967e55c0d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -64,9 +64,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa7413bbf62c40b5db916ad5a1c382df1affe42080e148d69932bb7f0a12f32e" +checksum = "eff28fb7c2a2cf287fd2fa6291a33bcb70596230aa4b757e212ed63206733abe" dependencies = [ "alloy-consensus", "alloy-contract", @@ -103,9 +103,9 @@ dependencies = [ [[package]] name = "alloy-consensus" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b7345077623aaa080fc06735ac13b8fa335125c8550f9c4f64135a5bf6f79967" +checksum = "645b546d63ffd10bb90ec85bbd1365e99cf613273dd10968dbaf0c26264eca4f" dependencies = [ "alloy-eips", "alloy-primitives 1.3.1", @@ -128,9 +128,9 @@ dependencies = [ [[package]] name = "alloy-consensus-any" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "501f83565d28bdb9d6457dd3b5d646e19db37709d0f27608a26a1839052ddade" +checksum = "c5b549704e83c09f66a199508b9d34ee7d0f964e6d49e7913e5e8a25a64de341" dependencies = [ "alloy-consensus", "alloy-eips", @@ -142,9 +142,9 @@ dependencies = [ [[package]] name = "alloy-contract" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4c36bb4173892aeeba1c6b9e4eff923fa3fe8583f6d3e07afe1cbc5a96a853a" +checksum = "f8f7ab0f7ea0b4844dd2039cfa0f0b26232c51b31aa74a1c444361e1ca043404" dependencies = [ "alloy-consensus", "alloy-dyn-abi", @@ -231,9 +231,9 @@ dependencies = [ [[package]] name = "alloy-eips" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c219a87fb386a75780ddbdbbced242477321887e426b0f946c05815ceabe5e09" +checksum = "b26a4df894e5665f0c5c9beeedd6db6c2aa3642686a8c37c350df50d1271b611" dependencies = [ "alloy-eip2124", "alloy-eip2930", @@ -273,9 +273,9 @@ dependencies = [ [[package]] name = "alloy-genesis" -version = "1.0.27" +version = "1.0.34" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "2dbf4c6b1b733ba0efaa6cc5f68786997a19ffcd88ff2ee2ba72fdd42594375e" +checksum = "6678a61059c150bb94139ba726f86f6f7b31d53c6b5e251060f94dba3d17d8eb" dependencies = [ "alloy-eips", "alloy-primitives 1.3.1", @@ -325,9 +325,9 @@ dependencies = [ [[package]] name = "alloy-json-rpc" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "334555c323fa2bb98f1d4c242b62da9de8c715557a2ed680a76cefbcac19fefd" +checksum = "658d9d65768ba57c1aa40bb47e4ecc54db744fa9f843baa339359ed9c6476247" dependencies = [ "alloy-primitives 1.3.1", "alloy-sol-types", @@ -340,9 +340,9 @@ dependencies = [ [[package]] name = "alloy-network" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c7ea377c9650203d7a7da9e8dee7f04906b49a9253f554b110edd7972e75ef34" +checksum = "785b8736204e6a8dcde9b491aa7eac333b5e14f1e57bd5f81888b8a251cfbff8" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -366,9 +366,9 @@ dependencies = [ [[package]] name = "alloy-network-primitives" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9f9ab9a9e92c49a357edaee2d35deea0a32ac8f313cfa37448f04e7e029c9d9" +checksum = "5e17985b9e55fcd27d751b5824ac2bfebf64a4823b43e02db953b5c57229f282" dependencies = [ "alloy-consensus", "alloy-eips", @@ -379,9 +379,9 @@ dependencies = [ [[package]] name = "alloy-node-bindings" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb0af8bdb3ee8da43a1f1eb9d1df3c8e3bd657dd20eddd3f00f03105c0fdd3f5" +checksum = "fdd21cd0fd7def069e33214143b1701b773751f13f4cf196ab147d8b51ead722" dependencies = [ "alloy-genesis", "alloy-hardforks 0.2.13", @@ -449,9 +449,9 @@ dependencies = [ [[package]] name = "alloy-provider" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9a85361c88c16116defbd98053e3d267054d6b82729cdbef0236f7881590f924" +checksum = "43c041912a8ccafeb36d685569ebfa852b2bb07d8576d14804a31cb117a02338" dependencies = [ "alloy-chains", "alloy-consensus", @@ -496,9 +496,9 @@ dependencies = [ [[package]] name = "alloy-pubsub" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25b1eda077b102b167effaf0c9d9109b1232948a6c7fcaff74abdb5deb562a17" +checksum = "6393c95e4e46b18d5e19247c357e2e0adb9c7f42951f9276b0b9f151549a9fbe" dependencies = [ "alloy-json-rpc", "alloy-primitives 1.3.1", @@ -540,9 +540,9 @@ dependencies = [ [[package]] name = "alloy-rpc-client" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "743fc964abb0106e454e9e8683fb0809fb32940270ef586a58e913531360b302" +checksum = "f2af7e7532b1c86b7c0d6b5bc0ebdf8d45ce0750d9383a622ea546b42f8d5403" dependencies = [ "alloy-json-rpc", "alloy-primitives 1.3.1", @@ -566,9 +566,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6445ccdc73c8a97e1794e9f0f91af52fb2bbf9ff004339a801b0293c3928abb" +checksum = "4c94b05986216575532c618a05d9fb590e1802f224003df8018f65420929ec08" dependencies = [ "alloy-primitives 1.3.1", "alloy-rpc-types-anvil", @@ -583,9 +583,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-anvil" -version = "1.0.27" 
+version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "242ff10318efd61c4b17ac8584df03a8db1e12146704c08b1b69d070cd4a1ebf" +checksum = "7c265bdbd7477d24e41cd594dd7a2022a14c9a4c58785af4bf15020ef51da075" dependencies = [ "alloy-primitives 1.3.1", "alloy-rpc-types-eth", @@ -595,9 +595,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-any" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97372c51a14a804fb9c17010e3dd6c117f7866620b264e24b64d2259be44bcdf" +checksum = "96414c5385381b4b9d19ed1ee8f3a9c24a9a084c509ef66a235b5a45221fa6a9" dependencies = [ "alloy-consensus-any", "alloy-rpc-types-eth", @@ -606,9 +606,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-debug" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a005a343cae9a0d4078d2f85a666493922d4bfb756229ea2a45a4bafd21cb9f1" +checksum = "a03f03ff78bc274f9434c19871e7e041c604eab04d7751c8a8429aba5539fadb" dependencies = [ "alloy-primitives 1.3.1", "derive_more 2.0.1", @@ -618,9 +618,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-engine" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e214c7667f88b2f7e48eb8428eeafcbf6faecda04175c5f4d13fdb2563333ac" +checksum = "301962bdc2f084bf25e86abe64d41c8a3ca1398d41d6f3416b6fffe2fe1620fc" dependencies = [ "alloy-consensus", "alloy-eips", @@ -636,9 +636,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-eth" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "672286c19528007df058bafd82c67e23247b4b3ebbc538cbddc705a82d8a930f" +checksum = "17d6b2bfc7e6b29f4ebc2e86cfa520c77d4cd0d08ed54332d2f5116df8357fd7" dependencies = [ "alloy-consensus", "alloy-consensus-any", @@ -657,9 +657,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-trace" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d53c5ea8e10ca72889476343deb98c050da7b85e119a55a2a02a9791cb8242e4" +checksum = "dbb5c7e2f70d1ed7e117e5a4d6b13d547ef31c238162e46f2147ff5c45dd4326" dependencies = [ "alloy-primitives 1.3.1", "alloy-rpc-types-eth", @@ -671,9 +671,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types-txpool" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb1c7ee378e899353e05a0d9f5b73b5d57bdac257532c6acd98eaa6b093fe642" +checksum = "37ca69c1bb9cb4cb6b80cfbdec98813acaa50101d6298a4604fb24b9176b3ad2" dependencies = [ "alloy-primitives 1.3.1", "alloy-rpc-types-eth", @@ -683,9 +683,9 @@ dependencies = [ [[package]] name = "alloy-serde" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1aae653f049267ae7e040eab6c9b9a417064ca1a6cb21e3dd59b9f1131ef048f" +checksum = "8b253eb23896e22d0cf8117fc915383d4ecf8efdedd57f590a13c8716a7347f2" dependencies = [ "alloy-primitives 1.3.1", "serde", @@ -694,9 +694,9 @@ dependencies = [ [[package]] name = "alloy-signer" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d97cedce202f848592b96f7e891503d3adb33739c4e76904da73574290141b93" +checksum = "a60d6c651c73df18766997bf2073b2a7e1875fec3f4fe5eef1ca6a38b6e81ff2" dependencies = [ "alloy-primitives 1.3.1", "async-trait", @@ -709,9 +709,9 @@ dependencies = [ 
[[package]] name = "alloy-signer-local" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "83ae7d854db5b7cdd5b9ed7ad13d1e5e034cdd8be85ffef081f61dc6c9e18351" +checksum = "621eafdbf1b1646c70d3b55959635c59e66ed7ad83a8b495fd9b948db09fe6c2" dependencies = [ "alloy-consensus", "alloy-network", @@ -746,7 +746,7 @@ dependencies = [ "alloy-json-abi", "alloy-sol-macro-input", "const-hex", - "heck", + "heck 0.5.0", "indexmap 2.11.0", "proc-macro-error2", "proc-macro2", @@ -765,7 +765,7 @@ dependencies = [ "alloy-json-abi", "const-hex", "dunce", - "heck", + "heck 0.5.0", "macro-string", "proc-macro2", "quote", @@ -798,9 +798,9 @@ dependencies = [ [[package]] name = "alloy-transport" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08b383bc903c927635e39e1dae7df2180877d93352d1abd389883665a598afc" +checksum = "a68c445bf2a3b0124203cd45bdc0950968a131eb53ba85a5f0fd09eb610fe467" dependencies = [ "alloy-json-rpc", "alloy-primitives 1.3.1", @@ -822,9 +822,9 @@ dependencies = [ [[package]] name = "alloy-transport-http" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e58dee1f7763ef302074b645fc4f25440637c09a60e8de234b62993f06c0ae3" +checksum = "f3c4d2c0052de0d82fcb2acea16bf3fe105fd4c37d108a331c777942648e8711" dependencies = [ "alloy-json-rpc", "alloy-transport", @@ -837,9 +837,9 @@ dependencies = [ [[package]] name = "alloy-transport-ipc" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ae5c6655e5cda1227f0c70b7686ecfb8af856771deebacad8dab9a7fbc51864" +checksum = "5345c71ff720219e30b0fc8d8931b2384390134a4c38bff4b5d87b4cc275e06d" dependencies = [ "alloy-json-rpc", "alloy-pubsub", @@ -857,9 +857,9 @@ dependencies = [ [[package]] name = "alloy-transport-ws" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dcb2141958a1f13722cb20a2e01c130fb375209fa428849ae553c1518bc33a0d" +checksum = "c335f772dbae8d4d17cc0ea86de3dacad245b876e6c0b951f48fd48f76d3d144" dependencies = [ "alloy-pubsub", "alloy-transport", @@ -907,12 +907,12 @@ dependencies = [ [[package]] name = "alloy-tx-macros" -version = "1.0.27" +version = "1.0.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d14809f908822dbff0dc472c77ca4aa129ab12e22fd9bff2dd1ef54603e68e3d" +checksum = "614d998c2f0e95079fdc8798cb48b9ea985dab225ed02005f724e66788aaf614" dependencies = [ "alloy-primitives 1.3.1", - "darling", + "darling 0.21.3", "proc-macro2", "quote", "syn 2.0.106", @@ -2205,13 +2205,64 @@ dependencies = [ "tracing", ] +[[package]] +name = "aws_lambda_events" +version = "0.15.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7319a086b79c3ff026a33a61e80f04fd3885fbb73237981ea080d21944e1cb1c" +dependencies = [ + "base64 0.22.1", + "bytes", + "http 1.3.1", + "http-body 1.0.1", + "http-serde", + "query_map", + "serde", + "serde_json", +] + +[[package]] +name = "axum" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f" +dependencies = [ + "async-trait", + "axum-core 0.4.5", + "axum-macros", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "hyper 1.7.0", + "hyper-util", + "itoa", + "matchit 0.7.3", + "memchr", + "mime", + 
"percent-encoding", + "pin-project-lite", + "rustversion", + "serde", + "serde_json", + "serde_path_to_error", + "serde_urlencoded", + "sync_wrapper", + "tokio", + "tower 0.5.2", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "axum" version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5" dependencies = [ - "axum-core", + "axum-core 0.5.2", "base64 0.22.1", "bytes", "form_urlencoded", @@ -2222,7 +2273,7 @@ dependencies = [ "hyper 1.7.0", "hyper-util", "itoa", - "matchit", + "matchit 0.8.4", "memchr", "mime", "percent-encoding", @@ -2242,6 +2293,27 @@ dependencies = [ "tracing", ] +[[package]] +name = "axum-core" +version = "0.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199" +dependencies = [ + "async-trait", + "bytes", + "futures-util", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "mime", + "pin-project-lite", + "rustversion", + "sync_wrapper", + "tower-layer", + "tower-service", + "tracing", +] + [[package]] name = "axum-core" version = "0.5.2" @@ -2262,6 +2334,17 @@ dependencies = [ "tracing", ] +[[package]] +name = "axum-macros" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "57d123550fa8d071b7255cb0cc04dc302baa6c8c4a79f55701552684d8399bce" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.106", +] + [[package]] name = "az" version = "1.2.1" @@ -2627,6 +2710,7 @@ dependencies = [ "boundless-assessor", "boundless-market", "boundless-povw", + "boundless-rewards", "boundless-test-utils", "boundless-zkc", "bytemuck", @@ -2650,6 +2734,7 @@ dependencies = [ "serde_yaml 0.9.34+deprecated", "shadow-rs", "sqlx", + "tabled", "tempfile", "tokio", "tracing", @@ -2684,15 +2769,22 @@ version = "0.15.0" dependencies = [ "alloy", "anyhow", + "assert_cmd", "async-trait", "boundless-cli", "boundless-market", + "boundless-povw", + "boundless-rewards", "boundless-test-utils", + "boundless-zkc", + "chrono", "clap", "futures-util", "hex", "indexer-monitor", "risc0-zkvm", + "serde", + "serde_json", "sqlx", "tempfile", "thiserror 2.0.16", @@ -2801,6 +2893,21 @@ dependencies = [ "url", ] +[[package]] +name = "boundless-rewards" +version = "0.15.0" +dependencies = [ + "alloy", + "anyhow", + "boundless-povw", + "boundless-zkc", + "chrono", + "futures-util", + "serde", + "tokio", + "tracing", +] + [[package]] name = "boundless-slasher" version = "0.15.0" @@ -2942,7 +3049,7 @@ version = "0.15.0" dependencies = [ "alloy", "anyhow", - "axum", + "axum 0.8.4", "boundless-market", "boundless-test-utils", "broker", @@ -2996,6 +3103,12 @@ version = "1.2.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7575182f7272186991736b70173b0ea045398f984bf5ebbb3804736ce1330c9d" +[[package]] +name = "bytecount" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "175812e0be2bccb6abe50bb8d566126198344f707e304f45c648fd8f2cc0365e" + [[package]] name = "bytemuck" version = "1.23.2" @@ -3286,7 +3399,7 @@ version = "4.5.47" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.106", @@ -3661,8 +3774,18 @@ version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"fc7f46116c46ff9ab3eb1597a45688b6715c6e628b5c133e288e709a29bcb4ee" dependencies = [ - "darling_core", - "darling_macro", + "darling_core 0.20.11", + "darling_macro 0.20.11", +] + +[[package]] +name = "darling" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9cdf337090841a411e2a7f3deb9187445851f91b309c0c0a29e05f74a00a48c0" +dependencies = [ + "darling_core 0.21.3", + "darling_macro 0.21.3", ] [[package]] @@ -3679,13 +3802,39 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "darling_core" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1247195ecd7e3c85f83c8d2a366e4210d588e802133e1e355180a9870b517ea4" +dependencies = [ + "fnv", + "ident_case", + "proc-macro2", + "quote", + "serde", + "strsim", + "syn 2.0.106", +] + [[package]] name = "darling_macro" version = "0.20.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ - "darling_core", + "darling_core 0.20.11", + "quote", + "syn 2.0.106", +] + +[[package]] +name = "darling_macro" +version = "0.21.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" +dependencies = [ + "darling_core 0.21.3", "quote", "syn 2.0.106", ] @@ -3807,7 +3956,7 @@ version = "0.20.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2d5bcf7b024d6835cfb3d473887cd966994907effbe9227e8c8219824d06c4e8" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", "syn 2.0.106", @@ -4027,7 +4176,7 @@ version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "97af9b5f014e228b33e77d75ee0e6e87960124f0f4b16337b586a6bec91867b1" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "proc-macro2-diagnostics", ] @@ -5024,6 +5173,12 @@ dependencies = [ "stable_deref_trait", ] +[[package]] +name = "heck" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "95505c38b4572b2d910cecb0281560f54b440a19336cbbcb27bf6ce6adc6f5a8" + [[package]] name = "heck" version = "0.5.0" @@ -5363,7 +5518,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2 0.5.10", + "socket2 0.6.0", "system-configuration", "tokio", "tower-service", @@ -5550,6 +5705,37 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "4ee796ad498c8d9a1d68e477df8f754ed784ef875de1414ebdaf169f70a6a784" +[[package]] +name = "indexer-api" +version = "0.15.0" +dependencies = [ + "alloy", + "anyhow", + "assert_cmd", + "aws-config", + "axum 0.7.9", + "boundless-indexer", + "lambda_http", + "lambda_runtime", + "openssl", + "rand 0.8.5", + "reqwest", + "serde", + "serde_json", + "serde_yaml 0.9.34+deprecated", + "sqlx", + "tempfile", + "test-log", + "tokio", + "tower 0.5.2", + "tower-http 0.6.6", + "tracing", + "tracing-subscriber 0.3.20", + "utoipa", + "utoipa-axum", + "utoipa-swagger-ui 8.1.0", +] + [[package]] name = "indexer-monitor" version = "0.15.0" @@ -5890,6 +6076,33 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "lambda_http" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "67fe279be7f89f5f72c97c3a96f45c43db8edab1007320ecc6a5741273b4d6db" +dependencies = [ + "aws_lambda_events", + "base64 0.22.1", + "bytes", + "encoding_rs", + "futures", + "futures-util", + "http 1.3.1", 
+ "http-body 1.0.1", + "http-body-util", + "hyper 1.7.0", + "lambda_runtime", + "mime", + "percent-encoding", + "pin-project-lite", + "serde", + "serde_json", + "serde_urlencoded", + "tokio-stream", + "url", +] + [[package]] name = "lambda_runtime" version = "0.13.0" @@ -6008,7 +6221,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets 0.48.5", + "windows-targets 0.53.3", ] [[package]] @@ -6299,6 +6512,12 @@ dependencies = [ "regex-automata", ] +[[package]] +name = "matchit" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" + [[package]] name = "matchit" version = "0.8.4" @@ -6777,7 +6996,7 @@ version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "77e878c846a8abae00dd069496dbe8751b16ac1c3d6bd2a7283a938e8228f90d" dependencies = [ - "proc-macro-crate 1.1.3", + "proc-macro-crate 3.3.0", "proc-macro2", "quote", "syn 2.0.106", @@ -6929,6 +7148,15 @@ version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" +[[package]] +name = "openssl-src" +version = "300.5.1+3.5.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "735230c832b28c000e3bc117119e6466a663ec73506bc0a9907ea4187508e42a" +dependencies = [ + "cc", +] + [[package]] name = "openssl-sys" version = "0.9.109" @@ -6937,6 +7165,7 @@ checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" dependencies = [ "cc", "libc", + "openssl-src", "pkg-config", "vcpkg", ] @@ -6954,7 +7183,7 @@ dependencies = [ "alloy", "anyhow", "async-stream", - "axum", + "axum 0.8.4", "boundless-market", "boundless-test-utils", "clap", @@ -6973,7 +7202,7 @@ dependencies = [ "tracing", "tracing-subscriber 0.3.20", "utoipa", - "utoipa-swagger-ui", + "utoipa-swagger-ui 9.0.2", ] [[package]] @@ -7005,6 +7234,17 @@ dependencies = [ "sha2 0.10.9", ] +[[package]] +name = "papergrid" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9ad43c07024ef767f9160710b3a6773976194758c7919b17e63b863db0bdf7fb" +dependencies = [ + "bytecount", + "fnv", + "unicode-width 0.1.14", +] + [[package]] name = "parity-scale-codec" version = "3.7.5" @@ -7554,7 +7794,7 @@ version = "0.13.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "be769465445e8c1474e9c5dac2018218498557af32d9ed057325ec9a41ae81bf" dependencies = [ - "heck", + "heck 0.5.0", "itertools 0.14.0", "log", "multimap", @@ -7604,6 +7844,17 @@ dependencies = [ "parking_lot 0.12.4", ] +[[package]] +name = "query_map" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5eab6b8b1074ef3359a863758dae650c7c0c6027927a085b7af911c8e0bf3a15" +dependencies = [ + "form_urlencoded", + "serde", + "serde_derive", +] + [[package]] name = "quick-error" version = "1.2.3" @@ -7623,7 +7874,7 @@ dependencies = [ "quinn-udp", "rustc-hash 2.1.1", "rustls 0.23.31", - "socket2 0.5.10", + "socket2 0.6.0", "thiserror 2.0.16", "tokio", "tracing", @@ -7660,7 +7911,7 @@ dependencies = [ "cfg_aliases", "libc", "once_cell", - "socket2 0.5.10", + "socket2 0.6.0", "tracing", "windows-sys 0.60.2", ] @@ -9292,18 +9543,28 @@ checksum = "f97841a747eef040fcd2e7b3b9a220a7205926e60488e673d9e4926d27772ce5" [[package]] 
name = "serde" -version = "1.0.219" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "0dca6411025b24b60bfa7ec1fe1f8e710ac09782dca409ee8237ba74b51295fd" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.226" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ba2ba63999edb9dac981fb34b3e5c0d111a69b0924e253ed29d83f7c99e966a4" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.226" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "8db53ae22f34573731bafa1db20f04027b2d25e02d8205921b569171699cdb33" dependencies = [ "proc-macro2", "quote", @@ -9390,7 +9651,7 @@ version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ - "darling", + "darling 0.20.11", "proc-macro2", "quote", "syn 2.0.106", @@ -9771,7 +10032,7 @@ checksum = "19a9c1841124ac5a61741f96e1d9e2ec77424bf323962dd894bdb93f37d5219b" dependencies = [ "dotenvy", "either", - "heck", + "heck 0.5.0", "hex", "once_cell", "proc-macro2", @@ -9999,7 +10260,7 @@ version = "0.27.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7695ce3845ea4b33927c055a39dc438a45b059f7c1b3d91d38d10355fb8cbca7" dependencies = [ - "heck", + "heck 0.5.0", "proc-macro2", "quote", "syn 2.0.106", @@ -10098,6 +10359,30 @@ dependencies = [ "libc", ] +[[package]] +name = "tabled" +version = "0.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c998b0c8b921495196a48aabaf1901ff28be0760136e31604f7967b0792050e" +dependencies = [ + "papergrid", + "tabled_derive", + "unicode-width 0.1.14", +] + +[[package]] +name = "tabled_derive" +version = "0.7.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c138f99377e5d653a371cdad263615634cfc8467685dfe8e73e2b8e98f44b17" +dependencies = [ + "heck 0.4.1", + "proc-macro-error", + "proc-macro2", + "quote", + "syn 1.0.109", +] + [[package]] name = "tagptr" version = "0.2.0" @@ -10916,6 +11201,19 @@ dependencies = [ "utoipa-gen", ] +[[package]] +name = "utoipa-axum" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "839e89ad0db7f9e8737dace8ff43c1ce0711d5e0d08cc1c9d31cc8454d4643ee" +dependencies = [ + "axum 0.7.9", + "paste", + "tower-layer", + "tower-service", + "utoipa", +] + [[package]] name = "utoipa-gen" version = "5.4.0" @@ -10928,13 +11226,31 @@ dependencies = [ "syn 2.0.106", ] +[[package]] +name = "utoipa-swagger-ui" +version = "8.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "db4b5ac679cc6dfc5ea3f2823b0291c777750ffd5e13b21137e0f7ac0e8f9617" +dependencies = [ + "axum 0.7.9", + "base64 0.22.1", + "mime_guess", + "regex", + "rust-embed", + "serde", + "serde_json", + "url", + "utoipa", + "zip 2.4.2", +] + [[package]] name = "utoipa-swagger-ui" version = "9.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d047458f1b5b65237c2f6dc6db136945667f40a7668627b3490b9513a3d43a55" dependencies = [ - "axum", + "axum 0.8.4", "base64 0.22.1", "mime_guess", "regex", @@ -11259,7 +11575,7 @@ version = "0.1.10" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "0978bf7171b3d90bac376700cb56d606feb40f251a475a5d6634613564460b22" dependencies = [ - "windows-sys 0.48.0", + "windows-sys 0.60.2", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index b33cf95e7..b658fe10c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -11,10 +11,12 @@ members = [ "crates/guest/assessor", "crates/guest/util", "crates/indexer", - "crates/ops-lambdas/indexer-monitor", + "crates/lambdas/indexer-api", + "crates/lambdas/indexer-monitor", "crates/order-generator", "crates/order-stream", "crates/povw", + "crates/rewards", "crates/slasher", "crates/test-utils", "crates/zkc", @@ -31,6 +33,7 @@ repository = "https://github.com/boundless-xyz/boundless/" boundless-assessor = { version = "0.15.0", path = "crates/assessor" } boundless-cli = { version = "0.15.0", path = "crates/boundless-cli" } boundless-market = { version = "0.15.0", path = "crates/boundless-market" } +boundless-rewards = { version = "0.15.0", path = "crates/rewards" } boundless-test-utils = { path = "crates/test-utils" } boundless-zkc = { version = "0.15.0", path = "crates/zkc" } boundless-povw = { path = "crates/povw" } diff --git a/crates/boundless-cli/Cargo.toml b/crates/boundless-cli/Cargo.toml index 2c28a1134..e41e86713 100644 --- a/crates/boundless-cli/Cargo.toml +++ b/crates/boundless-cli/Cargo.toml @@ -21,6 +21,7 @@ bonsai-sdk = { workspace = true } boundless-assessor = { workspace = true } boundless-market = { workspace = true } boundless-povw = { workspace = true } +boundless-rewards = { workspace = true } boundless-zkc = { workspace = true } bytemuck = { workspace = true } chrono = { workspace = true } @@ -40,6 +41,7 @@ serde_json = { workspace = true } serde_yaml = { workspace = true } shadow-rs = { version = "1.1", default-features = false } sqlx = { workspace = true, features = ["postgres", "runtime-tokio", "tls-rustls", "chrono"] } +tabled = "0.15" tokio = { workspace = true, features = ["rt-multi-thread"] } tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter"] } diff --git a/crates/guest/assessor/assessor-guest/Cargo.lock b/crates/guest/assessor/assessor-guest/Cargo.lock index 47fc9bc01..42fec89ab 100644 --- a/crates/guest/assessor/assessor-guest/Cargo.lock +++ b/crates/guest/assessor/assessor-guest/Cargo.lock @@ -46,9 +46,9 @@ checksum = "683d7910e743518b0e34f1186f92494becacb047c7b6bf616c96772180fef923" [[package]] name = "alloy" -version = "1.0.28" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b6b7b6a501cba311fdb8f7d7798a6d9ca8a5bec16609f1629efeafbac68476e" +checksum = "fa7413bbf62c40b5db916ad5a1c382df1affe42080e148d69932bb7f0a12f32e" dependencies = [ "alloy-consensus", "alloy-contract", @@ -454,9 +454,9 @@ dependencies = [ [[package]] name = "alloy-rpc-types" -version = "1.0.28" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "835576f756f0e99b6341eb44d2de4bf162586995819da81f808b32265953167e" +checksum = "e6445ccdc73c8a97e1794e9f0f91af52fb2bbf9ff004339a801b0293c3928abb" dependencies = [ "alloy-primitives", "alloy-rpc-types-eth", @@ -5900,8 +5900,9 @@ dependencies = [ [[package]] name = "sha2" -version = "0.10.8" -source = "git+https://github.com/risc0/RustCrypto-hashes?tag=sha2-v0.10.8-risczero.0#244dc3b08788f7a4ccce14c66896ae3b4f24c166" +version = "0.10.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -7343,3 +7344,8 @@ dependencies = [ "quote", "syn 2.0.106", ] + +[[patch.unused]] +name = "sha2" +version = "0.10.8" +source = "git+https://github.com/risc0/RustCrypto-hashes?tag=sha2-v0.10.8-risczero.0#244dc3b08788f7a4ccce14c66896ae3b4f24c166" diff --git a/crates/indexer/Cargo.toml b/crates/indexer/Cargo.toml index 04367f7eb..32ed82da7 100644 --- a/crates/indexer/Cargo.toml +++ b/crates/indexer/Cargo.toml @@ -10,13 +10,27 @@ publish = false [package.metadata.release] release = false +[[bin]] +name = "market-indexer" +path = "src/bin/market-indexer.rs" + +[[bin]] +name = "rewards-indexer" +path = "src/bin/rewards-indexer.rs" + [dependencies] alloy = { workspace = true, features = ["network", "node-bindings", "rpc-types", "providers", "transports", "sol-types", "contract", "signers", "signer-local"] } anyhow = { workspace = true } async-trait = { workspace = true } boundless-market = { workspace = true } +boundless-povw = { workspace = true } +boundless-rewards = { workspace = true } +boundless-zkc = { workspace = true } +chrono = { workspace = true } clap = { workspace = true } hex = { workspace = true } +serde = { workspace = true } +serde_json = { workspace = true } sqlx = { workspace = true, features = [ "any", "postgres", "sqlite", "runtime-tokio", "json", "migrate", "macros" ] } tempfile = { workspace = true } thiserror = { workspace = true } @@ -26,11 +40,12 @@ tracing-subscriber = { workspace = true, features = ["env-filter", "json"] } url = { workspace = true } [dev-dependencies] +assert_cmd = "2.0" boundless-cli = { workspace = true } boundless-market = { workspace = true } boundless-test-utils = { workspace = true } futures-util = { workspace = true } -indexer-monitor = { path = "../ops-lambdas/indexer-monitor" } +indexer-monitor = { path = "../lambdas/indexer-monitor" } risc0-zkvm = { workspace = true, features = ["std", "default"] } tracing-test = { workspace = true } diff --git a/crates/indexer/migrations/10_staking_positions.sql b/crates/indexer/migrations/10_staking_positions.sql new file mode 100644 index 000000000..667cb471f --- /dev/null +++ b/crates/indexer/migrations/10_staking_positions.sql @@ -0,0 +1,33 @@ +-- Staking positions by epoch - historical snapshots +CREATE TABLE IF NOT EXISTS staking_positions_by_epoch ( + staker_address TEXT NOT NULL, + epoch BIGINT NOT NULL, + staked_amount TEXT NOT NULL, + is_withdrawing INTEGER NOT NULL, + rewards_delegated_to TEXT, + votes_delegated_to TEXT, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP, + PRIMARY KEY (staker_address, epoch) +); + +-- Indexes for efficient querying +CREATE INDEX idx_staking_epoch ON staking_positions_by_epoch(epoch); +CREATE INDEX idx_staking_amount_by_epoch ON staking_positions_by_epoch(epoch, staked_amount DESC); +CREATE INDEX idx_staking_rewards_delegated ON staking_positions_by_epoch(rewards_delegated_to); +CREATE INDEX idx_staking_votes_delegated ON staking_positions_by_epoch(votes_delegated_to); + +-- Staking positions aggregate - current state after all events +CREATE TABLE IF NOT EXISTS staking_positions_aggregate ( + staker_address TEXT NOT NULL PRIMARY KEY, + total_staked TEXT NOT NULL, + is_withdrawing INTEGER NOT NULL, + rewards_delegated_to TEXT, + votes_delegated_to TEXT, + epochs_participated BIGINT NOT NULL, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP +); + +-- Index for leaderboard sorting +CREATE INDEX idx_staking_aggregate_amount ON 
+
+-- Indexes for efficient querying
+CREATE INDEX idx_staking_epoch ON staking_positions_by_epoch(epoch);
+CREATE INDEX idx_staking_amount_by_epoch ON staking_positions_by_epoch(epoch, staked_amount DESC);
+CREATE INDEX idx_staking_rewards_delegated ON staking_positions_by_epoch(rewards_delegated_to);
+CREATE INDEX idx_staking_votes_delegated ON staking_positions_by_epoch(votes_delegated_to);
+
+-- Staking positions aggregate - current state after all events
+CREATE TABLE IF NOT EXISTS staking_positions_aggregate (
+    staker_address TEXT NOT NULL PRIMARY KEY,
+    total_staked TEXT NOT NULL,
+    is_withdrawing INTEGER NOT NULL,
+    rewards_delegated_to TEXT,
+    votes_delegated_to TEXT,
+    epochs_participated BIGINT NOT NULL,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Index for leaderboard sorting
+CREATE INDEX idx_staking_aggregate_amount ON staking_positions_aggregate(total_staked DESC);
+CREATE INDEX idx_staking_aggregate_rewards_delegated ON staking_positions_aggregate(rewards_delegated_to);
+CREATE INDEX idx_staking_aggregate_votes_delegated ON staking_positions_aggregate(votes_delegated_to);
\ No newline at end of file
diff --git a/crates/indexer/migrations/11_delegation_powers.sql b/crates/indexer/migrations/11_delegation_powers.sql
new file mode 100644
index 000000000..7ababd0ca
--- /dev/null
+++ b/crates/indexer/migrations/11_delegation_powers.sql
@@ -0,0 +1,55 @@
+-- Vote delegation powers by epoch - historical snapshots
+CREATE TABLE IF NOT EXISTS vote_delegation_powers_by_epoch (
+    delegate_address TEXT NOT NULL,
+    epoch BIGINT NOT NULL,
+    vote_power TEXT NOT NULL,
+    delegator_count INTEGER NOT NULL,
+    delegators TEXT, -- JSON array of delegator addresses
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY (delegate_address, epoch)
+);
+
+-- Indexes for efficient querying
+CREATE INDEX idx_vote_delegation_epoch ON vote_delegation_powers_by_epoch(epoch);
+CREATE INDEX idx_vote_delegation_power_by_epoch ON vote_delegation_powers_by_epoch(epoch, vote_power DESC);
+
+-- Reward delegation powers by epoch - historical snapshots
+CREATE TABLE IF NOT EXISTS reward_delegation_powers_by_epoch (
+    delegate_address TEXT NOT NULL,
+    epoch BIGINT NOT NULL,
+    reward_power TEXT NOT NULL,
+    delegator_count INTEGER NOT NULL,
+    delegators TEXT, -- JSON array of delegator addresses
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY (delegate_address, epoch)
+);
+
+-- Indexes for efficient querying
+CREATE INDEX idx_reward_delegation_epoch ON reward_delegation_powers_by_epoch(epoch);
+CREATE INDEX idx_reward_delegation_power_by_epoch ON reward_delegation_powers_by_epoch(epoch, reward_power DESC);
+
+-- Vote delegation powers aggregate - current state after all events
+CREATE TABLE IF NOT EXISTS vote_delegation_powers_aggregate (
+    delegate_address TEXT NOT NULL PRIMARY KEY,
+    total_vote_power TEXT NOT NULL,
+    delegator_count INTEGER NOT NULL,
+    delegators TEXT, -- JSON array of delegator addresses
+    epochs_participated BIGINT NOT NULL,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Index for leaderboard sorting
+CREATE INDEX idx_vote_delegation_aggregate_power ON vote_delegation_powers_aggregate(total_vote_power DESC);
+
+-- Reward delegation powers aggregate - current state after all events
+CREATE TABLE IF NOT EXISTS reward_delegation_powers_aggregate (
+    delegate_address TEXT NOT NULL PRIMARY KEY,
+    total_reward_power TEXT NOT NULL,
+    delegator_count INTEGER NOT NULL,
+    delegators TEXT, -- JSON array of delegator addresses
+    epochs_participated BIGINT NOT NULL,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+-- Index for leaderboard sorting
+CREATE INDEX idx_reward_delegation_aggregate_power ON reward_delegation_powers_aggregate(total_reward_power DESC);
\ No newline at end of file
diff --git a/crates/indexer/migrations/12_address_history_indexes.sql b/crates/indexer/migrations/12_address_history_indexes.sql
new file mode 100644
index 000000000..af567f96f
--- /dev/null
+++ b/crates/indexer/migrations/12_address_history_indexes.sql
@@ -0,0 +1,17 @@
+-- These composite indexes optimize queries that filter by address and optionally by epoch range
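+-- Example of a query these indexes serve:
+--   SELECT * FROM staking_positions_by_epoch
+--   WHERE staker_address = :addr AND epoch BETWEEN :from AND :to
+--   ORDER BY epoch DESC;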
+
+-- Index for staking history by address
+CREATE INDEX IF NOT EXISTS idx_staking_address_epoch
+ON staking_positions_by_epoch(staker_address, epoch DESC);
+
+-- Index for PoVW rewards history by address
+CREATE INDEX IF NOT EXISTS idx_povw_rewards_address_epoch
+ON povw_rewards_by_epoch(work_log_id, epoch DESC);
+
+-- Index for vote delegations received by delegate address
+CREATE INDEX IF NOT EXISTS idx_vote_delegation_delegate_epoch
+ON vote_delegation_powers_by_epoch(delegate_address, epoch DESC);
+
+-- Index for reward delegations received by delegate address
+CREATE INDEX IF NOT EXISTS idx_reward_delegation_delegate_epoch
+ON reward_delegation_powers_by_epoch(delegate_address, epoch DESC);
\ No newline at end of file
diff --git a/crates/indexer/migrations/13_summary_statistics.sql b/crates/indexer/migrations/13_summary_statistics.sql
new file mode 100644
index 000000000..a9298fc9f
--- /dev/null
+++ b/crates/indexer/migrations/13_summary_statistics.sql
@@ -0,0 +1,42 @@
+-- Global PoVW summary statistics
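+-- (expected to hold a single row keyed by a fixed id)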
+CREATE TABLE IF NOT EXISTS povw_summary_stats (
+    id INTEGER PRIMARY KEY,
+    total_epochs_with_work BIGINT NOT NULL,
+    total_unique_work_log_ids BIGINT NOT NULL,
+    total_work_all_time TEXT NOT NULL,
+    total_emissions_all_time TEXT NOT NULL,
+    total_capped_rewards_all_time TEXT NOT NULL,
+    total_uncapped_rewards_all_time TEXT NOT NULL
+);
+
+-- Per-epoch PoVW summary
+CREATE TABLE IF NOT EXISTS epoch_povw_summary (
+    epoch BIGINT PRIMARY KEY,
+    total_work TEXT NOT NULL,
+    total_emissions TEXT NOT NULL,
+    total_capped_rewards TEXT NOT NULL,
+    total_uncapped_rewards TEXT NOT NULL,
+    epoch_start_time BIGINT NOT NULL,
+    epoch_end_time BIGINT NOT NULL,
+    num_participants BIGINT NOT NULL
+);
+
+-- Global staking summary statistics
+CREATE TABLE IF NOT EXISTS staking_summary_stats (
+    id INTEGER PRIMARY KEY,
+    current_total_staked TEXT NOT NULL,
+    total_unique_stakers BIGINT NOT NULL,
+    current_active_stakers BIGINT NOT NULL,
+    current_withdrawing BIGINT NOT NULL
+);
+
+-- Per-epoch staking summary
+CREATE TABLE IF NOT EXISTS epoch_staking_summary (
+    epoch BIGINT PRIMARY KEY,
+    total_staked TEXT NOT NULL,
+    num_stakers BIGINT NOT NULL,
+    num_withdrawing BIGINT NOT NULL
+);
+
+CREATE INDEX IF NOT EXISTS idx_epoch_povw_summary_epoch ON epoch_povw_summary(epoch);
+CREATE INDEX IF NOT EXISTS idx_epoch_staking_summary_epoch ON epoch_staking_summary(epoch);
\ No newline at end of file
diff --git a/crates/indexer/migrations/14_staking_rewards.sql b/crates/indexer/migrations/14_staking_rewards.sql
new file mode 100644
index 000000000..8d227319e
--- /dev/null
+++ b/crates/indexer/migrations/14_staking_rewards.sql
@@ -0,0 +1,31 @@
+-- Per-epoch staking rewards (mirrors povw_rewards_by_epoch structure)
+CREATE TABLE IF NOT EXISTS staking_rewards_by_epoch (
+    staker_address TEXT NOT NULL,
+    epoch BIGINT NOT NULL,
+    staking_power TEXT NOT NULL,
+    percentage DOUBLE PRECISION NOT NULL,
+    rewards_earned TEXT NOT NULL,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY (staker_address, epoch)
+);
+
+-- Create indexes for efficient queries
+CREATE INDEX idx_staking_rewards_epoch ON staking_rewards_by_epoch(epoch);
+CREATE INDEX idx_staking_rewards_earned ON staking_rewards_by_epoch(rewards_earned DESC);
+CREATE INDEX idx_staking_rewards_staker ON staking_rewards_by_epoch(staker_address);
+
+-- Add total rewards to existing staking positions aggregate table
+ALTER TABLE staking_positions_aggregate
+ADD COLUMN total_rewards_earned TEXT NOT NULL DEFAULT '000000000000000000000000000000000000000000000000000000000000000000000000000000';
+
+-- Add staking rewards fields to existing epoch staking summary (mirrors epoch_povw_summary)
+ALTER TABLE epoch_staking_summary
+ADD COLUMN total_staking_emissions TEXT NOT NULL DEFAULT '000000000000000000000000000000000000000000000000000000000000000000000000000000';
+ALTER TABLE epoch_staking_summary
+ADD COLUMN total_staking_power TEXT NOT NULL DEFAULT '000000000000000000000000000000000000000000000000000000000000000000000000000000';
+ALTER TABLE epoch_staking_summary
+ADD COLUMN num_reward_recipients BIGINT NOT NULL DEFAULT 0;
+
+-- Add staking rewards fields to existing global staking summary (mirrors povw_summary_stats)
+ALTER TABLE staking_summary_stats
+ADD COLUMN total_staking_emissions_all_time TEXT;
\ No newline at end of file
diff --git a/crates/indexer/migrations/15_staking_rewards_refactor.sql b/crates/indexer/migrations/15_staking_rewards_refactor.sql
new file mode 100644
index 000000000..10cabb646
--- /dev/null
+++ b/crates/indexer/migrations/15_staking_rewards_refactor.sql
@@ -0,0 +1,12 @@
+-- Add rewards_generated field to track rewards created by positions (regardless of delegation)
+ALTER TABLE staking_positions_by_epoch
+ADD COLUMN rewards_generated TEXT NOT NULL DEFAULT '000000000000000000000000000000000000000000000000000000000000000000000000000000';
+
+-- Add total_rewards_generated to track lifetime rewards generated by positions owned
+ALTER TABLE staking_positions_aggregate
+ADD COLUMN total_rewards_generated TEXT NOT NULL DEFAULT '000000000000000000000000000000000000000000000000000000000000000000000000000000';
+
+-- Note: total_rewards_earned already exists and tracks rewards actually received
+-- This allows us to distinguish between:
+-- - total_rewards_generated: rewards created by positions owned by this address
+-- - total_rewards_earned: rewards actually received by this address (from own positions or delegations)
\ No newline at end of file
diff --git a/crates/indexer/migrations/16_add_epoch_times_to_staking.sql b/crates/indexer/migrations/16_add_epoch_times_to_staking.sql
new file mode 100644
index 000000000..ee9745449
--- /dev/null
+++ b/crates/indexer/migrations/16_add_epoch_times_to_staking.sql
@@ -0,0 +1,6 @@
+-- Add epoch start and end times to staking summary table
+ALTER TABLE epoch_staking_summary
+ADD COLUMN epoch_start_time BIGINT NOT NULL DEFAULT 0;
+
+ALTER TABLE epoch_staking_summary
+ADD COLUMN epoch_end_time BIGINT NOT NULL DEFAULT 0;
\ No newline at end of file
diff --git a/crates/indexer/migrations/17_add_updated_at_to_summaries.sql b/crates/indexer/migrations/17_add_updated_at_to_summaries.sql
new file mode 100644
index 000000000..c2ad28b03
--- /dev/null
+++ b/crates/indexer/migrations/17_add_updated_at_to_summaries.sql
@@ -0,0 +1,18 @@
+-- Add updated_at timestamps to summary tables
+-- Using TEXT for better compatibility with sqlx Any driver
+
+-- Add updated_at to global PoVW summary statistics
+ALTER TABLE povw_summary_stats
+ADD COLUMN updated_at TEXT;
+
+-- Add updated_at to per-epoch PoVW summary
+ALTER TABLE epoch_povw_summary
+ADD COLUMN updated_at TEXT;
+
+-- Add updated_at to global staking summary statistics
+ALTER TABLE staking_summary_stats
+ADD COLUMN updated_at TEXT;
+
+-- Add updated_at to per-epoch staking summary
+ALTER TABLE epoch_staking_summary
+ADD COLUMN updated_at TEXT;
\ No newline at end of file
diff --git a/crates/indexer/migrations/7_povw_rewards_by_epoch.sql b/crates/indexer/migrations/7_povw_rewards_by_epoch.sql
new file mode 100644
index 000000000..e8d439895
--- /dev/null
+++ b/crates/indexer/migrations/7_povw_rewards_by_epoch.sql
@@ -0,0 +1,16 @@
+CREATE TABLE IF NOT EXISTS povw_rewards_by_epoch (
+    work_log_id TEXT NOT NULL,
+    epoch BIGINT NOT NULL,
+    work_submitted TEXT NOT NULL,
+    percentage DOUBLE PRECISION NOT NULL,
+    uncapped_rewards TEXT NOT NULL,
+    reward_cap TEXT NOT NULL,
+    actual_rewards TEXT NOT NULL,
+    is_capped INTEGER NOT NULL,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
+    PRIMARY KEY (work_log_id, epoch)
+);
+
+CREATE INDEX idx_povw_rewards_epoch ON povw_rewards_by_epoch(epoch);
+CREATE INDEX idx_povw_rewards_actual ON povw_rewards_by_epoch(actual_rewards DESC);
+CREATE INDEX idx_povw_rewards_work_log ON povw_rewards_by_epoch(work_log_id);
\ No newline at end of file
diff --git a/crates/indexer/migrations/8_povw_rewards_aggregate.sql b/crates/indexer/migrations/8_povw_rewards_aggregate.sql
new file mode 100644
index 000000000..98b0c527d
--- /dev/null
+++ b/crates/indexer/migrations/8_povw_rewards_aggregate.sql
@@ -0,0 +1,18 @@
+CREATE TABLE IF NOT EXISTS povw_rewards_aggregate (
+    work_log_id TEXT PRIMARY KEY,
+    total_work_submitted TEXT NOT NULL,
+    total_actual_rewards TEXT NOT NULL,
+    total_uncapped_rewards TEXT NOT NULL,
+    epochs_participated BIGINT DEFAULT 0,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
+
+CREATE INDEX idx_povw_aggregate_rewards ON povw_rewards_aggregate(total_actual_rewards DESC);
+CREATE INDEX idx_povw_aggregate_work ON povw_rewards_aggregate(total_work_submitted DESC);
+
+-- Table to store indexer state (current epoch, last processed block, etc)
+CREATE TABLE IF NOT EXISTS indexer_state (
+    key TEXT PRIMARY KEY,
+    value TEXT NOT NULL,
+    updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
+);
\ No newline at end of file
diff --git a/crates/indexer/migrations/9_add_staked_amount.sql b/crates/indexer/migrations/9_add_staked_amount.sql
new file mode 100644
index 000000000..75bd31b0c
--- /dev/null
+++ b/crates/indexer/migrations/9_add_staked_amount.sql
@@ -0,0 +1,2 @@
+-- Add staked_amount column to povw_rewards_by_epoch table
+ALTER TABLE povw_rewards_by_epoch ADD COLUMN staked_amount TEXT NOT NULL DEFAULT '000000000000000000000000000000000000000000000000000000000000000000000000000000';
\ No newline at end of file
diff --git a/crates/indexer/src/main.rs b/crates/indexer/src/bin/market-indexer.rs
similarity index 97%
rename from crates/indexer/src/main.rs
rename to crates/indexer/src/bin/market-indexer.rs
index 0b1d1d5e4..fa63971d6 100644
--- a/crates/indexer/src/main.rs
+++ b/crates/indexer/src/bin/market-indexer.rs
@@ -16,7 +16,7 @@ use std::time::Duration;
 
 use alloy::{primitives::Address, signers::local::PrivateKeySigner};
 use anyhow::{bail, Result};
-use boundless_indexer::{IndexerService, IndexerServiceConfig};
+use boundless_indexer::market::{IndexerService, IndexerServiceConfig};
 use clap::Parser;
 use url::Url;
 
diff --git a/crates/indexer/src/bin/rewards-indexer.rs b/crates/indexer/src/bin/rewards-indexer.rs
new file mode 100644
index 000000000..885a150e0
--- /dev/null
+++ b/crates/indexer/src/bin/rewards-indexer.rs
@@ -0,0 +1,139 @@
+// Copyright 2025 RISC Zero, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
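+
+//! Rewards indexer binary: indexes staking, delegation, and PoVW reward state
+//! from the veZKC, ZKC, and PoVW accounting contracts into the rewards database.
+//!
+//! Illustrative invocation (addresses and connection string are placeholders):
+//!
+//!   rewards-indexer --rpc-url https://eth.example.org \
+//!     --vezkc-address 0x... --zkc-address 0x... --povw-accounting-address 0x... \
+//!     --db postgres://user:pass@localhost/indexer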
+
+use std::time::Duration;
+
+use alloy::primitives::Address;
+use anyhow::{bail, Result};
+use boundless_indexer::rewards::{RewardsIndexerService, RewardsIndexerServiceConfig};
+use clap::Parser;
+use url::Url;
+
+/// Arguments for the rewards indexer.
+#[derive(Parser, Debug)]
+#[clap(author, version, about, long_about = None)]
+struct RewardsIndexerArgs {
+    /// URL of the Ethereum RPC endpoint.
+    #[clap(short, long, env)]
+    rpc_url: Url,
+
+    /// Address of the veZKC (staking) contract.
+    #[clap(long, env)]
+    vezkc_address: Address,
+
+    /// Address of the ZKC token contract.
+    #[clap(long, env)]
+    zkc_address: Address,
+
+    /// Address of the PoVW Accounting contract.
+    #[clap(long, env)]
+    povw_accounting_address: Address,
+
+    /// DB connection string.
+    #[clap(long, env = "DATABASE_URL")]
+    db: String,
+
+    /// Starting block number (if not set, uses chain-specific defaults).
+    #[clap(long)]
+    start_block: Option<u64>,
+
+    /// Ending block number (must be provided together with --end-epoch).
+    #[clap(long, requires = "end_epoch")]
+    end_block: Option<u64>,
+
+    /// Ending epoch number (must be provided together with --end-block).
+    #[clap(long, requires = "end_block")]
+    end_epoch: Option<u64>,
+
+    /// Interval in seconds between checks for new events.
+    #[clap(long, default_value = "600")]
+    interval: u64,
+
+    /// Number of retries before quitting after an error.
+    #[clap(long, default_value = "3")]
+    retries: u32,
+
+    /// Whether to log in JSON format.
+    #[clap(long, env, default_value_t = false)]
+    log_json: bool,
+
+    /// Number of epochs back from the current block to index on each run. Only valid when
+    /// end_block and end_epoch are not provided; defaults to processing all epochs.
+    /// Typically used only for testing, since accurate aggregate counts require reindexing from 0.
+    #[clap(long, conflicts_with_all = ["end_block", "end_epoch"])]
+    epochs_to_process: Option<u64>,
+}
+
+#[tokio::main]
+async fn main() -> Result<()> {
+    let args = RewardsIndexerArgs::parse();
+
+    let filter = tracing_subscriber::EnvFilter::builder()
+        .with_default_directive(tracing_subscriber::filter::LevelFilter::INFO.into())
+        .from_env_lossy();
+
+    if args.log_json {
+        tracing_subscriber::fmt().with_ansi(false).json().with_env_filter(filter).init();
+    } else {
+        tracing_subscriber::fmt().with_ansi(false).with_env_filter(filter).init();
+    }
+
+    let config = RewardsIndexerServiceConfig {
+        interval: Duration::from_secs(args.interval),
+        retries: args.retries,
+        start_block: args.start_block,
+        end_block: args.end_block,
+        end_epoch: args.end_epoch,
+        epochs_to_process: args.epochs_to_process,
+    };
+
+    let mut service = RewardsIndexerService::new(
+        args.rpc_url,
+        args.vezkc_address,
+        args.zkc_address,
+        args.povw_accounting_address,
+        &args.db,
+        config,
+    )
+    .await?;
+
+    // If both end-epoch and end-block are specified, run once and exit
+    if args.end_epoch.is_some() && args.end_block.is_some() {
+        tracing::info!("Running indexer once (end-epoch and end-block specified)");
+        service.run().await?;
+        tracing::info!("Indexer completed successfully");
+        return Ok(());
+    }
+
+    // Otherwise, run in a loop
+    let mut failures = 0u32;
+    loop {
+        match service.run().await {
+            Ok(_) => {
+                failures = 0;
+                tracing::info!("Sleeping for {} seconds", args.interval);
+                tokio::time::sleep(Duration::from_secs(args.interval)).await;
+            }
+            Err(e) => {
+                failures += 1;
+                tracing::error!("Error running rewards indexer: {:?}", e);
+                if failures >= args.retries {
+                    bail!("Maximum retries reached");
+                }
+                tracing::info!("Retrying in {} seconds", args.interval);
+                tokio::time::sleep(Duration::from_secs(args.interval)).await;
+            }
+        }
+    }
+}
diff --git a/crates/indexer/src/db.rs b/crates/indexer/src/db/market.rs
similarity index 98%
rename from crates/indexer/src/db.rs
rename to crates/indexer/src/db/market.rs
index 94307a3b4..bd076b74d 100644
--- a/crates/indexer/src/db.rs
+++ b/crates/indexer/src/db/market.rs
@@ -14,6 +14,7 @@
 
 use std::{str::FromStr, sync::Arc};
 
+use super::DbError;
 use alloy::primitives::{Address, B256, U256};
 use async_trait::async_trait;
 use boundless_market::contracts::{
@@ -24,7 +25,6 @@ use sqlx::{
     any::{install_default_drivers, AnyConnectOptions, AnyPoolOptions},
     AnyPool, Row,
 };
-use thiserror::Error;
 
 const SQL_BLOCK_KEY: i64 = 0;
 
@@ -42,24 +42,6 @@ impl TxMetadata {
     }
 }
 
-#[derive(Error, Debug)]
-pub enum DbError {
-    #[error("SQL error {0:?}")]
-    SqlErr(#[from] sqlx::Error),
-
-    #[error("SQL Migration error {0:?}")]
-    MigrateErr(#[from] sqlx::migrate::MigrateError),
-
-    #[error("Invalid block number: {0}")]
-    BadBlockNumb(String),
-
-    #[error("Failed to set last block")]
-    SetBlockFail,
-
-    #[error("Invalid transaction: {0}")]
-    BadTransaction(String),
-}
-
 #[async_trait]
 pub trait IndexerDb {
     async fn get_last_block(&self) -> Result<Option<u64>, DbError>;
diff --git a/crates/indexer/src/db/mod.rs b/crates/indexer/src/db/mod.rs
new file mode 100644
index 000000000..ca6034db5
--- /dev/null
+++ b/crates/indexer/src/db/mod.rs
@@ -0,0 +1,39 @@
+// Copyright 2025 RISC Zero, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod market;
+pub mod rewards;
+
+use thiserror::Error;
+
+// Re-export common types from market module for backwards compatibility
+pub use market::{AnyDb, DbObj, IndexerDb, TxMetadata};
+
+#[derive(Error, Debug)]
+pub enum DbError {
+    #[error("SQL error {0:?}")]
+    SqlErr(#[from] sqlx::Error),
+
+    #[error("SQL Migration error {0:?}")]
+    MigrateErr(#[from] sqlx::migrate::MigrateError),
+
+    #[error("Invalid block number: {0}")]
+    BadBlockNumb(String),
+
+    #[error("Failed to set last block")]
+    SetBlockFail,
+
+    #[error("Invalid transaction: {0}")]
+    BadTransaction(String),
+}
diff --git a/crates/indexer/src/db/rewards.rs b/crates/indexer/src/db/rewards.rs
new file mode 100644
index 000000000..87cd6980b
--- /dev/null
+++ b/crates/indexer/src/db/rewards.rs
@@ -0,0 +1,2155 @@
+// Copyright 2025 RISC Zero, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
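+
+//! Storage layer for the rewards indexer: per-epoch snapshots and all-time
+//! aggregates for PoVW rewards, staking positions, and delegation powers.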
+
+use std::{str::FromStr, sync::Arc};
+
+use alloy::primitives::{Address, U256};
+use async_trait::async_trait;
+use boundless_rewards::{StakingPosition, WorkLogRewardInfo};
+use chrono::Utc;
+use serde_json;
+use sqlx::{any::AnyPoolOptions, AnyPool, Row};
+
+use super::DbError;
+
+pub type RewardsDbObj = Arc<dyn RewardsIndexerDb + Send + Sync>;
+
+/// Convert a U256 to a zero-padded string for proper database sorting
+/// U256 max value has 78 decimal digits (2^256 ≈ 1.15 * 10^77)
+fn pad_u256(value: U256) -> String {
+    format!("{:0>78}", value)
+}
+
+/// Convert a zero-padded string back to U256
+fn unpad_u256(s: &str) -> Result<U256, DbError> {
+    U256::from_str(s.trim_start_matches('0')).or_else(|_| {
+        // If trimming stripped every character, the input was all zeros and the value is 0
+        if s.chars().all(|c| c == '0') {
+            Ok(U256::ZERO)
+        } else {
+            Err(DbError::BadTransaction(format!("Invalid U256 string: {}", s)))
+        }
+    })
+}
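+
+// For example, pad_u256(U256::from(42)) yields a 78-character string of 76
+// zeros followed by "42", so lexicographic comparison of the TEXT columns
+// matches numeric comparison; unpad_u256 reverses this when reading back.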
+
+#[derive(Debug, Clone)]
+pub struct PovwRewardByEpoch {
+    pub work_log_id: Address,
+    pub epoch: u64,
+    pub work_submitted: U256,
+    pub percentage: f64,
+    pub uncapped_rewards: U256,
+    pub reward_cap: U256,
+    pub actual_rewards: U256,
+    pub is_capped: bool,
+    pub staked_amount: U256,
+}
+
+impl From<WorkLogRewardInfo> for PovwRewardByEpoch {
+    fn from(info: WorkLogRewardInfo) -> Self {
+        // Note: percentage needs to be calculated by the caller since we don't have total_work here
+        Self {
+            work_log_id: info.work_log_id,
+            epoch: 0, // Will be set by caller
+            work_submitted: info.work,
+            percentage: 0.0, // Will be set by caller
+            uncapped_rewards: info.proportional_rewards,
+            reward_cap: info.reward_cap,
+            actual_rewards: info.capped_rewards,
+            is_capped: info.is_capped,
+            staked_amount: info.staking_amount,
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct PovwRewardAggregate {
+    pub work_log_id: Address,
+    pub total_work_submitted: U256,
+    pub total_actual_rewards: U256,
+    pub total_uncapped_rewards: U256,
+    pub epochs_participated: u64,
+}
+
+#[derive(Debug, Clone)]
+pub struct StakingPositionByEpoch {
+    pub staker_address: Address,
+    pub epoch: u64,
+    pub staked_amount: U256,
+    pub is_withdrawing: bool,
+    pub rewards_delegated_to: Option<Address>,
+    pub votes_delegated_to: Option<Address>,
+    pub rewards_generated: U256,
+}
+
+impl From<(Address, u64, &StakingPosition)> for StakingPositionByEpoch {
+    fn from(value: (Address, u64, &StakingPosition)) -> Self {
+        Self {
+            staker_address: value.0,
+            epoch: value.1,
+            staked_amount: value.2.staked_amount,
+            is_withdrawing: value.2.is_withdrawing,
+            rewards_delegated_to: value.2.rewards_delegated_to,
+            votes_delegated_to: value.2.votes_delegated_to,
+            rewards_generated: value.2.rewards_generated,
+        }
+    }
+}
+
+#[derive(Debug, Clone)]
+pub struct StakingPositionAggregate {
+    pub staker_address: Address,
+    pub total_staked: U256,
+    pub is_withdrawing: bool,
+    pub rewards_delegated_to: Option<Address>,
+    pub votes_delegated_to: Option<Address>,
+    pub epochs_participated: u64,
+    pub total_rewards_generated: U256,
+    pub total_rewards_earned: U256,
+}
+
+#[derive(Debug, Clone)]
+pub struct StakingRewardByEpoch {
+    pub staker_address: Address,
+    pub epoch: u64,
+    pub staking_power: U256,
+    pub percentage: f64,
+    pub rewards_earned: U256,
+}
+
+#[derive(Debug, Clone)]
+pub struct VoteDelegationPowerByEpoch {
+    pub delegate_address: Address,
+    pub epoch: u64,
+    pub vote_power: U256,
+    pub delegator_count: u64,
+    pub delegators: Vec<Address>,
+}
+
+#[derive(Debug, Clone)]
+pub struct RewardDelegationPowerByEpoch {
+    pub delegate_address: Address,
+    pub epoch: u64,
+    pub reward_power: U256,
+    pub delegator_count: u64,
+    pub delegators: Vec<Address>,
+}
+
+#[derive(Debug, Clone)]
+pub struct VoteDelegationPowerAggregate {
+    pub delegate_address: Address,
+    pub total_vote_power: U256,
+    pub delegator_count: u64,
+    pub delegators: Vec<Address>,
+    pub epochs_participated: u64,
+}
+
+#[derive(Debug, Clone)]
+pub struct RewardDelegationPowerAggregate {
+    pub delegate_address: Address,
+    pub total_reward_power: U256,
+    pub delegator_count: u64,
+    pub delegators: Vec<Address>,
+    pub epochs_participated: u64,
+}
+
+/// Global PoVW summary statistics across all epochs
+#[derive(Debug, Clone)]
+pub struct PoVWSummaryStats {
+    pub total_epochs_with_work: u64,
+    pub total_unique_work_log_ids: u64,
+    pub total_work_all_time: U256,
+    pub total_emissions_all_time: U256,
+    pub total_capped_rewards_all_time: U256,
+    pub total_uncapped_rewards_all_time: U256,
+    pub updated_at: Option<String>,
+}
+
+/// Per-epoch PoVW summary
+#[derive(Debug, Clone)]
+pub struct EpochPoVWSummary {
+    pub epoch: u64,
+    pub total_work: U256,
+    pub total_emissions: U256,
+    pub total_capped_rewards: U256,
+    pub total_uncapped_rewards: U256,
+    pub epoch_start_time: u64,
+    pub epoch_end_time: u64,
+    pub num_participants: u64,
+    pub updated_at: Option<String>,
+}
+
+/// Global staking summary statistics
+#[derive(Debug, Clone)]
+pub struct StakingSummaryStats {
+    pub current_total_staked: U256,
+    pub total_unique_stakers: u64,
+    pub current_active_stakers: u64,
+    pub current_withdrawing: u64,
+    pub total_staking_emissions_all_time: Option<U256>,
+    pub updated_at: Option<String>,
+}
+
+/// Per-epoch staking summary
+#[derive(Debug, Clone)]
+pub struct EpochStakingSummary {
+    pub epoch: u64,
+    pub total_staked: U256,
+    pub num_stakers: u64,
+    pub num_withdrawing: u64,
+    pub total_staking_emissions: U256,
+    pub total_staking_power: U256,
+    pub num_reward_recipients: u64,
+    pub epoch_start_time: u64,
+    pub epoch_end_time: u64,
+    pub updated_at: Option<String>,
+}
+
+#[async_trait]
+pub trait RewardsIndexerDb {
+    /// Upsert rewards data for a specific epoch
+    async fn upsert_povw_rewards_by_epoch(
+        &self,
+        epoch: u64,
+        rewards: Vec<PovwRewardByEpoch>,
+    ) -> Result<(), DbError>;
+
+    /// Get rewards for a specific epoch with pagination
+    async fn get_povw_rewards_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<PovwRewardByEpoch>, DbError>;
+
+    /// Get all rewards for a specific work log ID
+    async fn get_povw_rewards_by_work_log(
+        &self,
+        work_log_id: Address,
+    ) -> Result<Vec<PovwRewardByEpoch>, DbError>;
+
+    /// Upsert aggregate rewards data
+    async fn upsert_povw_rewards_aggregate(
+        &self,
+        aggregates: Vec<PovwRewardAggregate>,
+    ) -> Result<(), DbError>;
+
+    /// Get aggregate rewards with pagination, sorted by total rewards
+    async fn get_povw_rewards_aggregate(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<PovwRewardAggregate>, DbError>;
+
+    /// Get PoVW rewards aggregate for a specific address
+    async fn get_povw_rewards_aggregate_by_address(
+        &self,
+        address: Address,
+    ) -> Result<Option<PovwRewardAggregate>, DbError>;
+
+    /// Get the current epoch from indexer state
+    async fn get_current_epoch(&self) -> Result<Option<u64>, DbError>;
+
+    /// Set the current epoch in indexer state
+    async fn set_current_epoch(&self, epoch: u64) -> Result<(), DbError>;
+
+    /// Get the last processed block for rewards indexer
+    async fn get_last_rewards_block(&self) -> Result<Option<u64>, DbError>;
+
+    /// Set the last processed block for rewards indexer
+    async fn set_last_rewards_block(&self, block: u64) -> Result<(), DbError>;
+
+    /// Upsert staking positions for a specific epoch
+    async fn upsert_staking_positions_by_epoch(
+        &self,
+        epoch: u64,
+        positions: Vec<StakingPositionByEpoch>,
+    ) -> Result<(), DbError>;
+
+    /// Get staking positions for a specific epoch with pagination
+    async fn get_staking_positions_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<StakingPositionByEpoch>, DbError>;
+
+    /// Upsert aggregate staking positions
+    async fn upsert_staking_positions_aggregate(
+        &self,
+        aggregates: Vec<StakingPositionAggregate>,
+    ) -> Result<(), DbError>;
+
+    /// Get aggregate staking positions with pagination, sorted by total staked
+    async fn get_staking_positions_aggregate(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<StakingPositionAggregate>, DbError>;
+
+    /// Get staking position aggregate for a specific address
+    async fn get_staking_position_aggregate_by_address(
+        &self,
+        address: Address,
+    ) -> Result<Option<StakingPositionAggregate>, DbError>;
+
+    /// Upsert vote delegation powers for a specific epoch
+    async fn upsert_vote_delegation_powers_by_epoch(
+        &self,
+        epoch: u64,
+        powers: Vec<VoteDelegationPowerByEpoch>,
+    ) -> Result<(), DbError>;
+
+    /// Get vote delegation powers for a specific epoch with pagination
+    async fn get_vote_delegation_powers_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<VoteDelegationPowerByEpoch>, DbError>;
+
+    /// Upsert aggregate vote delegation powers
+    async fn upsert_vote_delegation_powers_aggregate(
+        &self,
+        aggregates: Vec<VoteDelegationPowerAggregate>,
+    ) -> Result<(), DbError>;
+
+    /// Get aggregate vote delegation powers with pagination, sorted by total power
+    async fn get_vote_delegation_powers_aggregate(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<VoteDelegationPowerAggregate>, DbError>;
+
+    /// Upsert reward delegation powers for a specific epoch
+    async fn upsert_reward_delegation_powers_by_epoch(
+        &self,
+        epoch: u64,
+        powers: Vec<RewardDelegationPowerByEpoch>,
+    ) -> Result<(), DbError>;
+
+    /// Get reward delegation powers for a specific epoch with pagination
+    async fn get_reward_delegation_powers_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<RewardDelegationPowerByEpoch>, DbError>;
+
+    /// Upsert aggregate reward delegation powers
+    async fn upsert_reward_delegation_powers_aggregate(
+        &self,
+        aggregates: Vec<RewardDelegationPowerAggregate>,
+    ) -> Result<(), DbError>;
+
+    /// Get aggregate reward delegation powers with pagination, sorted by total power
+    async fn get_reward_delegation_powers_aggregate(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<RewardDelegationPowerAggregate>, DbError>;
+
+    /// Get staking history for a specific address across epochs
+    async fn get_staking_history_by_address(
+        &self,
+        address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<StakingPositionByEpoch>, DbError>;
+
+    /// Get PoVW rewards history for a specific address across epochs
+    async fn get_povw_rewards_history_by_address(
+        &self,
+        address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<PovwRewardByEpoch>, DbError>;
+
+    /// Get vote delegations received history for a specific address across epochs
+    async fn get_vote_delegations_received_history(
+        &self,
+        delegate_address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<VoteDelegationPowerByEpoch>, DbError>;
+
+    /// Get reward delegations received history for a specific address across epochs
+    async fn get_reward_delegations_received_history(
+        &self,
+        delegate_address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<RewardDelegationPowerByEpoch>, DbError>;
+
+    /// Upsert global PoVW summary statistics
+    async fn upsert_povw_summary_stats(&self, stats: PoVWSummaryStats) -> Result<(), DbError>;
+
+    /// Get global PoVW summary statistics
+    async fn get_povw_summary_stats(&self) -> Result<Option<PoVWSummaryStats>, DbError>;
+
+    /// Upsert per-epoch PoVW summary
+    async fn upsert_epoch_povw_summary(
+        &self,
+        epoch: u64,
+        summary: EpochPoVWSummary,
+    ) -> Result<(), DbError>;
+
+    /// Get per-epoch PoVW summary
+    async fn get_epoch_povw_summary(&self, epoch: u64)
+        -> Result<Option<EpochPoVWSummary>, DbError>;
+
+    /// Upsert global staking summary statistics
+    async fn upsert_staking_summary_stats(&self, stats: StakingSummaryStats)
+        -> Result<(), DbError>;
+
+    /// Get global staking summary statistics
+    async fn get_staking_summary_stats(&self) -> Result<Option<StakingSummaryStats>, DbError>;
+
+    /// Upsert per-epoch staking summary
+    async fn upsert_epoch_staking_summary(
+        &self,
+        epoch: u64,
+        summary: EpochStakingSummary,
+    ) -> Result<(), DbError>;
+
+    /// Get per-epoch staking summary
+    async fn get_epoch_staking_summary(
+        &self,
+        epoch: u64,
+    ) -> Result<Option<EpochStakingSummary>, DbError>;
+
+    /// Upsert staking rewards for a specific epoch
+    async fn upsert_staking_rewards_by_epoch(
+        &self,
+        epoch: u64,
+        rewards: Vec<StakingRewardByEpoch>,
+    ) -> Result<(), DbError>;
+
+    /// Get staking rewards for a specific epoch with pagination
+    async fn get_staking_rewards_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<StakingRewardByEpoch>, DbError>;
+
+    /// Get staking rewards for a specific address across epochs
+    async fn get_staking_rewards_by_address(
+        &self,
+        address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<StakingRewardByEpoch>, DbError>;
+
+    /// Get all epoch PoVW summaries
+    async fn get_all_epoch_povw_summaries(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<EpochPoVWSummary>, DbError>;
+
+    /// Get all epoch staking summaries
+    async fn get_all_epoch_staking_summaries(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<EpochStakingSummary>, DbError>;
+}
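+
+// Hypothetical caller sketch (the `db` handle and `epoch` are assumed, not
+// defined in this file): every paginated getter above follows the same
+// offset/limit contract, so a consumer can drain a table page by page:
+//
+//     let mut offset = 0;
+//     loop {
+//         let page = db.get_povw_rewards_by_epoch(epoch, offset, 100).await?;
+//         if page.is_empty() { break; }
+//         offset += page.len() as u64;
+//     }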
+
+// Batch insert chunk size to avoid parameter limits
+// PostgreSQL: 65535 max params, SQLite: 999-32766 params (configurable)
+// Using conservative chunk size that works safely for both databases
+const BATCH_INSERT_CHUNK_SIZE: usize = 75;
+
+pub struct RewardsDb {
+    pool: AnyPool,
+}
+
+impl RewardsDb {
+    pub async fn new(database_url: &str) -> Result<Self, DbError> {
+        sqlx::any::install_default_drivers();
+        let pool = AnyPoolOptions::new().max_connections(20).connect(database_url).await?;
+
+        // Run migrations
+        sqlx::migrate!("./migrations").run(&pool).await?;
+
+        Ok(Self { pool })
+    }
+}
+
+#[async_trait]
+impl RewardsIndexerDb for RewardsDb {
+    async fn upsert_povw_rewards_by_epoch(
+        &self,
+        epoch: u64,
+        rewards: Vec<PovwRewardByEpoch>,
+    ) -> Result<(), DbError> {
+        if rewards.is_empty() {
+            return Ok(());
+        }
+
+        let mut tx = self.pool.begin().await?;
+
+        // Process in chunks to avoid parameter limits
+        for chunk in rewards.chunks(BATCH_INSERT_CHUNK_SIZE) {
+            let mut values_clauses = Vec::new();
+            let mut param_idx = 1;
+
+            for _ in chunk {
+                values_clauses.push(format!(
+                    "(${},${},${},${},${},${},${},${},${},CURRENT_TIMESTAMP)",
+                    param_idx,
+                    param_idx + 1,
+                    param_idx + 2,
+                    param_idx + 3,
+                    param_idx + 4,
+                    param_idx + 5,
+                    param_idx + 6,
+                    param_idx + 7,
+                    param_idx + 8
+                ));
+                param_idx += 9;
+            }
+
+            let query = format!(
+                r#"INSERT INTO povw_rewards_by_epoch
+                (work_log_id, epoch, work_submitted, percentage, uncapped_rewards, reward_cap, actual_rewards, is_capped, staked_amount, updated_at)
+                VALUES {}
+                ON CONFLICT (work_log_id, epoch)
+                DO UPDATE SET
+                    work_submitted = EXCLUDED.work_submitted,
+                    percentage = EXCLUDED.percentage,
+                    uncapped_rewards = EXCLUDED.uncapped_rewards,
+                    reward_cap = EXCLUDED.reward_cap,
+                    actual_rewards = EXCLUDED.actual_rewards,
+                    is_capped = EXCLUDED.is_capped,
+                    staked_amount = EXCLUDED.staked_amount,
+                    updated_at = CURRENT_TIMESTAMP"#,
+                values_clauses.join(",")
+            );
+
+            let mut q = sqlx::query(&query);
+            for reward in chunk {
+                q = q
+                    .bind(format!("{:#x}", reward.work_log_id))
+                    .bind(epoch as i64)
+                    .bind(pad_u256(reward.work_submitted))
+                    .bind(reward.percentage)
+                    .bind(pad_u256(reward.uncapped_rewards))
+                    .bind(pad_u256(reward.reward_cap))
+                    .bind(pad_u256(reward.actual_rewards))
+                    .bind(if reward.is_capped { 1i32 } else { 0i32 })
+                    .bind(pad_u256(reward.staked_amount));
+            }
+            q.execute(&mut *tx).await?;
+        }
+
+        tx.commit().await?;
+        Ok(())
+    }
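+
+    // For a chunk of two rows, the statement built above expands to (sketch):
+    //
+    //   INSERT INTO povw_rewards_by_epoch (work_log_id, epoch, ..., updated_at)
+    //   VALUES ($1,...,$9,CURRENT_TIMESTAMP),($10,...,$18,CURRENT_TIMESTAMP)
+    //   ON CONFLICT (work_log_id, epoch) DO UPDATE SET ...
+    //
+    // so a full chunk of 75 rows binds 675 parameters, well under both the
+    // PostgreSQL and SQLite limits noted above.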
+
+    async fn get_povw_rewards_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<PovwRewardByEpoch>, DbError> {
+        let query = r#"
+            SELECT work_log_id, epoch, work_submitted, percentage, uncapped_rewards, reward_cap, actual_rewards, is_capped, staked_amount
+            FROM povw_rewards_by_epoch
+            WHERE epoch = $1
+            ORDER BY work_submitted DESC
+            LIMIT $2 OFFSET $3
+        "#;
+
+        let rows = sqlx::query(query)
+            .bind(epoch as i64)
+            .bind(limit as i64)
+            .bind(offset as i64)
+            .fetch_all(&self.pool)
+            .await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            results.push(PovwRewardByEpoch {
+                work_log_id: Address::from_str(&row.get::<String, _>("work_log_id"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                work_submitted: unpad_u256(&row.get::<String, _>("work_submitted"))?,
+                percentage: row.get("percentage"),
+                uncapped_rewards: unpad_u256(&row.get::<String, _>("uncapped_rewards"))?,
+                reward_cap: unpad_u256(&row.get::<String, _>("reward_cap"))?,
+                actual_rewards: unpad_u256(&row.get::<String, _>("actual_rewards"))?,
+                is_capped: row.get::<i32, _>("is_capped") != 0,
+                staked_amount: unpad_u256(&row.get::<String, _>("staked_amount"))?,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn get_povw_rewards_by_work_log(
+        &self,
+        work_log_id: Address,
+    ) -> Result<Vec<PovwRewardByEpoch>, DbError> {
+        let query = r#"
+            SELECT work_log_id, epoch, work_submitted, percentage, uncapped_rewards, reward_cap, actual_rewards, is_capped, staked_amount
+            FROM povw_rewards_by_epoch
+            WHERE work_log_id = $1
+            ORDER BY epoch DESC
+        "#;
+
+        let rows =
+            sqlx::query(query).bind(format!("{:#x}", work_log_id)).fetch_all(&self.pool).await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            results.push(PovwRewardByEpoch {
+                work_log_id,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                work_submitted: unpad_u256(&row.get::<String, _>("work_submitted"))?,
+                percentage: row.get("percentage"),
+                uncapped_rewards: unpad_u256(&row.get::<String, _>("uncapped_rewards"))?,
+                reward_cap: unpad_u256(&row.get::<String, _>("reward_cap"))?,
+                actual_rewards: unpad_u256(&row.get::<String, _>("actual_rewards"))?,
+                is_capped: row.get::<i32, _>("is_capped") != 0,
+                staked_amount: unpad_u256(&row.get::<String, _>("staked_amount"))?,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn upsert_povw_rewards_aggregate(
+        &self,
+        aggregates: Vec<PovwRewardAggregate>,
+    ) -> Result<(), DbError> {
+        if aggregates.is_empty() {
+            return Ok(());
+        }
+
+        let mut tx = self.pool.begin().await?;
+
+        // Process in chunks to avoid parameter limits
+        for chunk in aggregates.chunks(BATCH_INSERT_CHUNK_SIZE) {
+            let mut values_clauses = Vec::new();
+            let mut param_idx = 1;
+
+            for _ in chunk {
+                values_clauses.push(format!(
+                    "(${},${},${},${},${},CURRENT_TIMESTAMP)",
+                    param_idx,
+                    param_idx + 1,
+                    param_idx + 2,
+                    param_idx + 3,
+                    param_idx + 4
+                ));
+                param_idx += 5;
+            }
+
+            let query = format!(
+                r#"INSERT INTO povw_rewards_aggregate
+                (work_log_id, total_work_submitted, total_actual_rewards, total_uncapped_rewards, epochs_participated, updated_at)
+                VALUES {}
+                ON CONFLICT (work_log_id)
+                DO UPDATE SET
+                    total_work_submitted = EXCLUDED.total_work_submitted,
+                    total_actual_rewards = EXCLUDED.total_actual_rewards,
+                    total_uncapped_rewards = EXCLUDED.total_uncapped_rewards,
+                    epochs_participated = EXCLUDED.epochs_participated,
+                    updated_at = CURRENT_TIMESTAMP"#,
+                values_clauses.join(",")
+            );
+
+            let mut q = sqlx::query(&query);
+            for agg in chunk {
+                q = q
+                    .bind(format!("{:#x}", agg.work_log_id))
+                    .bind(pad_u256(agg.total_work_submitted))
+                    .bind(pad_u256(agg.total_actual_rewards))
+                    .bind(pad_u256(agg.total_uncapped_rewards))
+                    .bind(agg.epochs_participated as i64);
+            }
+            q.execute(&mut *tx).await?;
+        }
+
+        tx.commit().await?;
+        Ok(())
+    }
+
+    async fn get_povw_rewards_aggregate(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<PovwRewardAggregate>, DbError> {
+        let query = r#"
+            SELECT work_log_id, total_work_submitted, total_actual_rewards, total_uncapped_rewards, epochs_participated
+            FROM povw_rewards_aggregate
+            ORDER BY total_work_submitted DESC
+            LIMIT $1 OFFSET $2
+        "#;
+
+        let rows =
+            sqlx::query(query).bind(limit as i64).bind(offset as i64).fetch_all(&self.pool).await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            results.push(PovwRewardAggregate {
+                work_log_id: Address::from_str(&row.get::<String, _>("work_log_id"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                total_work_submitted: unpad_u256(&row.get::<String, _>("total_work_submitted"))?,
+                total_actual_rewards: unpad_u256(&row.get::<String, _>("total_actual_rewards"))?,
+                total_uncapped_rewards: unpad_u256(
+                    &row.get::<String, _>("total_uncapped_rewards"),
+                )?,
+                epochs_participated: row.get::<i64, _>("epochs_participated") as u64,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn get_povw_rewards_aggregate_by_address(
+        &self,
+        address: Address,
+    ) -> Result<Option<PovwRewardAggregate>, DbError> {
+        let query = r#"
+            SELECT work_log_id, total_work_submitted, total_actual_rewards, total_uncapped_rewards, epochs_participated
+            FROM povw_rewards_aggregate
+            WHERE work_log_id = $1
+        "#;
+
+        let row =
+            sqlx::query(query).bind(format!("{:#x}", address)).fetch_optional(&self.pool).await?;
+
+        if let Some(row) = row {
+            Ok(Some(PovwRewardAggregate {
+                work_log_id: Address::from_str(&row.get::<String, _>("work_log_id"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                total_work_submitted: unpad_u256(&row.get::<String, _>("total_work_submitted"))?,
+                total_actual_rewards: unpad_u256(&row.get::<String, _>("total_actual_rewards"))?,
+                total_uncapped_rewards: unpad_u256(
+                    &row.get::<String, _>("total_uncapped_rewards"),
+                )?,
+                epochs_participated: row.get::<i64, _>("epochs_participated") as u64,
+            }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    async fn get_current_epoch(&self) -> Result<Option<u64>, DbError> {
+        let query = "SELECT value FROM indexer_state WHERE key = 'current_epoch'";
+        let result = sqlx::query(query).fetch_optional(&self.pool).await?;
+
+        match result {
+            Some(row) => {
+                let value: String = row.get("value");
+                Ok(Some(value.parse().map_err(|_| DbError::BadBlockNumb(value))?))
+            }
+            None => Ok(None),
+        }
+    }
+
+    async fn set_current_epoch(&self, epoch: u64) -> Result<(), DbError> {
+        let query = r#"
+            INSERT INTO indexer_state (key, value, updated_at)
+            VALUES ('current_epoch', $1, CURRENT_TIMESTAMP)
+            ON CONFLICT (key)
+            DO UPDATE SET value = $1, updated_at = CURRENT_TIMESTAMP
+        "#;
+
+        sqlx::query(query).bind(epoch.to_string()).execute(&self.pool).await?;
+
+        Ok(())
+    }
+
+    async fn get_last_rewards_block(&self) -> Result<Option<u64>, DbError> {
+        let query = "SELECT value FROM indexer_state WHERE key = 'last_rewards_block'";
+        let result = sqlx::query(query).fetch_optional(&self.pool).await?;
+
+        match result {
+            Some(row) => {
+                let value: String = row.get("value");
+                Ok(Some(value.parse().map_err(|_| DbError::BadBlockNumb(value))?))
+            }
+            None => Ok(None),
+        }
+    }
+
+    async fn set_last_rewards_block(&self, block: u64) -> Result<(), DbError> {
+        let query = r#"
+            INSERT INTO indexer_state (key, value, updated_at)
+            VALUES ('last_rewards_block', $1, CURRENT_TIMESTAMP)
+            ON CONFLICT (key)
+            DO UPDATE SET value = $1, updated_at = CURRENT_TIMESTAMP
+        "#;
+
+        sqlx::query(query).bind(block.to_string()).execute(&self.pool).await?;
+
+        Ok(())
+    }
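+
+    // The four state accessors above share the single `indexer_state`
+    // key/value table; after `set_current_epoch(42)` it would contain,
+    // e.g. (sketch):
+    //
+    //   key           | value | updated_at
+    //   --------------+-------+--------------------
+    //   current_epoch | 42    | 2025-01-01 00:00:00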
+
+    async fn upsert_staking_positions_by_epoch(
+        &self,
+        epoch: u64,
+        positions: Vec<StakingPositionByEpoch>,
+    ) -> Result<(), DbError> {
+        if positions.is_empty() {
+            return Ok(());
+        }
+
+        let mut tx = self.pool.begin().await?;
+
+        // Process in chunks to avoid parameter limits
+        for chunk in positions.chunks(BATCH_INSERT_CHUNK_SIZE) {
+            let mut values_clauses = Vec::new();
+            let mut param_idx = 1;
+
+            for _ in chunk {
+                values_clauses.push(format!(
+                    "(${},${},${},${},${},${},${},CURRENT_TIMESTAMP)",
+                    param_idx,
+                    param_idx + 1,
+                    param_idx + 2,
+                    param_idx + 3,
+                    param_idx + 4,
+                    param_idx + 5,
+                    param_idx + 6
+                ));
+                param_idx += 7;
+            }
+
+            let query = format!(
+                r#"INSERT INTO staking_positions_by_epoch
+                (staker_address, epoch, staked_amount, is_withdrawing, rewards_delegated_to, votes_delegated_to, rewards_generated, updated_at)
+                VALUES {}
+                ON CONFLICT (staker_address, epoch)
+                DO UPDATE SET
+                    staked_amount = EXCLUDED.staked_amount,
+                    is_withdrawing = EXCLUDED.is_withdrawing,
+                    rewards_delegated_to = EXCLUDED.rewards_delegated_to,
+                    votes_delegated_to = EXCLUDED.votes_delegated_to,
+                    rewards_generated = EXCLUDED.rewards_generated,
+                    updated_at = CURRENT_TIMESTAMP"#,
+                values_clauses.join(",")
+            );
+
+            let mut q = sqlx::query(&query);
+            for position in chunk {
+                q = q
+                    .bind(format!("{:#x}", position.staker_address))
+                    .bind(epoch as i64)
+                    .bind(pad_u256(position.staked_amount))
+                    .bind(if position.is_withdrawing { 1i32 } else { 0i32 })
+                    .bind(position.rewards_delegated_to.map(|a| format!("{:#x}", a)))
+                    .bind(position.votes_delegated_to.map(|a| format!("{:#x}", a)))
+                    .bind(pad_u256(position.rewards_generated));
+            }
+            q.execute(&mut *tx).await?;
+        }
+
+        tx.commit().await?;
+        Ok(())
+    }
+
+    async fn get_staking_positions_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<StakingPositionByEpoch>, DbError> {
+        let query = r#"
+            SELECT staker_address, epoch, staked_amount, is_withdrawing, rewards_delegated_to, votes_delegated_to, rewards_generated
+            FROM staking_positions_by_epoch
+            WHERE epoch = $1
+            ORDER BY staked_amount DESC
+            LIMIT $2 OFFSET $3
+        "#;
+
+        let rows = sqlx::query(query)
+            .bind(epoch as i64)
+            .bind(limit as i64)
+            .bind(offset as i64)
+            .fetch_all(&self.pool)
+            .await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            let rewards_delegated_to: Option<String> = row.get("rewards_delegated_to");
+            let votes_delegated_to: Option<String> = row.get("votes_delegated_to");
+
+            results.push(StakingPositionByEpoch {
+                staker_address: Address::from_str(&row.get::<String, _>("staker_address"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                staked_amount: unpad_u256(&row.get::<String, _>("staked_amount"))?,
+                is_withdrawing: row.get::<i32, _>("is_withdrawing") != 0,
+                rewards_delegated_to: rewards_delegated_to.and_then(|s| Address::from_str(&s).ok()),
+                votes_delegated_to: votes_delegated_to.and_then(|s| Address::from_str(&s).ok()),
+                rewards_generated: unpad_u256(&row.get::<String, _>("rewards_generated"))?,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn upsert_staking_positions_aggregate(
+        &self,
+        aggregates: Vec<StakingPositionAggregate>,
+    ) -> Result<(), DbError> {
+        if aggregates.is_empty() {
+            return Ok(());
+        }
+
+        let mut tx = self.pool.begin().await?;
+
+        // Process in chunks to avoid parameter limits
+        for chunk in aggregates.chunks(BATCH_INSERT_CHUNK_SIZE) {
+            let mut values_clauses = Vec::new();
+            let mut param_idx = 1;
+
+            for _ in chunk {
+                values_clauses.push(format!(
+                    "(${},${},${},${},${},${},${},${},CURRENT_TIMESTAMP)",
+                    param_idx,
+                    param_idx + 1,
+                    param_idx + 2,
+                    param_idx + 3,
+                    param_idx + 4,
+                    param_idx + 5,
+                    param_idx + 6,
+                    param_idx + 7
+                ));
+                param_idx += 8;
+            }
+
+            let query = format!(
+                r#"INSERT INTO staking_positions_aggregate
+                (staker_address, total_staked, is_withdrawing, rewards_delegated_to, votes_delegated_to, epochs_participated, total_rewards_generated, total_rewards_earned, updated_at)
+                VALUES {}
+                ON CONFLICT (staker_address)
+                DO UPDATE SET
+                    total_staked = EXCLUDED.total_staked,
+                    is_withdrawing = EXCLUDED.is_withdrawing,
+                    rewards_delegated_to = EXCLUDED.rewards_delegated_to,
+                    votes_delegated_to = EXCLUDED.votes_delegated_to,
+                    epochs_participated = EXCLUDED.epochs_participated,
+                    total_rewards_generated = EXCLUDED.total_rewards_generated,
+                    total_rewards_earned = EXCLUDED.total_rewards_earned,
+                    updated_at = CURRENT_TIMESTAMP"#,
+                values_clauses.join(",")
+            );
+
+            let mut q = sqlx::query(&query);
+            for aggregate in chunk {
+                q = q
+                    .bind(format!("{:#x}", aggregate.staker_address))
+                    .bind(pad_u256(aggregate.total_staked))
+                    .bind(if aggregate.is_withdrawing { 1i32 } else { 0i32 })
+                    .bind(aggregate.rewards_delegated_to.map(|a| format!("{:#x}", a)))
+                    .bind(aggregate.votes_delegated_to.map(|a| format!("{:#x}", a)))
+                    .bind(aggregate.epochs_participated as i64)
+                    .bind(pad_u256(aggregate.total_rewards_generated))
+                    .bind(pad_u256(aggregate.total_rewards_earned));
+            }
+            q.execute(&mut *tx).await?;
+        }
+
+        tx.commit().await?;
+        Ok(())
+    }
+
+    async fn get_staking_positions_aggregate(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<StakingPositionAggregate>, DbError> {
+        let query = r#"
+            SELECT staker_address, total_staked, is_withdrawing, rewards_delegated_to, votes_delegated_to, epochs_participated, total_rewards_generated, total_rewards_earned
+            FROM staking_positions_aggregate
+            ORDER BY total_staked DESC
+            LIMIT $1 OFFSET $2
+        "#;
+
+        let rows =
+            sqlx::query(query).bind(limit as i64).bind(offset as i64).fetch_all(&self.pool).await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            let rewards_delegated_to: Option<String> = row.get("rewards_delegated_to");
+            let votes_delegated_to: Option<String> = row.get("votes_delegated_to");
+
+            results.push(StakingPositionAggregate {
+                staker_address: Address::from_str(&row.get::<String, _>("staker_address"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                total_staked: unpad_u256(&row.get::<String, _>("total_staked"))?,
+                is_withdrawing: row.get::<i32, _>("is_withdrawing") != 0,
+                rewards_delegated_to: rewards_delegated_to.and_then(|s| Address::from_str(&s).ok()),
+                votes_delegated_to: votes_delegated_to.and_then(|s| Address::from_str(&s).ok()),
+                epochs_participated: row.get::<i64, _>("epochs_participated") as u64,
+                total_rewards_generated: unpad_u256(
+                    &row.get::<String, _>("total_rewards_generated"),
+                )?,
+                total_rewards_earned: unpad_u256(&row.get::<String, _>("total_rewards_earned"))?,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn get_staking_position_aggregate_by_address(
+        &self,
+        address: Address,
+    ) -> Result<Option<StakingPositionAggregate>, DbError> {
+        let query = r#"
+            SELECT staker_address, total_staked, is_withdrawing, rewards_delegated_to, votes_delegated_to, epochs_participated, total_rewards_generated, total_rewards_earned
+            FROM staking_positions_aggregate
+            WHERE staker_address = $1
+        "#;
+
+        let row =
+            sqlx::query(query).bind(format!("{:#x}", address)).fetch_optional(&self.pool).await?;
+
+        if let Some(row) = row {
+            let rewards_delegated_to: Option<String> = row.get("rewards_delegated_to");
+            let votes_delegated_to: Option<String> = row.get("votes_delegated_to");
+
+            Ok(Some(StakingPositionAggregate {
+                staker_address: Address::from_str(&row.get::<String, _>("staker_address"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                total_staked: unpad_u256(&row.get::<String, _>("total_staked"))?,
+                is_withdrawing: row.get::<i32, _>("is_withdrawing") != 0,
+                rewards_delegated_to: rewards_delegated_to.and_then(|s| Address::from_str(&s).ok()),
+                votes_delegated_to: votes_delegated_to.and_then(|s| Address::from_str(&s).ok()),
+                epochs_participated: row.get::<i64, _>("epochs_participated") as u64,
+                total_rewards_generated: unpad_u256(
+                    &row.get::<String, _>("total_rewards_generated"),
+                )?,
+                total_rewards_earned: unpad_u256(&row.get::<String, _>("total_rewards_earned"))?,
+            }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    async fn upsert_vote_delegation_powers_by_epoch(
+        &self,
+        epoch: u64,
+        powers: Vec<VoteDelegationPowerByEpoch>,
+    ) -> Result<(), DbError> {
+        if powers.is_empty() {
+            return Ok(());
+        }
+
+        let mut tx = self.pool.begin().await?;
+
+        // Process in chunks to avoid parameter limits
+        for chunk in powers.chunks(BATCH_INSERT_CHUNK_SIZE) {
+            let mut values_clauses = Vec::new();
+            let mut param_idx = 1;
+
+            for _ in chunk {
+                values_clauses.push(format!(
+                    "(${},${},${},${},${},CURRENT_TIMESTAMP)",
+                    param_idx,
+                    param_idx + 1,
+                    param_idx + 2,
+                    param_idx + 3,
+                    param_idx + 4
+                ));
+                param_idx += 5;
+            }
+
+            let query = format!(
+                r#"INSERT INTO vote_delegation_powers_by_epoch
+                (delegate_address, epoch, vote_power, delegator_count, delegators, updated_at)
+                VALUES {}
+                ON CONFLICT (delegate_address, epoch)
+                DO UPDATE SET
+                    vote_power = EXCLUDED.vote_power,
+                    delegator_count = EXCLUDED.delegator_count,
+                    delegators = EXCLUDED.delegators,
+                    updated_at = CURRENT_TIMESTAMP"#,
+                values_clauses.join(",")
+            );
+
+            let mut q = sqlx::query(&query);
+            for power in chunk {
+                let delegators_json = serde_json::to_string(
+                    &power.delegators.iter().map(|a| format!("{:#x}", a)).collect::<Vec<_>>(),
+                )
+                .unwrap_or_else(|_| "[]".to_string());
+
+                q = q
+                    .bind(format!("{:#x}", power.delegate_address))
+                    .bind(epoch as i64)
+                    .bind(pad_u256(power.vote_power))
+                    .bind(power.delegator_count as i32)
+                    .bind(delegators_json);
+            }
+            q.execute(&mut *tx).await?;
+        }
+
+        tx.commit().await?;
+        Ok(())
+    }
+
+    async fn get_vote_delegation_powers_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<VoteDelegationPowerByEpoch>, DbError> {
+        let query = r#"
+            SELECT delegate_address, epoch, vote_power, delegator_count, delegators
+            FROM vote_delegation_powers_by_epoch
+            WHERE epoch = $1
+            ORDER BY vote_power DESC
+            LIMIT $2 OFFSET $3
+        "#;
+
+        let rows = sqlx::query(query)
+            .bind(epoch as i64)
+            .bind(limit as i64)
+            .bind(offset as i64)
+            .fetch_all(&self.pool)
+            .await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            let delegators_json: String = row.get("delegators");
+            let delegator_addrs: Vec<String> = match serde_json::from_str(&delegators_json) {
+                Ok(addrs) => addrs,
+                Err(e) => {
+                    tracing::warn!("Failed to parse delegators JSON: {}, using empty vec", e);
+                    Vec::new()
+                }
+            };
+            let delegators: Vec<Address> =
+                delegator_addrs.iter().filter_map(|s| Address::from_str(s).ok()).collect();
+
+            results.push(VoteDelegationPowerByEpoch {
+                delegate_address: Address::from_str(&row.get::<String, _>("delegate_address"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                vote_power: unpad_u256(&row.get::<String, _>("vote_power"))?,
+                delegator_count: row.get::<i64, _>("delegator_count") as u64,
+                delegators,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn upsert_vote_delegation_powers_aggregate(
+        &self,
+        aggregates: Vec<VoteDelegationPowerAggregate>,
+    ) -> Result<(), DbError> {
+        if aggregates.is_empty() {
+            return Ok(());
+        }
+
+        let mut tx = self.pool.begin().await?;
+
+        // Process in chunks to avoid parameter limits
+        for chunk in aggregates.chunks(BATCH_INSERT_CHUNK_SIZE) {
+            let mut values_clauses = Vec::new();
+            let mut param_idx = 1;
+
+            for _ in chunk {
+                values_clauses.push(format!(
+                    "(${},${},${},${},${},CURRENT_TIMESTAMP)",
+                    param_idx,
+                    param_idx + 1,
+                    param_idx + 2,
+                    param_idx + 3,
+                    param_idx + 4
+                ));
+                param_idx += 5;
+            }
+
+            let query = format!(
+                r#"INSERT INTO vote_delegation_powers_aggregate
+                (delegate_address, total_vote_power, delegator_count, delegators, epochs_participated, updated_at)
+                VALUES {}
+                ON CONFLICT (delegate_address)
+                DO UPDATE SET
+                    total_vote_power = EXCLUDED.total_vote_power,
+                    delegator_count = EXCLUDED.delegator_count,
+                    delegators = EXCLUDED.delegators,
+                    epochs_participated = EXCLUDED.epochs_participated,
+                    updated_at = CURRENT_TIMESTAMP"#,
+                values_clauses.join(",")
+            );
+
+            let mut q = sqlx::query(&query);
+            for aggregate in chunk {
+                let delegators_json = serde_json::to_string(
+                    &aggregate.delegators.iter().map(|a| format!("{:#x}", a)).collect::<Vec<_>>(),
+                )
+                .unwrap_or_else(|_| "[]".to_string());
+
+                q = q
+                    .bind(format!("{:#x}", aggregate.delegate_address))
+                    .bind(pad_u256(aggregate.total_vote_power))
+                    .bind(aggregate.delegator_count as i32)
+                    .bind(delegators_json)
+                    .bind(aggregate.epochs_participated as i64);
+            }
+            q.execute(&mut *tx).await?;
+        }
+
+        tx.commit().await?;
+        Ok(())
+    }
+
+    async fn get_vote_delegation_powers_aggregate(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<VoteDelegationPowerAggregate>, DbError> {
+        let query = r#"
+            SELECT delegate_address, total_vote_power, delegator_count, delegators, epochs_participated
+            FROM vote_delegation_powers_aggregate
+            ORDER BY total_vote_power DESC
+            LIMIT $1 OFFSET $2
+        "#;
+
+        let rows =
+            sqlx::query(query).bind(limit as i64).bind(offset as i64).fetch_all(&self.pool).await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            let delegators_json: String = row.get("delegators");
+            let delegator_addrs: Vec<String> = match serde_json::from_str(&delegators_json) {
+                Ok(addrs) => addrs,
+                Err(e) => {
+                    tracing::warn!("Failed to parse delegators JSON: {}, using empty vec", e);
+                    Vec::new()
+                }
+            };
+            let delegators: Vec<Address> =
+                delegator_addrs.iter().filter_map(|s| Address::from_str(s).ok()).collect();
+
+            results.push(VoteDelegationPowerAggregate {
+                delegate_address: Address::from_str(&row.get::<String, _>("delegate_address"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                total_vote_power: unpad_u256(&row.get::<String, _>("total_vote_power"))?,
+                delegator_count: row.get::<i64, _>("delegator_count") as u64,
+                delegators,
+                epochs_participated: row.get::<i64, _>("epochs_participated") as u64,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn upsert_reward_delegation_powers_by_epoch(
+        &self,
+        epoch: u64,
+        powers: Vec<RewardDelegationPowerByEpoch>,
+    ) -> Result<(), DbError> {
+        if powers.is_empty() {
+            return Ok(());
+        }
+
+        let mut tx = self.pool.begin().await?;
+
+        // Process in chunks to avoid parameter limits
+        for chunk in powers.chunks(BATCH_INSERT_CHUNK_SIZE) {
+            let mut values_clauses = Vec::new();
+            let mut param_idx = 1;
+
+            for _ in chunk {
+                values_clauses.push(format!(
+                    "(${},${},${},${},${},CURRENT_TIMESTAMP)",
+                    param_idx,
+                    param_idx + 1,
+                    param_idx + 2,
+                    param_idx + 3,
+                    param_idx + 4
+                ));
+                param_idx += 5;
+            }
+
+            let query = format!(
+                r#"INSERT INTO reward_delegation_powers_by_epoch
+                (delegate_address, epoch, reward_power, delegator_count, delegators, updated_at)
+                VALUES {}
+                ON CONFLICT (delegate_address, epoch)
+                DO UPDATE SET
+                    reward_power = EXCLUDED.reward_power,
+                    delegator_count = EXCLUDED.delegator_count,
+                    delegators = EXCLUDED.delegators,
+                    updated_at = CURRENT_TIMESTAMP"#,
+                values_clauses.join(",")
+            );
+
+            let mut q = sqlx::query(&query);
+            for power in chunk {
+                let delegators_json = serde_json::to_string(
+                    &power.delegators.iter().map(|a| format!("{:#x}", a)).collect::<Vec<_>>(),
+                )
+                .unwrap_or_else(|_| "[]".to_string());
+
+                q = q
+                    .bind(format!("{:#x}", power.delegate_address))
+                    .bind(epoch as i64)
+                    .bind(pad_u256(power.reward_power))
+                    .bind(power.delegator_count as i32)
+                    .bind(delegators_json);
+            }
+            q.execute(&mut *tx).await?;
+        }
+
+        tx.commit().await?;
+        Ok(())
+    }
+
+    async fn get_reward_delegation_powers_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<RewardDelegationPowerByEpoch>, DbError> {
+        let query = r#"
+            SELECT delegate_address, epoch, reward_power, delegator_count, delegators
+            FROM reward_delegation_powers_by_epoch
+            WHERE epoch = $1
+            ORDER BY reward_power DESC
+            LIMIT $2 OFFSET $3
+        "#;
+
+        let rows = sqlx::query(query)
+            .bind(epoch as i64)
+            .bind(limit as i64)
+            .bind(offset as i64)
+            .fetch_all(&self.pool)
+            .await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            let delegators_json: String = row.get("delegators");
+            let delegator_addrs: Vec<String> = match serde_json::from_str(&delegators_json) {
+                Ok(addrs) => addrs,
+                Err(e) => {
+                    tracing::warn!("Failed to parse delegators JSON: {}, using empty vec", e);
+                    Vec::new()
+                }
+            };
+            let delegators: Vec<Address> =
+                delegator_addrs.iter().filter_map(|s| Address::from_str(s).ok()).collect();
+
+            results.push(RewardDelegationPowerByEpoch {
+                delegate_address: Address::from_str(&row.get::<String, _>("delegate_address"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                reward_power: unpad_u256(&row.get::<String, _>("reward_power"))?,
+                delegator_count: row.get::<i64, _>("delegator_count") as u64,
+                delegators,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn upsert_reward_delegation_powers_aggregate(
+        &self,
+        aggregates: Vec<RewardDelegationPowerAggregate>,
+    ) -> Result<(), DbError> {
+        if aggregates.is_empty() {
+            return Ok(());
+        }
+
+        let mut tx = self.pool.begin().await?;
+
+        // Process in chunks to avoid parameter limits
+        for chunk in aggregates.chunks(BATCH_INSERT_CHUNK_SIZE) {
+            let mut values_clauses = Vec::new();
+            let mut param_idx = 1;
+
+            for _ in chunk {
+                values_clauses.push(format!(
+                    "(${},${},${},${},${},CURRENT_TIMESTAMP)",
+                    param_idx,
+                    param_idx + 1,
+                    param_idx + 2,
+                    param_idx + 3,
+                    param_idx + 4
+                ));
+                param_idx += 5;
+            }
+
+            let query = format!(
+                r#"INSERT INTO reward_delegation_powers_aggregate
+                (delegate_address, total_reward_power, delegator_count, delegators, epochs_participated, updated_at)
+                VALUES {}
+                ON CONFLICT (delegate_address)
+                DO UPDATE SET
+                    total_reward_power = EXCLUDED.total_reward_power,
+                    delegator_count = EXCLUDED.delegator_count,
+                    delegators = EXCLUDED.delegators,
+                    epochs_participated = EXCLUDED.epochs_participated,
+                    updated_at = CURRENT_TIMESTAMP"#,
+                values_clauses.join(",")
+            );
+
+            let mut q = sqlx::query(&query);
+            for aggregate in chunk {
+                let delegators_json = serde_json::to_string(
+                    &aggregate.delegators.iter().map(|a| format!("{:#x}", a)).collect::<Vec<_>>(),
+                )
+                .unwrap_or_else(|_| "[]".to_string());
+
+                q = q
+                    .bind(format!("{:#x}", aggregate.delegate_address))
+                    .bind(pad_u256(aggregate.total_reward_power))
+                    .bind(aggregate.delegator_count as i32)
+                    .bind(delegators_json)
+                    .bind(aggregate.epochs_participated as i64);
+            }
+            q.execute(&mut *tx).await?;
+        }
+
+        tx.commit().await?;
+        Ok(())
+    }
+
+    async fn get_reward_delegation_powers_aggregate(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<RewardDelegationPowerAggregate>, DbError> {
+        let query = r#"
+            SELECT delegate_address, total_reward_power, delegator_count, delegators, epochs_participated
+            FROM reward_delegation_powers_aggregate
+            ORDER BY total_reward_power DESC
+            LIMIT $1 OFFSET $2
+        "#;
+
+        let rows =
+            sqlx::query(query).bind(limit as i64).bind(offset as i64).fetch_all(&self.pool).await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            let delegators_json: String = row.get("delegators");
+            let delegator_addrs: Vec<String> = match serde_json::from_str(&delegators_json) {
+                Ok(addrs) => addrs,
+                Err(e) => {
+                    tracing::warn!("Failed to parse delegators JSON: {}, using empty vec", e);
+                    Vec::new()
+                }
+            };
+            let delegators: Vec<Address> =
+                delegator_addrs.iter().filter_map(|s| Address::from_str(s).ok()).collect();
+
+            results.push(RewardDelegationPowerAggregate {
+                delegate_address: Address::from_str(&row.get::<String, _>("delegate_address"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                total_reward_power: unpad_u256(&row.get::<String, _>("total_reward_power"))?,
+                delegator_count: row.get::<i64, _>("delegator_count") as u64,
+                delegators,
+                epochs_participated: row.get::<i64, _>("epochs_participated") as u64,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn get_staking_history_by_address(
+        &self,
+        address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<StakingPositionByEpoch>, DbError> {
+        let mut query = String::from(
+            "SELECT staker_address, epoch, staked_amount, is_withdrawing,
+             rewards_delegated_to, votes_delegated_to, rewards_generated
+             FROM staking_positions_by_epoch
+             WHERE staker_address = $1",
+        );
+
+        let mut bind_count = 1;
+        if start_epoch.is_some() {
+            bind_count += 1;
+            query.push_str(&format!(" AND epoch >= ${}", bind_count));
+        }
+        if end_epoch.is_some() {
+            bind_count += 1;
+            query.push_str(&format!(" AND epoch <= ${}", bind_count));
+        }
+        query.push_str(" ORDER BY epoch DESC");
+
+        let mut q = sqlx::query(&query).bind(format!("{:#x}", address));
+        if let Some(start) = start_epoch {
+            q = q.bind(start as i64);
+        }
+        if let Some(end) = end_epoch {
+            q = q.bind(end as i64);
+        }
+
+        let rows = q.fetch_all(&self.pool).await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            let rewards_delegated_to: Option<String> = row.get("rewards_delegated_to");
+            let votes_delegated_to: Option<String> = row.get("votes_delegated_to");
+
+            results.push(StakingPositionByEpoch {
+                staker_address: address,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                staked_amount: unpad_u256(&row.get::<String, _>("staked_amount"))?,
+                is_withdrawing: row.get::<i32, _>("is_withdrawing") != 0,
+                rewards_delegated_to: rewards_delegated_to.and_then(|s| Address::from_str(&s).ok()),
+                votes_delegated_to: votes_delegated_to.and_then(|s| Address::from_str(&s).ok()),
+                rewards_generated: unpad_u256(&row.get::<String, _>("rewards_generated"))?,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn get_povw_rewards_history_by_address(
+        &self,
+        address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<PovwRewardByEpoch>, DbError> {
+        let mut query = String::from(
+            "SELECT work_log_id, epoch, work_submitted, percentage, uncapped_rewards, reward_cap, actual_rewards, is_capped, staked_amount
+             FROM povw_rewards_by_epoch
+             WHERE work_log_id = $1"
+        );
+
+        let mut bind_count = 1;
+        if start_epoch.is_some() {
+            bind_count += 1;
+            query.push_str(&format!(" AND epoch >= ${}", bind_count));
+        }
+        if end_epoch.is_some() {
+            bind_count += 1;
+            query.push_str(&format!(" AND epoch <= ${}", bind_count));
+        }
+        query.push_str(" ORDER BY epoch DESC");
+
+        let mut q = sqlx::query(&query).bind(format!("{:#x}", address));
+        if let Some(start) = start_epoch {
+            q = q.bind(start as i64);
+        }
+        if let Some(end) = end_epoch {
+            q = q.bind(end as i64);
+        }
+
+        let rows = q.fetch_all(&self.pool).await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            results.push(PovwRewardByEpoch {
+                work_log_id: address,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                work_submitted: unpad_u256(&row.get::<String, _>("work_submitted"))?,
+                percentage: row.get("percentage"),
+                uncapped_rewards: unpad_u256(&row.get::<String, _>("uncapped_rewards"))?,
+                reward_cap: unpad_u256(&row.get::<String, _>("reward_cap"))?,
+                actual_rewards: unpad_u256(&row.get::<String, _>("actual_rewards"))?,
+                is_capped: row.get::<i32, _>("is_capped") != 0,
+                staked_amount: unpad_u256(&row.get::<String, _>("staked_amount"))?,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn get_vote_delegations_received_history(
+        &self,
+        delegate_address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<VoteDelegationPowerByEpoch>, DbError> {
+        let mut query = String::from(
+            "SELECT delegate_address, epoch, vote_power, delegator_count, delegators
+             FROM vote_delegation_powers_by_epoch
+             WHERE delegate_address = $1",
+        );
+
+        let mut bind_count = 1;
+        if start_epoch.is_some() {
+            bind_count += 1;
+            query.push_str(&format!(" AND epoch >= ${}", bind_count));
+        }
+        if end_epoch.is_some() {
+            bind_count += 1;
+            query.push_str(&format!(" AND epoch <= ${}", bind_count));
+        }
+        query.push_str(" ORDER BY epoch DESC");
+
+        let mut q = sqlx::query(&query).bind(format!("{:#x}", delegate_address));
+        if let Some(start) = start_epoch {
+            q = q.bind(start as i64);
+        }
+        if let Some(end) = end_epoch {
+            q = q.bind(end as i64);
+        }
+
+        let rows = q.fetch_all(&self.pool).await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            let delegators_json: String = row.get("delegators");
+            let delegator_addrs: Vec<String> = match serde_json::from_str(&delegators_json) {
+                Ok(addrs) => addrs,
+                Err(e) => {
+                    tracing::warn!("Failed to parse delegators JSON: {}, using empty vec", e);
+                    Vec::new()
+                }
+            };
+            let delegators: Vec<Address> =
+                delegator_addrs.iter().filter_map(|s| Address::from_str(s).ok()).collect();
+
+            results.push(VoteDelegationPowerByEpoch {
+                delegate_address,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                vote_power: unpad_u256(&row.get::<String, _>("vote_power"))?,
+                delegator_count: row.get::<i64, _>("delegator_count") as u64,
+                delegators,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn get_reward_delegations_received_history(
+        &self,
+        delegate_address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<RewardDelegationPowerByEpoch>, DbError> {
+        let mut query = String::from(
+            "SELECT delegate_address, epoch, reward_power, delegator_count, delegators
+             FROM reward_delegation_powers_by_epoch
+             WHERE delegate_address = $1",
+        );
+
+        let mut bind_count = 1;
+        if start_epoch.is_some() {
+            bind_count += 1;
+            query.push_str(&format!(" AND epoch >= ${}", bind_count));
+        }
+        if end_epoch.is_some() {
+            bind_count += 1;
+            query.push_str(&format!(" AND epoch <= ${}", bind_count));
+        }
+        query.push_str(" ORDER BY epoch DESC");
+
+        let mut q = sqlx::query(&query).bind(format!("{:#x}", delegate_address));
+        if let Some(start) = start_epoch {
+            q = q.bind(start as i64);
+        }
+        if let Some(end) = end_epoch {
+            q = q.bind(end as i64);
+        }
+
+        let rows = q.fetch_all(&self.pool).await?;
+
+        let mut results = Vec::new();
+        for row in rows {
+            let delegators_json: String = row.get("delegators");
+            let delegator_addrs: Vec<String> = match serde_json::from_str(&delegators_json) {
+                Ok(addrs) => addrs,
+                Err(e) => {
+                    tracing::warn!("Failed to parse delegators JSON: {}, using empty vec", e);
+                    Vec::new()
+                }
+            };
+            let delegators: Vec<Address> =
+                delegator_addrs.iter().filter_map(|s| Address::from_str(s).ok()).collect();
+
+            results.push(RewardDelegationPowerByEpoch {
+                delegate_address,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                reward_power: unpad_u256(&row.get::<String, _>("reward_power"))?,
+                delegator_count: row.get::<i64, _>("delegator_count") as u64,
+                delegators,
+            });
+        }
+
+        Ok(results)
+    }
+
+    async fn upsert_povw_summary_stats(&self, stats: PoVWSummaryStats) -> Result<(), DbError> {
+        sqlx::query(
+            "INSERT INTO povw_summary_stats
+             (id, total_epochs_with_work, total_unique_work_log_ids,
+              total_work_all_time, total_emissions_all_time,
+              total_capped_rewards_all_time, total_uncapped_rewards_all_time, updated_at)
+             VALUES (1, $1, $2, $3, $4, $5, $6, $7)
+             ON CONFLICT (id) DO UPDATE SET
+                total_epochs_with_work = $1,
+                total_unique_work_log_ids = $2,
+                total_work_all_time = $3,
+                total_emissions_all_time = $4,
+                total_capped_rewards_all_time = $5,
+                total_uncapped_rewards_all_time = $6,
+                updated_at = $7",
+        )
+        .bind(stats.total_epochs_with_work as i64)
+        .bind(stats.total_unique_work_log_ids as i64)
+        .bind(pad_u256(stats.total_work_all_time))
+        .bind(pad_u256(stats.total_emissions_all_time))
+        .bind(pad_u256(stats.total_capped_rewards_all_time))
+        .bind(pad_u256(stats.total_uncapped_rewards_all_time))
+        .bind(stats.updated_at.unwrap_or_else(|| Utc::now().to_rfc3339()))
+        .execute(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+        Ok(())
+    }
+
+    async fn get_povw_summary_stats(&self) -> Result<Option<PoVWSummaryStats>, DbError> {
+        let row = sqlx::query(
+            "SELECT total_epochs_with_work, total_unique_work_log_ids,
+                    total_work_all_time, total_emissions_all_time,
+                    total_capped_rewards_all_time, total_uncapped_rewards_all_time, updated_at
+             FROM povw_summary_stats
+             WHERE id = 1",
+        )
+        .fetch_optional(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+
+        if let Some(row) = row {
+            Ok(Some(PoVWSummaryStats {
+                total_epochs_with_work: row.get::<i64, _>("total_epochs_with_work") as u64,
+                total_unique_work_log_ids: row.get::<i64, _>("total_unique_work_log_ids") as u64,
+                total_work_all_time: unpad_u256(&row.get::<String, _>("total_work_all_time"))?,
+                total_emissions_all_time: unpad_u256(
+                    &row.get::<String, _>("total_emissions_all_time"),
+                )?,
+                total_capped_rewards_all_time: unpad_u256(
+                    &row.get::<String, _>("total_capped_rewards_all_time"),
+                )?,
+                total_uncapped_rewards_all_time: unpad_u256(
+                    &row.get::<String, _>("total_uncapped_rewards_all_time"),
+                )?,
+                updated_at: row.get::<Option<String>, _>("updated_at"),
+            }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    async fn upsert_epoch_povw_summary(
+        &self,
+        epoch: u64,
+        summary: EpochPoVWSummary,
+    ) -> Result<(), DbError> {
+        sqlx::query(
+            "INSERT INTO epoch_povw_summary
+             (epoch, total_work, total_emissions, total_capped_rewards,
+              total_uncapped_rewards, epoch_start_time, epoch_end_time, num_participants, updated_at)
+             VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
+             ON CONFLICT (epoch) DO UPDATE SET
+                total_work = $2,
+                total_emissions = $3,
+                total_capped_rewards = $4,
+                total_uncapped_rewards = $5,
+                epoch_start_time = $6,
+                epoch_end_time = $7,
+                num_participants = $8,
+                updated_at = $9",
+        )
+        .bind(epoch as i64)
+        .bind(pad_u256(summary.total_work))
+        .bind(pad_u256(summary.total_emissions))
+        .bind(pad_u256(summary.total_capped_rewards))
+        .bind(pad_u256(summary.total_uncapped_rewards))
+        .bind(summary.epoch_start_time as i64)
+        .bind(summary.epoch_end_time as i64)
+        .bind(summary.num_participants as i64)
+        .bind(summary.updated_at.unwrap_or_else(|| Utc::now().to_rfc3339()))
+        .execute(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+        Ok(())
+    }
+
+    async fn get_epoch_povw_summary(
+        &self,
+        epoch: u64,
+    ) -> Result<Option<EpochPoVWSummary>, DbError> {
+        let row = sqlx::query(
+            "SELECT epoch, total_work, total_emissions, total_capped_rewards,
+                    total_uncapped_rewards, epoch_start_time, epoch_end_time, num_participants, updated_at
+             FROM epoch_povw_summary
+             WHERE epoch = $1",
+        )
+        .bind(epoch as i64)
+        .fetch_optional(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+
+        if let Some(row) = row {
+            Ok(Some(EpochPoVWSummary {
+                epoch: row.get::<i64, _>("epoch") as u64,
+                total_work: unpad_u256(&row.get::<String, _>("total_work"))?,
+                total_emissions: unpad_u256(&row.get::<String, _>("total_emissions"))?,
+                total_capped_rewards: unpad_u256(&row.get::<String, _>("total_capped_rewards"))?,
+                total_uncapped_rewards: unpad_u256(
+                    &row.get::<String, _>("total_uncapped_rewards"),
+                )?,
+                epoch_start_time: row.get::<i64, _>("epoch_start_time") as u64,
+                epoch_end_time: row.get::<i64, _>("epoch_end_time") as u64,
+                num_participants: row.get::<i64, _>("num_participants") as u64,
+                updated_at: row.get::<Option<String>, _>("updated_at"),
+            }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    async fn upsert_staking_summary_stats(
+        &self,
+        stats: StakingSummaryStats,
+    ) -> Result<(), DbError> {
+        sqlx::query(
+            "INSERT INTO staking_summary_stats
+             (id, current_total_staked, total_unique_stakers,
+              current_active_stakers, current_withdrawing,
+              total_staking_emissions_all_time, updated_at)
+             VALUES (1, $1, $2, $3, $4, $5, $6)
+             ON CONFLICT (id) DO UPDATE SET
+                current_total_staked = $1,
+                total_unique_stakers = $2,
+                current_active_stakers = $3,
+                current_withdrawing = $4,
+                total_staking_emissions_all_time = $5,
+                updated_at = $6",
+        )
+        .bind(pad_u256(stats.current_total_staked))
+        .bind(stats.total_unique_stakers as i64)
+        .bind(stats.current_active_stakers as i64)
+        .bind(stats.current_withdrawing as i64)
+        .bind(stats.total_staking_emissions_all_time.map(pad_u256))
+        .bind(stats.updated_at.unwrap_or_else(|| Utc::now().to_rfc3339()))
+        .execute(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+        Ok(())
+    }
+
+    async fn get_staking_summary_stats(&self) -> Result<Option<StakingSummaryStats>, DbError> {
+        let row = sqlx::query(
+            "SELECT current_total_staked, total_unique_stakers,
+                    current_active_stakers, current_withdrawing,
+                    total_staking_emissions_all_time, updated_at
+             FROM staking_summary_stats
+             WHERE id = 1",
+        )
+        .fetch_optional(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+
+        if let Some(row) = row {
+            Ok(Some(StakingSummaryStats {
+                current_total_staked: unpad_u256(&row.get::<String, _>("current_total_staked"))?,
+                total_unique_stakers: row.get::<i64, _>("total_unique_stakers") as u64,
+                current_active_stakers: row.get::<i64, _>("current_active_stakers") as u64,
+                current_withdrawing: row.get::<i64, _>("current_withdrawing") as u64,
+                total_staking_emissions_all_time: row
+                    .get::<Option<String>, _>("total_staking_emissions_all_time")
+                    .and_then(|s| unpad_u256(&s).ok()),
+                updated_at: row.get::<Option<String>, _>("updated_at"),
+            }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    async fn upsert_epoch_staking_summary(
+        &self,
+        epoch: u64,
+        summary: EpochStakingSummary,
+    ) -> Result<(), DbError> {
+        sqlx::query(
+            "INSERT INTO epoch_staking_summary
+             (epoch, total_staked, num_stakers, num_withdrawing,
+              total_staking_emissions, total_staking_power,
+              num_reward_recipients, epoch_start_time, epoch_end_time, updated_at)
+             VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10)
+             ON CONFLICT (epoch) DO UPDATE SET
+                total_staked = $2,
+                num_stakers = $3,
+                num_withdrawing = $4,
+                total_staking_emissions = $5,
+                total_staking_power = $6,
+                num_reward_recipients = $7,
+                epoch_start_time = $8,
+                epoch_end_time = $9,
+                updated_at = $10",
+        )
+        .bind(epoch as i64)
+        .bind(pad_u256(summary.total_staked))
+        .bind(summary.num_stakers as i64)
+        .bind(summary.num_withdrawing as i64)
+        .bind(pad_u256(summary.total_staking_emissions))
+        .bind(pad_u256(summary.total_staking_power))
+        .bind(summary.num_reward_recipients as i64)
+        .bind(summary.epoch_start_time as i64)
+        .bind(summary.epoch_end_time as i64)
+        .bind(summary.updated_at.unwrap_or_else(|| Utc::now().to_rfc3339()))
+        .execute(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+        Ok(())
+    }
+
+    async fn get_epoch_staking_summary(
+        &self,
+        epoch: u64,
+    ) -> Result<Option<EpochStakingSummary>, DbError> {
+        let row = sqlx::query(
+            "SELECT epoch, total_staked, num_stakers, num_withdrawing,
+                    total_staking_emissions, total_staking_power,
+                    num_reward_recipients, epoch_start_time, epoch_end_time, updated_at
+             FROM epoch_staking_summary
+             WHERE epoch = $1",
+        )
+        .bind(epoch as i64)
+        .fetch_optional(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+
+        if let Some(row) = row {
+            Ok(Some(EpochStakingSummary {
+                epoch: row.get::<i64, _>("epoch") as u64,
+                total_staked: unpad_u256(&row.get::<String, _>("total_staked"))?,
+                num_stakers: row.get::<i64, _>("num_stakers") as u64,
+                num_withdrawing: row.get::<i64, _>("num_withdrawing") as u64,
+                total_staking_emissions: unpad_u256(
+                    &row.get::<String, _>("total_staking_emissions"),
+                )?,
+                total_staking_power: unpad_u256(&row.get::<String, _>("total_staking_power"))?,
+                num_reward_recipients: row.get::<i64, _>("num_reward_recipients") as u64,
+                epoch_start_time: row.get::<i64, _>("epoch_start_time") as u64,
+                epoch_end_time: row.get::<i64, _>("epoch_end_time") as u64,
+                updated_at: row.get::<Option<String>, _>("updated_at"),
+            }))
+        } else {
+            Ok(None)
+        }
+    }
+
+    async fn upsert_staking_rewards_by_epoch(
+        &self,
+        epoch: u64,
+        rewards: Vec<StakingRewardByEpoch>,
+    ) -> Result<(), DbError> {
+        if rewards.is_empty() {
+            return Ok(());
+        }
+
+        // Process in chunks to avoid parameter limits
+        for chunk in rewards.chunks(BATCH_INSERT_CHUNK_SIZE) {
+            let mut values_clauses = Vec::new();
+            let mut param_idx = 1;
+
+            for _ in chunk {
+                values_clauses.push(format!(
+                    "(${},${},${},${},${})",
+                    param_idx,
+                    param_idx + 1,
+                    param_idx + 2,
+                    param_idx + 3,
+                    param_idx + 4
+                ));
+                param_idx += 5;
+            }
+
+            let query = format!(
+                r#"INSERT INTO staking_rewards_by_epoch
+                (staker_address, epoch, staking_power, percentage, rewards_earned)
+                VALUES {}
+                ON CONFLICT (staker_address, epoch) DO UPDATE SET
+                    staking_power = EXCLUDED.staking_power,
+                    percentage = EXCLUDED.percentage,
+                    rewards_earned = EXCLUDED.rewards_earned,
+                    updated_at = CURRENT_TIMESTAMP"#,
+                values_clauses.join(",")
+            );
+
+            let mut q = sqlx::query(&query);
+            for reward in chunk {
+                q = q
+                    .bind(format!("{:#x}", reward.staker_address))
+                    .bind(epoch as i64)
+                    .bind(pad_u256(reward.staking_power))
+                    .bind(reward.percentage)
+                    .bind(pad_u256(reward.rewards_earned));
+            }
+            q.execute(&self.pool).await.map_err(DbError::from)?;
+        }
+        Ok(())
+    }
+
+    async fn get_staking_rewards_by_epoch(
+        &self,
+        epoch: u64,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<StakingRewardByEpoch>, DbError> {
+        let rows = sqlx::query(
+            "SELECT staker_address, epoch, staking_power, percentage, rewards_earned
+             FROM staking_rewards_by_epoch
+             WHERE epoch = $1
+             ORDER BY rewards_earned DESC
+             LIMIT $2 OFFSET $3",
+        )
+        .bind(epoch as i64)
+        .bind(limit as i64)
+        .bind(offset as i64)
+        .fetch_all(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+
+        let mut rewards = Vec::new();
+        for row in rows {
+            rewards.push(StakingRewardByEpoch {
+                staker_address: Address::from_str(&row.get::<String, _>("staker_address"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                staking_power: unpad_u256(&row.get::<String, _>("staking_power"))?,
+                percentage: row.get::<f64, _>("percentage"),
+                rewards_earned: unpad_u256(&row.get::<String, _>("rewards_earned"))?,
+            });
+        }
+        Ok(rewards)
+    }
+
+    async fn get_staking_rewards_by_address(
+        &self,
+        address: Address,
+        start_epoch: Option<u64>,
+        end_epoch: Option<u64>,
+    ) -> Result<Vec<StakingRewardByEpoch>, DbError> {
+        let mut query = String::from(
+            "SELECT staker_address, epoch, staking_power, percentage, rewards_earned
+             FROM staking_rewards_by_epoch
+             WHERE staker_address = $1",
+        );
+
+        if let Some(start) = start_epoch {
+            query.push_str(&format!(" AND epoch >= {}", start));
+        }
+        if let Some(end) = end_epoch {
+            query.push_str(&format!(" AND epoch <= {}", end));
+        }
+        query.push_str(" ORDER BY epoch DESC");
+
+        let rows = sqlx::query(&query)
+            .bind(format!("{:#x}", address))
+            .fetch_all(&self.pool)
+            .await
+            .map_err(DbError::from)?;
+
+        let mut rewards = Vec::new();
+        for row in rows {
+            rewards.push(StakingRewardByEpoch {
+                staker_address: Address::from_str(&row.get::<String, _>("staker_address"))
+                    .map_err(|e| DbError::BadTransaction(e.to_string()))?,
+                epoch: row.get::<i64, _>("epoch") as u64,
+                staking_power: unpad_u256(&row.get::<String, _>("staking_power"))?,
+                percentage: row.get::<f64, _>("percentage"),
+                rewards_earned: unpad_u256(&row.get::<String, _>("rewards_earned"))?,
+            });
+        }
+        Ok(rewards)
+    }
+
+    async fn get_all_epoch_povw_summaries(
+        &self,
+        offset: u64,
+        limit: u64,
+    ) -> Result<Vec<EpochPoVWSummary>, DbError> {
+        let rows = sqlx::query(
+            "SELECT epoch, total_work, total_emissions, total_capped_rewards,
+                    total_uncapped_rewards, epoch_start_time, epoch_end_time, num_participants, updated_at
+             FROM epoch_povw_summary
+             ORDER BY epoch DESC
+             LIMIT $1 OFFSET $2",
+        )
+        .bind(limit as i64)
+        .bind(offset as i64)
+        .fetch_all(&self.pool)
+        .await
+        .map_err(DbError::from)?;
+
+        let mut summaries = Vec::new();
+        for row in rows {
+            summaries.push(EpochPoVWSummary {
+                epoch: row.get::<i64, _>("epoch") as u64,
+                total_work: unpad_u256(&row.get::<String, _>("total_work"))?,
+                total_emissions: unpad_u256(&row.get::<String, _>("total_emissions"))?,
+                total_capped_rewards: unpad_u256(&row.get::<String, _>("total_capped_rewards"))?,
+                total_uncapped_rewards: unpad_u256(
+                    &row.get::<String, _>("total_uncapped_rewards"),
+                )?,
+                epoch_start_time: row.get::<i64, _>("epoch_start_time") as u64,
+                epoch_end_time: row.get::<i64, _>("epoch_end_time") as u64,
+                num_participants: row.get::<i64, _>("num_participants") as u64,
+                updated_at: row.get::<Option<String>, _>("updated_at"),
+            });
+        }
+        Ok(summaries)
+    }
+ + async fn get_all_epoch_povw_summaries( + &self, + offset: u64, + limit: u64, + ) -> Result<Vec<EpochPoVWSummary>, DbError> { + let rows = sqlx::query( + "SELECT epoch, total_work, total_emissions, total_capped_rewards, + total_uncapped_rewards, epoch_start_time, epoch_end_time, num_participants, updated_at + FROM epoch_povw_summary + ORDER BY epoch DESC + LIMIT $1 OFFSET $2", + ) + .bind(limit as i64) + .bind(offset as i64) + .fetch_all(&self.pool) + .await + .map_err(DbError::from)?; + + let mut summaries = Vec::new(); + for row in rows { + summaries.push(EpochPoVWSummary { + epoch: row.get::<i64, _>("epoch") as u64, + total_work: unpad_u256(&row.get::<String, _>("total_work"))?, + total_emissions: unpad_u256(&row.get::<String, _>("total_emissions"))?, + total_capped_rewards: unpad_u256(&row.get::<String, _>("total_capped_rewards"))?, + total_uncapped_rewards: unpad_u256( + &row.get::<String, _>("total_uncapped_rewards"), + )?, + epoch_start_time: row.get::<i64, _>("epoch_start_time") as u64, + epoch_end_time: row.get::<i64, _>("epoch_end_time") as u64, + num_participants: row.get::<i64, _>("num_participants") as u64, + updated_at: row.get::<Option<String>, _>("updated_at"), + }); + } + Ok(summaries) + } + + async fn get_all_epoch_staking_summaries( + &self, + offset: u64, + limit: u64, + ) -> Result<Vec<EpochStakingSummary>, DbError> { + let rows = sqlx::query( + "SELECT epoch, total_staked, num_stakers, num_withdrawing, + total_staking_emissions, total_staking_power, num_reward_recipients, + epoch_start_time, epoch_end_time, updated_at + FROM epoch_staking_summary + ORDER BY epoch DESC + LIMIT $1 OFFSET $2", + ) + .bind(limit as i64) + .bind(offset as i64) + .fetch_all(&self.pool) + .await + .map_err(DbError::from)?; + + let mut summaries = Vec::new(); + for row in rows { + summaries.push(EpochStakingSummary { + epoch: row.get::<i64, _>("epoch") as u64, + total_staked: unpad_u256(&row.get::<String, _>("total_staked"))?, + num_stakers: row.get::<i64, _>("num_stakers") as u64, + num_withdrawing: row.get::<i64, _>("num_withdrawing") as u64, + total_staking_emissions: unpad_u256( + &row.get::<String, _>("total_staking_emissions"), + )?, + total_staking_power: unpad_u256(&row.get::<String, _>("total_staking_power"))?, + num_reward_recipients: row.get::<i64, _>("num_reward_recipients") as u64, + epoch_start_time: row.get::<i64, _>("epoch_start_time") as u64, + epoch_end_time: row.get::<i64, _>("epoch_end_time") as u64, + updated_at: row.get::<Option<String>, _>("updated_at"), + }); + } + Ok(summaries) + } +} diff --git a/crates/indexer/src/lib.rs b/crates/indexer/src/lib.rs index 05fbded0c..8ea5631ae 100644 --- a/crates/indexer/src/lib.rs +++ b/crates/indexer/src/lib.rs @@ -12,744 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -use std::{cmp::min, collections::HashMap, sync::Arc}; - -use ::boundless_market::contracts::{ - boundless_market::{BoundlessMarketService, MarketError}, - EIP712DomainSaltless, -}; -use alloy::{ - eips::BlockNumberOrTag, - network::{Ethereum, TransactionResponse}, - primitives::{Address, B256}, - providers::{ - fillers::{ChainIdFiller, FillProvider, JoinFill}, - Identity, Provider, ProviderBuilder, RootProvider, - }, - rpc::types::Log, - signers::local::PrivateKeySigner, - transports::{RpcError, TransportErrorKind}, -}; -use anyhow::{anyhow, Context}; -use db::{AnyDb, DbError, DbObj, TxMetadata}; -use thiserror::Error; -use tokio::time::Duration; -use url::Url; - -mod db; +pub mod db; +pub mod market; +pub mod rewards; pub mod test_utils; -const MAX_BATCH_SIZE: u64 = 500; - -type ProviderWallet = FillProvider<JoinFill<Identity, ChainIdFiller>, RootProvider>; - -#[derive(Error, Debug)] -pub enum ServiceError { - #[error("Database error: {0}")] - DatabaseError(#[from] DbError), - - #[error("Boundless market error: {0}")] - BoundlessMarketError(#[from] MarketError), - - #[error("RPC error: {0}")] - RpcError(#[from] RpcError<TransportErrorKind>), - - #[error("Event query error: {0}")] - EventQueryError(#[from] alloy::contract::Error), - - #[error("Error: {0}")] - Error(#[from] anyhow::Error), - - #[error("Maximum retries reached")] - MaxRetries, - - #[error("Request not expired")] - RequestNotExpired, -}
- -#[derive(Clone)] -pub struct IndexerService<P> { - pub boundless_market: BoundlessMarketService<P>
, - pub db: DbObj, - pub domain: EIP712DomainSaltless, - pub config: IndexerServiceConfig, - // Mapping from transaction hash to TxMetadata - pub cache: HashMap<B256, TxMetadata>, -} - -#[derive(Clone)] -pub struct IndexerServiceConfig { - pub interval: Duration, - pub retries: u32, -} - -impl IndexerService<ProviderWallet> { - pub async fn new( - rpc_url: Url, - private_key: &PrivateKeySigner, - boundless_market_address: Address, - db_conn: &str, - config: IndexerServiceConfig, - ) -> Result<Self, ServiceError> { - let caller = private_key.address(); - let provider = ProviderBuilder::new() - .disable_recommended_fillers() - .filler(ChainIdFiller::default()) - .connect_http(rpc_url); - let boundless_market = - BoundlessMarketService::new(boundless_market_address, provider.clone(), caller); - let db: DbObj = Arc::new(AnyDb::new(db_conn).await?); - let domain = boundless_market.eip712_domain().await?; - let cache = HashMap::new(); - - Ok(Self { boundless_market, db, domain, config, cache }) - } -}
- -impl<P> IndexerService<P>
-where - P: Provider + 'static + Clone, -{ - pub async fn run(&mut self, starting_block: Option) -> Result<(), ServiceError> { - let mut interval = tokio::time::interval(self.config.interval); - - let mut from_block: u64 = self.starting_block(starting_block).await?; - tracing::info!("Starting indexer at block {}", from_block); - - let mut attempt = 0; - loop { - interval.tick().await; - - match self.current_block().await { - Ok(to_block) => { - if to_block < from_block { - continue; - } - - // cap to at most 500 blocks per batch - let batch_end = min(to_block, from_block.saturating_add(MAX_BATCH_SIZE)); - - tracing::info!("Processing blocks from {} to {}", from_block, batch_end); - - match self.process_blocks(from_block, batch_end).await { - Ok(_) => { - attempt = 0; - from_block = batch_end + 1; - } - Err(e) => match e { - // Irrecoverable errors - ServiceError::DatabaseError(_) - | ServiceError::MaxRetries - | ServiceError::RequestNotExpired - | ServiceError::Error(_) => { - tracing::error!( - "Failed to process blocks from {} to {}: {:?}", - from_block, - batch_end, - e - ); - return Err(e); - } - // Recoverable errors - ServiceError::BoundlessMarketError(_) - | ServiceError::EventQueryError(_) - | ServiceError::RpcError(_) => { - attempt += 1; - // exponential backoff with a maximum delay of 120 seconds - let delay = - std::time::Duration::from_secs(2u64.pow(attempt - 1).min(120)); - tracing::warn!( - "Failed to process blocks from {} to {}: {:?}, attempt number {}, retrying in {}s", - from_block, - batch_end, - e, - attempt, - delay.as_secs() - ); - tokio::time::sleep(delay).await; - } - }, - } - } - Err(e) => { - attempt += 1; - tracing::warn!( - "Failed to fetch current block: {:?}, attempt number {}", - e, - attempt - ); - } - } - if attempt > self.config.retries { - tracing::error!("Aborting after {} consecutive attempts", attempt); - return Err(ServiceError::MaxRetries); - } - } - } - - async fn process_blocks(&mut self, from: u64, to: u64) -> Result<(), ServiceError> { - self.process_request_submitted_events(from, to).await?; - self.process_locked_events(from, to).await?; - self.process_proof_delivered_events(from, to).await?; - self.process_fulfilled_events(from, to).await?; - self.process_callback_failed_events(from, to).await?; - self.process_slashed_events(from, to).await?; - self.process_deposit_events(from, to).await?; - self.process_withdrawal_events(from, to).await?; - self.process_collateral_deposit_events(from, to).await?; - self.process_collateral_withdrawal_events(from, to).await?; - self.clear_cache(); - - self.update_last_processed_block(to).await?; - - Ok(()) - } - - async fn get_last_processed_block(&self) -> Result, ServiceError> { - Ok(self.db.get_last_block().await?) - } - - async fn update_last_processed_block(&self, block_number: u64) -> Result<(), ServiceError> { - Ok(self.db.set_last_block(block_number).await?) 
- } - - async fn process_request_submitted_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .RequestSubmitted_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} request submitted events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - - tracing::debug!( - "Processing request submitted event for request: 0x{:x} [block: {}, timestamp: {}]", - event.requestId, - metadata.block_number, - metadata.block_timestamp - ); - - let request = event.request.clone(); - - let request_digest = request - .signing_hash(self.domain.verifying_contract, self.domain.chain_id) - .context(anyhow!( - "Failed to compute request digest for request: 0x{:x}", - event.requestId - ))?; - - self.db.add_proof_request(request_digest, request, &metadata).await?; - self.db.add_request_submitted_event(request_digest, event.requestId, &metadata).await?; - } - - Ok(()) - } - - async fn process_locked_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .RequestLocked_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} locked events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - tracing::debug!( - "Processing request locked event for request: 0x{:x} [block: {}, timestamp: {}]", - event.requestId, - metadata.block_number, - metadata.block_timestamp - ); - - // Get the request and calculate its digest - let request = event.request.clone(); - let request_digest = request - .signing_hash(self.domain.verifying_contract, self.domain.chain_id) - .context(anyhow!( - "Failed to compute request digest for request: 0x{:x}", - event.requestId - ))?; - - // Check if we've already seen this request (from RequestSubmitted event) - // If not, it must have been submitted off-chain. We add it to the database. - let request_exists = self.db.has_proof_request(request_digest).await?; - if !request_exists { - tracing::debug!("Detected request locked for unseen request. 
Likely submitted off-chain: 0x{:x}", event.requestId); - self.db.add_proof_request(request_digest, request, &metadata).await?; - } - self.db - .add_request_locked_event(request_digest, event.requestId, event.prover, &metadata) - .await?; - } - - Ok(()) - } - - async fn process_proof_delivered_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .ProofDelivered_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} proof delivered events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - tracing::debug!( - "Processing proof delivered event for request: 0x{:x} [block: {}, timestamp: {}]", - event.requestId, - metadata.block_number, - metadata.block_timestamp - ); - - self.db - .add_proof_delivered_event( - event.fulfillment.requestDigest, - event.requestId, - &metadata, - ) - .await?; - self.db.add_fulfillment(event.fulfillment, event.prover, &metadata).await?; - } - - Ok(()) - } - - async fn process_fulfilled_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .RequestFulfilled_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} fulfilled events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - tracing::debug!( - "Processing fulfilled event for request: 0x{:x} [block: {}, timestamp: {}]", - event.requestId, - metadata.block_number, - metadata.block_timestamp - ); - self.db - .add_request_fulfilled_event(event.requestDigest, event.requestId, &metadata) - .await?; - } - - Ok(()) - } - - async fn process_slashed_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .ProverSlashed_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} slashed events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - tracing::debug!( - "Processing slashed event for request: 0x{:x} [block: {}, timestamp: {}]", - event.requestId, - metadata.block_number, - metadata.block_timestamp - ); - self.db - .add_prover_slashed_event( - event.requestId, - event.collateralBurned, - event.collateralTransferred, - event.collateralRecipient, - &metadata, - ) - .await?; - } - - Ok(()) - } - - async fn process_deposit_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .Deposit_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} deposit events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - tracing::debug!( - "Processing deposit 
event for account: 0x{:x} [block: {}, timestamp: {}]", - event.account, - metadata.block_number, - metadata.block_timestamp - ); - self.db.add_deposit_event(event.account, event.value, &metadata).await?; - } - - Ok(()) - } - - async fn process_withdrawal_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .Withdrawal_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} withdrawal events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - tracing::debug!( - "Processing withdrawal event for account: 0x{:x} [block: {}, timestamp: {}]", - event.account, - metadata.block_number, - metadata.block_timestamp - ); - self.db.add_withdrawal_event(event.account, event.value, &metadata).await?; - } - - Ok(()) - } - - async fn process_collateral_deposit_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .CollateralDeposit_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} collateral deposit events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - tracing::debug!( - "Processing collateral deposit event for account: 0x{:x} [block: {}, timestamp: {}]", - event.account, - metadata.block_number, - metadata.block_timestamp - ); - self.db.add_collateral_deposit_event(event.account, event.value, &metadata).await?; - } - - Ok(()) - } - - async fn process_collateral_withdrawal_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .CollateralWithdrawal_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} collateral withdrawal events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - tracing::debug!( - "Processing collateral withdrawal event for account: 0x{:x} [block: {}, timestamp: {}]", - event.account, - metadata.block_number, - metadata.block_timestamp - ); - self.db.add_collateral_withdrawal_event(event.account, event.value, &metadata).await?; - } - - Ok(()) - } - - async fn process_callback_failed_events( - &mut self, - from_block: u64, - to_block: u64, - ) -> Result<(), ServiceError> { - let event_filter = self - .boundless_market - .instance() - .CallbackFailed_filter() - .from_block(from_block) - .to_block(to_block); - - // Query the logs for the event - let logs = event_filter.query().await?; - tracing::debug!( - "Found {} callback failed events from block {} to block {}", - logs.len(), - from_block, - to_block - ); - - for (event, log_data) in logs { - let metadata = self.fetch_tx_metadata(log_data).await?; - tracing::debug!( - "Processing callback failed event for request: 0x{:x} [block: {}, timestamp: {}]", - event.requestId, - metadata.block_number, - metadata.block_timestamp - ); - - self.db - .add_callback_failed_event( 
- event.requestId, - event.callback, - event.error.to_vec(), - &metadata, - ) - .await?; - } - - Ok(()) - } - - async fn current_block(&self) -> Result { - Ok(self.boundless_market.instance().provider().get_block_number().await?) - } - - async fn block_timestamp(&self, block_number: u64) -> Result { - let timestamp = self.db.get_block_timestamp(block_number).await?; - let ts = match timestamp { - Some(ts) => ts, - None => { - tracing::debug!("Block timestamp not found in DB for block {}", block_number); - let ts = self - .boundless_market - .instance() - .provider() - .get_block_by_number(BlockNumberOrTag::Number(block_number)) - .await? - .context(anyhow!("Failed to get block by number: {}", block_number))? - .header - .timestamp; - self.db.add_block(block_number, ts).await?; - ts - } - }; - Ok(ts) - } - - fn clear_cache(&mut self) { - self.cache.clear(); - } - - // Fetch (and cache) metadata for a tx - // Check if the transaction is already in the cache - // If it is, use the cached tx metadata - // Otherwise, fetch the transaction from the provider and cache it - // This is to avoid making multiple calls to the provider for the same transaction - // as delivery events may be emitted in a batch - async fn fetch_tx_metadata(&mut self, log: Log) -> Result { - let tx_hash = log.transaction_hash.context("Transaction hash not found")?; - if let Some(meta) = self.cache.get(&tx_hash) { - return Ok(meta.clone()); - } - let tx = self - .boundless_market - .instance() - .provider() - .get_transaction_by_hash(tx_hash) - .await? - .context(anyhow!("Transaction not found: {}", hex::encode(tx_hash)))?; - let bn = tx.block_number.context("block number not found")?; - let ts = - if let Some(ts) = log.block_timestamp { ts } else { self.block_timestamp(bn).await? }; - let meta = TxMetadata::new(tx_hash, tx.from(), bn, ts); - self.cache.insert(tx_hash, meta.clone()); - Ok(meta) - } - - // Return the last processed block from the DB is > 0; - // otherwise, return the starting_block if set and <= current_block; - // otherwise, return the current_block. 
- async fn starting_block(&self, starting_block: Option) -> Result { - let last_processed = self.get_last_processed_block().await?; - let current_block = self.current_block().await?; - Ok(find_starting_block(starting_block, last_processed, current_block)) - } -} - -fn find_starting_block( - starting_block: Option, - last_processed: Option, - current_block: u64, -) -> u64 { - if let Some(last) = last_processed.filter(|&b| b > 0) { - tracing::debug!("Using last processed block {} as starting block", last); - return last; - } - - let from = starting_block.unwrap_or(current_block); - if from > current_block { - tracing::warn!( - "Starting block {} is greater than current block {}, defaulting to current block", - from, - current_block - ); - current_block - } else { - tracing::debug!("Using {} as starting block", from); - from - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[tokio::test] - async fn test_find_starting_block() { - let starting_block = Some(100); - let last_processed = Some(50); - let current_block = 200; - let block = find_starting_block(starting_block, last_processed, current_block); - assert_eq!(block, 50); - - let starting_block = None; - let last_processed = Some(50); - let current_block = 200; - let block = find_starting_block(starting_block, last_processed, current_block); - assert_eq!(block, 50); - - let starting_block = None; - let last_processed = None; - let current_block = 200; - let block = find_starting_block(starting_block, last_processed, current_block); - assert_eq!(block, 200); - - let starting_block = None; - let last_processed = Some(0); - let current_block = 200; - let block = find_starting_block(starting_block, last_processed, current_block); - assert_eq!(block, 200); - - let starting_block = Some(200); - let last_processed = None; - let current_block = 100; - let block = find_starting_block(starting_block, last_processed, current_block); - assert_eq!(block, 100); - - let starting_block = Some(200); - let last_processed = Some(10); - let current_block = 100; - let block = find_starting_block(starting_block, last_processed, current_block); - assert_eq!(block, 10); - } -} +// Re-export for backwards compatibility +pub use market::{IndexerService, IndexerServiceConfig, ServiceError}; diff --git a/crates/indexer/src/market/mod.rs b/crates/indexer/src/market/mod.rs new file mode 100644 index 000000000..ee5823739 --- /dev/null +++ b/crates/indexer/src/market/mod.rs @@ -0,0 +1,17 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod service; + +pub use service::{IndexerService, IndexerServiceConfig, ServiceError}; diff --git a/crates/indexer/src/market/service.rs b/crates/indexer/src/market/service.rs new file mode 100644 index 000000000..4e11ea3d9 --- /dev/null +++ b/crates/indexer/src/market/service.rs @@ -0,0 +1,752 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{cmp::min, collections::HashMap, sync::Arc}; + +use crate::db::{AnyDb, DbError, DbObj, TxMetadata}; +use ::boundless_market::contracts::{ + boundless_market::{BoundlessMarketService, MarketError}, + EIP712DomainSaltless, +}; +use alloy::{ + eips::BlockNumberOrTag, + network::{Ethereum, TransactionResponse}, + primitives::{Address, B256}, + providers::{ + fillers::{ChainIdFiller, FillProvider, JoinFill}, + Identity, Provider, ProviderBuilder, RootProvider, + }, + rpc::types::Log, + signers::local::PrivateKeySigner, + transports::{RpcError, TransportErrorKind}, +}; +use anyhow::{anyhow, Context}; +use thiserror::Error; +use tokio::time::Duration; +use url::Url; + +const MAX_BATCH_SIZE: u64 = 500; + +type ProviderWallet = FillProvider<JoinFill<Identity, ChainIdFiller>, RootProvider>; + +#[derive(Error, Debug)] +pub enum ServiceError { + #[error("Database error: {0}")] + DatabaseError(#[from] DbError), + + #[error("Boundless market error: {0}")] + BoundlessMarketError(#[from] MarketError), + + #[error("RPC error: {0}")] + RpcError(#[from] RpcError<TransportErrorKind>), + + #[error("Event query error: {0}")] + EventQueryError(#[from] alloy::contract::Error), + + #[error("Error: {0}")] + Error(#[from] anyhow::Error), + + #[error("Maximum retries reached")] + MaxRetries, + + #[error("Request not expired")] + RequestNotExpired, +}
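The `run` loop further down branches on these variants to decide between aborting and retrying. A compact restatement of that split as a helper (hypothetical refactor, not part of this diff):

```rust
impl ServiceError {
    /// True for transient network/contract errors that the run loop retries
    /// with backoff; false for errors that abort indexing.
    fn is_recoverable(&self) -> bool {
        matches!(
            self,
            ServiceError::BoundlessMarketError(_)
                | ServiceError::EventQueryError(_)
                | ServiceError::RpcError(_)
        )
    }
}
```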
+ +#[derive(Clone)] +pub struct IndexerService<P> { + pub boundless_market: BoundlessMarketService<P>
, + pub db: DbObj, + pub domain: EIP712DomainSaltless, + pub config: IndexerServiceConfig, + // Mapping from transaction hash to TxMetadata + pub cache: HashMap<B256, TxMetadata>, +} + +#[derive(Clone)] +pub struct IndexerServiceConfig { + pub interval: Duration, + pub retries: u32, +} + +impl IndexerService<ProviderWallet> { + pub async fn new( + rpc_url: Url, + private_key: &PrivateKeySigner, + boundless_market_address: Address, + db_conn: &str, + config: IndexerServiceConfig, + ) -> Result<Self, ServiceError> { + let caller = private_key.address(); + let provider = ProviderBuilder::new() + .disable_recommended_fillers() + .filler(ChainIdFiller::default()) + .connect_http(rpc_url); + let boundless_market = + BoundlessMarketService::new(boundless_market_address, provider.clone(), caller); + let db: DbObj = Arc::new(AnyDb::new(db_conn).await?); + let domain = boundless_market.eip712_domain().await?; + let cache = HashMap::new(); + + Ok(Self { boundless_market, db, domain, config, cache }) + } +}
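For reference, wiring the service together looks roughly like this; every concrete value below is a placeholder, not something this PR prescribes:

```rust
// Hypothetical caller; endpoint, key, address, and DSN are all placeholders.
async fn start_market_indexer() -> Result<(), ServiceError> {
    let signer = PrivateKeySigner::random();
    let mut service = IndexerService::new(
        Url::parse("http://localhost:8545").expect("valid RPC URL"),
        &signer,
        Address::ZERO, // Boundless market contract address (placeholder)
        "postgres://user:password@localhost:5432/indexer",
        IndexerServiceConfig { interval: Duration::from_secs(5), retries: 5 },
    )
    .await?;
    // None: resume from the DB checkpoint, or fall back to the current block.
    service.run(None).await
}
```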
+ +impl<P> IndexerService<P>
+where + P: Provider + 'static + Clone, +{ + pub async fn run(&mut self, starting_block: Option<u64>) -> Result<(), ServiceError> { + let mut interval = tokio::time::interval(self.config.interval); + + let mut from_block: u64 = self.starting_block(starting_block).await?; + tracing::info!("Starting indexer at block {}", from_block); + + let mut attempt = 0; + loop { + interval.tick().await; + + match self.current_block().await { + Ok(to_block) => { + if to_block < from_block { + continue; + } + + // cap each batch to at most MAX_BATCH_SIZE blocks + let batch_end = min(to_block, from_block.saturating_add(MAX_BATCH_SIZE)); + + tracing::info!("Processing blocks from {} to {}", from_block, batch_end); + + match self.process_blocks(from_block, batch_end).await { + Ok(_) => { + attempt = 0; + from_block = batch_end + 1; + } + Err(e) => match e { + // Irrecoverable errors + ServiceError::DatabaseError(_) + | ServiceError::MaxRetries + | ServiceError::RequestNotExpired + | ServiceError::Error(_) => { + tracing::error!( + "Failed to process blocks from {} to {}: {:?}", + from_block, + batch_end, + e + ); + return Err(e); + } + // Recoverable errors + ServiceError::BoundlessMarketError(_) + | ServiceError::EventQueryError(_) + | ServiceError::RpcError(_) => { + attempt += 1; + // exponential backoff with a maximum delay of 120 seconds + let delay = + std::time::Duration::from_secs(2u64.pow(attempt - 1).min(120)); + tracing::warn!( + "Failed to process blocks from {} to {}: {:?}, attempt number {}, retrying in {}s", + from_block, + batch_end, + e, + attempt, + delay.as_secs() + ); + tokio::time::sleep(delay).await; + } + }, + } + } + Err(e) => { + attempt += 1; + tracing::warn!( + "Failed to fetch current block: {:?}, attempt number {}", + e, + attempt + ); + } + } + if attempt > self.config.retries { + tracing::error!("Aborting after {} consecutive attempts", attempt); + return Err(ServiceError::MaxRetries); + } + } + } + + async fn process_blocks(&mut self, from: u64, to: u64) -> Result<(), ServiceError> { + self.process_request_submitted_events(from, to).await?; + self.process_locked_events(from, to).await?; + self.process_proof_delivered_events(from, to).await?; + self.process_fulfilled_events(from, to).await?; + self.process_callback_failed_events(from, to).await?; + self.process_slashed_events(from, to).await?; + self.process_deposit_events(from, to).await?; + self.process_withdrawal_events(from, to).await?; + self.process_collateral_deposit_events(from, to).await?; + self.process_collateral_withdrawal_events(from, to).await?; + self.clear_cache(); + + self.update_last_processed_block(to).await?; + + Ok(()) + } + + async fn get_last_processed_block(&self) -> Result<Option<u64>, ServiceError> { + Ok(self.db.get_last_block().await?) + } + + async fn update_last_processed_block(&self, block_number: u64) -> Result<(), ServiceError> { + Ok(self.db.set_last_block(block_number).await?)
+ } + + async fn process_request_submitted_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .RequestSubmitted_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} request submitted events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + + tracing::debug!( + "Processing request submitted event for request: 0x{:x} [block: {}, timestamp: {}]", + event.requestId, + metadata.block_number, + metadata.block_timestamp + ); + + let request = event.request.clone(); + + let request_digest = request + .signing_hash(self.domain.verifying_contract, self.domain.chain_id) + .context(anyhow!( + "Failed to compute request digest for request: 0x{:x}", + event.requestId + ))?; + + self.db.add_proof_request(request_digest, request, &metadata).await?; + self.db.add_request_submitted_event(request_digest, event.requestId, &metadata).await?; + } + + Ok(()) + } + + async fn process_locked_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .RequestLocked_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} locked events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + tracing::debug!( + "Processing request locked event for request: 0x{:x} [block: {}, timestamp: {}]", + event.requestId, + metadata.block_number, + metadata.block_timestamp + ); + + // Get the request and calculate its digest + let request = event.request.clone(); + let request_digest = request + .signing_hash(self.domain.verifying_contract, self.domain.chain_id) + .context(anyhow!( + "Failed to compute request digest for request: 0x{:x}", + event.requestId + ))?; + + // Check if we've already seen this request (from RequestSubmitted event) + // If not, it must have been submitted off-chain. We add it to the database. + let request_exists = self.db.has_proof_request(request_digest).await?; + if !request_exists { + tracing::debug!("Detected request locked for unseen request. 
Likely submitted off-chain: 0x{:x}", event.requestId); + self.db.add_proof_request(request_digest, request, &metadata).await?; + } + self.db + .add_request_locked_event(request_digest, event.requestId, event.prover, &metadata) + .await?; + } + + Ok(()) + } + + async fn process_proof_delivered_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .ProofDelivered_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} proof delivered events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + tracing::debug!( + "Processing proof delivered event for request: 0x{:x} [block: {}, timestamp: {}]", + event.requestId, + metadata.block_number, + metadata.block_timestamp + ); + + self.db + .add_proof_delivered_event( + event.fulfillment.requestDigest, + event.requestId, + &metadata, + ) + .await?; + self.db.add_fulfillment(event.fulfillment, event.prover, &metadata).await?; + } + + Ok(()) + } + + async fn process_fulfilled_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .RequestFulfilled_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} fulfilled events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + tracing::debug!( + "Processing fulfilled event for request: 0x{:x} [block: {}, timestamp: {}]", + event.requestId, + metadata.block_number, + metadata.block_timestamp + ); + self.db + .add_request_fulfilled_event(event.requestDigest, event.requestId, &metadata) + .await?; + } + + Ok(()) + } + + async fn process_slashed_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .ProverSlashed_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} slashed events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + tracing::debug!( + "Processing slashed event for request: 0x{:x} [block: {}, timestamp: {}]", + event.requestId, + metadata.block_number, + metadata.block_timestamp + ); + self.db + .add_prover_slashed_event( + event.requestId, + event.collateralBurned, + event.collateralTransferred, + event.collateralRecipient, + &metadata, + ) + .await?; + } + + Ok(()) + } + + async fn process_deposit_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .Deposit_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} deposit events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + tracing::debug!( + "Processing deposit 
event for account: 0x{:x} [block: {}, timestamp: {}]", + event.account, + metadata.block_number, + metadata.block_timestamp + ); + self.db.add_deposit_event(event.account, event.value, &metadata).await?; + } + + Ok(()) + } + + async fn process_withdrawal_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .Withdrawal_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} withdrawal events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + tracing::debug!( + "Processing withdrawal event for account: 0x{:x} [block: {}, timestamp: {}]", + event.account, + metadata.block_number, + metadata.block_timestamp + ); + self.db.add_withdrawal_event(event.account, event.value, &metadata).await?; + } + + Ok(()) + } + + async fn process_collateral_deposit_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .CollateralDeposit_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} collateral deposit events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + tracing::debug!( + "Processing collateral deposit event for account: 0x{:x} [block: {}, timestamp: {}]", + event.account, + metadata.block_number, + metadata.block_timestamp + ); + self.db.add_collateral_deposit_event(event.account, event.value, &metadata).await?; + } + + Ok(()) + } + + async fn process_collateral_withdrawal_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .CollateralWithdrawal_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} collateral withdrawal events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + tracing::debug!( + "Processing collateral withdrawal event for account: 0x{:x} [block: {}, timestamp: {}]", + event.account, + metadata.block_number, + metadata.block_timestamp + ); + self.db.add_collateral_withdrawal_event(event.account, event.value, &metadata).await?; + } + + Ok(()) + } + + async fn process_callback_failed_events( + &mut self, + from_block: u64, + to_block: u64, + ) -> Result<(), ServiceError> { + let event_filter = self + .boundless_market + .instance() + .CallbackFailed_filter() + .from_block(from_block) + .to_block(to_block); + + // Query the logs for the event + let logs = event_filter.query().await?; + tracing::debug!( + "Found {} callback failed events from block {} to block {}", + logs.len(), + from_block, + to_block + ); + + for (event, log_data) in logs { + let metadata = self.fetch_tx_metadata(log_data).await?; + tracing::debug!( + "Processing callback failed event for request: 0x{:x} [block: {}, timestamp: {}]", + event.requestId, + metadata.block_number, + metadata.block_timestamp + ); + + self.db + .add_callback_failed_event( 
+ event.requestId, + event.callback, + event.error.to_vec(), + &metadata, + ) + .await?; + } + + Ok(()) + } + + async fn current_block(&self) -> Result<u64, ServiceError> { + Ok(self.boundless_market.instance().provider().get_block_number().await?) + } + + async fn block_timestamp(&self, block_number: u64) -> Result<u64, ServiceError> { + let timestamp = self.db.get_block_timestamp(block_number).await?; + let ts = match timestamp { + Some(ts) => ts, + None => { + tracing::debug!("Block timestamp not found in DB for block {}", block_number); + let ts = self + .boundless_market + .instance() + .provider() + .get_block_by_number(BlockNumberOrTag::Number(block_number)) + .await? + .context(anyhow!("Failed to get block by number: {}", block_number))? + .header + .timestamp; + self.db.add_block(block_number, ts).await?; + ts + } + }; + Ok(ts) + } + + fn clear_cache(&mut self) { + self.cache.clear(); + } + + // Fetch (and cache) metadata for a tx. + // Check if the transaction is already in the cache: + // if it is, use the cached tx metadata; + // otherwise, fetch the transaction from the provider and cache it. + // This avoids making multiple calls to the provider for the same transaction, + // as delivery events may be emitted in a batch. + async fn fetch_tx_metadata(&mut self, log: Log) -> Result<TxMetadata, ServiceError> { + let tx_hash = log.transaction_hash.context("Transaction hash not found")?; + if let Some(meta) = self.cache.get(&tx_hash) { + return Ok(meta.clone()); + } + let tx = self + .boundless_market + .instance() + .provider() + .get_transaction_by_hash(tx_hash) + .await? + .context(anyhow!("Transaction not found: {}", hex::encode(tx_hash)))?; + let bn = tx.block_number.context("block number not found")?; + let ts = + if let Some(ts) = log.block_timestamp { ts } else { self.block_timestamp(bn).await? }; + let meta = TxMetadata::new(tx_hash, tx.from(), bn, ts); + self.cache.insert(tx_hash, meta.clone()); + Ok(meta) + }
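The resume logic implemented just below has three tiers: a positive DB checkpoint wins, then an explicit start clamped to the chain head, then the head itself. Concrete cases, mirroring the unit tests at the bottom of this file:

```rust
// Illustration only (same cases as the tests below).
assert_eq!(find_starting_block(Some(100), Some(50), 200), 50); // DB checkpoint wins when > 0
assert_eq!(find_starting_block(Some(100), Some(0), 200), 100); // zero checkpoint is ignored
assert_eq!(find_starting_block(Some(300), None, 200), 200);    // requested start clamped to head
assert_eq!(find_starting_block(None, None, 200), 200);         // default: current block
```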
+ + // Return the last processed block from the DB if it is > 0; + // otherwise, return the starting_block if set and <= current_block; + // otherwise, return the current_block. + async fn starting_block(&self, starting_block: Option<u64>) -> Result<u64, ServiceError> { + let last_processed = self.get_last_processed_block().await?; + let current_block = self.current_block().await?; + Ok(find_starting_block(starting_block, last_processed, current_block)) + } +} + +fn find_starting_block( + starting_block: Option<u64>, + last_processed: Option<u64>, + current_block: u64, +) -> u64 { + if let Some(last) = last_processed.filter(|&b| b > 0) { + tracing::debug!("Using last processed block {} as starting block", last); + return last; + } + + let from = starting_block.unwrap_or(current_block); + if from > current_block { + tracing::warn!( + "Starting block {} is greater than current block {}, defaulting to current block", + from, + current_block + ); + current_block + } else { + tracing::debug!("Using {} as starting block", from); + from + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_find_starting_block() { + let starting_block = Some(100); + let last_processed = Some(50); + let current_block = 200; + let block = find_starting_block(starting_block, last_processed, current_block); + assert_eq!(block, 50); + + let starting_block = None; + let last_processed = Some(50); + let current_block = 200; + let block = find_starting_block(starting_block, last_processed, current_block); + assert_eq!(block, 50); + + let starting_block = None; + let last_processed = None; + let current_block = 200; + let block = find_starting_block(starting_block, last_processed, current_block); + assert_eq!(block, 200); + + let starting_block = None; + let last_processed = Some(0); + let current_block = 200; + let block = find_starting_block(starting_block, last_processed, current_block); + assert_eq!(block, 200); + + let starting_block = Some(200); + let last_processed = None; + let current_block = 100; + let block = find_starting_block(starting_block, last_processed, current_block); + assert_eq!(block, 100); + + let starting_block = Some(200); + let last_processed = Some(10); + let current_block = 100; + let block = find_starting_block(starting_block, last_processed, current_block); + assert_eq!(block, 10); + } +} diff --git a/crates/indexer/src/rewards/mod.rs b/crates/indexer/src/rewards/mod.rs new file mode 100644 index 000000000..65e463dd3 --- /dev/null +++ b/crates/indexer/src/rewards/mod.rs @@ -0,0 +1,17 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod service; + +pub use service::{RewardsIndexerService, RewardsIndexerServiceConfig}; diff --git a/crates/indexer/src/rewards/service.rs b/crates/indexer/src/rewards/service.rs new file mode 100644 index 000000000..fed143b5e --- /dev/null +++ b/crates/indexer/src/rewards/service.rs @@ -0,0 +1,739 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use std::collections::HashMap; +use std::sync::Arc; + +use alloy::{ + primitives::{Address, U256}, + providers::{ + fillers::{ChainIdFiller, FillProvider, JoinFill}, + Identity, Provider, ProviderBuilder, RootProvider, + }, + rpc::client::RpcClient, + transports::layers::RetryBackoffLayer, +}; +use anyhow::{Context, Result}; +use boundless_povw::{deployments::Deployment as PovwDeployment, log_updater::IPovwAccounting}; +use boundless_rewards::{ + build_rewards_cache, compute_delegation_powers, compute_povw_rewards, compute_staking_data, + fetch_all_event_logs, AllEventLogs, EpochTimeRange, RewardsCache, MAINNET_FROM_BLOCK, + SEPOLIA_FROM_BLOCK, +}; +use boundless_zkc::{contracts::IZKC, deployments::Deployment as ZkcDeployment}; +use tokio::time::Duration; +use url::Url; + +use crate::db::rewards::{ + EpochPoVWSummary, EpochStakingSummary, PoVWSummaryStats, PovwRewardAggregate, + PovwRewardByEpoch, RewardDelegationPowerAggregate, RewardDelegationPowerByEpoch, RewardsDb, + RewardsDbObj, StakingPositionAggregate, StakingPositionByEpoch, StakingRewardByEpoch, + StakingSummaryStats, VoteDelegationPowerAggregate, VoteDelegationPowerByEpoch, +}; + +#[derive(Clone)] +pub struct RewardsIndexerServiceConfig { + pub interval: Duration, + pub retries: u32, + pub start_block: Option<u64>, + pub end_block: Option<u64>, + pub end_epoch: Option<u64>, + pub epochs_to_process: Option<u64>, +} + +type ProviderType = FillProvider<JoinFill<Identity, ChainIdFiller>, RootProvider>; + +struct EventsAndPreparedData { + all_logs: AllEventLogs, + actual_current_epoch_u64: u64, + processing_end_epoch: u64, + epochs_to_process: Vec<u64>, + end_block: u64, + povw_deployment: PovwDeployment, + #[allow(dead_code)] + zkc_deployment: ZkcDeployment, +} + +pub struct RewardsIndexerService { + provider: ProviderType, + db: RewardsDbObj, + zkc_address: Address, + #[allow(dead_code)] + vezkc_address: Address, + #[allow(dead_code)] + povw_accounting_address: Address, + config: RewardsIndexerServiceConfig, + chain_id: u64, + epoch_cache: HashMap<u64, EpochTimeRange>, + block_timestamp_cache: HashMap<u64, u64>, +} + +impl RewardsIndexerService { + pub async fn new( + rpc_url: Url, + vezkc_address: Address, + zkc_address: Address, + povw_accounting_address: Address, + db_conn: &str, + config: RewardsIndexerServiceConfig, + ) -> Result<Self> { + let provider = ProviderBuilder::new() + .disable_recommended_fillers() + .filler(ChainIdFiller::default()) + .connect_client( + RpcClient::builder().layer(RetryBackoffLayer::new(3, 1000, 200)).http(rpc_url), + ); + let chain_id = provider.get_chain_id().await?; + let db: RewardsDbObj = Arc::new(RewardsDb::new(db_conn).await?); + + Ok(Self { + provider, + db, + vezkc_address, + zkc_address, + povw_accounting_address, + config, + chain_id, + epoch_cache: HashMap::new(), + block_timestamp_cache: HashMap::new(), + }) + }
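`new` above layers `RetryBackoffLayer::new(3, 1000, 200)` onto the RPC client. As I read alloy's API, the three positional arguments are the maximum number of rate-limit retries, the initial backoff in milliseconds, and the assumed compute units per second; treat the naming below as my gloss, not this PR's:

```rust
// Gloss of the construction in new(); argument names are my reading of alloy.
fn retrying_rpc_client(rpc_url: Url) -> RpcClient {
    let retry = RetryBackoffLayer::new(
        3,    // max retries on rate-limited responses
        1000, // initial backoff, in milliseconds
        200,  // assumed compute units per second (for CU-aware throttling)
    );
    RpcClient::builder().layer(retry).http(rpc_url)
}
```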
+ pub async fn run(&mut self) -> Result<()> { + let start_time = std::time::Instant::now(); + tracing::info!("Starting rewards indexer run"); + + // Fetch events and prepare data + let prepared_data = self.fetch_events_and_prepare_data().await?; + + // Store current epoch in database + self.db.set_current_epoch(prepared_data.actual_current_epoch_u64).await?; + + // Build cache + let povw_cache = self + .build_cache( + &prepared_data.povw_deployment, + &prepared_data.epochs_to_process, + prepared_data.actual_current_epoch_u64, + &prepared_data.all_logs, + ) + .await?; + + // Compute and store staking rewards + let staking_amounts_by_epoch = self + .compute_and_store_staking_rewards( + prepared_data.actual_current_epoch_u64, + prepared_data.processing_end_epoch, + &povw_cache, + ) + .await?; + + // Compute and store PoVW rewards + self.compute_and_store_povw_rewards( + prepared_data.actual_current_epoch_u64, + prepared_data.processing_end_epoch, + &prepared_data.epochs_to_process, + &povw_cache, + &prepared_data.povw_deployment, + &staking_amounts_by_epoch, + ) + .await?; + + // Compute and store delegation powers + self.compute_and_store_delegation_powers( + prepared_data.actual_current_epoch_u64, + prepared_data.processing_end_epoch, + &povw_cache, + ) + .await?; + + // Save last processed block + self.db.set_last_rewards_block(prepared_data.end_block).await?; + + tracing::info!( + "Rewards indexer run completed successfully in {:.2}s", + start_time.elapsed().as_secs_f64() + ); + Ok(()) + } + + async fn fetch_events_and_prepare_data(&self) -> Result<EventsAndPreparedData> { + // Get deployments based on chain ID + let povw_deployment = PovwDeployment::from_chain_id(self.chain_id) + .context("Could not determine PoVW deployment from chain ID")?; + let zkc_deployment = ZkcDeployment::from_chain_id(self.chain_id) + .context("Could not determine ZKC deployment from chain ID")?; + + // Determine starting block + let start_block = if let Some(block) = self.config.start_block { + block + } else { + match self.chain_id { + 1 => MAINNET_FROM_BLOCK, + 11155111 => SEPOLIA_FROM_BLOCK, + _ => 0, + } + }; + + // Determine ending block (use end_block if provided, otherwise current block) + let actual_current_block = self.provider.get_block_number().await?; + let end_block = self.config.end_block.unwrap_or(actual_current_block); + + // Validate end_block is not greater than current block + if end_block > actual_current_block { + anyhow::bail!( + "End block {} is greater than current block {}", + end_block, + actual_current_block + ); + } + + tracing::info!( + "Fetching events from block {} to {} ({} blocks)", + start_block, + end_block, + end_block - start_block + ); + + // Fetch all event logs up to end_block + let fetch_start = std::time::Instant::now(); + let all_logs = fetch_all_event_logs( + &self.provider, + &povw_deployment, + &zkc_deployment, + start_block, + end_block, + ) + .await?; + tracing::info!("Event fetching completed in {:.2}s", fetch_start.elapsed().as_secs_f64()); + + // Get current epoch from ZKC contract + let zkc = IZKC::new(self.zkc_address, &self.provider); + let actual_current_epoch = zkc.getCurrentEpoch().call().await?; + let actual_current_epoch_u64 = actual_current_epoch.to::<u64>(); + + tracing::info!("Current blockchain epoch: {}", actual_current_epoch_u64); + + // Determine the end epoch for processing + let processing_end_epoch = if let Some(end_epoch) = self.config.end_epoch { + if end_epoch > actual_current_epoch_u64 { + anyhow::bail!( + "End epoch {} is greater than current epoch {}", + end_epoch, + actual_current_epoch_u64 + ); + } + tracing::info!( + "Historical mode: processing up to epoch {} (current epoch: {})", + end_epoch, + actual_current_epoch_u64 + ); + end_epoch + } else { + tracing::info!( + "Live mode: processing up to current epoch {}", + actual_current_epoch_u64 + ); + actual_current_epoch_u64 + }; + + tracing::info!("Processing up to epoch: {}", processing_end_epoch); + + // Process the last epochs_to_process epochs, if the arg is provided in config. + let epochs_to_process = if let Some(epochs_to_process_count) = self.config.epochs_to_process + { + if epochs_to_process_count > processing_end_epoch { + (0..=processing_end_epoch).collect::<Vec<u64>>() + } else { + (processing_end_epoch - epochs_to_process_count + 1..=processing_end_epoch) + .collect::<Vec<u64>>() + } + } else { + (0..=processing_end_epoch).collect::<Vec<u64>>() + }; + + Ok(EventsAndPreparedData { + all_logs, + actual_current_epoch_u64, + processing_end_epoch, + epochs_to_process, + end_block, + povw_deployment, + zkc_deployment, + }) + }
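The `epochs_to_process` windowing above keeps either the last N epochs or everything up to `processing_end_epoch` when N would reach past epoch 0. The same arithmetic, extracted (sketch only):

```rust
// Mirror of the windowing in fetch_events_and_prepare_data, for illustration.
fn epoch_window(end: u64, count: Option<u64>) -> Vec<u64> {
    match count {
        Some(n) if n <= end => (end - n + 1..=end).collect(), // last n epochs
        _ => (0..=end).collect(), // unset, or window larger than history: take everything
    }
}

// end = 10, count = Some(3)  -> [8, 9, 10]
// end = 10, count = Some(50) -> [0, 1, ..., 10]
```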
+ + async fn build_cache( + &self, + povw_deployment: &PovwDeployment, + epochs_to_process: &[u64], + actual_current_epoch_u64: u64, + all_logs: &AllEventLogs, + ) -> Result<RewardsCache> { + // Build PoVW rewards cache with all necessary data (includes epoch times, block timestamps, and stake events) + tracing::info!("Building rewards cache for {} epochs", epochs_to_process.len()); + let cache_build_start = std::time::Instant::now(); + let povw_cache = build_rewards_cache( + &self.provider, + povw_deployment, + self.zkc_address, + epochs_to_process, + actual_current_epoch_u64, // Pass real current epoch + self.config.end_epoch, // Pass end_epoch for historical mode detection + all_logs, + ) + .await?; + tracing::info!( + "PoVW rewards cache built in {:.2}s", + cache_build_start.elapsed().as_secs_f64() + ); + + Ok(povw_cache) + }
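The staking routine that follows logs U256 token amounts as whole ZKC by dividing by 10^18. A tiny helper with the same (truncating) semantics, for illustration only:

```rust
// Truncating wei-style -> whole-token conversion, as used in the logging below.
fn to_whole_zkc(amount: U256) -> U256 {
    amount / U256::from(10).pow(U256::from(18))
}
```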
+ + async fn compute_and_store_staking_rewards( + &mut self, + actual_current_epoch_u64: u64, + processing_end_epoch: u64, + povw_cache: &RewardsCache, + ) -> Result<HashMap<(Address, u64), U256>> { + // Compute all staking data (positions and rewards) using the unified function + tracing::info!("Computing staking data (positions + rewards)..."); + let staking_start = std::time::Instant::now(); + + let staking_data = compute_staking_data( + actual_current_epoch_u64, // Real current epoch for comparison + processing_end_epoch, // Process up to this epoch + &povw_cache.timestamped_stake_events, + &povw_cache.staking_emissions_by_epoch, + &povw_cache.staking_power_by_address_by_epoch, + &povw_cache.total_staking_power_by_epoch, + )?; + + tracing::info!( + "Staking data computed in {:.2}s (current total: {} ZKC, {} stakers, total emissions: {} ZKC)", + staking_start.elapsed().as_secs_f64(), + staking_data.summary.current_total_staked / U256::from(10).pow(U256::from(18)), + staking_data.summary.current_active_stakers, + staking_data.summary.total_staking_emissions_all_time / U256::from(10).pow(U256::from(18)) + ); + + // Store epoch and block caches from the povw_cache for later use + self.epoch_cache = povw_cache.epoch_time_ranges.clone(); + self.block_timestamp_cache = povw_cache.block_timestamps.clone(); + + // Build staking lookup for PoVW rewards computation + let mut staking_amounts_by_epoch: HashMap<(Address, u64), U256> = HashMap::new(); + for epoch_data in &staking_data.epochs { + for (address, position) in &epoch_data.positions_by_staker { + staking_amounts_by_epoch + .insert((*address, epoch_data.epoch), position.staked_amount); + } + } + + // Store staking data + tracing::info!("Storing staking data for {} epochs...", staking_data.epochs.len()); + let staking_db_start = std::time::Instant::now(); + + // Store staking positions and rewards by epoch + for epoch_data in &staking_data.epochs { + // Store positions + let positions: Vec<StakingPositionByEpoch> = epoch_data + .positions_by_staker + .iter() + .map(|(address, position)| StakingPositionByEpoch { + staker_address: *address, + epoch: epoch_data.epoch, + staked_amount: position.staked_amount, + is_withdrawing: position.is_withdrawing, + rewards_delegated_to: position.rewards_delegated_to, + votes_delegated_to: position.votes_delegated_to, + rewards_generated: position.rewards_generated, + }) + .collect(); + + if !positions.is_empty() { + self.db.upsert_staking_positions_by_epoch(epoch_data.epoch, positions).await?; + } + + // Store rewards + let rewards: Vec<StakingRewardByEpoch> = epoch_data + .rewards_by_address + .iter() + .map(|(address, info)| StakingRewardByEpoch { + staker_address: *address, + epoch: epoch_data.epoch, + staking_power: info.staking_power, + percentage: info.percentage, + rewards_earned: info.rewards_earned, + }) + .collect(); + + if !rewards.is_empty() { + self.db.upsert_staking_rewards_by_epoch(epoch_data.epoch, rewards).await?; + } + + // Get epoch time range from cache + let epoch_time_range = self + .epoch_cache + .get(&epoch_data.epoch) + .cloned() + .unwrap_or(boundless_rewards::EpochTimeRange { start_time: 0, end_time: 0 }); + + // Store epoch summary + let epoch_summary = EpochStakingSummary { + epoch: epoch_data.epoch, + total_staked: epoch_data.total_staked, + num_stakers: epoch_data.num_stakers as u64, + num_withdrawing: epoch_data.num_withdrawing as u64, + total_staking_emissions: epoch_data.total_staking_emissions, + total_staking_power: epoch_data.total_staking_power, + num_reward_recipients: epoch_data.num_reward_recipients as u64, + epoch_start_time: epoch_time_range.start_time, + epoch_end_time: epoch_time_range.end_time, + updated_at: Some(chrono::Utc::now().to_rfc3339()), + }; + self.db.upsert_epoch_staking_summary(epoch_data.epoch, epoch_summary).await?; + } + + // Store staking position aggregates (with rewards) + let aggregates: Vec<StakingPositionAggregate> = staking_data + .staker_aggregates + .values() + .map(|agg| StakingPositionAggregate { + staker_address: agg.staker_address, + total_staked: agg.current_staked, + is_withdrawing: agg.is_withdrawing, + rewards_delegated_to: agg.rewards_delegated_to, + votes_delegated_to: agg.votes_delegated_to, + epochs_participated: agg.epochs_participated, + total_rewards_earned: agg.total_rewards_earned, + total_rewards_generated: agg.total_rewards_generated, + }) + .collect(); + + if !aggregates.is_empty() { + self.db.upsert_staking_positions_aggregate(aggregates).await?; + tracing::info!( + "Updated staking position aggregates for {} addresses", + staking_data.staker_aggregates.len() + ); + } + + // Store global staking summary statistics + let staking_summary_stats = StakingSummaryStats { + current_total_staked: staking_data.summary.current_total_staked, + total_unique_stakers: staking_data.summary.total_unique_stakers as u64, + current_active_stakers: staking_data.summary.current_active_stakers as u64, + current_withdrawing: staking_data.summary.current_withdrawing as u64, + total_staking_emissions_all_time: Some( + staking_data.summary.total_staking_emissions_all_time, + ), + updated_at: Some(chrono::Utc::now().to_rfc3339()), + }; + self.db.upsert_staking_summary_stats(staking_summary_stats).await?; + + tracing::info!("Staking data stored in {:.2}s", staking_db_start.elapsed().as_secs_f64()); + + Ok(staking_amounts_by_epoch) + }
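`compute_and_store_povw_rewards`, next, derives each work log's share with integer basis points before converting to `f64`, which keeps two decimal places without doing floating-point division on U256 values. The same arithmetic, isolated (sketch; worked example: work = 333, total = 1000 gives 3330 bps, i.e. 33.3%):

```rust
// Same basis-points arithmetic as the percentage calculation below.
fn share_percent(work: U256, total_work: U256) -> f64 {
    if total_work.is_zero() {
        return 0.0;
    }
    (work * U256::from(10_000) / total_work).to::<u64>() as f64 / 100.0
}
```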
+ + async fn compute_and_store_povw_rewards( + &self, + actual_current_epoch_u64: u64, + processing_end_epoch: u64, + epochs_to_process: &[u64], + povw_cache: &RewardsCache, + povw_deployment: &PovwDeployment, + staking_amounts_by_epoch: &HashMap<(Address, u64), U256>, + ) -> Result<()> { + // Get pending epoch total work + // For historical indexing (when end_epoch is specified), we don't fetch pending work from blockchain state + let pending_epoch_total_work = if self.config.end_epoch.is_some() { + tracing::info!("Historical indexing mode - using finalized epoch data only"); + U256::ZERO + } else { + let povw_accounting = + IPovwAccounting::new(povw_deployment.povw_accounting_address, &self.provider); + let pending_epoch = povw_accounting.pendingEpoch().call().await?; + U256::from(pending_epoch.totalWork) + }; + + // Compute rewards for all epochs at once + tracing::info!("Computing PoVW rewards for all epochs (0 to {})...", processing_end_epoch); + let povw_result = compute_povw_rewards( + actual_current_epoch_u64, // Real current epoch for comparison logic + processing_end_epoch, // Process up to this epoch + &povw_cache.work_by_work_log_by_epoch, + &povw_cache.work_recipients_by_epoch, + &povw_cache.total_work_by_epoch, + pending_epoch_total_work, + &povw_cache.povw_emissions_by_epoch, + &povw_cache.reward_caps, + staking_amounts_by_epoch, + &povw_cache.epoch_time_ranges, + )?; + + tracing::info!( + "Computed rewards for {} epochs with {} unique work logs. Total work: {}, Total emissions: {}", + povw_result.summary.total_epochs_with_work, + povw_result.summary.total_unique_work_log_ids, + povw_result.summary.total_work_all_time, + povw_result.summary.total_emissions_all_time + ); + + // Store rewards for epochs we're processing + for &epoch in epochs_to_process { + let epoch_rewards = povw_result + .epoch_rewards + .iter() + .find(|e| e.epoch == U256::from(epoch)) + .cloned() + .unwrap_or_else(|| { + // Create empty epoch if not found + // For empty epochs, use reasonable defaults for times + boundless_rewards::EpochPoVWRewards { + epoch: U256::from(epoch), + total_work: U256::ZERO, + total_emissions: U256::ZERO, + total_capped_rewards: U256::ZERO, + total_proportional_rewards: U256::ZERO, + epoch_start_time: 0, + epoch_end_time: 0, + rewards_by_work_log_id: HashMap::new(), + } + }); + + // Convert to database format + let mut db_rewards = Vec::new(); + let num_rewards = epoch_rewards.rewards_by_work_log_id.len(); + let total_work = epoch_rewards.total_work; + + for (_, info) in epoch_rewards.rewards_by_work_log_id { + // Calculate percentage before conversion + let percentage = if total_work > U256::ZERO { + (info.work * U256::from(10000) / total_work).to::<u64>() as f64 / 100.0 + } else { + 0.0 + }; + + let mut reward: PovwRewardByEpoch = info.into(); + reward.epoch = epoch; + reward.percentage = percentage; + db_rewards.push(reward); + } + + // Upsert epoch rewards + self.db.upsert_povw_rewards_by_epoch(epoch, db_rewards).await?; + tracing::debug!("Updated {} rewards for epoch {}", num_rewards, epoch); + } + + // Convert aggregates to database format and upsert + let aggregates: Vec<PovwRewardAggregate> = povw_result + .summary_by_work_log_id + .into_values() + .map(|aggregate| PovwRewardAggregate { + work_log_id: aggregate.work_log_id, + total_work_submitted: aggregate.total_work_submitted, + total_actual_rewards: aggregate.total_actual_rewards, + total_uncapped_rewards: aggregate.total_uncapped_rewards, + epochs_participated: aggregate.epochs_participated, + }) + .collect(); + self.db.upsert_povw_rewards_aggregate(aggregates.clone()).await?; + + tracing::info!("Updated aggregate rewards for {} work logs", aggregates.len()); + + // Store PoVW global summary statistics + tracing::info!("Storing PoVW global summary statistics..."); + let povw_summary_stats =
PoVWSummaryStats { + total_epochs_with_work: povw_result.summary.total_epochs_with_work as u64, + total_unique_work_log_ids: povw_result.summary.total_unique_work_log_ids as u64, + total_work_all_time: povw_result.summary.total_work_all_time, + total_emissions_all_time: povw_result.summary.total_emissions_all_time, + total_capped_rewards_all_time: povw_result.summary.total_capped_rewards_all_time, + total_uncapped_rewards_all_time: povw_result.summary.total_uncapped_rewards_all_time, + updated_at: Some(chrono::Utc::now().to_rfc3339()), + }; + self.db.upsert_povw_summary_stats(povw_summary_stats).await?; + tracing::info!("Updated PoVW global summary statistics"); + + // Store per-epoch PoVW summaries + tracing::info!( + "Storing per-epoch PoVW summaries for {} epochs...", + povw_result.epoch_rewards.len() + ); + for epoch_data in &povw_result.epoch_rewards { + let num_participants = epoch_data.rewards_by_work_log_id.len() as u64; + let epoch_summary = EpochPoVWSummary { + epoch: epoch_data.epoch.to::<u64>(), + total_work: epoch_data.total_work, + total_emissions: epoch_data.total_emissions, + total_capped_rewards: epoch_data.total_capped_rewards, + total_uncapped_rewards: epoch_data.total_proportional_rewards, + epoch_start_time: epoch_data.epoch_start_time, + epoch_end_time: epoch_data.epoch_end_time, + num_participants, + updated_at: Some(chrono::Utc::now().to_rfc3339()), + }; + self.db.upsert_epoch_povw_summary(epoch_data.epoch.to::<u64>(), epoch_summary).await?; + } + tracing::info!("Updated per-epoch PoVW summaries"); + + Ok(()) + } + + async fn compute_and_store_delegation_powers( + &self, + actual_current_epoch_u64: u64, + processing_end_epoch: u64, + povw_cache: &RewardsCache, + ) -> Result<()> { + // Compute delegation powers + tracing::info!("Computing delegation powers from events..."); + let delegation_start = std::time::Instant::now(); + + // Compute delegation powers from pre-processed events + let epoch_delegation_powers = compute_delegation_powers( + &povw_cache.timestamped_delegation_events, + actual_current_epoch_u64, // Real current epoch for comparison + processing_end_epoch, // Process up to this epoch + )?; + tracing::info!( + "Delegation powers computed in {:.2}s", + delegation_start.elapsed().as_secs_f64() + ); + + // Store delegation powers by epoch + tracing::info!("Storing delegation powers for {} epochs...", epoch_delegation_powers.len()); + let delegation_db_start = std::time::Instant::now(); + + for epoch_data in &epoch_delegation_powers { + // Prepare vote delegation powers + let vote_powers: Vec<VoteDelegationPowerByEpoch> = epoch_data + .powers + .iter() + .filter(|(_, powers)| powers.vote_power > U256::ZERO) + .map(|(address, powers)| VoteDelegationPowerByEpoch { + delegate_address: *address, + epoch: epoch_data.epoch, + vote_power: powers.vote_power, + delegator_count: powers.vote_delegators.len() as u64, + delegators: powers.vote_delegators.clone(), + }) + .collect(); + + // Prepare reward delegation powers + let reward_powers: Vec<RewardDelegationPowerByEpoch> = epoch_data + .powers + .iter() + .filter(|(_, powers)| powers.reward_power > U256::ZERO) + .map(|(address, powers)| RewardDelegationPowerByEpoch { + delegate_address: *address, + epoch: epoch_data.epoch, + reward_power: powers.reward_power, + delegator_count: powers.reward_delegators.len() as u64, + delegators: powers.reward_delegators.clone(), + }) + .collect(); + + // Store both vote and reward powers + if !vote_powers.is_empty() { + self.db + .upsert_vote_delegation_powers_by_epoch(epoch_data.epoch, vote_powers) + .await?; + } + if !reward_powers.is_empty() {
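+ // Mirrors the vote-power branch above: reward powers are only persisted when the epoch has at least one non-zero entry.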
+ self.db + .upsert_reward_delegation_powers_by_epoch(epoch_data.epoch, reward_powers) + .await?; + } + + tracing::debug!("Updated delegation powers for epoch {}", epoch_data.epoch); + } + tracing::info!( + "Delegation powers stored in {:.2}s", + delegation_db_start.elapsed().as_secs_f64() + ); + + // Compute and store delegation aggregates (latest epoch is the current state) + if let Some(latest) = epoch_delegation_powers.last() { + let mut vote_epochs_per_address: HashMap<Address, u64> = HashMap::new(); + let mut reward_epochs_per_address: HashMap<Address, u64> = HashMap::new(); + + // Count epochs participated for each address + for epoch_data in &epoch_delegation_powers { + for (address, powers) in &epoch_data.powers { + if powers.vote_power > U256::ZERO { + *vote_epochs_per_address.entry(*address).or_insert(0) += 1; + } + if powers.reward_power > U256::ZERO { + *reward_epochs_per_address.entry(*address).or_insert(0) += 1; + } + } + } + + // Create vote aggregates + let vote_aggregates: Vec<VoteDelegationPowerAggregate> = latest + .powers + .iter() + .filter(|(_, powers)| powers.vote_power > U256::ZERO) + .map(|(address, powers)| VoteDelegationPowerAggregate { + delegate_address: *address, + total_vote_power: powers.vote_power, + delegator_count: powers.vote_delegators.len() as u64, + delegators: powers.vote_delegators.clone(), + epochs_participated: vote_epochs_per_address.get(address).copied().unwrap_or(0), + }) + .collect(); + + // Create reward aggregates + let reward_aggregates: Vec<RewardDelegationPowerAggregate> = latest + .powers + .iter() + .filter(|(_, powers)| powers.reward_power > U256::ZERO) + .map(|(address, powers)| RewardDelegationPowerAggregate { + delegate_address: *address, + total_reward_power: powers.reward_power, + delegator_count: powers.reward_delegators.len() as u64, + delegators: powers.reward_delegators.clone(), + epochs_participated: reward_epochs_per_address + .get(address) + .copied() + .unwrap_or(0), + }) + .collect(); + + // Store aggregates + if !vote_aggregates.is_empty() { + self.db.upsert_vote_delegation_powers_aggregate(vote_aggregates.clone()).await?; + tracing::info!( + "Updated aggregate vote delegation powers for {} delegates", + vote_aggregates.len() + ); + } + if !reward_aggregates.is_empty() { + self.db + .upsert_reward_delegation_powers_aggregate(reward_aggregates.clone()) + .await?; + tracing::info!( + "Updated aggregate reward delegation powers for {} delegates", + reward_aggregates.len() + ); + } + } + + Ok(()) + } +} diff --git a/crates/indexer/src/test_utils.rs b/crates/indexer/src/test_utils.rs index 1d7cacbb7..f2e77ad7b 100644 --- a/crates/indexer/src/test_utils.rs +++ b/crates/indexer/src/test_utils.rs @@ -14,7 +14,7 @@ use std::sync::Arc; -use crate::{AnyDb, DbError, DbObj}; +use crate::db::{AnyDb, DbError, DbObj}; use sqlx::any::install_default_drivers; use sqlx::AnyPool; use tempfile::NamedTempFile; diff --git a/crates/indexer/tests/market.rs b/crates/indexer/tests/market.rs new file mode 100644 index 000000000..3b1b9d29c --- /dev/null +++ b/crates/indexer/tests/market.rs @@ -0,0 +1,19 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Integration tests for market indexer + +// Test modules +#[path = "market/basic.rs"] +mod basic; diff --git a/crates/indexer/tests/basic.rs b/crates/indexer/tests/market/basic.rs similarity index 95% rename from crates/indexer/tests/basic.rs rename to crates/indexer/tests/market/basic.rs index 458e017c0..b3a959e48 100644 --- a/crates/indexer/tests/basic.rs +++ b/crates/indexer/tests/market/basic.rs @@ -14,6 +14,8 @@ use std::{process::Command, time::Duration}; +use assert_cmd::Command as AssertCommand; + use alloy::{ node_bindings::Anvil, primitives::{Address, Bytes, U256}, @@ -70,7 +72,10 @@ async fn test_e2e() { let rpc_url = anvil.endpoint_url(); let ctx = create_test_ctx(&anvil).await.unwrap(); - let exe_path = env!("CARGO_BIN_EXE_boundless-indexer"); + // Use assert_cmd to find the binary path + let cmd = AssertCommand::cargo_bin("market-indexer") + .expect("market-indexer binary not found. Run `cargo build --bin market-indexer` first."); + let exe_path = cmd.get_program().to_string_lossy().to_string(); let args = [ "--rpc-url", rpc_url.as_str(), @@ -205,7 +210,10 @@ async fn test_monitoring() { let rpc_url = anvil.endpoint_url(); let ctx = create_test_ctx(&anvil).await.unwrap(); - let exe_path = env!("CARGO_BIN_EXE_boundless-indexer"); + // Use assert_cmd to find the binary path + let cmd = AssertCommand::cargo_bin("market-indexer") + .expect("market-indexer binary not found. Run `cargo build --bin market-indexer` first."); + let exe_path = cmd.get_program().to_string_lossy().to_string(); let args = [ "--rpc-url", rpc_url.as_str(), diff --git a/crates/indexer/tests/rewards.rs b/crates/indexer/tests/rewards.rs new file mode 100644 index 000000000..807c302b6 --- /dev/null +++ b/crates/indexer/tests/rewards.rs @@ -0,0 +1,29 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Integration tests for rewards indexer + +// Common test utilities +#[path = "rewards/common/mod.rs"] +mod common; + +// Test modules +#[path = "rewards/delegations_integration.rs"] +mod delegations_integration; + +#[path = "rewards/povw_integration.rs"] +mod povw_integration; + +#[path = "rewards/staking_integration.rs"] +mod staking_integration; diff --git a/crates/indexer/tests/rewards/common/mod.rs b/crates/indexer/tests/rewards/common/mod.rs new file mode 100644 index 000000000..5980f45f4 --- /dev/null +++ b/crates/indexer/tests/rewards/common/mod.rs @@ -0,0 +1,96 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +use std::{env, sync::Arc, time::Duration}; + +use boundless_indexer::{ + db::rewards::{RewardsDb, RewardsIndexerDb}, + rewards::{RewardsIndexerService, RewardsIndexerServiceConfig}, +}; +use tempfile::NamedTempFile; +use tokio::sync::OnceCell; +use tracing_subscriber::EnvFilter; +use url::Url; + +// Contract addresses for mainnet +const VEZKC_ADDRESS: &str = "0xE8Ae8eE8ffa57F6a79B6Cbe06BAFc0b05F3ffbf4"; +const ZKC_ADDRESS: &str = "0x000006c2A22ff4A44ff1f5d0F2ed65F781F55555"; +const POVW_ACCOUNTING_ADDRESS: &str = "0x319bd4050b2170a7aE3Ead3E6d5AB8a5c7cFBDF8"; + +// Test limits for faster execution +const END_EPOCH: u64 = 4; +const END_BLOCK: u64 = 23395398; + +// Store both the database and temp file to keep the file alive +// The RewardsDb type implements Send + Sync, unlike the trait object +struct TestDbState { + db: Arc<RewardsDb>, + _temp_file: NamedTempFile, // Kept alive as long as TestDbState exists +} + +// Static storage for the shared test database. Ensures each test doesn't need to re-index from chain. +static TEST_DB: OnceCell<TestDbState> = OnceCell::const_new(); + +/// Get the shared test database, initializing it on first access +pub async fn setup_test_db() -> Arc<dyn RewardsIndexerDb> { + let state = TEST_DB.get_or_init(|| async { initialize_test_db().await }).await; + + // Return the database as a trait object + state.db.clone() as Arc<dyn RewardsIndexerDb> +} + +async fn initialize_test_db() -> TestDbState { + let _ = tracing_subscriber::fmt().with_env_filter(EnvFilter::from_default_env()).try_init(); + + // Get RPC URL from environment + let rpc_url = env::var("ETH_MAINNET_RPC_URL") + .expect("ETH_MAINNET_RPC_URL environment variable must be set"); + + // Create temporary database file + let temp_file = NamedTempFile::new().expect("Failed to create temp file"); + let db_path = temp_file.path().to_str().expect("Invalid temp path"); + let db_url = format!("sqlite:{}", db_path); + + tracing::info!("Creating test database at: {}", db_path); + + // Create database connection + let db = Arc::new(RewardsDb::new(&db_url).await.expect("Failed to create database")); + + // Configure indexer + let config = RewardsIndexerServiceConfig { + interval: Duration::from_secs(600), + retries: 3, + start_block: None, + end_block: Some(END_BLOCK), + end_epoch: Some(END_EPOCH), + epochs_to_process: Some(10), + }; + + let mut service = RewardsIndexerService::new( + Url::parse(&rpc_url).expect("Invalid RPC URL"), + VEZKC_ADDRESS.parse().expect("Invalid veZKC address"), + ZKC_ADDRESS.parse().expect("Invalid ZKC address"), + POVW_ACCOUNTING_ADDRESS.parse().expect("Invalid PoVW address"), + &db_url, + config, + ) + .await + .expect("Failed to create indexer service"); + + tracing::info!("Running indexer up to epoch {} (block {})", END_EPOCH, END_BLOCK); + service.run().await.expect("Failed to run indexer"); + tracing::info!("Indexer completed successfully"); + + TestDbState { db, _temp_file: temp_file } +} diff --git a/crates/indexer/tests/rewards/delegations_integration.rs b/crates/indexer/tests/rewards/delegations_integration.rs new file mode 100644 index 000000000..f5907bc65 --- /dev/null +++ b/crates/indexer/tests/rewards/delegations_integration.rs @@ -0,0 +1,132 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::common; + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_vote_delegations_by_epoch() { + let db = common::setup_test_db().await; + + // Test vote delegations for epoch 3 + let delegations = db + .get_vote_delegation_powers_by_epoch(3, 0, 5) + .await + .expect("Failed to get vote delegations for epoch 3"); + + // We limit to 5 but there are many more delegations + assert_eq!(delegations.len(), 5); + + // Check first delegation (staker delegates to themselves) + let first = &delegations[0]; + assert_eq!( + format!("{:#x}", first.delegate_address), + "0x2408e37489c231f883126c87e8aadbad782a040a" + ); + assert_eq!(first.vote_power.to_string(), "726927981342423248000000"); + assert_eq!(first.delegator_count, 0); // Self-delegation not counted + + // Check second delegation + let second = &delegations[1]; + assert_eq!( + format!("{:#x}", second.delegate_address), + "0x7cc3376b8d38b2c923cd9d5164f9d74e303482b2" + ); + assert_eq!(second.vote_power.to_string(), "603060340000000000000000"); + assert_eq!(second.delegator_count, 0); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_reward_delegations_by_epoch() { + let db = common::setup_test_db().await; + + // Test reward delegations for epoch 3 + let delegations = db + .get_reward_delegation_powers_by_epoch(3, 0, 5) + .await + .expect("Failed to get reward delegations for epoch 3"); + + // We limit to 5 but there are many delegations + assert_eq!(delegations.len(), 5); + + // Check first delegation (receives delegated rewards) + let first = &delegations[0]; + assert_eq!( + format!("{:#x}", first.delegate_address), + "0x0164ec96442196a02931f57e7e20fa59cff43845" + ); + assert_eq!(first.reward_power.to_string(), "726927981342423248000000"); + assert_eq!(first.delegator_count, 1); // Has one delegator + + // Check that the delegator is the expected address + assert_eq!(first.delegators.len(), 1); + assert_eq!(format!("{:#x}", first.delegators[0]), "0x2408e37489c231f883126c87e8aadbad782a040a"); + + // Check second (self-delegation) + let second = &delegations[1]; + assert_eq!( + format!("{:#x}", second.delegate_address), + "0x7cc3376b8d38b2c923cd9d5164f9d74e303482b2" + ); + assert_eq!(second.reward_power.to_string(), "603060340000000000000000"); + assert_eq!(second.delegator_count, 0); // Self-delegation +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_vote_delegation_aggregates() { + let db = common::setup_test_db().await; + + let aggregates = db + .get_vote_delegation_powers_aggregate(0, 5) + .await + .expect("Failed to get vote delegation aggregates"); + + // We limit to 5 but there are many more delegations + assert_eq!(aggregates.len(), 5); + + // Check aggregate vote power + let first = &aggregates[0]; + assert_eq!( + format!("{:#x}", first.delegate_address), + "0x2408e37489c231f883126c87e8aadbad782a040a" + ); + assert_eq!(first.total_vote_power.to_string(), "726927981342423248000000"); + assert_eq!(first.epochs_participated, 3); // Participated in epochs 2, 3, 4 +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] 
+async fn test_reward_delegation_aggregates() { + let db = common::setup_test_db().await; + + let aggregates = db + .get_reward_delegation_powers_aggregate(0, 5) + .await + .expect("Failed to get reward delegation aggregates"); + + // We limit to 5 but there are many delegations + assert_eq!(aggregates.len(), 5); + + // Check aggregate reward power for the delegate + let first = &aggregates[0]; + assert_eq!( + format!("{:#x}", first.delegate_address), + "0x0164ec96442196a02931f57e7e20fa59cff43845" + ); + assert_eq!(first.total_reward_power.to_string(), "726927981342423248000000"); + assert_eq!(first.delegator_count, 1); + assert_eq!(first.epochs_participated, 3); // Delegated in epochs 2, 3, 4 +} diff --git a/crates/indexer/tests/rewards/povw_integration.rs b/crates/indexer/tests/rewards/povw_integration.rs new file mode 100644 index 000000000..c74906978 --- /dev/null +++ b/crates/indexer/tests/rewards/povw_integration.rs @@ -0,0 +1,146 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use super::common; + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_povw_summary_stats() { + let db = common::setup_test_db().await; + + let stats = db + .get_povw_summary_stats() + .await + .expect("Failed to get PoVW summary stats") + .expect("No PoVW summary stats found"); + + // Check specific values matching the actual indexed data + assert_eq!(stats.total_epochs_with_work, 3); + assert_eq!(stats.total_unique_work_log_ids, 26); + assert_eq!(stats.total_work_all_time.to_string(), "24999835418624"); + assert_eq!(stats.total_emissions_all_time.to_string(), "1395361974850288500000000"); + assert_eq!(stats.total_capped_rewards_all_time.to_string(), "54999464530233482198753"); + assert_eq!(stats.total_uncapped_rewards_all_time.to_string(), "837217107775305749999989"); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_epoch_povw_summary() { + let db = common::setup_test_db().await; + + // Test epoch 3 summary + let epoch3 = db + .get_epoch_povw_summary(3) + .await + .expect("Failed to get epoch 3 PoVW summary") + .expect("No epoch 3 PoVW summary found"); + + assert_eq!(epoch3.epoch, 3); + assert_eq!(epoch3.total_work.to_string(), "22364014854144"); + assert_eq!(epoch3.num_participants, 21); + assert_eq!(epoch3.total_capped_rewards.to_string(), "40087246525823817857153"); + + // Test epoch 4 summary + let epoch4 = db + .get_epoch_povw_summary(4) + .await + .expect("Failed to get epoch 4 PoVW summary") + .expect("No epoch 4 PoVW summary found"); + + assert_eq!(epoch4.epoch, 4); + assert_eq!(epoch4.total_work.to_string(), "0"); // Epoch 4 has participants but no work yet + assert_eq!(epoch4.num_participants, 10); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_povw_rewards_by_epoch() { + let db = common::setup_test_db().await; + + // Test top rewards for epoch 3 + let rewards = db + .get_povw_rewards_by_epoch(3, 0, 3) + .await + .expect("Failed to get PoVW rewards for epoch 3"); 
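+ // Note: the assertions below assume the query returns entries sorted by actual rewards, highest first, so index 0 is the epoch's top earner.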
+ + assert!(rewards.len() >= 3); + + // Check top earner + let top = &rewards[0]; + assert_eq!(format!("{:#x}", top.work_log_id), "0x94072d2282cb2c718d23d5779a5f8484e2530f2a"); + assert_eq!(top.work_submitted.to_string(), "14928086204416"); + assert_eq!(top.actual_rewards.to_string(), "20000000000000000000000"); // 20000 ZKC + assert!(top.is_capped); + + // Check second earner + let second = &rewards[1]; + assert_eq!(format!("{:#x}", second.work_log_id), "0x0ab71eb0727536b179b2d009316b201b43a049fa"); + assert_eq!(second.work_submitted.to_string(), "1798892077056"); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_povw_rewards_aggregate() { + let db = common::setup_test_db().await; + + // Test aggregate rewards for top performers + let aggregates = + db.get_povw_rewards_aggregate(0, 5).await.expect("Failed to get PoVW rewards aggregate"); + + assert!(aggregates.len() >= 5); + + // Check top aggregate earner + let top = &aggregates[0]; + assert_eq!(format!("{:#x}", top.work_log_id), "0x94072d2282cb2c718d23d5779a5f8484e2530f2a"); + assert_eq!(top.total_work_submitted.to_string(), "18245963022336"); + assert_eq!(top.epochs_participated, 3); + + // Check that actual_rewards <= uncapped_rewards (capping applied) + assert!(top.total_actual_rewards <= top.total_uncapped_rewards); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_all_epoch_povw_summaries() { + let db = common::setup_test_db().await; + + let summaries = db + .get_all_epoch_povw_summaries(0, 10) + .await + .expect("Failed to get all epoch PoVW summaries"); + + // Should have epochs 0-4 (5 total) + assert_eq!(summaries.len(), 5); + + // Note: epochs are returned in reverse order (4, 3, 2, 1, 0) + // Just verify we have all expected epochs + let all_epochs: Vec<u64> = summaries.iter().map(|s| s.epoch).collect(); + assert!(all_epochs.contains(&0)); + assert!(all_epochs.contains(&1)); + assert!(all_epochs.contains(&2)); + assert!(all_epochs.contains(&3)); + assert!(all_epochs.contains(&4)); + + // Check that epochs 1, 2, 3 have actual work (4 has participants but no work yet) + let epochs_with_work: Vec<u64> = summaries + .iter() + .filter(|s| s.total_work > alloy::primitives::U256::from(0)) + .map(|s| s.epoch) + .collect(); + + // Epochs 1, 2, 3 should have actual work submitted + assert!(epochs_with_work.contains(&1)); + assert!(epochs_with_work.contains(&2)); + assert!(epochs_with_work.contains(&3)); +} diff --git a/crates/indexer/tests/rewards/staking_integration.rs b/crates/indexer/tests/rewards/staking_integration.rs new file mode 100644 index 000000000..29bedfbdb --- /dev/null +++ b/crates/indexer/tests/rewards/staking_integration.rs @@ -0,0 +1,149 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
+ +use super::common; + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_staking_summary_stats() { + let db = common::setup_test_db().await; + + let stats = db + .get_staking_summary_stats() + .await + .expect("Failed to get staking summary stats") + .expect("No staking summary stats found"); + + // Check specific values matching the actual indexed data + // Total reflects ALL stakers in the system (not just top 2) + assert_eq!(stats.current_total_staked.to_string(), "4330465936598121426217840"); + assert_eq!(stats.total_unique_stakers, 343); + assert_eq!(stats.current_active_stakers, 343); + assert_eq!(stats.current_withdrawing, 11); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_epoch_staking_summary() { + let db = common::setup_test_db().await; + + // Test epoch 3 summary + let epoch3 = db + .get_epoch_staking_summary(3) + .await + .expect("Failed to get epoch 3 staking summary") + .expect("No epoch 3 staking summary found"); + + assert_eq!(epoch3.epoch, 3); + assert_eq!(epoch3.num_stakers, 311); + assert_eq!(epoch3.total_staked.to_string(), "3685477115558191540906493"); + assert_eq!(epoch3.num_withdrawing, 7); + + // Test epoch 4 summary + let epoch4 = db + .get_epoch_staking_summary(4) + .await + .expect("Failed to get epoch 4 staking summary") + .expect("No epoch 4 staking summary found"); + + assert_eq!(epoch4.epoch, 4); + assert!(epoch4.num_stakers >= 340); // Should have 343 stakers by epoch 4 + assert!(epoch4.total_staked.to_string().starts_with("43")); // ~4.3M ZKC total +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_staking_positions_by_epoch() { + let db = common::setup_test_db().await; + + // Test staking positions for epoch 3 + let positions = db + .get_staking_positions_by_epoch(3, 0, 5) + .await + .expect("Failed to get staking positions for epoch 3"); + + assert!(positions.len() >= 2); // Should have at least 2 stakers + + // Check top staker + let top = &positions[0]; + assert_eq!(format!("{:#x}", top.staker_address), "0x2408e37489c231f883126c87e8aadbad782a040a"); + assert_eq!(top.staked_amount.to_string(), "726927981342423248000000"); + assert!(!top.is_withdrawing); + + // Check second staker + let second = &positions[1]; + assert_eq!( + format!("{:#x}", second.staker_address), + "0x7cc3376b8d38b2c923cd9d5164f9d74e303482b2" + ); + assert_eq!(second.staked_amount.to_string(), "603060340000000000000000"); + assert!(!second.is_withdrawing); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_staking_positions_aggregate() { + let db = common::setup_test_db().await; + + let aggregates = db + .get_staking_positions_aggregate(0, 5) + .await + .expect("Failed to get staking positions aggregate"); + + assert!(aggregates.len() >= 2); // Returns top 5, but we have many stakers + + // Check top aggregate staker + let top = &aggregates[0]; + assert_eq!(format!("{:#x}", top.staker_address), "0x2408e37489c231f883126c87e8aadbad782a040a"); + assert_eq!(top.total_staked.to_string(), "726927981342423248000000"); + assert_eq!(top.epochs_participated, 3); + assert!(!top.is_withdrawing); + + // Check rewards delegation + assert_eq!( + top.rewards_delegated_to.map(|addr| format!("{:#x}", addr)), + Some("0x0164ec96442196a02931f57e7e20fa59cff43845".to_string()) + ); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_all_epoch_staking_summaries() { + let db = common::setup_test_db().await; + + let summaries = db + .get_all_epoch_staking_summaries(0, 
10) + .await + .expect("Failed to get all epoch staking summaries"); + + // Should have epochs 0-4 (5 total) + assert_eq!(summaries.len(), 5); + + // Verify we have all expected epochs (may be returned in any order) + let all_epochs: Vec<u64> = summaries.iter().map(|s| s.epoch).collect(); + assert!(all_epochs.contains(&0)); + assert!(all_epochs.contains(&1)); + assert!(all_epochs.contains(&2)); + assert!(all_epochs.contains(&3)); + assert!(all_epochs.contains(&4)); + + // Check that epochs 2, 3, 4 have stakers (and possibly more) + let epochs_with_stakers: Vec<u64> = + summaries.iter().filter(|s| s.num_stakers > 0).map(|s| s.epoch).collect(); + + // At least epochs 2, 3, 4 should have stakers + assert!(epochs_with_stakers.contains(&2)); + assert!(epochs_with_stakers.contains(&3)); + assert!(epochs_with_stakers.contains(&4)); +} diff --git a/crates/lambdas/indexer-api/Cargo.toml b/crates/lambdas/indexer-api/Cargo.toml new file mode 100644 index 000000000..3917df0e3 --- /dev/null +++ b/crates/lambdas/indexer-api/Cargo.toml @@ -0,0 +1,76 @@ +[package] +name = "indexer-api" +resolver = "2" +version = { workspace = true } +edition = { workspace = true } +homepage = { workspace = true } +repository = { workspace = true } +publish = false + +[[bin]] +name = "local-server" +path = "src/bin/local-server.rs" + +[package.metadata.release] +release = false + +[dependencies] +# Core dependencies +alloy = { workspace = true, default-features = false } +anyhow = { workspace = true } +aws-config = { version = "1.6", features = ["behavior-version-latest"] } + +# Web framework +axum = { version = "0.7", features = ["macros"] } + +# Internal dependencies +boundless-indexer = { path = "../../indexer" } +lambda_http = "0.13.0" + +# Lambda runtime +lambda_runtime = "0.13.0" + +# Fix cross-compilation for Lambda +openssl = { version = "0.10", features = ["vendored"] } + +# Serialization
serde = { workspace = true, features = ["derive"] } +serde_json = { workspace = true } +serde_yaml = "0.9" + +# Database +sqlx = { workspace = true, features = [ "any", "postgres", "sqlite", "runtime-tokio", "json", "migrate", "macros", "runtime-tokio-rustls"] } + +# Async runtime +tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } +tower = "0.5" +tower-http = { version = "0.6", features = ["cors"] } + +# Logging +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = [ + "fmt", + "env-filter", +] } + +# OpenAPI generation +utoipa = { version = "5", features = ["axum_extras", "chrono"] } +utoipa-axum = "0.1" +utoipa-swagger-ui = { version = "8", features = ["axum"] } + +[dev-dependencies] +# Command assertion for integration tests +assert_cmd = "2.0" +# Random number generation for tests +rand = "0.8" +# HTTP client for integration tests +reqwest = { version = "0.12", features = ["json"] } +# Temporary file handling for tests +tempfile = "3.8" +# Test logging
test-log = { version = "0.2", default-features = false, features = ["trace"] } +# Async runtime for tests +tokio = { workspace = true, features = ["test-util", "process"] } +# Logging for tests +tracing = { workspace = true } +tracing-subscriber = { workspace = true, features = ["fmt", "env-filter"] } diff --git a/crates/lambdas/indexer-api/README.md b/crates/lambdas/indexer-api/README.md new file mode 100644 index 000000000..4bb5a38b2 --- /dev/null +++ b/crates/lambdas/indexer-api/README.md @@ -0,0 +1,104 @@ +# Indexer API Lambda + +AWS Lambda function providing REST API access to Boundless protocol staking, delegation, and PoVW
rewards data. + +## Environment Variables + +- `DB_URL` (required) - PostgreSQL connection string to the indexer database (or SQLite for local testing) +- `RUST_LOG` (optional) - Tracing log level (default: info) + +Additional environment variables for local testing: + +- `ETH_RPC_URL` - **Required for indexer**: Ethereum RPC endpoint URL +- `VEZKC_ADDRESS` - Optional: veZKC contract address (defaults to mainnet address) +- `ZKC_ADDRESS` - Optional: ZKC token address (defaults to mainnet address) +- `POVW_ACCOUNTING_ADDRESS` - Optional: PoVW accounting address (defaults to mainnet address) + +## API Documentation + +The complete API documentation is available through the OpenAPI specification: + +- **`GET /docs`** - Interactive Swagger UI documentation. Open this in a browser to explore and test all API endpoints. +- **`GET /openapi.yaml`** - OpenAPI 3.0 specification (YAML format) +- **`GET /openapi.json`** - OpenAPI 3.0 specification (JSON format) + +For detailed request/response schemas, query parameters, and data models, refer to the OpenAPI specification through Swagger UI (`/docs`) or directly via `/openapi.yaml`. + +## Running the API Locally + +### Setup + +Export `ETH_RPC_URL`, pointing at an archive node endpoint that supports querying events. + +### Running the Services Locally + +Use the `manage_local` CLI tool to run the indexer and API: + +```bash +./manage_local --help +``` + +#### Example workflow + +```bash +# 1. Create and populate a test database (runs for 30 seconds) +./manage_local run-indexer my_test.db 30 + +# 2. Start the API server on port 3000 +./manage_local run-api 3000 my_test.db +``` + +Once the API server is running, you can test it: + +```bash +# Health check +curl http://localhost:3000/health + +# Get PoVW aggregate data +curl http://localhost:3000/v1/povw +``` + +You can also access the Swagger UI at http://localhost:3000/docs + +### Database + +The SQLite database file will be created in the current directory. You can inspect it with any SQLite client: + +```bash +sqlite3 my_test.db +.tables # Show all tables +.schema povw_rewards # Show schema for a table +SELECT * FROM povw_summary_stats; # Query data +``` + +## Deployment + +This Lambda function is designed to be deployed with AWS Lambda and API Gateway. +Build for Lambda deployment using cargo-lambda or similar tools. See `infra/indexer` for how we deploy it. + +## Testing + +Tests are ignored by default because they require an Ethereum RPC URL to be set and fetch real data from mainnet. + +### Running the Tests + +Each test module: + +1. Spawns a rewards-indexer (see `crates/indexer`) process to populate a temporary SQLite database +2. Starts the API server on a random port +3. Makes HTTP requests to test various endpoints +4.
Cleans up processes and temporary files after completion + +```bash +# Set your RPC URL (or add to .env file) +export ETH_RPC_URL="https://eth-mainnet.g.alchemy.com/v2/YOUR_API_KEY" + +# Run all integration tests (tests are ignored by default since they require RPC) +cargo test --test local_integration -- --ignored + +# Run specific test modules +cargo test --test local_integration povw_tests -- --ignored +cargo test --test local_integration staking_tests -- --ignored +cargo test --test local_integration delegations_tests -- --ignored +cargo test --test local_integration docs_tests -- --ignored +``` diff --git a/crates/lambdas/indexer-api/local_test.db b/crates/lambdas/indexer-api/local_test.db new file mode 100644 index 000000000..1a5541583 Binary files /dev/null and b/crates/lambdas/indexer-api/local_test.db differ diff --git a/crates/lambdas/indexer-api/manage_local b/crates/lambdas/indexer-api/manage_local new file mode 100755 index 000000000..64a5118b9 --- /dev/null +++ b/crates/lambdas/indexer-api/manage_local @@ -0,0 +1,225 @@ +#!/bin/bash + +# Script to manage local indexer and API for testing +set -e + +# Colors for output +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +RED='\033[0;31m' +NC='\033[0m' # No Color + +# Load .env file if it exists +if [ -f .env ]; then + echo "Loading environment variables from .env file..." + set -a + source .env + set +a +fi + +# Ethereum mainnet configuration +VEZKC_ADDRESS="${VEZKC_ADDRESS:-0xE8Ae8eE8ffa57F6a79B6Cbe06BAFc0b05F3ffbf4}" +ZKC_ADDRESS="${ZKC_ADDRESS:-0x000006c2A22ff4A44ff1f5d0F2ed65F781F55555}" +POVW_ACCOUNTING_ADDRESS="${POVW_ACCOUNTING_ADDRESS:-0x319bd4050b2170a7aE3Ead3E6d5AB8a5c7cFBDF8}" + +# Function to display usage +usage() { + echo "Usage: $0 <command> [args]" + echo "" + echo "Commands:" + echo " run-indexer <db_file> [duration] [end_epoch] [end_block]" + echo " Run rewards-indexer to populate SQLite database" + echo " db_file: path to the SQLite database file" + echo " duration: seconds to run (default: 120)" + echo " end_epoch: stop at this epoch (optional)" + echo " end_block: stop at this block (optional)" + echo "" + echo " run-api <port> <db_file> Run API server with debug logging" + echo "" + echo "Environment variables:" + echo " ETH_RPC_URL Required: Ethereum RPC endpoint" + echo " VEZKC_ADDRESS Optional: veZKC contract address" + echo " ZKC_ADDRESS Optional: ZKC token address" + echo " POVW_ACCOUNTING_ADDRESS Optional: PoVW accounting address" + exit 1 +} + +# Function to check ETH_RPC_URL +check_rpc_url() { + if [ -z "$ETH_RPC_URL" ]; then + echo -e "${RED}Error: ETH_RPC_URL environment variable is not set${NC}" + echo "Please set it to your Ethereum RPC endpoint" + exit 1 + fi +} + +# Function to build binaries +build_binaries() { + echo -e "${GREEN}Building required binaries...${NC}" + cd ../../.. || exit 1 + + if [ "$1" == "indexer" ] || [ "$1" == "both" ]; then + echo "Building rewards-indexer..." + cargo build -p boundless-indexer --bin rewards-indexer + fi + + if [ "$1" == "api" ] || [ "$1" == "both" ]; then + echo "Building local-server..." + cargo build -p indexer-api --bin local-server + fi + + cd - > /dev/null || exit 1 +} + +# Function to run indexer +run_indexer() { + local db_file="$1" + local duration="${2:-120}" + local end_epoch="$3" + local end_block="$4" + + check_rpc_url + + # Validate arguments + if [ -z "$db_file" ]; then + echo -e "${RED}Error: Database file path required${NC}" + usage + fi + + # Convert to absolute path if relative + if [[ !
"$db_file" = /* ]]; then + db_file="$(pwd)/$db_file" + fi + + # Clean up existing database + if [ -f "$db_file" ]; then + echo -e "${YELLOW}Removing existing database: $db_file${NC}" + rm "$db_file" + fi + + # Create empty database file for SQLite + echo -e "${GREEN}Creating SQLite database: $db_file${NC}" + touch "$db_file" + + # Build the indexer + build_binaries "indexer" + + echo -e "${GREEN}Running rewards-indexer for $duration seconds...${NC}" + echo "Database: sqlite:$db_file" + echo "RPC URL: $ETH_RPC_URL" + if [ ! -z "$end_epoch" ]; then + echo "End epoch: $end_epoch" + fi + if [ ! -z "$end_block" ]; then + echo "End block: $end_block" + fi + echo "" + + # Build command with optional parameters + local cmd="../../../target/debug/rewards-indexer" + cmd="$cmd --rpc-url \"$ETH_RPC_URL\"" + cmd="$cmd --vezkc-address \"$VEZKC_ADDRESS\"" + cmd="$cmd --zkc-address \"$ZKC_ADDRESS\"" + cmd="$cmd --povw-accounting-address \"$POVW_ACCOUNTING_ADDRESS\"" + cmd="$cmd --db \"sqlite:$db_file\"" + cmd="$cmd --interval 600" + + if [ ! -z "$end_epoch" ]; then + cmd="$cmd --end-epoch $end_epoch" + fi + if [ ! -z "$end_block" ]; then + cmd="$cmd --end-block $end_block" + fi + + # Run the indexer in background + DATABASE_URL="sqlite:$db_file" \ + VEZKC_ADDRESS="$VEZKC_ADDRESS" \ + ZKC_ADDRESS="$ZKC_ADDRESS" \ + POVW_ACCOUNTING_ADDRESS="$POVW_ACCOUNTING_ADDRESS" \ + RUST_LOG=info \ + eval "$cmd &" + + INDEXER_PID=$! + + # Show progress + echo -n "Populating database" + for i in $(seq 1 "$duration"); do + if ! kill -0 $INDEXER_PID 2>/dev/null; then + echo "" + echo -e "${RED}Indexer stopped unexpectedly${NC}" + exit 1 + fi + sleep 1 + echo -n "." + done + echo "" + + # Stop the indexer + if kill -0 $INDEXER_PID 2>/dev/null; then + echo "Stopping indexer..." + kill $INDEXER_PID 2>/dev/null || true + wait $INDEXER_PID 2>/dev/null || true + fi + + echo -e "${GREEN}Database populated successfully!${NC}" + echo "Database location: $db_file" +} + +# Function to run API +run_api() { + local port="$1" + local db_file="$2" + + # Validate arguments + if [ -z "$port" ] || [ -z "$db_file" ]; then + echo -e "${RED}Error: Port and database file required${NC}" + usage + fi + + # Convert to absolute path if relative + if [[ ! "$db_file" = /* ]]; then + db_file="$(pwd)/$db_file" + fi + + # Check if database exists + if [ ! -f "$db_file" ]; then + echo -e "${RED}Error: Database file not found: $db_file${NC}" + echo "Run 'manage_local run-indexer' first to create and populate the database" + exit 1 + fi + + # Build the API server + build_binaries "api" + + echo -e "${GREEN}Starting API server...${NC}" + echo "Port: $port" + echo "Database: sqlite:$db_file" + echo "" + echo "http://localhost:$port/docs" + echo "" + + DB_URL="sqlite:$db_file" \ + PORT="$port" \ + RUST_LOG=debug \ + ../../../target/debug/local-server +} + +# Main script logic +if [ $# -lt 1 ]; then + usage +fi + +COMMAND="$1" +shift + +case "$COMMAND" in + run-indexer) + run_indexer "$@" + ;; + run-api) + run_api "$@" + ;; + *) + echo -e "${RED}Unknown command: $COMMAND${NC}" + usage + ;; +esac \ No newline at end of file diff --git a/crates/lambdas/indexer-api/src/bin/local-server.rs b/crates/lambdas/indexer-api/src/bin/local-server.rs new file mode 100644 index 000000000..2bce8b03a --- /dev/null +++ b/crates/lambdas/indexer-api/src/bin/local-server.rs @@ -0,0 +1,61 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::{Context, Result}; +use std::{env, net::SocketAddr, sync::Arc}; +use tracing_subscriber::{fmt, prelude::*, EnvFilter}; + +use indexer_api::db::AppState; +use indexer_api::handler::create_app; + +#[tokio::main] +async fn main() -> Result<()> { + // Initialize tracing + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"))) + .init(); + + // Load configuration from environment or use defaults + let db_url = env::var("DB_URL") + .or_else(|_| env::var("DATABASE_URL")) + .unwrap_or_else(|_| "sqlite:local_indexer.db".to_string()); + + let port = env::var("PORT").ok().and_then(|p| p.parse::<u16>().ok()).unwrap_or(3000); + + tracing::info!("Starting local indexer-api server"); + tracing::info!("Database URL: {}", db_url); + tracing::info!("Port: {}", port); + + // Create application state with database connection + let state = AppState::new(&db_url).await.context("Failed to create application state")?; + let shared_state = Arc::new(state); + + // Create the axum application with routes + let app = create_app(shared_state); + + // Create the server address + let addr = SocketAddr::from(([127, 0, 0, 1], port)); + + tracing::info!("Server listening on http://{}", addr); + + // Create the listener + let listener = + tokio::net::TcpListener::bind(addr).await.context("Failed to bind to address")?; + + // Run the server + axum::serve(listener, app).await.context("Server failed")?; + + Ok(()) +} diff --git a/crates/lambdas/indexer-api/src/db.rs b/crates/lambdas/indexer-api/src/db.rs new file mode 100644 index 000000000..1262cd8e3 --- /dev/null +++ b/crates/lambdas/indexer-api/src/db.rs @@ -0,0 +1,37 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License.
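+
+//! Shared application state for the indexer API.
+//!
+//! A minimal usage sketch of the `AppState` defined below (the `sqlite:` URL
+//! is illustrative; both the Lambda entry point and the local server read the
+//! real URL from `DB_URL`):
+//!
+//! ```ignore
+//! let state = AppState::new("sqlite:local_indexer.db").await?;
+//! let shared_state = std::sync::Arc::new(state);
+//! ```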
+ +use anyhow::Result; +use boundless_indexer::db::rewards::{RewardsDb, RewardsDbObj}; +use std::sync::Arc; + +/// Application state containing database connections +pub struct AppState { + pub rewards_db: RewardsDbObj, +} + +impl AppState { + /// Create new application state with database connection + pub async fn new(database_url: &str) -> Result<Self> { + tracing::info!("Connecting to database..."); + + // Create rewards database connection + let rewards_db = RewardsDb::new(database_url).await?; + let rewards_db: RewardsDbObj = Arc::new(rewards_db); + + tracing::info!("Database connection established"); + + Ok(Self { rewards_db }) + } +} diff --git a/crates/lambdas/indexer-api/src/handler.rs b/crates/lambdas/indexer-api/src/handler.rs new file mode 100644 index 000000000..d7c3e4ca8 --- /dev/null +++ b/crates/lambdas/indexer-api/src/handler.rs @@ -0,0 +1,151 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::{Context, Result}; +use axum::{ + http::{header, HeaderValue, StatusCode}, + response::{IntoResponse, Json, Response}, + routing::get, + Router, +}; +use lambda_http::Error; +use serde_json::json; +use std::{env, sync::Arc}; +use tower_http::cors::{Any, CorsLayer}; + +use crate::db::AppState; +use crate::openapi::ApiDoc; +use crate::routes::{delegations, povw, staking}; +use utoipa::OpenApi; +use utoipa_swagger_ui::SwaggerUi; + +/// Creates the Lambda handler with axum router +pub async fn create_handler() -> Result<Router, Error> { + // Load configuration from environment + let db_url = env::var("DB_URL").context("DB_URL environment variable is required")?; + + // Create application state with database connection + let state = AppState::new(&db_url).await?; + let shared_state = Arc::new(state); + + // Create the axum application with routes + Ok(create_app(shared_state)) +} + +/// Creates the axum application with all routes +pub fn create_app(state: Arc<AppState>) -> Router { + // Configure CORS + let cors = CorsLayer::new().allow_origin(Any).allow_methods(Any).allow_headers(Any); + + // Build the router + Router::new() + // Health check endpoint + .route("/health", get(health_check)) + // OpenAPI spec endpoint (YAML format) + .route("/openapi.yaml", get(openapi_yaml)) + // Swagger UI documentation with generated spec (includes /openapi.json automatically) + .merge(SwaggerUi::new("/docs").url("/openapi.json", ApiDoc::openapi())) + // API v1 routes + .nest("/v1", api_v1_routes(state)) + // Add CORS layer + .layer(cors) + // Add fallback for unmatched routes + .fallback(not_found) +} + +/// API v1 routes +fn api_v1_routes(state: Arc<AppState>) -> Router { + Router::new() + // RESTful structure + .nest("/staking", staking::routes()) + .nest("/povw", povw::routes()) + .nest("/delegations", delegations::routes()) + .with_state(state) +} + +/// Health check endpoint +#[utoipa::path( + get, + path = "/health", + tag = "Health", + responses( + (status = 200, description = "Service is healthy", body = serde_json::Value) + ) +)] +async fn health_check() -> impl IntoResponse
{ + Json(json!({ + "status": "healthy", + "service": "indexer-api" + })) +} + +/// OpenAPI specification endpoint (YAML) +async fn openapi_yaml() -> impl IntoResponse { + // Convert the generated JSON spec to YAML + let openapi_json = ApiDoc::openapi(); + match serde_yaml::to_string(&openapi_json) { + Ok(yaml) => Response::builder() + .status(StatusCode::OK) + .header(header::CONTENT_TYPE, "application/x-yaml") + .body(yaml) + .unwrap(), + Err(err) => Response::builder() + .status(StatusCode::INTERNAL_SERVER_ERROR) + .body(format!("Failed to convert to YAML: {}", err)) + .unwrap(), + } +} + +/// 404 handler +async fn not_found() -> impl IntoResponse { + ( + StatusCode::NOT_FOUND, + Json(json!({ + "error": "Not Found", + "message": "The requested endpoint does not exist" + })), + ) +} + +/// Global error handler that converts anyhow errors to HTTP responses +pub fn handle_error(err: anyhow::Error) -> impl IntoResponse { + // Log the full error with backtrace for debugging + tracing::error!("Request failed: {:?}", err); + + // Check if it's a database connection error + let error_message = err.to_string(); + let (status, message) = if error_message.contains("database") + || error_message.contains("connection") + { + (StatusCode::SERVICE_UNAVAILABLE, "Database connection error. Please try again later.") + } else if error_message.contains("not found") || error_message.contains("No data found") { + (StatusCode::NOT_FOUND, "The requested data was not found.") + } else { + // For production, return a generic message. In dev, you might want to return the actual error + (StatusCode::INTERNAL_SERVER_ERROR, "An internal error occurred. Please try again later.") + }; + + ( + status, + Json(json!({ + "error": status.canonical_reason().unwrap_or("Error"), + "message": message + })), + ) +} + +/// Create a cache control header value safely +pub fn cache_control(value: &str) -> HeaderValue { + HeaderValue::from_str(value).unwrap_or_else(|_| HeaderValue::from_static("public, max-age=60")) +} diff --git a/crates/lambdas/indexer-api/src/lib.rs b/crates/lambdas/indexer-api/src/lib.rs new file mode 100644 index 000000000..2e528da8b --- /dev/null +++ b/crates/lambdas/indexer-api/src/lib.rs @@ -0,0 +1,20 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +pub mod db; +pub mod handler; +pub mod models; +pub mod openapi; +pub mod routes; +pub mod utils; diff --git a/crates/lambdas/indexer-api/src/main.rs b/crates/lambdas/indexer-api/src/main.rs new file mode 100644 index 000000000..6d9aa5d6e --- /dev/null +++ b/crates/lambdas/indexer-api/src/main.rs @@ -0,0 +1,41 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +use anyhow::Result; +use lambda_http::{run, Error}; +use tracing_subscriber::{fmt, prelude::*, EnvFilter}; + +mod db; +mod handler; +mod models; +mod openapi; +mod routes; +mod utils; + +#[tokio::main] +async fn main() -> Result<(), Error> { + // Initialize tracing + tracing_subscriber::registry() + .with(fmt::layer()) + .with(EnvFilter::try_from_default_env().unwrap_or_else(|_| EnvFilter::new("info"))) + .init(); + + tracing::info!("Starting indexer-api Lambda function"); + + // Get the axum app + let app = handler::create_handler().await?; + + // Run the Lambda runtime with the axum app + run(app).await +} diff --git a/crates/lambdas/indexer-api/src/models.rs b/crates/lambdas/indexer-api/src/models.rs new file mode 100644 index 000000000..7281f871d --- /dev/null +++ b/crates/lambdas/indexer-api/src/models.rs @@ -0,0 +1,405 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
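+
+//! Request/response models for the indexer API.
+//!
+//! Pagination is clamped server-side; a small sketch of the intended
+//! behaviour of `PaginationParams::validate` below (the values here are
+//! illustrative):
+//!
+//! ```ignore
+//! let params = PaginationParams { limit: 500, offset: 10 }.validate();
+//! assert_eq!(params.limit, 100); // capped at 100
+//! assert_eq!(params.offset, 10); // offset passes through unchanged
+//! ```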
+ +use serde::{Deserialize, Serialize}; +use utoipa::ToSchema; + +/// Health check response +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct HealthResponse { + pub status: String, + pub service: String, +} + +/// Query parameters for pagination +#[derive(Debug, Deserialize, ToSchema, utoipa::IntoParams)] +pub struct PaginationParams { + /// Number of results to return (default: 50, max: 100) + #[serde(default = "default_limit")] + pub limit: u64, + + /// Number of results to skip (default: 0) + #[serde(default)] + pub offset: u64, +} + +fn default_limit() -> u64 { + 50 +} + +impl PaginationParams { + /// Validate and normalize pagination parameters + pub fn validate(self) -> Self { + Self { + limit: self.limit.min(100), // Cap at 100 + offset: self.offset, + } + } +} + +/// Response for aggregate PoVW rewards leaderboard +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct AggregateLeaderboardEntry { + /// Rank in the leaderboard (1-based, only present in leaderboard contexts) + #[serde(skip_serializing_if = "Option::is_none")] + pub rank: Option<u64>, + + /// Work log ID (Ethereum address) + pub work_log_id: String, + + /// Total work submitted across all epochs + pub total_work_submitted: String, + + /// Total work submitted (human-readable) + pub total_work_submitted_formatted: String, + + /// Total rewards earned across all epochs + pub total_actual_rewards: String, + + /// Total rewards earned (human-readable) + pub total_actual_rewards_formatted: String, + + /// Total uncapped rewards earned across all epochs + pub total_uncapped_rewards: String, + + /// Total uncapped rewards (human-readable) + pub total_uncapped_rewards_formatted: String, + + /// Number of epochs participated in + pub epochs_participated: u64, +} + +/// Response for epoch-specific PoVW rewards leaderboard +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct EpochLeaderboardEntry { + /// Rank in the leaderboard (1-based, only present in leaderboard contexts) + #[serde(skip_serializing_if = "Option::is_none")] + pub rank: Option<u64>, + + /// Work log ID (Ethereum address) + pub work_log_id: String, + + /// Epoch number + pub epoch: u64, + + /// Work submitted in this epoch + pub work_submitted: String, + + /// Work submitted (human-readable) + pub work_submitted_formatted: String, + + /// Percentage of total work in epoch + pub percentage: f64, + + /// Rewards before applying cap + pub uncapped_rewards: String, + + /// Uncapped rewards (human-readable) + pub uncapped_rewards_formatted: String, + + /// Maximum rewards allowed based on stake + pub reward_cap: String, + + /// Reward cap (human-readable) + pub reward_cap_formatted: String, + + /// Actual rewards after applying cap + pub actual_rewards: String, + + /// Actual rewards (human-readable) + pub actual_rewards_formatted: String, + + /// Whether rewards were capped + pub is_capped: bool, + + /// Staked amount for this work log + pub staked_amount: String, + + /// Staked amount (human-readable) + pub staked_amount_formatted: String, +} + +/// Response wrapper for leaderboard endpoints +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct LeaderboardResponse<T> { + /// List of leaderboard entries + pub entries: Vec<T>, + + /// Pagination metadata + pub pagination: PaginationMetadata, +} + +/// Pagination metadata +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct PaginationMetadata { + /// Number of results returned + pub count: usize, + + /// Offset used + pub offset: u64, + + /// Limit used + pub limit: u64, +} +
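+// Note: `LeaderboardResponse<T>` is generic over the entry type above, e.g.
+// `LeaderboardResponse<EpochLeaderboardEntry>` for a per-epoch leaderboard,
+// so the paginated endpoints can share one envelope shape.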
+impl<T> LeaderboardResponse<T> { + pub fn new(entries: Vec<T>, offset: u64, limit: u64) -> Self { + let count = entries.len(); + Self { entries, pagination: PaginationMetadata { count, offset, limit } } + } +} + +/// Response wrapper for address-specific endpoints with summary +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct AddressLeaderboardResponse<T, S> { + /// List of history entries + pub entries: Vec<T>, + + /// Pagination metadata + pub pagination: PaginationMetadata, + + /// Address summary statistics + pub summary: S, +} + +impl<T, S> AddressLeaderboardResponse<T, S> { + pub fn new(entries: Vec<T>, offset: u64, limit: u64, summary: S) -> Self { + let count = entries.len(); + Self { entries, pagination: PaginationMetadata { count, offset, limit }, summary } + } +} + +/// Response for aggregate staking leaderboard +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct AggregateStakingEntry { + /// Rank in the leaderboard (1-based, only present in leaderboard contexts) + #[serde(skip_serializing_if = "Option::is_none")] + pub rank: Option<u64>, + + /// Staker address + pub staker_address: String, + + /// Total staked amount + pub total_staked: String, + + /// Total staked (human-readable) + pub total_staked_formatted: String, + + /// Whether the stake is in withdrawal + pub is_withdrawing: bool, + + /// Address this staker has delegated rewards to + pub rewards_delegated_to: Option<String>, + + /// Address this staker has delegated votes to + pub votes_delegated_to: Option<String>, + + /// Number of epochs participated in + pub epochs_participated: u64, + + /// Total rewards generated by owned positions + pub total_rewards_generated: String, + + /// Total rewards generated (human-readable) + pub total_rewards_generated_formatted: String, +} + +/// Response for epoch-specific staking leaderboard +#[derive(Debug, Serialize, Deserialize, ToSchema)] +pub struct EpochStakingEntry { + /// Rank in the leaderboard (1-based, only present in leaderboard contexts) + #[serde(skip_serializing_if = "Option::is_none")] + pub rank: Option<u64>, + + /// Staker address + pub staker_address: String, + + /// Epoch number + pub epoch: u64, + + /// Staked amount in this epoch + pub staked_amount: String, + + /// Staked amount (human-readable) + pub staked_amount_formatted: String, + + /// Whether the stake was in withdrawal during this epoch + pub is_withdrawing: bool, + + /// Address this staker had delegated rewards to during this epoch + pub rewards_delegated_to: Option<String>, + + /// Address this staker had delegated votes to during this epoch + pub votes_delegated_to: Option<String>, + + /// Rewards generated by this position in this epoch + pub rewards_generated: String, + + /// Rewards generated (human-readable) + pub rewards_generated_formatted: String, +} + +/// Global PoVW summary statistics +#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)] +pub struct PoVWSummaryStats { + pub total_epochs_with_work: u64, + pub total_unique_work_log_ids: u64, + pub total_work_all_time: String, + pub total_work_all_time_formatted: String, + pub total_emissions_all_time: String, + pub total_emissions_all_time_formatted: String, + pub total_capped_rewards_all_time: String, + pub total_capped_rewards_all_time_formatted: String, + pub total_uncapped_rewards_all_time: String, + pub total_uncapped_rewards_all_time_formatted: String, + #[serde(skip_serializing_if = "Option::is_none")] + pub last_updated_at: Option<String>, +} + +/// Per-epoch PoVW summary +#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)] +pub struct EpochPoVWSummary { + pub epoch: u64, +
+
+/// Response for aggregate staking leaderboard
+#[derive(Debug, Serialize, Deserialize, ToSchema)]
+pub struct AggregateStakingEntry {
+    /// Rank in the leaderboard (1-based, only present in leaderboard contexts)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rank: Option<u64>,
+
+    /// Staker address
+    pub staker_address: String,
+
+    /// Total staked amount
+    pub total_staked: String,
+
+    /// Total staked (human-readable)
+    pub total_staked_formatted: String,
+
+    /// Whether the stake is in withdrawal
+    pub is_withdrawing: bool,
+
+    /// Address this staker has delegated rewards to
+    pub rewards_delegated_to: Option<String>,
+
+    /// Address this staker has delegated votes to
+    pub votes_delegated_to: Option<String>,
+
+    /// Number of epochs participated in
+    pub epochs_participated: u64,
+
+    /// Total rewards generated by owned positions
+    pub total_rewards_generated: String,
+
+    /// Total rewards generated (human-readable)
+    pub total_rewards_generated_formatted: String,
+}
+
+/// Response for epoch-specific staking leaderboard
+#[derive(Debug, Serialize, Deserialize, ToSchema)]
+pub struct EpochStakingEntry {
+    /// Rank in the leaderboard (1-based, only present in leaderboard contexts)
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rank: Option<u64>,
+
+    /// Staker address
+    pub staker_address: String,
+
+    /// Epoch number
+    pub epoch: u64,
+
+    /// Staked amount in this epoch
+    pub staked_amount: String,
+
+    /// Staked amount (human-readable)
+    pub staked_amount_formatted: String,
+
+    /// Whether the stake was in withdrawal during this epoch
+    pub is_withdrawing: bool,
+
+    /// Address this staker had delegated rewards to during this epoch
+    pub rewards_delegated_to: Option<String>,
+
+    /// Address this staker had delegated votes to during this epoch
+    pub votes_delegated_to: Option<String>,
+
+    /// Rewards generated by this position in this epoch
+    pub rewards_generated: String,
+
+    /// Rewards generated (human-readable)
+    pub rewards_generated_formatted: String,
+}
+
+/// Global PoVW summary statistics
+#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+pub struct PoVWSummaryStats {
+    pub total_epochs_with_work: u64,
+    pub total_unique_work_log_ids: u64,
+    pub total_work_all_time: String,
+    pub total_work_all_time_formatted: String,
+    pub total_emissions_all_time: String,
+    pub total_emissions_all_time_formatted: String,
+    pub total_capped_rewards_all_time: String,
+    pub total_capped_rewards_all_time_formatted: String,
+    pub total_uncapped_rewards_all_time: String,
+    pub total_uncapped_rewards_all_time_formatted: String,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_updated_at: Option<u64>,
+}
+
+/// Per-epoch PoVW summary
+#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+pub struct EpochPoVWSummary {
+    pub epoch: u64,
+    pub total_work: String,
+    pub total_work_formatted: String,
+    pub total_emissions: String,
+    pub total_emissions_formatted: String,
+    pub total_capped_rewards: String,
+    pub total_capped_rewards_formatted: String,
+    pub total_uncapped_rewards: String,
+    pub total_uncapped_rewards_formatted: String,
+    pub epoch_start_time: u64,
+    pub epoch_end_time: u64,
+    pub num_participants: u64,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_updated_at: Option<u64>,
+}
+
+/// Global staking summary statistics
+#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+pub struct StakingSummaryStats {
+    pub current_total_staked: String,
+    pub current_total_staked_formatted: String,
+    pub total_unique_stakers: u64,
+    pub current_active_stakers: u64,
+    pub current_withdrawing: u64,
+    pub total_staking_emissions_all_time: Option<String>,
+    pub total_staking_emissions_all_time_formatted: Option<String>,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_updated_at: Option<u64>,
+}
+
+/// Per-epoch staking summary
+#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+pub struct EpochStakingSummary {
+    pub epoch: u64,
+    pub total_staked: String,
+    pub total_staked_formatted: String,
+    pub num_stakers: u64,
+    pub num_withdrawing: u64,
+    pub total_staking_emissions: String,
+    pub total_staking_emissions_formatted: String,
+    pub total_staking_power: String,
+    pub total_staking_power_formatted: String,
+    pub num_reward_recipients: u64,
+    pub epoch_start_time: u64,
+    pub epoch_end_time: u64,
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub last_updated_at: Option<u64>,
+}
+
+/// Address-specific staking aggregate summary
+#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+pub struct StakingAddressSummary {
+    pub staker_address: String,
+    pub total_staked: String,
+    pub total_staked_formatted: String,
+    pub is_withdrawing: bool,
+    pub rewards_delegated_to: Option<String>,
+    pub votes_delegated_to: Option<String>,
+    pub epochs_participated: u64,
+    pub total_rewards_generated: String,
+    pub total_rewards_generated_formatted: String,
+}
+
+/// Address-specific PoVW aggregate summary
+#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+pub struct PoVWAddressSummary {
+    pub work_log_id: String,
+    pub total_work_submitted: String,
+    pub total_work_submitted_formatted: String,
+    pub total_actual_rewards: String,
+    pub total_actual_rewards_formatted: String,
+    pub total_uncapped_rewards: String,
+    pub total_uncapped_rewards_formatted: String,
+    pub epochs_participated: u64,
+}
+
+/// Summary statistics for vote delegations
+#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+pub struct VoteDelegationSummaryStats {
+    pub total_unique_delegates: u64,
+    pub total_unique_delegators: u64,
+    pub current_total_delegated_power: String,
+    pub current_total_delegated_power_formatted: String,
+    pub current_active_delegations: u64,
+}
+
+/// Summary statistics for reward delegations
+#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+pub struct RewardDelegationSummaryStats {
+    pub total_unique_delegates: u64,
+    pub total_unique_delegators: u64,
+    pub current_total_delegated_power: String,
+    pub current_total_delegated_power_formatted: String,
+    pub current_active_delegations: u64,
+}
+
+/// Per-epoch delegation summary
+#[derive(Debug, Serialize, Deserialize, Clone, ToSchema)]
+pub struct EpochDelegationSummary {
+    pub epoch: u64,
+    pub total_delegated_power: String,
+    pub total_delegated_power_formatted: String,
+    pub num_delegates: u64,
+    pub num_delegators: u64,
+    pub epoch_start_time: u64,
+    pub epoch_end_time: u64,
+}
+
+/// Response for delegation power entries
+#[derive(Debug, Serialize, Deserialize, ToSchema)]
+pub struct DelegationPowerEntry {
+    /// Rank in the leaderboard (1-based), None for individual queries
+    #[serde(skip_serializing_if = "Option::is_none")]
+    pub rank: Option<u64>,
+
+    /// Delegate address receiving the delegation
+    pub delegate_address: String,
+
+    /// Total power delegated
+    pub power: String,
+
+    /// Number of delegators
+    pub delegator_count: u64,
+
+    /// List of delegator addresses
+    pub delegators: Vec<String>,
+
+    /// Number of epochs participated (for aggregates)
+    pub epochs_participated: Option<u64>,
+
+    /// Epoch number (for specific epoch data)
+    pub epoch: Option<u64>,
+}
diff --git a/crates/lambdas/indexer-api/src/openapi.rs b/crates/lambdas/indexer-api/src/openapi.rs
new file mode 100644
index 000000000..d366a7d32
--- /dev/null
+++ b/crates/lambdas/indexer-api/src/openapi.rs
@@ -0,0 +1,99 @@
+// Copyright 2025 RISC Zero, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::models::*;
+use utoipa::OpenApi;
+
+#[derive(OpenApi)]
+#[openapi(
+    info(
+        title = "Boundless Indexer API",
+        version = "1.0.0",
+        description = "API for accessing staking, delegation, and Proof of Verifiable Work (PoVW) data for the Boundless protocol.",
+        contact(name = "Boundless Development Team")
+    ),
+    servers(
+        (url = "/", description = "Current server")
+    ),
+    tags(
+        (name = "Health", description = "Health check endpoints"),
+        (name = "Staking", description = "Staking position and history endpoints"),
+        (name = "PoVW", description = "Proof of Verifiable Work rewards endpoints"),
+        (name = "Delegations", description = "Vote and reward delegation endpoints")
+    ),
+    paths(
+        // Health check
+        crate::handler::health_check,
+        // Staking endpoints
+        crate::routes::staking::get_staking_summary,
+        crate::routes::staking::get_all_epochs_summary,
+        crate::routes::staking::get_epoch_summary,
+        crate::routes::staking::get_epoch_leaderboard,
+        crate::routes::staking::get_address_at_epoch,
+        crate::routes::staking::get_all_time_leaderboard,
+        crate::routes::staking::get_address_history,
+        // PoVW endpoints
+        crate::routes::povw::get_povw_summary,
+        crate::routes::povw::get_all_epochs_summary,
+        crate::routes::povw::get_epoch_summary,
+        crate::routes::povw::get_epoch_leaderboard,
+        crate::routes::povw::get_address_at_epoch,
+        crate::routes::povw::get_all_time_leaderboard,
+        crate::routes::povw::get_address_history,
+        // Delegation endpoints - Votes
+        crate::routes::delegations::get_aggregate_vote_delegations,
+        crate::routes::delegations::get_vote_delegations_by_epoch,
+        crate::routes::delegations::get_vote_delegation_history_by_address,
+        crate::routes::delegations::get_vote_delegation_by_address_and_epoch,
+        // Delegation endpoints - Rewards
+        crate::routes::delegations::get_aggregate_reward_delegations,
+        crate::routes::delegations::get_reward_delegations_by_epoch,
+        crate::routes::delegations::get_reward_delegation_history_by_address,
+        crate::routes::delegations::get_reward_delegation_by_address_and_epoch,
+    ),
+    components(schemas(
+        // Response models
+        StakingSummaryStats,
+        PoVWSummaryStats,
+        LeaderboardResponse<AggregateStakingEntry>,
+        LeaderboardResponse<EpochStakingEntry>,
+        LeaderboardResponse<AggregateLeaderboardEntry>,
+        LeaderboardResponse<EpochLeaderboardEntry>,
+        AddressLeaderboardResponse<EpochStakingEntry, StakingAddressSummary>,
+        AddressLeaderboardResponse<EpochLeaderboardEntry, PoVWAddressSummary>,
+
+        // Entry types
+        AggregateStakingEntry,
+        EpochStakingEntry,
+        AggregateLeaderboardEntry,
+        EpochLeaderboardEntry,
+
+        // Summary types
+        StakingAddressSummary,
+        PoVWAddressSummary,
+        EpochStakingSummary,
+        EpochPoVWSummary,
+
+        // Pagination
+        PaginationParams,
+        PaginationMetadata,
+
+        // Delegation types
+        DelegationPowerEntry,
+        EpochDelegationSummary,
+        VoteDelegationSummaryStats,
+        RewardDelegationSummaryStats,
+    ))
+)]
+pub struct ApiDoc;
diff --git a/crates/lambdas/indexer-api/src/routes/delegations.rs b/crates/lambdas/indexer-api/src/routes/delegations.rs
new file mode 100644
index 000000000..227c32bb1
--- /dev/null
+++ b/crates/lambdas/indexer-api/src/routes/delegations.rs
@@ -0,0 +1,591 @@
+// Copyright 2025 RISC Zero, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use alloy::primitives::Address;
+use axum::{
+    extract::{Path, Query, State},
+    http::header,
+    response::{IntoResponse, Response},
+    routing::get,
+    Json, Router,
+};
+use std::{str::FromStr, sync::Arc};
+use utoipa;
+
+use crate::{
+    db::AppState,
+    handler::{cache_control, handle_error},
+    models::{DelegationPowerEntry, LeaderboardResponse, PaginationParams},
+};
+
+/// Create delegation routes
+pub fn routes() -> Router<Arc<AppState>> {
+    Router::new()
+        // Vote delegation endpoints
+        .route("/votes/epochs/:epoch/addresses", get(get_vote_delegations_by_epoch))
+        .route(
+            "/votes/epochs/:epoch/addresses/:address",
+            get(get_vote_delegation_by_address_and_epoch),
+        )
+        .route("/votes/addresses", get(get_aggregate_vote_delegations))
+        .route("/votes/addresses/:address", get(get_vote_delegation_history_by_address))
+        // Reward delegation endpoints
+        .route("/rewards/epochs/:epoch/addresses", get(get_reward_delegations_by_epoch))
+        .route(
+            "/rewards/epochs/:epoch/addresses/:address",
+            get(get_reward_delegation_by_address_and_epoch),
+        )
+        .route("/rewards/addresses", get(get_aggregate_reward_delegations))
+        .route("/rewards/addresses/:address", get(get_reward_delegation_history_by_address))
+}
+
+// ===== VOTE DELEGATION ENDPOINTS =====
+
+/// GET /v1/delegations/votes/addresses
+/// Returns the current aggregate vote delegation powers
+#[utoipa::path(
+    get,
+    path = "/v1/delegations/votes/addresses",
+    tag = "Delegations",
+    params(
+        PaginationParams
+    ),
+    responses(
+        (status = 200, description = "Aggregate vote delegation powers", body = LeaderboardResponse<DelegationPowerEntry>),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_aggregate_vote_delegations(
+    State(state): State<Arc<AppState>>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    let params = params.validate();
+
+    match get_aggregate_vote_delegations_impl(state, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=60"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_aggregate_vote_delegations_impl(
+    state: Arc<AppState>,
+    params: PaginationParams,
+) -> anyhow::Result<LeaderboardResponse<DelegationPowerEntry>> {
+    tracing::debug!(
+        "Fetching aggregate vote delegation powers with offset={}, limit={}",
+        params.offset,
+        params.limit
+    );
+
+    let aggregates =
+        state.rewards_db.get_vote_delegation_powers_aggregate(params.offset, params.limit).await?;
+
+    let entries: Vec<DelegationPowerEntry> = aggregates
+        .into_iter()
+        .enumerate()
+        .map(|(index, agg)| DelegationPowerEntry {
+            rank: Some(params.offset + (index as u64) + 1),
+            delegate_address: format!("{:#x}", agg.delegate_address),
+            power: agg.total_vote_power.to_string(),
+            delegator_count: agg.delegator_count,
+            delegators: agg.delegators.iter().map(|a| format!("{:#x}", a)).collect(),
+            epochs_participated: Some(agg.epochs_participated),
+            epoch: None,
+        })
+        .collect();
+
+    Ok(LeaderboardResponse::new(entries, params.offset, params.limit))
+}
+
+/// GET /v1/delegations/votes/epochs/:epoch/addresses
+/// Returns vote delegation powers for a specific epoch
+#[utoipa::path(
+    get,
+    path = "/v1/delegations/votes/epochs/{epoch}/addresses",
+    tag = "Delegations",
+    params(
+        ("epoch" = u64, Path, description = "Epoch number"),
+        PaginationParams
+    ),
+    responses(
+        (status = 200, description = "Vote delegation powers for epoch", body = LeaderboardResponse<DelegationPowerEntry>),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_vote_delegations_by_epoch(
+    State(state): State<Arc<AppState>>,
+    Path(epoch): Path<u64>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    let params = params.validate();
+
+    match get_vote_delegations_by_epoch_impl(state, epoch, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_vote_delegations_by_epoch_impl(
+    state: Arc<AppState>,
+    epoch: u64,
+    params: PaginationParams,
+) -> anyhow::Result<LeaderboardResponse<DelegationPowerEntry>> {
+    tracing::debug!(
+        "Fetching vote delegation powers for epoch {} with offset={}, limit={}",
+        epoch,
+        params.offset,
+        params.limit
+    );
+
+    let powers = state
+        .rewards_db
+        .get_vote_delegation_powers_by_epoch(epoch, params.offset, params.limit)
+        .await?;
+
+    let entries: Vec<DelegationPowerEntry> = powers
+        .into_iter()
+        .enumerate()
+        .map(|(index, power)| DelegationPowerEntry {
+            rank: Some(params.offset + (index as u64) + 1),
+            delegate_address: format!("{:#x}", power.delegate_address),
+            power: power.vote_power.to_string(),
+            delegator_count: power.delegator_count,
+            delegators: power.delegators.iter().map(|a| format!("{:#x}", a)).collect(),
+            epochs_participated: None,
+            epoch: Some(epoch),
+        })
+        .collect();
+
+    Ok(LeaderboardResponse::new(entries, params.offset, params.limit))
+}
+
+/// GET /v1/delegations/votes/addresses/:address
+/// Returns vote delegation history for a specific address
+#[utoipa::path(
+    get,
+    path = "/v1/delegations/votes/addresses/{address}",
+    tag = "Delegations",
+    params(
+        ("address" = String, Path, description = "Ethereum address"),
+        PaginationParams
+    ),
+    responses(
+        (status = 200, description = "Vote delegation history for address", body = LeaderboardResponse<DelegationPowerEntry>),
+        (status = 400, description = "Invalid address format"),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_vote_delegation_history_by_address(
+    State(state): State<Arc<AppState>>,
+    Path(address_str): Path<String>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    let address = match Address::from_str(&address_str) {
+        Ok(addr) => addr,
+        Err(e) => {
+            return handle_error(anyhow::anyhow!("Invalid address format: {}", e)).into_response()
+        }
+    };
+
+    let params = params.validate();
+
+    match get_vote_delegation_history_by_address_impl(state, address, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_vote_delegation_history_by_address_impl(
+    state: Arc<AppState>,
+    address: Address,
+    params: PaginationParams,
+) -> anyhow::Result<LeaderboardResponse<DelegationPowerEntry>> {
+    tracing::debug!(
+        "Fetching vote delegation history for address {} with offset={}, limit={}",
+        address,
+        params.offset,
+        params.limit
+    );
+
+    let history =
+        state.rewards_db.get_vote_delegations_received_history(address, None, None).await?;
+
+    // Apply pagination
+    let start = params.offset as usize;
+    let end = (start + params.limit as usize).min(history.len());
+    let paginated = if start < history.len() { history[start..end].to_vec() } else { vec![] };
+
+    let entries: Vec<DelegationPowerEntry> = paginated
+        .into_iter()
+        .map(|power| DelegationPowerEntry {
+            rank: None,
+            delegate_address: format!("{:#x}", power.delegate_address),
+            power: power.vote_power.to_string(),
+            delegator_count: power.delegator_count,
+            delegators: power.delegators.iter().map(|a| format!("{:#x}", a)).collect(),
+            epochs_participated: None,
+            epoch: Some(power.epoch),
+        })
+        .collect();
+
+    Ok(LeaderboardResponse::new(entries, params.offset, params.limit))
+}
+
+/// GET /v1/delegations/votes/epochs/:epoch/addresses/:address
+/// Returns vote delegation for a specific address at a specific epoch
+#[utoipa::path(
+    get,
+    path = "/v1/delegations/votes/epochs/{epoch}/addresses/{address}",
+    tag = "Delegations",
+    params(
+        ("epoch" = u64, Path, description = "Epoch number"),
+        ("address" = String, Path, description = "Ethereum address")
+    ),
+    responses(
+        (status = 200, description = "Vote delegation for address at epoch", body = Option<DelegationPowerEntry>),
+        (status = 400, description = "Invalid address format"),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_vote_delegation_by_address_and_epoch(
+    State(state): State<Arc<AppState>>,
+    Path((epoch, address_str)): Path<(u64, String)>,
+) -> Response {
+    let address = match Address::from_str(&address_str) {
+        Ok(addr) => addr,
+        Err(e) => {
+            return handle_error(anyhow::anyhow!("Invalid address format: {}", e)).into_response()
+        }
+    };
+
+    match get_vote_delegation_by_address_and_epoch_impl(state, address, epoch).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_vote_delegation_by_address_and_epoch_impl(
+    state: Arc<AppState>,
+    address: Address,
+    epoch: u64,
+) -> anyhow::Result<Option<DelegationPowerEntry>> {
+    tracing::debug!("Fetching vote delegation for address {} at epoch {}", address, epoch);
+
+    let history = state
+        .rewards_db
+        .get_vote_delegations_received_history(address, Some(epoch), Some(epoch))
+        .await?;
+
+    if history.is_empty() {
+        return Ok(None);
+    }
+
+    let power = &history[0];
+    Ok(Some(DelegationPowerEntry {
+        rank: None,
+        delegate_address: format!("{:#x}", power.delegate_address),
+        power: power.vote_power.to_string(),
+        delegator_count: power.delegator_count,
+        delegators: power.delegators.iter().map(|a| format!("{:#x}", a)).collect(),
+        epochs_participated: None,
+        epoch: Some(power.epoch),
+    }))
+}
+
+// ===== REWARD DELEGATION ENDPOINTS =====
+
+/// GET /v1/delegations/rewards/addresses
+/// Returns the current aggregate reward delegation powers
+#[utoipa::path(
+    get,
+    path = "/v1/delegations/rewards/addresses",
+    tag = "Delegations",
+    params(
+        PaginationParams
+    ),
+    responses(
+        (status = 200, description = "Aggregate reward delegation powers", body = LeaderboardResponse<DelegationPowerEntry>),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_aggregate_reward_delegations(
+    State(state): State<Arc<AppState>>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    let params = params.validate();
+
+    match get_aggregate_reward_delegations_impl(state, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=60"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_aggregate_reward_delegations_impl(
+    state: Arc<AppState>,
+    params: PaginationParams,
+) -> anyhow::Result<LeaderboardResponse<DelegationPowerEntry>> {
+    tracing::debug!(
+        "Fetching aggregate reward delegation powers with offset={}, limit={}",
+        params.offset,
+        params.limit
+    );
+
+    let aggregates = state
+        .rewards_db
+        .get_reward_delegation_powers_aggregate(params.offset, params.limit)
+        .await?;
+
+    let entries: Vec<DelegationPowerEntry> = aggregates
+        .into_iter()
+        .enumerate()
+        .map(|(index, agg)| DelegationPowerEntry {
+            rank: Some(params.offset + (index as u64) + 1),
+            delegate_address: format!("{:#x}", agg.delegate_address),
+            power: agg.total_reward_power.to_string(),
+            delegator_count: agg.delegator_count,
+            delegators: agg.delegators.iter().map(|a| format!("{:#x}", a)).collect(),
+            epochs_participated: Some(agg.epochs_participated),
+            epoch: None,
+        })
+        .collect();
+
+    Ok(LeaderboardResponse::new(entries, params.offset, params.limit))
+}
+
+/// GET /v1/delegations/rewards/epochs/:epoch/addresses
+/// Returns reward delegation powers for a specific epoch
+#[utoipa::path(
+    get,
+    path = "/v1/delegations/rewards/epochs/{epoch}/addresses",
+    tag = "Delegations",
+    params(
+        ("epoch" = u64, Path, description = "Epoch number"),
+        PaginationParams
+    ),
+    responses(
+        (status = 200, description = "Reward delegation powers for epoch", body = LeaderboardResponse<DelegationPowerEntry>),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_reward_delegations_by_epoch(
+    State(state): State<Arc<AppState>>,
+    Path(epoch): Path<u64>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    let params = params.validate();
+
+    match get_reward_delegations_by_epoch_impl(state, epoch, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_reward_delegations_by_epoch_impl(
+    state: Arc<AppState>,
+    epoch: u64,
+    params: PaginationParams,
+) -> anyhow::Result<LeaderboardResponse<DelegationPowerEntry>> {
+    tracing::debug!(
+        "Fetching reward delegation powers for epoch {} with offset={}, limit={}",
+        epoch,
+        params.offset,
+        params.limit
+    );
+
+    let powers = state
+        .rewards_db
+        .get_reward_delegation_powers_by_epoch(epoch, params.offset, params.limit)
+        .await?;
+
+    let entries: Vec<DelegationPowerEntry> = powers
+        .into_iter()
+        .enumerate()
+        .map(|(index, power)| DelegationPowerEntry {
+            rank: Some(params.offset + (index as u64) + 1),
+            delegate_address: format!("{:#x}", power.delegate_address),
+            power: power.reward_power.to_string(),
+            delegator_count: power.delegator_count,
+            delegators: power.delegators.iter().map(|a| format!("{:#x}", a)).collect(),
+            epochs_participated: None,
+            epoch: Some(epoch),
+        })
+        .collect();
+
+    Ok(LeaderboardResponse::new(entries, params.offset, params.limit))
+}
+
+/// GET /v1/delegations/rewards/addresses/:address
+/// Returns reward delegation history for a specific address
+#[utoipa::path(
+    get,
+    path = "/v1/delegations/rewards/addresses/{address}",
+    tag = "Delegations",
+    params(
+        ("address" = String, Path, description = "Ethereum address"),
+        PaginationParams
+    ),
+    responses(
+        (status = 200, description = "Reward delegation history for address", body = LeaderboardResponse<DelegationPowerEntry>),
+        (status = 400, description = "Invalid address format"),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_reward_delegation_history_by_address(
+    State(state): State<Arc<AppState>>,
+    Path(address_str): Path<String>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    let address = match Address::from_str(&address_str) {
+        Ok(addr) => addr,
+        Err(e) => {
+            return handle_error(anyhow::anyhow!("Invalid address format: {}", e)).into_response()
+        }
+    };
+
+    let params = params.validate();
+
+    match get_reward_delegation_history_by_address_impl(state, address, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_reward_delegation_history_by_address_impl(
+    state: Arc<AppState>,
+    address: Address,
+    params: PaginationParams,
+) -> anyhow::Result<LeaderboardResponse<DelegationPowerEntry>> {
+    tracing::debug!(
+        "Fetching reward delegation history for address {} with offset={}, limit={}",
+        address,
+        params.offset,
+        params.limit
+    );
+
+    let history =
+        state.rewards_db.get_reward_delegations_received_history(address, None, None).await?;
+
+    // Apply pagination
+    let start = params.offset as usize;
+    let end = (start + params.limit as usize).min(history.len());
+    let paginated = if start < history.len() { history[start..end].to_vec() } else { vec![] };
+
+    let entries: Vec<DelegationPowerEntry> = paginated
+        .into_iter()
+        .map(|power| DelegationPowerEntry {
+            rank: None,
+            delegate_address: format!("{:#x}", power.delegate_address),
+            power: power.reward_power.to_string(),
+            delegator_count: power.delegator_count,
+            delegators: power.delegators.iter().map(|a| format!("{:#x}", a)).collect(),
+            epochs_participated: None,
+            epoch: Some(power.epoch),
+        })
+        .collect();
+
+    Ok(LeaderboardResponse::new(entries, params.offset, params.limit))
+}
+
+/// GET /v1/delegations/rewards/epochs/:epoch/addresses/:address
+/// Returns reward delegation for a specific address at a specific epoch
+#[utoipa::path(
+    get,
+    path = "/v1/delegations/rewards/epochs/{epoch}/addresses/{address}",
+    tag = "Delegations",
+    params(
+        ("epoch" = u64, Path, description = "Epoch number"),
+        ("address" = String, Path, description = "Ethereum address")
+    ),
+    responses(
+        (status = 200, description = "Reward delegation for address at epoch", body = Option<DelegationPowerEntry>),
+        (status = 400, description = "Invalid address format"),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_reward_delegation_by_address_and_epoch(
+    State(state): State<Arc<AppState>>,
+    Path((epoch, address_str)): Path<(u64, String)>,
+) -> Response {
+    let address = match Address::from_str(&address_str) {
+        Ok(addr) => addr,
+        Err(e) => {
+            return handle_error(anyhow::anyhow!("Invalid address format: {}", e)).into_response()
+        }
+    };
+
+    match get_reward_delegation_by_address_and_epoch_impl(state, address, epoch).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_reward_delegation_by_address_and_epoch_impl(
+    state: Arc<AppState>,
+    address: Address,
+    epoch: u64,
+) -> anyhow::Result<Option<DelegationPowerEntry>> {
+    tracing::debug!("Fetching reward delegation for address {} at epoch {}", address, epoch);
+
+    let history = state
+        .rewards_db
+        .get_reward_delegations_received_history(address, Some(epoch), Some(epoch))
+        .await?;
+
+    if history.is_empty() {
+        return Ok(None);
+    }
+
+    let power = &history[0];
+    Ok(Some(DelegationPowerEntry {
+        rank: None,
+        delegate_address: format!("{:#x}", power.delegate_address),
+        power: power.reward_power.to_string(),
+        delegator_count: power.delegator_count,
+        delegators: power.delegators.iter().map(|a| format!("{:#x}", a)).collect(),
+        epochs_participated: None,
+        epoch: Some(power.epoch),
+    }))
+}
diff --git a/crates/lambdas/indexer-api/src/routes/mod.rs b/crates/lambdas/indexer-api/src/routes/mod.rs
new file mode 100644
index 000000000..a61123539
--- /dev/null
+++ b/crates/lambdas/indexer-api/src/routes/mod.rs
@@ -0,0 +1,17 @@
+// Copyright 2025 RISC Zero, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+pub mod delegations;
+pub mod povw;
+pub mod staking;
diff --git a/crates/lambdas/indexer-api/src/routes/povw.rs b/crates/lambdas/indexer-api/src/routes/povw.rs
new file mode 100644
index 000000000..c14fc600d
--- /dev/null
+++ b/crates/lambdas/indexer-api/src/routes/povw.rs
@@ -0,0 +1,589 @@
+// Copyright 2025 RISC Zero, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
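routes/mod.rs above exposes the three route modules, but the entrypoint that mounts them is outside this diff. Presumably the lambda's handler nests each module under the `/v1/...` prefix that the `#[utoipa::path]` annotations advertise; an assumed sketch, not taken from this change:

use std::sync::Arc;
use axum::Router;

use crate::{db::AppState, routes};

// Assumed wiring (not from this diff): nest each module's Router<Arc<AppState>>
// under the prefix its OpenAPI paths declare, then attach the shared state.
fn api_router(state: Arc<AppState>) -> Router {
    Router::new()
        .nest("/v1/povw", routes::povw::routes())
        .nest("/v1/staking", routes::staking::routes())
        .nest("/v1/delegations", routes::delegations::routes())
        .with_state(state)
}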
+
+use alloy::primitives::Address;
+use axum::{
+    extract::{Path, Query, State},
+    http::header,
+    response::{IntoResponse, Response},
+    routing::get,
+    Json, Router,
+};
+use std::{str::FromStr, sync::Arc};
+use utoipa;
+
+use crate::{
+    db::AppState,
+    handler::{cache_control, handle_error},
+    models::{
+        AddressLeaderboardResponse, AggregateLeaderboardEntry, EpochLeaderboardEntry,
+        EpochPoVWSummary, LeaderboardResponse, PaginationParams, PoVWAddressSummary,
+        PoVWSummaryStats,
+    },
+    utils::{format_cycles, format_zkc},
+};
+
+/// Create PoVW routes
+pub fn routes() -> Router<Arc<AppState>> {
+    Router::new()
+        // Aggregate summary endpoint
+        .route("/", get(get_povw_summary))
+        // Epoch endpoints
+        .route("/epochs", get(get_all_epochs_summary))
+        .route("/epochs/:epoch", get(get_epoch_summary))
+        .route("/epochs/:epoch/addresses", get(get_epoch_leaderboard))
+        .route("/epochs/:epoch/addresses/:address", get(get_address_at_epoch))
+        // Address endpoints
+        .route("/addresses", get(get_all_time_leaderboard))
+        .route("/addresses/:address", get(get_address_history))
+}
+
+/// GET /v1/povw
+/// Returns the aggregate PoVW summary
+#[utoipa::path(
+    get,
+    path = "/v1/povw",
+    tag = "PoVW",
+    responses(
+        (status = 200, description = "PoVW summary statistics", body = PoVWSummaryStats),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_povw_summary(State(state): State<Arc<AppState>>) -> Response {
+    match get_povw_summary_impl(state).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=60"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_povw_summary_impl(state: Arc<AppState>) -> anyhow::Result<PoVWSummaryStats> {
+    tracing::debug!("Fetching PoVW summary stats");
+
+    // Fetch summary stats
+    let summary_stats = state
+        .rewards_db
+        .get_povw_summary_stats()
+        .await?
+        .ok_or_else(|| anyhow::anyhow!("No PoVW summary data available"))?;
+
+    let work_str = summary_stats.total_work_all_time.to_string();
+    let emissions_str = summary_stats.total_emissions_all_time.to_string();
+    let capped_str = summary_stats.total_capped_rewards_all_time.to_string();
+    let uncapped_str = summary_stats.total_uncapped_rewards_all_time.to_string();
+
+    Ok(PoVWSummaryStats {
+        total_epochs_with_work: summary_stats.total_epochs_with_work,
+        total_unique_work_log_ids: summary_stats.total_unique_work_log_ids,
+        total_work_all_time: work_str.clone(),
+        total_work_all_time_formatted: format_cycles(&work_str),
+        total_emissions_all_time: emissions_str.clone(),
+        total_emissions_all_time_formatted: format_zkc(&emissions_str),
+        total_capped_rewards_all_time: capped_str.clone(),
+        total_capped_rewards_all_time_formatted: format_zkc(&capped_str),
+        total_uncapped_rewards_all_time: uncapped_str.clone(),
+        total_uncapped_rewards_all_time_formatted: format_zkc(&uncapped_str),
+        last_updated_at: summary_stats.updated_at,
+    })
+}
+
+/// GET /v1/povw/epochs
+/// Returns summary of all epochs
+#[utoipa::path(
+    get,
+    path = "/v1/povw/epochs",
+    tag = "PoVW",
+    params(
+        PaginationParams
+    ),
+    responses(
+        (status = 200, description = "All epochs PoVW summary", body = LeaderboardResponse<EpochPoVWSummary>),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_all_epochs_summary(
+    State(state): State<Arc<AppState>>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    let params = params.validate();
+
+    match get_all_epochs_summary_impl(state, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_all_epochs_summary_impl(
+    state: Arc<AppState>,
+    params: PaginationParams,
+) -> anyhow::Result<LeaderboardResponse<EpochPoVWSummary>> {
+    tracing::debug!(
+        "Fetching all epochs summary with offset={}, limit={}",
+        params.offset,
+        params.limit
+    );
+
+    // Fetch all epoch summaries
+    let summaries =
+        state.rewards_db.get_all_epoch_povw_summaries(params.offset, params.limit).await?;
+
+    // Convert to response format
+    let entries: Vec<EpochPoVWSummary> = summaries
+        .into_iter()
+        .map(|summary| {
+            let work_str = summary.total_work.to_string();
+            let emissions_str = summary.total_emissions.to_string();
+            let capped_str = summary.total_capped_rewards.to_string();
+            let uncapped_str = summary.total_uncapped_rewards.to_string();
+            EpochPoVWSummary {
+                epoch: summary.epoch,
+                total_work: work_str.clone(),
+                total_work_formatted: format_cycles(&work_str),
+                total_emissions: emissions_str.clone(),
+                total_emissions_formatted: format_zkc(&emissions_str),
+                total_capped_rewards: capped_str.clone(),
+                total_capped_rewards_formatted: format_zkc(&capped_str),
+                total_uncapped_rewards: uncapped_str.clone(),
+                total_uncapped_rewards_formatted: format_zkc(&uncapped_str),
+                epoch_start_time: summary.epoch_start_time,
+                epoch_end_time: summary.epoch_end_time,
+                num_participants: summary.num_participants,
+                last_updated_at: summary.updated_at,
+            }
+        })
+        .collect();
+
+    Ok(LeaderboardResponse::new(entries, params.offset, params.limit))
+}
+
+/// GET /v1/povw/epochs/:epoch
+/// Returns summary for a specific epoch
+#[utoipa::path(
+    get,
+    path = "/v1/povw/epochs/{epoch}",
+    tag = "PoVW",
+    params(
+        ("epoch" = u64, Path, description = "Epoch number")
+    ),
+    responses(
+        (status = 200, description = "Epoch PoVW summary", body = EpochPoVWSummary),
+        (status = 404, description = "Epoch not found"),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_epoch_summary(State(state): State<Arc<AppState>>, Path(epoch): Path<u64>) -> Response {
+    match get_epoch_summary_impl(state, epoch).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_epoch_summary_impl(
+    state: Arc<AppState>,
+    epoch: u64,
+) -> anyhow::Result<EpochPoVWSummary> {
+    tracing::debug!("Fetching PoVW summary for epoch {}", epoch);
+
+    // Fetch epoch summary
+    let summary = state
+        .rewards_db
+        .get_epoch_povw_summary(epoch)
+        .await?
+        .ok_or_else(|| anyhow::anyhow!("No data available for epoch {}", epoch))?;
+
+    let work_str = summary.total_work.to_string();
+    let emissions_str = summary.total_emissions.to_string();
+    let capped_str = summary.total_capped_rewards.to_string();
+    let uncapped_str = summary.total_uncapped_rewards.to_string();
+
+    Ok(EpochPoVWSummary {
+        epoch: summary.epoch,
+        total_work: work_str.clone(),
+        total_work_formatted: format_cycles(&work_str),
+        total_emissions: emissions_str.clone(),
+        total_emissions_formatted: format_zkc(&emissions_str),
+        total_capped_rewards: capped_str.clone(),
+        total_capped_rewards_formatted: format_zkc(&capped_str),
+        total_uncapped_rewards: uncapped_str.clone(),
+        total_uncapped_rewards_formatted: format_zkc(&uncapped_str),
+        epoch_start_time: summary.epoch_start_time,
+        epoch_end_time: summary.epoch_end_time,
+        num_participants: summary.num_participants,
+        last_updated_at: summary.updated_at,
+    })
+}
+
+/// GET /v1/povw/epochs/:epoch/addresses
+/// Returns the leaderboard for a specific epoch
+#[utoipa::path(
+    get,
+    path = "/v1/povw/epochs/{epoch}/addresses",
+    tag = "PoVW",
+    params(
+        ("epoch" = u64, Path, description = "Epoch number"),
+        PaginationParams
+    ),
+    responses(
+        (status = 200, description = "Epoch PoVW leaderboard", body = LeaderboardResponse<EpochLeaderboardEntry>),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_epoch_leaderboard(
+    State(state): State<Arc<AppState>>,
+    Path(epoch): Path<u64>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    let params = params.validate();
+
+    match get_epoch_leaderboard_impl(state, epoch, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_epoch_leaderboard_impl(
+    state: Arc<AppState>,
+    epoch: u64,
+    params: PaginationParams,
+) -> anyhow::Result<LeaderboardResponse<EpochLeaderboardEntry>> {
+    tracing::debug!(
+        "Fetching epoch {} leaderboard with offset={}, limit={}",
+        epoch,
+        params.offset,
+        params.limit
+    );
+
+    // Fetch data from database
+    let rewards =
+        state.rewards_db.get_povw_rewards_by_epoch(epoch, params.offset, params.limit).await?;
+
+    // Convert to response format with ranks
+    let entries: Vec<EpochLeaderboardEntry> = rewards
+        .into_iter()
+        .enumerate()
+        .map(|(index, reward)| {
+            let work_str = reward.work_submitted.to_string();
+            let uncapped_str = reward.uncapped_rewards.to_string();
+            let cap_str = reward.reward_cap.to_string();
+            let actual_str = reward.actual_rewards.to_string();
+            let staked_str = reward.staked_amount.to_string();
+            EpochLeaderboardEntry {
+                rank: Some(params.offset + (index as u64) + 1),
+                work_log_id: format!("{:#x}", reward.work_log_id),
+                epoch: reward.epoch,
+                work_submitted: work_str.clone(),
+                work_submitted_formatted: format_cycles(&work_str),
+                percentage: reward.percentage,
+                uncapped_rewards: uncapped_str.clone(),
+                uncapped_rewards_formatted: format_zkc(&uncapped_str),
+                reward_cap: cap_str.clone(),
+                reward_cap_formatted: format_zkc(&cap_str),
+                actual_rewards: actual_str.clone(),
+                actual_rewards_formatted: format_zkc(&actual_str),
+                is_capped: reward.is_capped,
+                staked_amount: staked_str.clone(),
+                staked_amount_formatted: format_zkc(&staked_str),
+            }
+        })
+        .collect();
+
+    Ok(LeaderboardResponse::new(entries, params.offset, params.limit))
+}
+
+/// GET /v1/povw/epochs/:epoch/addresses/:address
+/// Returns the PoVW rewards for a specific address at a specific epoch
+#[utoipa::path(
+    get,
+    path = "/v1/povw/epochs/{epoch}/addresses/{address}",
+    tag = "PoVW",
+    params(
+        ("epoch" = u64, Path, description = "Epoch number"),
+        ("address" = String, Path, description = "Ethereum address")
+    ),
+    responses(
+        (status = 200, description = "PoVW rewards for address at epoch", body = Option<EpochLeaderboardEntry>),
+        (status = 400, description = "Invalid address format"),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_address_at_epoch(
+    State(state): State<Arc<AppState>>,
+    Path((epoch, address_str)): Path<(u64, String)>,
+) -> Response {
+    // Parse and validate address
+    let address = match Address::from_str(&address_str) {
+        Ok(addr) => addr,
+        Err(e) => {
+            return handle_error(anyhow::anyhow!("Invalid address format: {}", e)).into_response()
+        }
+    };
+
+    match get_address_at_epoch_impl(state, epoch, address).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_address_at_epoch_impl(
+    state: Arc<AppState>,
+    epoch: u64,
+    address: Address,
+) -> anyhow::Result<Option<EpochLeaderboardEntry>> {
+    tracing::debug!("Fetching PoVW rewards for address {} at epoch {}", address, epoch);
+
+    // Fetch PoVW history for the address at specific epoch
+    let rewards = state
+        .rewards_db
+        .get_povw_rewards_history_by_address(address, Some(epoch), Some(epoch))
+        .await?;
+
+    if rewards.is_empty() {
+        return Ok(None);
+    }
+
+    let reward = &rewards[0];
+    let work_str = reward.work_submitted.to_string();
+    let uncapped_str = reward.uncapped_rewards.to_string();
+    let cap_str = reward.reward_cap.to_string();
+    let actual_str = reward.actual_rewards.to_string();
+    let staked_str = reward.staked_amount.to_string();
+    Ok(Some(EpochLeaderboardEntry {
+        rank: None, // No rank for individual queries
+        work_log_id: format!("{:#x}", reward.work_log_id),
+        epoch: reward.epoch,
+        work_submitted: work_str.clone(),
+        work_submitted_formatted: format_cycles(&work_str),
+        percentage: reward.percentage,
+        uncapped_rewards: uncapped_str.clone(),
+        uncapped_rewards_formatted: format_zkc(&uncapped_str),
+        reward_cap: cap_str.clone(),
+        reward_cap_formatted: format_zkc(&cap_str),
+        actual_rewards: actual_str.clone(),
+        actual_rewards_formatted: format_zkc(&actual_str),
+        is_capped: reward.is_capped,
+        staked_amount: staked_str.clone(),
+        staked_amount_formatted: format_zkc(&staked_str),
+    }))
+}
+
+/// GET /v1/povw/addresses
+/// Returns the all-time PoVW leaderboard
+#[utoipa::path(
+    get,
+    path = "/v1/povw/addresses",
+    tag = "PoVW",
+    params(PaginationParams),
+    responses(
+        (status = 200, description = "PoVW leaderboard", body = LeaderboardResponse<AggregateLeaderboardEntry>),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_all_time_leaderboard(
+    State(state): State<Arc<AppState>>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    let params = params.validate();
+
+    match get_all_time_leaderboard_impl(state, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=60"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_all_time_leaderboard_impl(
+    state: Arc<AppState>,
+    params: PaginationParams,
+) -> anyhow::Result<LeaderboardResponse<AggregateLeaderboardEntry>> {
+    tracing::debug!(
+        "Fetching all-time PoVW leaderboard with offset={}, limit={}",
+        params.offset,
+        params.limit
+    );
+
+    // Fetch data from database
+    let aggregates =
+        state.rewards_db.get_povw_rewards_aggregate(params.offset, params.limit).await?;
+
+    // Convert to response format with ranks
+    let entries: Vec<AggregateLeaderboardEntry> = aggregates
+        .into_iter()
+        .enumerate()
+        .map(|(index, agg)| {
+            let work_str = agg.total_work_submitted.to_string();
+            let actual_str = agg.total_actual_rewards.to_string();
+            let uncapped_str = agg.total_uncapped_rewards.to_string();
+            AggregateLeaderboardEntry {
+                rank: Some(params.offset + (index as u64) + 1),
+                work_log_id: format!("{:#x}", agg.work_log_id),
+                total_work_submitted: work_str.clone(),
+                total_work_submitted_formatted: format_cycles(&work_str),
+                total_actual_rewards: actual_str.clone(),
+                total_actual_rewards_formatted: format_zkc(&actual_str),
+                total_uncapped_rewards: uncapped_str.clone(),
+                total_uncapped_rewards_formatted: format_zkc(&uncapped_str),
+                epochs_participated: agg.epochs_participated,
+            }
+        })
+        .collect();
+
+    Ok(LeaderboardResponse::new(entries, params.offset, params.limit))
+}
+
+/// GET /v1/povw/addresses/:address
+/// Returns the PoVW rewards history for a specific address
+#[utoipa::path(
+    get,
+    path = "/v1/povw/addresses/{address}",
+    tag = "PoVW",
+    params(
+        ("address" = String, Path, description = "Work log ID (Ethereum address)"),
+        PaginationParams
+    ),
+    responses(
+        (status = 200, description = "Address PoVW history", body = AddressLeaderboardResponse<EpochLeaderboardEntry, PoVWAddressSummary>),
+        (status = 400, description = "Invalid address format"),
+        (status = 500, description = "Internal server error")
+    )
+)]
+async fn get_address_history(
+    State(state): State<Arc<AppState>>,
+    Path(address_str): Path<String>,
+    Query(params): Query<PaginationParams>,
+) -> Response {
+    // Parse and validate address
+    let address = match Address::from_str(&address_str) {
+        Ok(addr) => addr,
+        Err(e) => {
+            return handle_error(anyhow::anyhow!("Invalid address format: {}", e)).into_response()
+        }
+    };
+
+    let params = params.validate();
+
+    match get_address_history_impl(state, address, params).await {
+        Ok(response) => {
+            let mut res = Json(response).into_response();
+            res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300"));
+            res
+        }
+        Err(err) => handle_error(err).into_response(),
+    }
+}
+
+async fn get_address_history_impl(
+    state: Arc<AppState>,
+    address: Address,
+    params: PaginationParams,
+) -> anyhow::Result<AddressLeaderboardResponse<EpochLeaderboardEntry, PoVWAddressSummary>> {
+    tracing::debug!(
+        "Fetching PoVW history for address {} with offset={}, limit={}",
+        address,
+        params.offset,
+        params.limit
+    );
+
+    // Fetch PoVW history for the address
+    let rewards = state.rewards_db.get_povw_rewards_history_by_address(address, None, None).await?;
+
+    // Fetch aggregate summary for this address
+    let address_aggregate = state.rewards_db.get_povw_rewards_aggregate_by_address(address).await?;
+
+    // Apply pagination
+    let start = params.offset as usize;
+    let end = (start + params.limit as usize).min(rewards.len());
+    let paginated = if start < rewards.len() { rewards[start..end].to_vec() } else { vec![] };
+
+    // Convert to response format without rank (this is address history, not a leaderboard)
+    let entries: Vec<EpochLeaderboardEntry> = paginated
+        .into_iter()
+        .map(|reward| {
+            let work_str = reward.work_submitted.to_string();
+            let uncapped_str = reward.uncapped_rewards.to_string();
+            let cap_str = reward.reward_cap.to_string();
+            let actual_str = reward.actual_rewards.to_string();
+            let staked_str = reward.staked_amount.to_string();
+            EpochLeaderboardEntry {
+                rank: None, // No rank for individual address history
+                work_log_id: format!("{:#x}", reward.work_log_id),
+                epoch: reward.epoch,
+                work_submitted: work_str.clone(),
+                work_submitted_formatted: format_cycles(&work_str),
+                percentage: reward.percentage,
+                uncapped_rewards: uncapped_str.clone(),
+                uncapped_rewards_formatted: format_zkc(&uncapped_str),
+                reward_cap: cap_str.clone(),
+                reward_cap_formatted: format_zkc(&cap_str),
+                actual_rewards: actual_str.clone(),
+                actual_rewards_formatted: format_zkc(&actual_str),
+                is_capped: reward.is_capped,
+                staked_amount: staked_str.clone(),
+                staked_amount_formatted: format_zkc(&staked_str),
+            }
+        })
+        .collect();
+
+    // Create summary from aggregate if available, otherwise use default
+    let summary = if let Some(aggregate) = address_aggregate {
+        let work_str = aggregate.total_work_submitted.to_string();
+        let actual_str = aggregate.total_actual_rewards.to_string();
+        let uncapped_str = aggregate.total_uncapped_rewards.to_string();
+        PoVWAddressSummary {
+            work_log_id: format!("{:#x}", aggregate.work_log_id),
+            total_work_submitted: work_str.clone(),
+            total_work_submitted_formatted: format_cycles(&work_str),
+            total_actual_rewards: actual_str.clone(),
+            total_actual_rewards_formatted: format_zkc(&actual_str),
+            total_uncapped_rewards: uncapped_str.clone(),
+            total_uncapped_rewards_formatted: format_zkc(&uncapped_str),
+            epochs_participated: aggregate.epochs_participated,
+        }
+    } else {
+        // No data for this address - return empty summary
+        PoVWAddressSummary {
+            work_log_id: format!("{:#x}", address),
+            total_work_submitted: "0".to_string(),
+            total_work_submitted_formatted: format_cycles("0"),
+            total_actual_rewards: "0".to_string(),
+            total_actual_rewards_formatted: format_zkc("0"),
+            total_uncapped_rewards: "0".to_string(),
+            total_uncapped_rewards_formatted: format_zkc("0"),
+            epochs_participated: 0,
+        }
+    };
+
+    Ok(AddressLeaderboardResponse::new(entries, params.offset, params.limit, summary))
+}
diff --git a/crates/lambdas/indexer-api/src/routes/staking.rs b/crates/lambdas/indexer-api/src/routes/staking.rs
new file mode 100644
index 000000000..6ba62a9ed
--- /dev/null
+++ b/crates/lambdas/indexer-api/src/routes/staking.rs
@@ -0,0 +1,566 @@
+// Copyright 2025 RISC Zero, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
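Note the in-memory pagination pattern used by `get_address_history_impl` above (and by its delegation counterpart earlier, and the staking one below): the full history is fetched, then sliced, with `end` clamped to the collection length so that an out-of-range `offset` yields an empty page rather than a panic. The slice logic in isolation, as a runnable sketch:

// Sketch of the pagination slice shared by the *_history_impl functions.
fn paginate<T: Clone>(items: &[T], offset: u64, limit: u64) -> Vec<T> {
    let start = offset as usize;
    let end = (start + limit as usize).min(items.len());
    if start < items.len() { items[start..end].to_vec() } else { vec![] }
}

fn main() {
    let data: Vec<u32> = (0..5).collect();
    assert_eq!(paginate(&data, 0, 2), vec![0, 1]); // first page
    assert_eq!(paginate(&data, 4, 2), vec![4]);    // partial last page
    assert!(paginate(&data, 9, 2).is_empty());     // past the end: empty, no panic
}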
+ +use alloy::primitives::Address; +use axum::{ + extract::{Path, Query, State}, + http::header, + response::{IntoResponse, Response}, + routing::get, + Json, Router, +}; +use std::{str::FromStr, sync::Arc}; +use utoipa; + +use crate::{ + db::AppState, + handler::{cache_control, handle_error}, + models::{ + AddressLeaderboardResponse, AggregateStakingEntry, EpochStakingEntry, EpochStakingSummary, + LeaderboardResponse, PaginationParams, StakingAddressSummary, StakingSummaryStats, + }, + utils::format_zkc, +}; + +/// Create Staking routes +pub fn routes() -> Router> { + Router::new() + // Aggregate summary endpoint + .route("/", get(get_staking_summary)) + // Epoch endpoints + .route("/epochs", get(get_all_epochs_summary)) + .route("/epochs/:epoch", get(get_epoch_summary)) + .route("/epochs/:epoch/addresses", get(get_epoch_leaderboard)) + .route("/epochs/:epoch/addresses/:address", get(get_address_at_epoch)) + // Address endpoints + .route("/addresses", get(get_all_time_leaderboard)) + .route("/addresses/:address", get(get_address_history)) +} + +/// GET /v1/staking +/// Returns the aggregate staking summary +#[utoipa::path( + get, + path = "/v1/staking", + tag = "Staking", + responses( + (status = 200, description = "Staking summary statistics", body = StakingSummaryStats), + (status = 500, description = "Internal server error") + ) +)] +async fn get_staking_summary(State(state): State>) -> Response { + match get_staking_summary_impl(state).await { + Ok(response) => { + let mut res = Json(response).into_response(); + res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=60")); + res + } + Err(err) => handle_error(err).into_response(), + } +} + +async fn get_staking_summary_impl(state: Arc) -> anyhow::Result { + tracing::debug!("Fetching staking summary stats"); + + // Fetch summary stats + let summary = state + .rewards_db + .get_staking_summary_stats() + .await? 
+ .ok_or_else(|| anyhow::anyhow!("No staking summary data available"))?; + + let total_str = summary.current_total_staked.to_string(); + let emissions_str = summary + .total_staking_emissions_all_time + .map(|v| v.to_string()) + .unwrap_or_else(|| "0".to_string()); + + Ok(StakingSummaryStats { + current_total_staked: total_str.clone(), + current_total_staked_formatted: format_zkc(&total_str), + total_unique_stakers: summary.total_unique_stakers, + current_active_stakers: summary.current_active_stakers, + current_withdrawing: summary.current_withdrawing, + total_staking_emissions_all_time: Some(emissions_str.clone()), + total_staking_emissions_all_time_formatted: Some(format_zkc(&emissions_str)), + last_updated_at: summary.updated_at, + }) +} + +/// GET /v1/staking/epochs +/// Returns summary of all epochs +#[utoipa::path( + get, + path = "/v1/staking/epochs", + tag = "Staking", + params( + PaginationParams + ), + responses( + (status = 200, description = "All epochs staking summary", body = LeaderboardResponse), + (status = 500, description = "Internal server error") + ) +)] +async fn get_all_epochs_summary( + State(state): State>, + Query(params): Query, +) -> Response { + let params = params.validate(); + + match get_all_epochs_summary_impl(state, params).await { + Ok(response) => { + let mut res = Json(response).into_response(); + res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300")); + res + } + Err(err) => handle_error(err).into_response(), + } +} + +async fn get_all_epochs_summary_impl( + state: Arc, + params: PaginationParams, +) -> anyhow::Result> { + tracing::debug!( + "Fetching all epochs staking summary with offset={}, limit={}", + params.offset, + params.limit + ); + + // Fetch all epoch summaries + let summaries = + state.rewards_db.get_all_epoch_staking_summaries(params.offset, params.limit).await?; + + // Convert to response format + let entries: Vec = summaries + .into_iter() + .map(|summary| { + let total_str = summary.total_staked.to_string(); + let emissions_str = summary.total_staking_emissions.to_string(); + let power_str = summary.total_staking_power.to_string(); + EpochStakingSummary { + epoch: summary.epoch, + total_staked: total_str.clone(), + total_staked_formatted: format_zkc(&total_str), + num_stakers: summary.num_stakers, + num_withdrawing: summary.num_withdrawing, + total_staking_emissions: emissions_str.clone(), + total_staking_emissions_formatted: format_zkc(&emissions_str), + total_staking_power: power_str.clone(), + total_staking_power_formatted: format_zkc(&power_str), + num_reward_recipients: summary.num_reward_recipients, + epoch_start_time: summary.epoch_start_time, + epoch_end_time: summary.epoch_end_time, + last_updated_at: summary.updated_at, + } + }) + .collect(); + + Ok(LeaderboardResponse::new(entries, params.offset, params.limit)) +} + +/// GET /v1/staking/epochs/:epoch +/// Returns summary for a specific epoch +#[utoipa::path( + get, + path = "/v1/staking/epochs/{epoch}", + tag = "Staking", + params( + ("epoch" = u64, Path, description = "Epoch number") + ), + responses( + (status = 200, description = "Epoch staking summary", body = EpochStakingSummary), + (status = 404, description = "Epoch not found"), + (status = 500, description = "Internal server error") + ) +)] +async fn get_epoch_summary(State(state): State>, Path(epoch): Path) -> Response { + match get_epoch_summary_impl(state, epoch).await { + Ok(response) => { + let mut res = Json(response).into_response(); + 
res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300")); + res + } + Err(err) => handle_error(err).into_response(), + } +} + +async fn get_epoch_summary_impl( + state: Arc, + epoch: u64, +) -> anyhow::Result { + tracing::debug!("Fetching staking summary for epoch {}", epoch); + + // Fetch epoch summary + let summary = state + .rewards_db + .get_epoch_staking_summary(epoch) + .await? + .ok_or_else(|| anyhow::anyhow!("No staking data available for epoch {}", epoch))?; + + let total_str = summary.total_staked.to_string(); + let emissions_str = summary.total_staking_emissions.to_string(); + let power_str = summary.total_staking_power.to_string(); + + Ok(EpochStakingSummary { + epoch: summary.epoch, + total_staked: total_str.clone(), + total_staked_formatted: format_zkc(&total_str), + num_stakers: summary.num_stakers, + num_withdrawing: summary.num_withdrawing, + total_staking_emissions: emissions_str.clone(), + total_staking_emissions_formatted: format_zkc(&emissions_str), + total_staking_power: power_str.clone(), + total_staking_power_formatted: format_zkc(&power_str), + num_reward_recipients: summary.num_reward_recipients, + epoch_start_time: summary.epoch_start_time, + epoch_end_time: summary.epoch_end_time, + last_updated_at: summary.updated_at, + }) +} + +/// GET /v1/staking/epochs/:epoch/addresses +/// Returns the staking leaderboard for a specific epoch +#[utoipa::path( + get, + path = "/v1/staking/epochs/{epoch}/addresses", + tag = "Staking", + params( + ("epoch" = u64, Path, description = "Epoch number"), + PaginationParams + ), + responses( + (status = 200, description = "Epoch staking leaderboard", body = LeaderboardResponse), + (status = 500, description = "Internal server error") + ) +)] +async fn get_epoch_leaderboard( + State(state): State>, + Path(epoch): Path, + Query(params): Query, +) -> Response { + let params = params.validate(); + + match get_epoch_leaderboard_impl(state, epoch, params).await { + Ok(response) => { + let mut res = Json(response).into_response(); + res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300")); + res + } + Err(err) => handle_error(err).into_response(), + } +} + +async fn get_epoch_leaderboard_impl( + state: Arc, + epoch: u64, + params: PaginationParams, +) -> anyhow::Result> { + tracing::debug!( + "Fetching staking leaderboard for epoch {} with offset={}, limit={}", + epoch, + params.offset, + params.limit + ); + + // Fetch data from database + let positions = + state.rewards_db.get_staking_positions_by_epoch(epoch, params.offset, params.limit).await?; + + // Convert to response format with ranks + let entries: Vec = positions + .into_iter() + .enumerate() + .map(|(index, position)| { + let staked_str = position.staked_amount.to_string(); + let generated_str = position.rewards_generated.to_string(); + EpochStakingEntry { + rank: Some(params.offset + (index as u64) + 1), + staker_address: format!("{:#x}", position.staker_address), + epoch: position.epoch, + staked_amount: staked_str.clone(), + staked_amount_formatted: format_zkc(&staked_str), + is_withdrawing: position.is_withdrawing, + rewards_delegated_to: position + .rewards_delegated_to + .map(|addr| format!("{:#x}", addr)), + votes_delegated_to: position.votes_delegated_to.map(|addr| format!("{:#x}", addr)), + rewards_generated: generated_str.clone(), + rewards_generated_formatted: format_zkc(&generated_str), + } + }) + .collect(); + + Ok(LeaderboardResponse::new(entries, params.offset, params.limit)) +} + +/// GET 
/v1/staking/epochs/:epoch/addresses/:address +/// Returns staking data for a specific address at a specific epoch +#[utoipa::path( + get, + path = "/v1/staking/epochs/{epoch}/addresses/{address}", + tag = "Staking", + params( + ("epoch" = u64, Path, description = "Epoch number"), + ("address" = String, Path, description = "Ethereum address") + ), + responses( + (status = 200, description = "Staking position for address at epoch", body = Option), + (status = 400, description = "Invalid address format"), + (status = 500, description = "Internal server error") + ) +)] +async fn get_address_at_epoch( + State(state): State>, + Path((epoch, address_str)): Path<(u64, String)>, +) -> Response { + // Parse and validate address + let address = match Address::from_str(&address_str) { + Ok(addr) => addr, + Err(e) => { + return handle_error(anyhow::anyhow!("Invalid address format: {}", e)).into_response() + } + }; + + match get_address_at_epoch_impl(state, epoch, address).await { + Ok(response) => { + let mut res = Json(response).into_response(); + res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300")); + res + } + Err(err) => handle_error(err).into_response(), + } +} + +async fn get_address_at_epoch_impl( + state: Arc, + epoch: u64, + address: Address, +) -> anyhow::Result> { + tracing::debug!("Fetching staking data for address {} at epoch {}", address, epoch); + + // Fetch staking history for the address at specific epoch + let positions = + state.rewards_db.get_staking_history_by_address(address, Some(epoch), Some(epoch)).await?; + + if positions.is_empty() { + return Ok(None); + } + + let position = &positions[0]; + let staked_str = position.staked_amount.to_string(); + let generated_str = position.rewards_generated.to_string(); + Ok(Some(EpochStakingEntry { + rank: None, // No rank for individual queries + staker_address: format!("{:#x}", position.staker_address), + epoch: position.epoch, + staked_amount: staked_str.clone(), + staked_amount_formatted: format_zkc(&staked_str), + is_withdrawing: position.is_withdrawing, + rewards_delegated_to: position.rewards_delegated_to.map(|addr| format!("{:#x}", addr)), + votes_delegated_to: position.votes_delegated_to.map(|addr| format!("{:#x}", addr)), + rewards_generated: generated_str.clone(), + rewards_generated_formatted: format_zkc(&generated_str), + })) +} + +/// GET /v1/staking/addresses +/// Returns the all-time staking leaderboard +#[utoipa::path( + get, + path = "/v1/staking/addresses", + tag = "Staking", + params(PaginationParams), + responses( + (status = 200, description = "Staking leaderboard", body = LeaderboardResponse), + (status = 500, description = "Internal server error") + ) +)] +async fn get_all_time_leaderboard( + State(state): State>, + Query(params): Query, +) -> Response { + let params = params.validate(); + + match get_all_time_leaderboard_impl(state, params).await { + Ok(response) => { + let mut res = Json(response).into_response(); + res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=60")); + res + } + Err(err) => handle_error(err).into_response(), + } +} + +async fn get_all_time_leaderboard_impl( + state: Arc, + params: PaginationParams, +) -> anyhow::Result> { + tracing::debug!( + "Fetching all-time staking leaderboard with offset={}, limit={}", + params.offset, + params.limit + ); + + // Fetch aggregate data from database + let aggregates = + state.rewards_db.get_staking_positions_aggregate(params.offset, params.limit).await?; + + // Convert to response format with 
ranks + let entries: Vec = aggregates + .into_iter() + .enumerate() + .map(|(index, aggregate)| { + let staked_str = aggregate.total_staked.to_string(); + let generated_str = aggregate.total_rewards_generated.to_string(); + AggregateStakingEntry { + rank: Some(params.offset + (index as u64) + 1), + staker_address: format!("{:#x}", aggregate.staker_address), + total_staked: staked_str.clone(), + total_staked_formatted: format_zkc(&staked_str), + is_withdrawing: aggregate.is_withdrawing, + rewards_delegated_to: aggregate + .rewards_delegated_to + .map(|addr| format!("{:#x}", addr)), + votes_delegated_to: aggregate.votes_delegated_to.map(|addr| format!("{:#x}", addr)), + epochs_participated: aggregate.epochs_participated, + total_rewards_generated: generated_str.clone(), + total_rewards_generated_formatted: format_zkc(&generated_str), + } + }) + .collect(); + + Ok(LeaderboardResponse::new(entries, params.offset, params.limit)) +} + +/// GET /v1/staking/addresses/:address +/// Returns the staking history for a specific address +#[utoipa::path( + get, + path = "/v1/staking/addresses/{address}", + tag = "Staking", + params( + ("address" = String, Path, description = "Ethereum address"), + PaginationParams + ), + responses( + (status = 200, description = "Address staking history", body = AddressLeaderboardResponse), + (status = 400, description = "Invalid address format"), + (status = 500, description = "Internal server error") + ) +)] +async fn get_address_history( + State(state): State>, + Path(address_str): Path, + Query(params): Query, +) -> Response { + // Parse and validate address + let address = match Address::from_str(&address_str) { + Ok(addr) => addr, + Err(e) => { + return handle_error(anyhow::anyhow!("Invalid address format: {}", e)).into_response() + } + }; + + let params = params.validate(); + + match get_address_history_impl(state, address, params).await { + Ok(response) => { + let mut res = Json(response).into_response(); + res.headers_mut().insert(header::CACHE_CONTROL, cache_control("public, max-age=300")); + res + } + Err(err) => handle_error(err).into_response(), + } +} + +async fn get_address_history_impl( + state: Arc, + address: Address, + params: PaginationParams, +) -> anyhow::Result> { + tracing::debug!( + "Fetching staking history for address {} with offset={}, limit={}", + address, + params.offset, + params.limit + ); + + // Fetch staking history for the address + let positions = state.rewards_db.get_staking_history_by_address(address, None, None).await?; + + // Fetch aggregate summary for this address + let address_aggregate = + state.rewards_db.get_staking_position_aggregate_by_address(address).await?; + + // Apply pagination + let start = params.offset as usize; + let end = (start + params.limit as usize).min(positions.len()); + let paginated = if start < positions.len() { positions[start..end].to_vec() } else { vec![] }; + + // Convert to response format + let entries: Vec = paginated + .into_iter() + .map(|position| { + let staked_str = position.staked_amount.to_string(); + let generated_str = position.rewards_generated.to_string(); + EpochStakingEntry { + rank: None, // No rank for individual address queries + staker_address: format!("{:#x}", position.staker_address), + epoch: position.epoch, + staked_amount: staked_str.clone(), + staked_amount_formatted: format_zkc(&staked_str), + is_withdrawing: position.is_withdrawing, + rewards_delegated_to: position + .rewards_delegated_to + .map(|addr| format!("{:#x}", addr)), + votes_delegated_to: 
position.votes_delegated_to.map(|addr| format!("{:#x}", addr)), + rewards_generated: generated_str.clone(), + rewards_generated_formatted: format_zkc(&generated_str), + } + }) + .collect(); + + // Create summary from aggregate if available, otherwise use default + let summary = if let Some(aggregate) = address_aggregate { + let staked_str = aggregate.total_staked.to_string(); + let generated_str = aggregate.total_rewards_generated.to_string(); + StakingAddressSummary { + staker_address: format!("{:#x}", aggregate.staker_address), + total_staked: staked_str.clone(), + total_staked_formatted: format_zkc(&staked_str), + is_withdrawing: aggregate.is_withdrawing, + rewards_delegated_to: aggregate.rewards_delegated_to.map(|addr| format!("{:#x}", addr)), + votes_delegated_to: aggregate.votes_delegated_to.map(|addr| format!("{:#x}", addr)), + epochs_participated: aggregate.epochs_participated, + total_rewards_generated: generated_str.clone(), + total_rewards_generated_formatted: format_zkc(&generated_str), + } + } else { + // No data for this address - return empty summary + StakingAddressSummary { + staker_address: format!("{:#x}", address), + total_staked: "0".to_string(), + total_staked_formatted: format_zkc("0"), + is_withdrawing: false, + rewards_delegated_to: None, + votes_delegated_to: None, + epochs_participated: 0, + total_rewards_generated: "0".to_string(), + total_rewards_generated_formatted: format_zkc("0"), + } + }; + + Ok(AddressLeaderboardResponse::new(entries, params.offset, params.limit, summary)) +} diff --git a/crates/lambdas/indexer-api/src/utils.rs b/crates/lambdas/indexer-api/src/utils.rs new file mode 100644 index 000000000..17495df28 --- /dev/null +++ b/crates/lambdas/indexer-api/src/utils.rs @@ -0,0 +1,115 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
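Stepping back to `get_address_history_impl` above: it fetches the address's full staking history and then pages in memory with a clamped slice. A minimal standalone sketch of that pattern (the `paginate` helper is mine for illustration, not part of this crate):

```rust
/// Clamped-slice pagination, mirroring the offset/limit logic in
/// get_address_history_impl: out-of-range offsets yield an empty page
/// instead of panicking on a bad slice index.
fn paginate<T: Clone>(items: &[T], offset: u64, limit: u64) -> Vec<T> {
    let start = offset as usize;
    let end = start.saturating_add(limit as usize).min(items.len());
    if start < items.len() {
        items[start..end].to_vec()
    } else {
        Vec::new()
    }
}

fn main() {
    let epochs = vec![1u64, 2, 3];
    assert_eq!(paginate(&epochs, 0, 2), vec![1, 2]); // first page
    assert_eq!(paginate(&epochs, 2, 2), vec![3]);    // partial last page
    assert!(paginate(&epochs, 5, 2).is_empty());     // offset past the end
}
```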
+ +use alloy::primitives::U256; +use std::str::FromStr; + +/// Format wei amount to human-readable ZKC with commas +/// Converts from 18 decimals to ZKC units +pub fn format_zkc(wei_str: &str) -> String { + match U256::from_str(wei_str) { + Ok(wei) => { + // ZKC has 18 decimals + let divisor = U256::from(10u64).pow(U256::from(18)); + let zkc = wei / divisor; + let formatted = format_with_commas_u256(zkc); + format!("{} ZKC", formatted) + } + Err(_) => "0 ZKC".to_string(), + } +} + +/// Format work amount to human-readable cycles with commas +/// Work values are raw cycle counts (no decimals) +pub fn format_cycles(cycles_str: &str) -> String { + match U256::from_str(cycles_str) { + Ok(cycles) => { + // Work values are already in cycles (no decimal conversion needed) + let formatted = format_with_commas_u256(cycles); + format!("{} cycles", formatted) + } + Err(_) => "0 cycles".to_string(), + } +} + +/// Format a u64 number with comma separators +#[allow(dead_code)] +pub fn format_with_commas(num: u64) -> String { + let s = num.to_string(); + let mut result = String::new(); + let mut count = 0; + + for ch in s.chars().rev() { + if count == 3 { + result.insert(0, ','); + count = 0; + } + result.insert(0, ch); + count += 1; + } + + result +} + +/// Format a U256 number with comma separators +fn format_with_commas_u256(num: U256) -> String { + let s = num.to_string(); + let mut result = String::new(); + let mut count = 0; + + for ch in s.chars().rev() { + if count == 3 { + result.insert(0, ','); + count = 0; + } + result.insert(0, ch); + count += 1; + } + + result +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_format_zkc() { + assert_eq!(format_zkc("1000000000000000000000"), "1,000 ZKC"); + assert_eq!(format_zkc("1500000000000000000000000"), "1,500,000 ZKC"); + assert_eq!(format_zkc("788626950526189926000000"), "788,626 ZKC"); + assert_eq!(format_zkc("0"), "0 ZKC"); + assert_eq!(format_zkc("invalid"), "0 ZKC"); + } + + #[test] + fn test_format_cycles() { + assert_eq!(format_cycles("1"), "1 cycles"); + assert_eq!(format_cycles("1000"), "1,000 cycles"); + assert_eq!(format_cycles("30711723851776"), "30,711,723,851,776 cycles"); + assert_eq!(format_cycles("5000000"), "5,000,000 cycles"); + assert_eq!(format_cycles("0"), "0 cycles"); + } + + #[test] + fn test_format_with_commas() { + assert_eq!(format_with_commas(0), "0"); + assert_eq!(format_with_commas(100), "100"); + assert_eq!(format_with_commas(1000), "1,000"); + assert_eq!(format_with_commas(10000), "10,000"); + assert_eq!(format_with_commas(100000), "100,000"); + assert_eq!(format_with_commas(1000000), "1,000,000"); + assert_eq!(format_with_commas(1234567890), "1,234,567,890"); + } +} diff --git a/crates/lambdas/indexer-api/tests/local_integration.rs b/crates/lambdas/indexer-api/tests/local_integration.rs new file mode 100644 index 000000000..41bc27f37 --- /dev/null +++ b/crates/lambdas/indexer-api/tests/local_integration.rs @@ -0,0 +1,304 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +//! Test utilities for local integration tests + +use assert_cmd::Command; +use reqwest::Client; +use serde::Deserialize; +use std::{ + env, + net::TcpListener, + path::{Path, PathBuf}, + sync::Arc, + time::Duration, +}; +use tempfile::NamedTempFile; +use tokio::{ + process::{Child, Command as TokioCommand}, + sync::OnceCell, +}; +use tracing::{debug, info}; + +// Test modules +#[path = "local_integration/povw.rs"] +pub mod povw_tests; + +#[path = "local_integration/staking.rs"] +pub mod staking_tests; + +#[path = "local_integration/delegations.rs"] +pub mod delegations_tests; + +#[path = "local_integration/docs.rs"] +pub mod docs_tests; + +// Contract addresses for mainnet +const VEZKC_ADDRESS: &str = "0xE8Ae8eE8ffa57F6a79B6Cbe06BAFc0b05F3ffbf4"; +const ZKC_ADDRESS: &str = "0x000006c2A22ff4A44ff1f5d0F2ed65F781F55555"; +const POVW_ACCOUNTING_ADDRESS: &str = "0x319bd4050b2170a7aE3Ead3E6d5AB8a5c7cFBDF8"; + +// Indexer limits for faster tests +const END_EPOCH: u32 = 4; +const END_BLOCK: u32 = 23395398; + +/// Shared test environment that persists across all tests +struct SharedTestEnv { + api_url: String, + _temp_file: NamedTempFile, // Keep the database file alive + _api_process: Child, +} + +/// Test environment handle for individual tests +pub struct TestEnv { + api_url: String, +} + +// Static storage for the shared test environment +static SHARED_TEST_ENV: OnceCell<Arc<SharedTestEnv>> = OnceCell::const_new(); + +impl TestEnv { + /// Get the API URL + pub fn api_url(&self) -> &str { + &self.api_url + } + + /// Get or create the shared test environment + pub async fn shared() -> Self { + let shared_env = SHARED_TEST_ENV + .get_or_init(|| async { + Arc::new( + SharedTestEnv::initialize() + .await + .expect("Failed to initialize test environment"), + ) + }) + .await; + + TestEnv { api_url: shared_env.api_url.clone() } + } + + /// Make a GET request to the API + pub async fn get<T: for<'de> Deserialize<'de>>(&self, path: &str) -> anyhow::Result<T> { + let url = format!("{}{}", self.api_url, path); + let client = Client::new(); + let response = client.get(&url).send().await?; + + if !response.status().is_success() { + let status = response.status(); + let text = response.text().await?; + anyhow::bail!("Request failed with status {}: {}", status, text); + } + + Ok(response.json().await?)
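An aside on the `get` helper above: the `for<'de> Deserialize<'de>` bound is equivalent to serde's `DeserializeOwned`, which reads a little more directly. A self-contained sketch of the same typed-GET pattern under that spelling (`get_json` is a hypothetical free function, not this crate's API):

```rust
use anyhow::bail;
use reqwest::Client;
use serde::de::DeserializeOwned;

/// Fetch `base_url + path` and decode the JSON body into any owned-deserializable T,
/// surfacing non-2xx responses as errors with the body text attached.
async fn get_json<T: DeserializeOwned>(base_url: &str, path: &str) -> anyhow::Result<T> {
    let response = Client::new().get(format!("{}{}", base_url, path)).send().await?;
    if !response.status().is_success() {
        let status = response.status();
        let text = response.text().await?;
        bail!("Request failed with status {}: {}", status, text);
    }
    Ok(response.json::<T>().await?)
}
```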
+ } +} + +impl SharedTestEnv { + /// Initialize the shared test environment (called only once) + async fn initialize() -> anyhow::Result { + // Initialize tracing if not already done + let _ = tracing_subscriber::fmt() + .with_env_filter(tracing_subscriber::EnvFilter::from_default_env()) + .try_init(); + + info!("Creating shared test environment..."); + + // Check for ETH_MAINNET_RPC_URL + let rpc_url = env::var("ETH_MAINNET_RPC_URL") + .expect("ETH_MAINNET_RPC_URL environment variable must be set"); + + // Create temp file for database + let temp_file = NamedTempFile::new()?; + let db_path = temp_file.path().to_path_buf(); + + // Run indexer to populate database + info!("Running indexer to populate database..."); + Self::run_indexer(&rpc_url, &db_path).await?; + + // Find available port + let api_port = Self::find_available_port()?; + + // Start API server + info!("Starting API server on port {}...", api_port); + let api_process = Self::start_api_server(&db_path, api_port).await?; + + // Wait for API to be ready + let api_url = format!("http://127.0.0.1:{}", api_port); + Self::wait_for_api(&api_url).await?; + + Ok(SharedTestEnv { api_url, _temp_file: temp_file, _api_process: api_process }) + } + + /// Run indexer to populate database + async fn run_indexer(rpc_url: &str, db_path: &PathBuf) -> anyhow::Result<()> { + // Create empty database file + std::fs::File::create(db_path)?; + + let db_url = format!("sqlite:{}", db_path.display()); + info!("Using database at {}", db_path.display()); + + // Use assert_cmd to get the path to the binary + let cmd = Command::cargo_bin("rewards-indexer")?; + let program = cmd.get_program(); + + // Build command with tokio + let mut child = TokioCommand::new(program) + .args([ + "--rpc-url", + rpc_url, + "--vezkc-address", + VEZKC_ADDRESS, + "--zkc-address", + ZKC_ADDRESS, + "--povw-accounting-address", + POVW_ACCOUNTING_ADDRESS, + "--db", + &db_url, + "--interval", + "600", + "--end-epoch", + &END_EPOCH.to_string(), + "--end-block", + &END_BLOCK.to_string(), + ]) + .env("DATABASE_URL", &db_url) + .env("RUST_LOG", "debug,sqlx=warn") + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn()?; + + // Spawn tasks to read and log output + let stdout = child.stdout.take().expect("Failed to take stdout"); + let stderr = child.stderr.take().expect("Failed to take stderr"); + + // Read stdout in background + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stdout); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!(target: "indexer", "{}", line); + } + }); + + // Read stderr in background + tokio::spawn(async move { + use tokio::io::{AsyncBufReadExt, BufReader}; + let reader = BufReader::new(stderr); + let mut lines = reader.lines(); + while let Ok(Some(line)) = lines.next_line().await { + debug!(target: "indexer", "stderr: {}", line); + } + }); + + // Wait for indexer to complete (it should exit when it reaches --end-block) + info!("Waiting for indexer to complete (will exit at block {})...", END_BLOCK); + + // Set a timeout for the indexer to complete + let timeout = Duration::from_secs(120); + let start = std::time::Instant::now(); + + loop { + // Check if process has exited + match child.try_wait() { + Ok(Some(status)) => { + info!("Indexer exited with status: {:?}", status); + if !status.success() { + anyhow::bail!("Indexer exited with error: {:?}", status); + } + break; + } + Ok(None) => { + // Process still running + if 
start.elapsed() > timeout { + info!("Timeout reached, killing indexer..."); + child.kill().await?; + let _ = child.wait().await; + anyhow::bail!( + "Indexer did not complete within {} seconds", + timeout.as_secs() + ); + } + + // Print progress every 5 seconds + if start.elapsed().as_secs() % 5 == 0 { + let size = std::fs::metadata(db_path).map(|m| m.len()).unwrap_or(0); + debug!( + "Still indexing... (elapsed: {}s, DB size: {} bytes)", + start.elapsed().as_secs(), + size + ); + } + + tokio::time::sleep(Duration::from_secs(1)).await; + } + Err(e) => { + anyhow::bail!("Error checking indexer status: {}", e); + } + } + } + + info!("Indexer completed successfully"); + + Ok(()) + } + + /// Find an available port for the API server + fn find_available_port() -> anyhow::Result { + let listener = TcpListener::bind("127.0.0.1:0")?; + let port = listener.local_addr()?.port(); + Ok(port) + } + + /// Start the API server + async fn start_api_server(db_path: &Path, port: u16) -> anyhow::Result { + let db_url = format!("sqlite:{}", db_path.display()); + info!("Starting API server on port {} with database {}", port, db_path.display()); + + // Use assert_cmd to get the path to the binary + let cmd = Command::cargo_bin("local-server")?; + let program = cmd.get_program(); + + // Build command with tokio + let child = TokioCommand::new(program) + .env("DB_URL", &db_url) + .env("PORT", port.to_string()) + .env("RUST_LOG", "debug,tower_http=debug,sqlx=warn") + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()) + .spawn()?; + + Ok(child) + } + + /// Wait for API server to be ready + async fn wait_for_api(api_url: &str) -> anyhow::Result<()> { + let client = Client::new(); + let health_url = format!("{}/health", api_url); + info!("Waiting for API server to be ready at {}...", health_url); + + for i in 0..30 { + if let Ok(response) = client.get(&health_url).send().await { + if response.status().is_success() { + info!("API server is ready after {} attempts", i + 1); + return Ok(()); + } + } + tokio::time::sleep(Duration::from_millis(500)).await; + } + + anyhow::bail!("API server did not start within 15 seconds") + } +} diff --git a/crates/lambdas/indexer-api/tests/local_integration/delegations.rs b/crates/lambdas/indexer-api/tests/local_integration/delegations.rs new file mode 100644 index 000000000..cd083c86c --- /dev/null +++ b/crates/lambdas/indexer-api/tests/local_integration/delegations.rs @@ -0,0 +1,178 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Integration tests for Delegations API endpoints + +use indexer_api::models::{DelegationPowerEntry, LeaderboardResponse}; + +use super::TestEnv; + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_delegations_votes_leaderboard() { + let env = TestEnv::shared().await; + + // Test votes delegation leaderboard + let response: LeaderboardResponse = + env.get("/v1/delegations/votes/addresses").await.unwrap(); + + // Basic validation + assert!(response.pagination.count <= response.pagination.limit as usize); + + // Test with limit + let response: LeaderboardResponse = + env.get("/v1/delegations/votes/addresses?limit=3").await.unwrap(); + assert!(response.entries.len() <= 3); + assert_eq!(response.pagination.limit, 3); + + // Check specific values from real data for top entries + if response.entries.len() >= 2 { + let first = &response.entries[0]; + assert_eq!(first.delegate_address, "0x2408e37489c231f883126c87e8aadbad782a040a"); + assert_eq!(first.power, "726927981342423248000000"); + assert_eq!(first.delegator_count, 0); + assert_eq!(first.delegators.len(), 0); + + let second = &response.entries[1]; + assert_eq!(second.delegate_address, "0x7cc3376b8d38b2c923cd9d5164f9d74e303482b2"); + assert_eq!(second.power, "603060340000000000000000"); + assert_eq!(second.delegator_count, 0); + assert_eq!(second.delegators.len(), 0); + } + + // Verify rank ordering if we have data + if response.entries.len() > 1 { + for i in 1..response.entries.len() { + if let (Some(rank1), Some(rank2)) = + (response.entries[i - 1].rank, response.entries[i].rank) + { + assert!(rank1 < rank2); + } + } + } +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_delegations_rewards_leaderboard() { + let env = TestEnv::shared().await; + + // Test rewards delegation leaderboard + let response: LeaderboardResponse = + env.get("/v1/delegations/rewards/addresses").await.unwrap(); + + // Basic validation + assert!(response.pagination.count <= response.pagination.limit as usize); + + // Test with limit + let response: LeaderboardResponse = + env.get("/v1/delegations/rewards/addresses?limit=3").await.unwrap(); + assert!(response.entries.len() <= 3); + assert_eq!(response.pagination.limit, 3); + + // Check specific values from real data for top entries + if response.entries.len() >= 2 { + let first = &response.entries[0]; + assert_eq!(first.delegate_address, "0x0164ec96442196a02931f57e7e20fa59cff43845"); + assert_eq!(first.power, "726927981342423248000000"); + assert_eq!(first.delegator_count, 1); + assert_eq!(first.delegators.len(), 1); + + let second = &response.entries[1]; + assert_eq!(second.delegate_address, "0x7cc3376b8d38b2c923cd9d5164f9d74e303482b2"); + assert_eq!(second.power, "603060340000000000000000"); + assert_eq!(second.delegator_count, 0); + assert_eq!(second.delegators.len(), 0); + } + + // Verify rank ordering if we have data + if response.entries.len() > 1 { + for i in 1..response.entries.len() { + if let (Some(rank1), Some(rank2)) = + (response.entries[i - 1].rank, response.entries[i].rank) + { + assert!(rank1 < rank2); + } + } + } +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_delegations_votes_by_epoch() { + let env = TestEnv::shared().await; + + // Test votes delegation for a specific epoch (we index up to epoch 4) + let response: LeaderboardResponse = + env.get("/v1/delegations/votes/epochs/3/addresses").await.unwrap(); + + // Basic validation + assert!(response.pagination.count <= response.pagination.limit as usize); + + // Verify we 
have entries (epoch 3 should have data) + if response.pagination.count > 0 { + assert!(!response.entries.is_empty()); + + // Verify addresses are valid + for entry in &response.entries { + assert!(entry.delegate_address.starts_with("0x")); + assert_eq!(entry.delegate_address.len(), 42); + } + } +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_delegations_rewards_by_epoch() { + let env = TestEnv::shared().await; + + // Test rewards delegation for a specific epoch (we index up to epoch 4) + let response: LeaderboardResponse = + env.get("/v1/delegations/rewards/epochs/3/addresses").await.unwrap(); + + // Basic validation + assert!(response.pagination.count <= response.pagination.limit as usize); + + // Verify we have entries (epoch 3 should have data) + if response.pagination.count > 0 { + assert!(!response.entries.is_empty()); + + // Verify addresses are valid + for entry in &response.entries { + assert!(entry.delegate_address.starts_with("0x")); + assert_eq!(entry.delegate_address.len(), 42); + } + } +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_delegations_pagination() { + let env = TestEnv::shared().await; + + // Test pagination for votes + let response1: LeaderboardResponse = + env.get("/v1/delegations/votes/addresses?limit=2").await.unwrap(); + let response2: LeaderboardResponse = + env.get("/v1/delegations/votes/addresses?limit=2&offset=2").await.unwrap(); + + // Ensure responses are different if we have enough data + if response1.entries.len() == 2 && !response2.entries.is_empty() { + assert_ne!(response1.entries[0].delegate_address, response2.entries[0].delegate_address); + } + + // Verify pagination metadata + assert_eq!(response1.pagination.offset, 0); + assert_eq!(response2.pagination.offset, 2); +} diff --git a/crates/lambdas/indexer-api/tests/local_integration/docs.rs b/crates/lambdas/indexer-api/tests/local_integration/docs.rs new file mode 100644 index 000000000..289c6a334 --- /dev/null +++ b/crates/lambdas/indexer-api/tests/local_integration/docs.rs @@ -0,0 +1,135 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Integration tests for documentation and OpenAPI endpoints + +use indexer_api::models::HealthResponse; +use serde_json::Value; + +use super::TestEnv; + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_health_endpoint() { + let env = TestEnv::shared().await; + + let response: HealthResponse = env.get("/health").await.unwrap(); + + assert_eq!(response.status, "healthy"); + assert_eq!(response.service, "indexer-api"); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_openapi_yaml_endpoint() { + let env = TestEnv::shared().await; + + // Get the raw YAML response + let client = reqwest::Client::new(); + let url = format!("{}/openapi.yaml", env.api_url()); + let response = client.get(&url).send().await.unwrap(); + + assert!(response.status().is_success()); + + let content_type = + response.headers().get("content-type").and_then(|v| v.to_str().ok()).unwrap_or(""); + + assert!(content_type.contains("yaml") || content_type.contains("x-yaml")); + + let body = response.text().await.unwrap(); + assert!(body.contains("openapi:")); + assert!(body.contains("Boundless Indexer API")); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_openapi_json_endpoint() { + let env = TestEnv::shared().await; + + let response: Value = env.get("/openapi.json").await.unwrap(); + + // Verify it's valid OpenAPI JSON + assert!(response.get("openapi").is_some()); + assert!(response.get("info").is_some()); + assert!(response.get("paths").is_some()); + assert!(response.get("components").is_some()); + + // Verify basic info + let info = response.get("info").unwrap(); + assert!(info.get("title").unwrap().as_str().unwrap().contains("Boundless")); + assert!(info.get("version").is_some()); + + // Verify we have paths defined + let paths = response.get("paths").unwrap().as_object().unwrap(); + assert!(paths.contains_key("/health")); + assert!(paths.contains_key("/v1/povw")); + assert!(paths.contains_key("/v1/staking")); + assert!(paths.contains_key("/v1/delegations/votes/addresses")); + assert!(paths.contains_key("/v1/delegations/rewards/addresses")); + + // Verify components/schemas are defined + let components = response.get("components").unwrap(); + let schemas = components.get("schemas").unwrap().as_object().unwrap(); + + // Check for important schema definitions + // Just verify we have some schemas defined + assert!(!schemas.is_empty(), "Should have schema definitions"); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_swagger_ui_endpoint() { + let env = TestEnv::shared().await; + + // Get the raw HTML response + let client = reqwest::Client::new(); + let url = format!("{}/docs", env.api_url()); + let response = client.get(&url).send().await.unwrap(); + + assert!(response.status().is_success()); + + let content_type = + response.headers().get("content-type").and_then(|v| v.to_str().ok()).unwrap_or(""); + + assert!(content_type.contains("text/html")); + + let body = response.text().await.unwrap(); + + // Verify it's the Swagger UI HTML + // The utoipa-swagger-ui generates HTML with these characteristic elements + assert!(body.contains("swagger-ui"), "Response should contain swagger-ui"); + assert!(body.contains(""), "Response should be valid HTML"); + // Check for either the API title or the OpenAPI endpoint reference + assert!( + body.contains("openapi.json") || body.contains("Swagger UI"), + "Response should reference OpenAPI spec or contain Swagger UI" + ); +} + +#[tokio::test] +#[ignore = "Requires 
ETH_MAINNET_RPC_URL"] +async fn test_404_handler() { + let env = TestEnv::shared().await; + + // Try to access a non-existent endpoint + let client = reqwest::Client::new(); + let url = format!("{}/v1/nonexistent", env.api_url()); + let response = client.get(&url).send().await.unwrap(); + + assert_eq!(response.status().as_u16(), 404); + + let body: Value = response.json().await.unwrap(); + assert!(body.get("error").is_some()); + assert!(body.get("message").is_some()); +} diff --git a/crates/lambdas/indexer-api/tests/local_integration/povw.rs b/crates/lambdas/indexer-api/tests/local_integration/povw.rs new file mode 100644 index 000000000..f25b8e646 --- /dev/null +++ b/crates/lambdas/indexer-api/tests/local_integration/povw.rs @@ -0,0 +1,164 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Integration tests for PoVW API endpoints + +use indexer_api::models::{ + AddressLeaderboardResponse, AggregateLeaderboardEntry, EpochLeaderboardEntry, EpochPoVWSummary, + LeaderboardResponse, PoVWAddressSummary, PoVWSummaryStats, +}; + +use super::TestEnv; + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_povw_leaderboard() { + let env = TestEnv::shared().await; + + // Test default leaderboard + let response: LeaderboardResponse = + env.get("/v1/povw/addresses").await.unwrap(); + assert!(response.pagination.count <= response.pagination.limit as usize); + + // Test with limit of 3 to check top entries + let response: LeaderboardResponse = + env.get("/v1/povw/addresses?limit=3").await.unwrap(); + assert!(response.entries.len() <= 3); + assert_eq!(response.pagination.limit, 3); + + // Verify rank field is present for leaderboard + if !response.entries.is_empty() { + assert!(response.entries[0].rank.is_some()); + + // Check specific values from real data for top 3 + if response.entries.len() >= 3 { + let first = &response.entries[0]; + assert_eq!(first.work_log_id, "0x94072d2282cb2c718d23d5779a5f8484e2530f2a"); + assert_eq!(first.total_work_submitted, "18245963022336"); + assert_eq!(first.total_actual_rewards, "28666666666666666666666"); + assert_eq!(first.total_uncapped_rewards, "454178915961434029731928"); + assert_eq!(first.epochs_participated, 3); + + let second = &response.entries[1]; + assert_eq!(second.work_log_id, "0x0164ec96442196a02931f57e7e20fa59cff43845"); + assert_eq!(second.total_work_submitted, "2349000278016"); + assert_eq!(second.total_actual_rewards, "8825197537996492524728"); // Fixed: was 13540303064735614608777 + assert_eq!(second.total_uncapped_rewards, "8825197537996492524728"); // Fixed: was 13540303064735614608777 + assert_eq!(second.epochs_participated, 2); + } + } +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_povw_summary() { + let env = TestEnv::shared().await; + + // Test the summary endpoint + let summary: PoVWSummaryStats = env.get("/v1/povw").await.unwrap(); + + // Check specific values from real data + assert_eq!(summary.total_epochs_with_work, 3); + 
assert_eq!(summary.total_unique_work_log_ids, 26); + assert_eq!(summary.total_work_all_time, "24999835418624"); + assert_eq!(summary.total_emissions_all_time, "1395361974850288500000000"); + assert_eq!(summary.total_capped_rewards_all_time, "54999464530233482198753"); + assert_eq!(summary.total_uncapped_rewards_all_time, "837217107775305749999989"); // Fixed: was 624997088546559733077848 + + // Verify formatted strings are present + assert_eq!(summary.total_work_all_time_formatted, "24,999,835,418,624 cycles"); + assert_eq!(summary.total_uncapped_rewards_all_time_formatted, "837,217 ZKC"); // Fixed: was 624,997 ZKC +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_povw_epochs_summary() { + let env = TestEnv::shared().await; + + // Test epochs summary + let response: LeaderboardResponse<EpochPoVWSummary> = env.get("/v1/povw/epochs").await.unwrap(); + + // Verify we have exactly 5 epochs, 0 through 4 (matching our end-epoch parameter) + assert_eq!(response.entries.len(), 5, "Should have epochs 0-4"); + + // Verify epoch structure + let epoch = &response.entries[0]; + assert!(epoch.epoch > 0); + assert!(epoch.epoch_start_time > 0); + assert!(epoch.epoch_end_time > epoch.epoch_start_time); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_povw_epoch_details() { + let env = TestEnv::shared().await; + + // Test specific epoch (epoch 4 usually has data) + let response: LeaderboardResponse<EpochLeaderboardEntry> = + env.get("/v1/povw/epochs/4/addresses").await.unwrap(); + + // Verify all entries are for the requested epoch if we have data + for entry in &response.entries { + assert_eq!(entry.epoch, 4); + } +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_povw_address() { + let env = TestEnv::shared().await; + + // Use a known address with PoVW data + let address = "0x4a48ad93e826a0b64602b8ba7f86b056f079e609"; + let path = format!("/v1/povw/addresses/{}", address); + + let response: AddressLeaderboardResponse<EpochLeaderboardEntry, PoVWAddressSummary> = + env.get(&path).await.unwrap(); + + // Verify address-specific response + for entry in &response.entries { + // Verify work_log_id matches the address pattern + assert!(entry.work_log_id.to_lowercase().contains(&address[2..])); + } + + // Check summary (always present in AddressLeaderboardResponse) + let summary = &response.summary; + assert!(summary.work_log_id.to_lowercase().contains(&address[2..])); + // If there's data, verify it + if !response.entries.is_empty() { + assert!(summary.epochs_participated > 0); + } +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_povw_pagination() { + let env = TestEnv::shared().await; + + // Test pagination with offset + let response1: LeaderboardResponse<AggregateLeaderboardEntry> = + env.get("/v1/povw/addresses?limit=2").await.unwrap(); + let response2: LeaderboardResponse<AggregateLeaderboardEntry> = + env.get("/v1/povw/addresses?limit=2&offset=2").await.unwrap(); + + // Ensure responses are different if we have enough data + if response1.entries.len() == 2 && !response2.entries.is_empty() { + assert_ne!(response1.entries[0].work_log_id, response2.entries[0].work_log_id); + } + + // Verify pagination metadata + assert_eq!(response1.pagination.offset, 0); + assert_eq!(response2.pagination.offset, 2); +} diff --git a/crates/lambdas/indexer-api/tests/local_integration/staking.rs b/crates/lambdas/indexer-api/tests/local_integration/staking.rs new file mode 100644 index 000000000..bd00c994c --- /dev/null +++ b/crates/lambdas/indexer-api/tests/local_integration/staking.rs @@ -0,0 +1,149 @@ +// Copyright 2025 RISC Zero, Inc.
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Integration tests for Staking API endpoints + +use indexer_api::models::{ + AddressLeaderboardResponse, AggregateStakingEntry, EpochStakingEntry, EpochStakingSummary, + LeaderboardResponse, StakingAddressSummary, +}; + +use super::TestEnv; + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_staking_leaderboard() { + let env = TestEnv::shared().await; + + // Test default leaderboard + let response: LeaderboardResponse = + env.get("/v1/staking/addresses").await.unwrap(); + assert!(response.pagination.count <= response.pagination.limit as usize); + + // Test with limit of 2 to check top entries + let response: LeaderboardResponse = + env.get("/v1/staking/addresses?limit=2").await.unwrap(); + assert!(response.entries.len() <= 2); + assert_eq!(response.pagination.limit, 2); + + // Verify rank field is present for leaderboard + if !response.entries.is_empty() { + assert!(response.entries[0].rank.is_some()); + + // Check specific values from real data for top 2 + if response.entries.len() >= 2 { + let first = &response.entries[0]; + assert_eq!(first.staker_address, "0x2408e37489c231f883126c87e8aadbad782a040a"); + assert_eq!(first.total_staked, "726927981342423248000000"); + assert_eq!(first.total_rewards_generated, "43793837998280676959348"); + assert!(!first.is_withdrawing); + assert_eq!( + first.rewards_delegated_to, + Some("0x0164ec96442196a02931f57e7e20fa59cff43845".to_string()) + ); + assert_eq!(first.epochs_participated, 3); + + let second = &response.entries[1]; + assert_eq!(second.staker_address, "0x7cc3376b8d38b2c923cd9d5164f9d74e303482b2"); + assert_eq!(second.total_staked, "603060340000000000000000"); + assert_eq!(second.total_rewards_generated, "28191507291258394253114"); + assert!(!second.is_withdrawing); + assert_eq!(second.rewards_delegated_to, None); + assert_eq!(second.epochs_participated, 2); + } + } +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_staking_epochs_summary() { + let env = TestEnv::shared().await; + + // Test epochs summary + let response: LeaderboardResponse = + env.get("/v1/staking/epochs").await.unwrap(); + + // Verify we have some epochs + assert!(!response.entries.is_empty(), "Should have at least one epoch"); + + // Verify epoch structure + let epoch = &response.entries[0]; + assert!(epoch.epoch > 0); + assert!(epoch.num_stakers > 0); +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_staking_epoch_details() { + let env = TestEnv::shared().await; + + // Test specific epoch (epoch 3 should have data, we index up to epoch 4) + let response: LeaderboardResponse = + env.get("/v1/staking/epochs/3/addresses").await.unwrap(); + + // Verify all entries are for the requested epoch if we have data + for entry in &response.entries { + assert_eq!(entry.epoch, 3); + } +} + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_staking_address() { + let env = TestEnv::shared().await; + + // Use a known 
address with staking data + let address = "0x00000000f2708738d4886bc4aedefd8dd04818b0"; + let path = format!("/v1/staking/addresses/{}", address); + + let response: AddressLeaderboardResponse = + env.get(&path).await.unwrap(); + + // Verify address-specific response + for entry in &response.entries { + assert_eq!(entry.staker_address.to_lowercase(), address); + } + + // Check summary (always present in AddressLeaderboardResponse) + let summary = &response.summary; + assert_eq!(summary.staker_address.to_lowercase(), address); + // If there's data, verify it + if !response.entries.is_empty() { + assert!(summary.epochs_participated > 0); + assert!(summary.total_staked != "0"); + } +} + +// Removed test_staking_filters - the API doesn't support is_withdrawing filter parameter + +#[tokio::test] +#[ignore = "Requires ETH_MAINNET_RPC_URL"] +async fn test_staking_pagination() { + let env = TestEnv::shared().await; + + // Test pagination with offset + let response1: LeaderboardResponse = + env.get("/v1/staking/addresses?limit=2").await.unwrap(); + let response2: LeaderboardResponse = + env.get("/v1/staking/addresses?limit=2&offset=2").await.unwrap(); + + // Ensure responses are different if we have enough data + if response1.entries.len() == 2 && !response2.entries.is_empty() { + assert_ne!(response1.entries[0].staker_address, response2.entries[0].staker_address); + } + + // Verify pagination metadata + assert_eq!(response1.pagination.offset, 0); + assert_eq!(response2.pagination.offset, 2); +} diff --git a/crates/ops-lambdas/indexer-monitor/.gitignore b/crates/lambdas/indexer-monitor/.gitignore similarity index 100% rename from crates/ops-lambdas/indexer-monitor/.gitignore rename to crates/lambdas/indexer-monitor/.gitignore diff --git a/crates/ops-lambdas/indexer-monitor/Cargo.lock b/crates/lambdas/indexer-monitor/Cargo.lock similarity index 100% rename from crates/ops-lambdas/indexer-monitor/Cargo.lock rename to crates/lambdas/indexer-monitor/Cargo.lock diff --git a/crates/ops-lambdas/indexer-monitor/Cargo.toml b/crates/lambdas/indexer-monitor/Cargo.toml similarity index 100% rename from crates/ops-lambdas/indexer-monitor/Cargo.toml rename to crates/lambdas/indexer-monitor/Cargo.toml diff --git a/crates/ops-lambdas/indexer-monitor/src/handler.rs b/crates/lambdas/indexer-monitor/src/handler.rs similarity index 100% rename from crates/ops-lambdas/indexer-monitor/src/handler.rs rename to crates/lambdas/indexer-monitor/src/handler.rs diff --git a/crates/ops-lambdas/indexer-monitor/src/lib.rs b/crates/lambdas/indexer-monitor/src/lib.rs similarity index 100% rename from crates/ops-lambdas/indexer-monitor/src/lib.rs rename to crates/lambdas/indexer-monitor/src/lib.rs diff --git a/crates/ops-lambdas/indexer-monitor/src/main.rs b/crates/lambdas/indexer-monitor/src/main.rs similarity index 100% rename from crates/ops-lambdas/indexer-monitor/src/main.rs rename to crates/lambdas/indexer-monitor/src/main.rs diff --git a/crates/ops-lambdas/indexer-monitor/src/monitor.rs b/crates/lambdas/indexer-monitor/src/monitor.rs similarity index 100% rename from crates/ops-lambdas/indexer-monitor/src/monitor.rs rename to crates/lambdas/indexer-monitor/src/monitor.rs diff --git a/crates/rewards/Cargo.toml b/crates/rewards/Cargo.toml new file mode 100644 index 000000000..242ebeabe --- /dev/null +++ b/crates/rewards/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "boundless-rewards" +description = "Rewards computation for Boundless PoVW system" +resolver = "2" +version = { workspace = true } +edition = { workspace = true } 
+homepage = { workspace = true } +repository = { workspace = true } +publish = false + +[dependencies] +alloy = { version = "1.0.34", features = ["network", "providers", "transports", "sol-types", "contract", "rpc-client"] } +anyhow = { workspace = true } +boundless-povw = { workspace = true } +boundless-zkc = { workspace = true } +chrono = { workspace = true } +futures-util = { workspace = true } +serde = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } + +[dev-dependencies] diff --git a/crates/rewards/README.md b/crates/rewards/README.md new file mode 100644 index 000000000..6f4a41f25 --- /dev/null +++ b/crates/rewards/README.md @@ -0,0 +1,3 @@ +# Rewards + +Contains utility functions for fetching rewards data from chain, and pre-processing it for use in other applications (e.g. explorers, indexers, Jupyter notebooks). diff --git a/crates/rewards/src/cache.rs b/crates/rewards/src/cache.rs new file mode 100644 index 000000000..9934a3aa4 --- /dev/null +++ b/crates/rewards/src/cache.rs @@ -0,0 +1,796 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Caching and prefetching utilities for rewards computation. + +use alloy::{ + primitives::{Address, U256}, + providers::Provider, + rpc::types::{BlockNumberOrTag, Log}, +}; +use boundless_povw::deployments::Deployment; +use boundless_zkc::contracts::{IRewards, IStaking, IZKC}; +use futures_util::future::try_join_all; +use std::collections::{HashMap, HashSet}; + +use crate::{ + powers::{DelegationEvent, TimestampedDelegationEvent}, + staking::{StakeEvent, TimestampedStakeEvent}, + EpochTimeRange, +}; +use boundless_povw::log_updater::IPovwAccounting; + +/// Contains all the necessary data for the rewards computations +#[derive(Debug, Clone, Default)] +pub struct RewardsCache { + /// PoVW emissions by epoch number + pub povw_emissions_by_epoch: HashMap<u64, U256>, + /// Staking emissions by epoch number + pub staking_emissions_by_epoch: HashMap<u64, U256>, + /// Reward caps by (work_log_id, epoch) - includes both historical and current + pub reward_caps: HashMap<(Address, u64), U256>, + /// Epoch time ranges (start and end times) by epoch number + pub epoch_time_ranges: HashMap<u64, EpochTimeRange>, + /// Block timestamps by block number + pub block_timestamps: HashMap<u64, u64>, + /// Timestamped stake events, sorted by (block_number, transaction_index, log_index) + pub timestamped_stake_events: Vec<TimestampedStakeEvent>, + /// Timestamped delegation events, sorted by (block_number, transaction_index, log_index) + pub timestamped_delegation_events: Vec<TimestampedDelegationEvent>, + /// Work by work log ID and epoch + pub work_by_work_log_by_epoch: HashMap<(Address, u64), U256>, + /// Total work by epoch + pub total_work_by_epoch: HashMap<u64, U256>, + /// Work recipients by work log ID and epoch + pub work_recipients_by_epoch: HashMap<(Address, u64), Address>, + /// Individual staking power by (staker_address, epoch) + pub staking_power_by_address_by_epoch: HashMap<(Address, u64), U256>, + /// Total staking power by epoch + pub total_staking_power_by_epoch: HashMap<u64, U256>, + ///
Staking amounts by (staker_address, epoch) - actual staked balances from positions + pub staking_amounts_by_epoch: HashMap<(Address, u64), U256>, +} + +/// For the given epochs, pre-fetches all the necessary data for the rewards computations +/// Uses multicall to batch requests and processes all event logs +pub async fn build_rewards_cache( + provider: &P, + deployment: &Deployment, + zkc_address: Address, + epochs_to_process: &[u64], + current_epoch: u64, + end_epoch: Option, + all_event_logs: &crate::AllEventLogs, +) -> anyhow::Result { + let mut cache = RewardsCache::default(); + + // Extract unique work log IDs from the work logs + let mut unique_work_log_ids = std::collections::HashSet::new(); + for log in &all_event_logs.work_logs { + if let Ok(decoded) = log.log_decode::() { + unique_work_log_ids.insert(decoded.inner.data.workLogId); + } + } + let work_log_ids: Vec
<Address>
= unique_work_log_ids.into_iter().collect(); + + let zkc = IZKC::new(zkc_address, provider); + let rewards_contract = IRewards::new(deployment.vezkc_address, provider); + + // Batch 1: Fetch all epoch emissions (both PoVW and staking) using dynamic multicall + if !epochs_to_process.is_empty() { + tracing::debug!( + "Fetching PoVW and staking emissions for {} epochs using multicall", + epochs_to_process.len() + ); + + // Process in chunks to avoid hitting multicall limits + const CHUNK_SIZE: usize = 50; // Smaller chunks since we're fetching both types + for chunk in epochs_to_process.chunks(CHUNK_SIZE) { + // Fetch PoVW emissions + let mut povw_multicall = provider + .multicall() + .dynamic::( + ); + + for &epoch_num in chunk { + povw_multicall = + povw_multicall.add_dynamic(zkc.getPoVWEmissionsForEpoch(U256::from(epoch_num))); + } + + let povw_results: Vec = povw_multicall.aggregate().await?; + + // Fetch staking emissions + let mut staking_multicall = + provider + .multicall() + .dynamic::(); + + for &epoch_num in chunk { + staking_multicall = staking_multicall + .add_dynamic(zkc.getStakingEmissionsForEpoch(U256::from(epoch_num))); + } + + let staking_results: Vec = staking_multicall.aggregate().await?; + + // Process results - zip with input epochs + for (i, &epoch_num) in chunk.iter().enumerate() { + cache.povw_emissions_by_epoch.insert(epoch_num, povw_results[i]); + cache.staking_emissions_by_epoch.insert(epoch_num, staking_results[i]); + } + } + } + + // Batch 2: Fetch epoch start and end times using multicall + if !epochs_to_process.is_empty() { + tracing::debug!( + "Fetching epoch start and end times for {} epochs using multicall", + epochs_to_process.len() + ); + + const CHUNK_SIZE: usize = 50; // Smaller chunk since we're making 2 calls per epoch + for chunk in epochs_to_process.chunks(CHUNK_SIZE) { + // Fetch start times + let mut start_time_multicall = provider + .multicall() + .dynamic::( + ); + + for &epoch_num in chunk { + start_time_multicall = + start_time_multicall.add_dynamic(zkc.getEpochStartTime(U256::from(epoch_num))); + } + + let start_times: Vec = start_time_multicall.aggregate().await?; + + // Fetch end times + let mut end_time_multicall = provider + .multicall() + .dynamic::( + ); + + for &epoch_num in chunk { + end_time_multicall = + end_time_multicall.add_dynamic(zkc.getEpochEndTime(U256::from(epoch_num))); + } + + let end_times: Vec = end_time_multicall.aggregate().await?; + + // Process results + for (i, &epoch_num) in chunk.iter().enumerate() { + let start_time = start_times[i]; + let end_time = end_times[i]; + + cache.epoch_time_ranges.insert( + epoch_num, + EpochTimeRange { + start_time: start_time.to::(), + end_time: end_time.to::(), + }, + ); + } + } + } + + // Batch 3: Fetch current reward caps using dynamic multicall + // Skip this if we're in historical mode (end_epoch is set) + if !work_log_ids.is_empty() && end_epoch.is_none() { + tracing::debug!( + "Fetching current reward caps for {} work log IDs using multicall", + work_log_ids.len() + ); + + const CHUNK_SIZE: usize = 50; + for chunk in work_log_ids.chunks(CHUNK_SIZE) { + // Use dynamic multicall for same-type calls + let mut multicall = provider + .multicall() + .dynamic::( + ); + + for &work_log_id in chunk { + multicall = multicall.add_dynamic(rewards_contract.getPoVWRewardCap(work_log_id)); + } + + let results: Vec = multicall.aggregate().await?; + + // Process results - store current caps with current_epoch as key + for (&work_log_id, cap) in chunk.iter().zip(results.iter()) { + 
cache.reward_caps.insert((work_log_id, current_epoch), *cap); + } + } + } + + // Batch 4: Fetch past reward caps using dynamic multicall + if epochs_to_process.iter().any(|&e| e < current_epoch) { + tracing::debug!( + "Fetching past reward caps for {} work log IDs and past epochs using multicall", + work_log_ids.len() + ); + + // Build list of (work_log_id, epoch_num, epoch_end_time) tuples + let mut past_cap_requests = Vec::new(); + for work_log_id in &work_log_ids { + for &epoch_num in epochs_to_process { + if epoch_num < current_epoch { + if let Some(epoch_range) = cache.epoch_time_ranges.get(&epoch_num) { + past_cap_requests.push(( + *work_log_id, + epoch_num, + U256::from(epoch_range.end_time), + )); + } + } + } + } + + // Process in chunks using dynamic multicall + const CHUNK_SIZE: usize = 100; + for chunk in past_cap_requests.chunks(CHUNK_SIZE) { + // Use dynamic multicall for same-type calls + let mut multicall = provider + .multicall() + .dynamic::( + ); + + for &(work_log_id, _, epoch_end_time) in chunk { + multicall = multicall.add_dynamic( + rewards_contract.getPastPoVWRewardCap(work_log_id, epoch_end_time), + ); + } + + let results: Vec = multicall.aggregate().await?; + + // Process results - zip with input tuples + for (&(work_log_id, epoch_num, _), cap) in chunk.iter().zip(results.iter()) { + cache.reward_caps.insert((work_log_id, epoch_num), *cap); + } + } + } + + // Batch 3: Build block timestamp cache from all event logs + tracing::debug!("Building block timestamp cache from event logs"); + let mut all_logs: Vec<&Log> = Vec::new(); + all_logs.extend(all_event_logs.work_logs.iter()); + all_logs.extend(all_event_logs.epoch_finalized_logs.iter()); + all_logs.extend(all_event_logs.stake_created_logs.iter()); + all_logs.extend(all_event_logs.stake_added_logs.iter()); + all_logs.extend(all_event_logs.unstake_initiated_logs.iter()); + all_logs.extend(all_event_logs.unstake_completed_logs.iter()); + all_logs.extend(all_event_logs.vote_delegation_change_logs.iter()); + all_logs.extend(all_event_logs.reward_delegation_change_logs.iter()); + + // Collect unique block numbers + let mut block_numbers = HashSet::new(); + for log in &all_logs { + if let Some(block_num) = log.block_number { + block_numbers.insert(block_num); + } + } + + if !block_numbers.is_empty() { + tracing::debug!( + "Fetching timestamps for {} blocks using concurrent requests", + block_numbers.len() + ); + + // Convert HashSet to Vec for chunking + let block_numbers: Vec<_> = block_numbers.into_iter().collect(); + + // Fetch timestamps for blocks using concurrent futures + // Process in chunks to avoid overwhelming the RPC + const CHUNK_SIZE: usize = 100; + for chunk in block_numbers.chunks(CHUNK_SIZE) { + let futures: Vec<_> = chunk + .iter() + .map(|&block_num| async move { + let block = + provider.get_block_by_number(BlockNumberOrTag::Number(block_num)).await?; + Ok::<_, anyhow::Error>((block_num, block)) + }) + .collect(); + + let results = try_join_all(futures).await?; + + // Process results + for (block_num, block) in results { + match block { + Some(block) => { + cache.block_timestamps.insert(block_num, block.header.timestamp); + } + None => { + anyhow::bail!("Block {} not found", block_num); + } + } + } + } + } + + // Batch 8: Process timestamped stake events + tracing::debug!("Processing timestamped stake events"); + + // Create lookup closures for epoch and timestamp + let get_epoch_for_timestamp = |timestamp: u64| -> anyhow::Result { + for (epoch, range) in &cache.epoch_time_ranges { + if timestamp >= 
range.start_time && timestamp <= range.end_time { + return Ok(*epoch); + } + } + anyhow::bail!("No epoch found for timestamp {}", timestamp) + }; + + let get_timestamp_for_block = |block_num: u64| -> anyhow::Result { + cache + .block_timestamps + .get(&block_num) + .copied() + .ok_or_else(|| anyhow::anyhow!("Block timestamp not found for block {}", block_num)) + }; + + // Helper function to process logs into timestamped events + fn process_event_log( + logs: &[Log], + decode_and_create: F, + get_timestamp_for_block: &impl Fn(u64) -> anyhow::Result, + get_epoch_for_timestamp: &impl Fn(u64) -> anyhow::Result, + events: &mut Vec, + ) -> anyhow::Result<()> + where + F: Fn(&Log) -> Option, + { + for log in logs { + if let Some(event) = decode_and_create(log) { + if let (Some(block_num), Some(tx_idx), Some(log_idx)) = + (log.block_number, log.transaction_index, log.log_index) + { + let timestamp = get_timestamp_for_block(block_num)?; + let epoch = get_epoch_for_timestamp(timestamp)?; + + events.push(TimestampedStakeEvent { + block_number: block_num, + block_timestamp: timestamp, + transaction_index: tx_idx, + log_index: log_idx, + epoch, + event, + }); + } + } + } + Ok(()) + } + + // Process StakeCreated events + process_event_log( + &all_event_logs.stake_created_logs, + |log| { + log.log_decode::().ok().map(|decoded| StakeEvent::Created { + owner: decoded.inner.data.owner, + amount: decoded.inner.data.amount, + }) + }, + &get_timestamp_for_block, + &get_epoch_for_timestamp, + &mut cache.timestamped_stake_events, + )?; + + // Process StakeAdded events + process_event_log( + &all_event_logs.stake_added_logs, + |log| { + log.log_decode::().ok().map(|decoded| StakeEvent::Added { + owner: decoded.inner.data.owner, + new_total: decoded.inner.data.newTotal, + }) + }, + &get_timestamp_for_block, + &get_epoch_for_timestamp, + &mut cache.timestamped_stake_events, + )?; + + // Process UnstakeInitiated events + process_event_log( + &all_event_logs.unstake_initiated_logs, + |log| { + log.log_decode::() + .ok() + .map(|decoded| StakeEvent::UnstakeInitiated { owner: decoded.inner.data.owner }) + }, + &get_timestamp_for_block, + &get_epoch_for_timestamp, + &mut cache.timestamped_stake_events, + )?; + + // Process UnstakeCompleted events + process_event_log( + &all_event_logs.unstake_completed_logs, + |log| { + log.log_decode::() + .ok() + .map(|decoded| StakeEvent::UnstakeCompleted { owner: decoded.inner.data.owner }) + }, + &get_timestamp_for_block, + &get_epoch_for_timestamp, + &mut cache.timestamped_stake_events, + )?; + + // Process VoteDelegateChanged events + process_event_log( + &all_event_logs.vote_delegation_change_logs, + |log| { + // For DelegateChanged(address indexed delegator, address indexed fromDelegate, address indexed toDelegate) + if log.topics().len() >= 4 { + let delegator = Address::from_slice(&log.topics()[1][12..]); + let new_delegate = Address::from_slice(&log.topics()[3][12..]); + Some(StakeEvent::VoteDelegateChanged { delegator, new_delegate }) + } else { + None + } + }, + &get_timestamp_for_block, + &get_epoch_for_timestamp, + &mut cache.timestamped_stake_events, + )?; + + // Process RewardDelegateChanged events + process_event_log( + &all_event_logs.reward_delegation_change_logs, + |log| { + log.log_decode::().ok().map(|decoded| { + StakeEvent::RewardDelegateChanged { + delegator: decoded.inner.data.delegator, + new_delegate: decoded.inner.data.toDelegate, + } + }) + }, + &get_timestamp_for_block, + &get_epoch_for_timestamp, + &mut cache.timestamped_stake_events, + )?; + + // Sort 
events by block number, then transaction index, then log index + cache + .timestamped_stake_events + .sort_by_key(|e| (e.block_number, e.transaction_index, e.log_index)); + + // Batch 9: Process delegation events + tracing::debug!("Processing delegation events"); + + // Process vote delegation change events (DelegateChanged) + for log in &all_event_logs.vote_delegation_change_logs { + if log.topics().len() >= 4 { + let delegator = Address::from_slice(&log.topics()[1][12..]); + let new_delegate = Address::from_slice(&log.topics()[3][12..]); + if let (Some(block_num), Some(tx_idx), Some(log_idx)) = + (log.block_number, log.transaction_index, log.log_index) + { + let timestamp = get_timestamp_for_block(block_num)?; + let epoch = get_epoch_for_timestamp(timestamp)?; + + cache.timestamped_delegation_events.push(TimestampedDelegationEvent { + event: DelegationEvent::VoteDelegationChange { delegator, new_delegate }, + timestamp, + block_number: block_num, + transaction_index: tx_idx, + log_index: log_idx, + epoch, + }); + } + } + } + + // Process reward delegation change events (RewardDelegateChanged) + for log in &all_event_logs.reward_delegation_change_logs { + if let Ok(decoded) = log.log_decode::() { + let delegator = decoded.inner.data.delegator; + let new_delegate = decoded.inner.data.toDelegate; + if let (Some(block_num), Some(tx_idx), Some(log_idx)) = + (log.block_number, log.transaction_index, log.log_index) + { + let timestamp = get_timestamp_for_block(block_num)?; + let epoch = get_epoch_for_timestamp(timestamp)?; + + cache.timestamped_delegation_events.push(TimestampedDelegationEvent { + event: DelegationEvent::RewardDelegationChange { delegator, new_delegate }, + timestamp, + block_number: block_num, + transaction_index: tx_idx, + log_index: log_idx, + epoch, + }); + } + } + } + + // Process vote power change events (DelegateVotesChanged) + for log in &all_event_logs.vote_power_logs { + if log.topics().len() >= 2 { + let delegate = Address::from_slice(&log.topics()[1][12..]); + let data_bytes = &log.data().data; + if data_bytes.len() >= 64 { + let mut bytes = [0u8; 32]; + bytes.copy_from_slice(&data_bytes[32..64]); + let new_votes = U256::from_be_bytes(bytes); + if let (Some(block_num), Some(tx_idx), Some(log_idx)) = + (log.block_number, log.transaction_index, log.log_index) + { + let timestamp = get_timestamp_for_block(block_num)?; + let epoch = get_epoch_for_timestamp(timestamp)?; + + cache.timestamped_delegation_events.push(TimestampedDelegationEvent { + event: DelegationEvent::VotePowerChange { delegate, new_votes }, + timestamp, + block_number: block_num, + transaction_index: tx_idx, + log_index: log_idx, + epoch, + }); + } + } + } + } + + // Process reward power change events (DelegateRewardsChanged) + for log in &all_event_logs.reward_power_logs { + if let Ok(decoded) = log.log_decode::() { + let delegate = decoded.inner.data.delegate; + let new_rewards = decoded.inner.data.newRewards; + if let (Some(block_num), Some(tx_idx), Some(log_idx)) = + (log.block_number, log.transaction_index, log.log_index) + { + let timestamp = get_timestamp_for_block(block_num)?; + let epoch = get_epoch_for_timestamp(timestamp)?; + + cache.timestamped_delegation_events.push(TimestampedDelegationEvent { + event: DelegationEvent::RewardPowerChange { delegate, new_rewards }, + timestamp, + block_number: block_num, + transaction_index: tx_idx, + log_index: log_idx, + epoch, + }); + } + } + } + + // Sort delegation events chronologically + cache + .timestamped_delegation_events + .sort_by_key(|e| 
(e.block_number, e.transaction_index, e.log_index)); + + // Batch 10: Process work events + tracing::debug!("Processing work events"); + + // Process WorkLogUpdated events + for log in &all_event_logs.work_logs { + if let Ok(decoded) = log.log_decode::() { + let work_log_id = decoded.inner.data.workLogId; + let epoch = decoded.inner.data.epochNumber.to::(); + let update_value = decoded.inner.data.updateValue; + let recipient = decoded.inner.data.valueRecipient; + + // Aggregate work + *cache.work_by_work_log_by_epoch.entry((work_log_id, epoch)).or_insert(U256::ZERO) += + update_value; + + // Store recipient (last one wins if multiple updates) + cache.work_recipients_by_epoch.insert((work_log_id, epoch), recipient); + } + } + + // Process EpochFinalized events for total work + for log in &all_event_logs.epoch_finalized_logs { + if let Ok(decoded) = log.log_decode::() { + let epoch = decoded.inner.data.epoch.to::(); + let total_work = U256::from(decoded.inner.data.totalWork); + cache.total_work_by_epoch.insert(epoch, total_work); + } + } + + // Batch 11: Fetch staking power for rewards computation + // Extract unique stakers from stake events + tracing::debug!("Extracting unique stakers from stake events"); + let mut stakers_by_epoch: HashMap> = HashMap::new(); + + // Process StakeCreated events + // In historical mode, only process up to end_epoch, not current_epoch + let max_epoch = end_epoch.unwrap_or(current_epoch); + + for event in &cache.timestamped_stake_events { + if let StakeEvent::Created { owner, .. } = &event.event { + stakers_by_epoch.entry(event.epoch).or_default().insert(*owner); + // Add to all future epochs too (up to max_epoch) + for epoch in (event.epoch + 1)..=max_epoch { + stakers_by_epoch.entry(epoch).or_default().insert(*owner); + } + } + } + + // Build a list of (staker, epoch) pairs we need to fetch + let mut staker_epoch_pairs: Vec<(Address, u64)> = Vec::new(); + for (epoch, stakers) in &stakers_by_epoch { + for staker in stakers { + staker_epoch_pairs.push((*staker, *epoch)); + } + } + + if !staker_epoch_pairs.is_empty() { + tracing::debug!( + "Fetching staking power for {} staker-epoch pairs using multicall", + staker_epoch_pairs.len() + ); + + // Process in chunks to avoid hitting multicall limits + const CHUNK_SIZE: usize = 100; + for chunk in staker_epoch_pairs.chunks(CHUNK_SIZE) { + // Separate current and past epochs + let mut past_pairs = Vec::new(); + let mut current_pairs = Vec::new(); + + for &(staker_address, epoch) in chunk { + if epoch == current_epoch { + current_pairs.push((staker_address, epoch)); + } else { + past_pairs.push((staker_address, epoch)); + } + } + + // Fetch past staking rewards using getPastStakingRewards + if !past_pairs.is_empty() { + let mut past_power_multicall = + provider + .multicall() + .dynamic::(); + + for &(staker_address, epoch) in &past_pairs { + let epoch_end_time = cache + .epoch_time_ranges + .get(&epoch) + .ok_or_else(|| { + anyhow::anyhow!("Missing epoch time range for epoch {}", epoch) + })? 
+ .end_time; + + past_power_multicall = past_power_multicall.add_dynamic( + rewards_contract + .getPastStakingRewards(staker_address, U256::from(epoch_end_time)), + ); + } + + let past_results: Vec<U256> = past_power_multicall.aggregate().await?; + + // Store past power results + for ((staker_address, epoch), power) in past_pairs.iter().zip(past_results.iter()) { + cache + .staking_power_by_address_by_epoch + .insert((*staker_address, *epoch), *power); + } + } + + // Fetch current staking rewards using getStakingRewards + if !current_pairs.is_empty() { + let mut current_power_multicall = + provider + .multicall() + .dynamic::<IRewards::getStakingRewardsCall>(); + + for &(staker_address, _epoch) in &current_pairs { + current_power_multicall = current_power_multicall + .add_dynamic(rewards_contract.getStakingRewards(staker_address)); + } + + let current_results: Vec<U256> = current_power_multicall.aggregate().await?; + + // Store current power results + for ((staker_address, epoch), power) in + current_pairs.iter().zip(current_results.iter()) + { + cache + .staking_power_by_address_by_epoch + .insert((*staker_address, *epoch), *power); + } + } + } + + // Now fetch total staking power for each unique epoch + let unique_epochs: HashSet<u64> = staker_epoch_pairs.iter().map(|(_, e)| *e).collect(); + if !unique_epochs.is_empty() { + tracing::debug!( + "Fetching total staking power for {} epochs using multicall", + unique_epochs.len() + ); + + const EPOCH_CHUNK_SIZE: usize = 50; + let epochs_vec: Vec<u64> = unique_epochs.into_iter().collect(); + + for epoch_chunk in epochs_vec.chunks(EPOCH_CHUNK_SIZE) { + // Separate current and past epochs + let mut current_epochs = Vec::new(); + let mut past_epochs = Vec::new(); + + for &epoch in epoch_chunk { + if epoch == current_epoch { + current_epochs.push(epoch); + } else { + past_epochs.push(epoch); + } + } + + // Handle past epochs with getPastTotalStakingRewards + if !past_epochs.is_empty() { + let mut total_power_multicall = provider + .multicall() + .dynamic::<IRewards::getPastTotalStakingRewardsCall>(); + + let mut valid_epochs = Vec::new(); + for &epoch in &past_epochs { + let epoch_end_time = cache + .epoch_time_ranges + .get(&epoch) + .ok_or_else(|| { + anyhow::anyhow!("Missing epoch time range for epoch {}", epoch) + })?
+ .end_time; + + total_power_multicall = total_power_multicall.add_dynamic( + rewards_contract.getPastTotalStakingRewards(U256::from(epoch_end_time)), + ); + valid_epochs.push(epoch); + } + + let total_results: Vec<U256> = total_power_multicall.aggregate().await?; + + // Store total power results + for (epoch, total_power) in valid_epochs.iter().zip(total_results.iter()) { + cache.total_staking_power_by_epoch.insert(*epoch, *total_power); + } + } + + // Handle current epoch with getTotalStakingRewards + if !current_epochs.is_empty() { + let mut current_multicall = provider + .multicall() + .dynamic::<IRewards::getTotalStakingRewardsCall>(); + + for &_epoch in &current_epochs { + current_multicall = current_multicall + .add_dynamic(rewards_contract.getTotalStakingRewards()); + } + + let current_results: Vec<U256> = current_multicall.aggregate().await?; + + // Store current epoch results + for (epoch, total_power) in current_epochs.iter().zip(current_results.iter()) { + cache.total_staking_power_by_epoch.insert(*epoch, *total_power); + } + } + } + } + + tracing::info!( + "Built rewards cache: {} povw emissions, {} staking emissions, {} work logs, {} reward caps, {} epoch time ranges, {} block timestamps, {} stake events, {} delegation events, {} work entries, {} staking power entries", + cache.povw_emissions_by_epoch.len(), + cache.staking_emissions_by_epoch.len(), + work_log_ids.len(), + cache.reward_caps.len(), + cache.epoch_time_ranges.len(), + cache.block_timestamps.len(), + cache.timestamped_stake_events.len(), + cache.timestamped_delegation_events.len(), + cache.work_by_work_log_by_epoch.len(), + cache.staking_power_by_address_by_epoch.len() + ); + + Ok(cache) +} diff --git a/crates/rewards/src/events.rs b/crates/rewards/src/events.rs new file mode 100644 index 000000000..1ba5cba20 --- /dev/null +++ b/crates/rewards/src/events.rs @@ -0,0 +1,268 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Event fetching and log querying utilities.
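// A minimal standalone sketch (editorial, with assumed names) of the inclusive
// chunking arithmetic that `query_logs_chunked` below uses to keep each
// eth_getLogs request within provider block-range limits:
fn chunk_ranges(from: u64, to: u64, chunk: u64) -> Vec<(u64, u64)> {
    let mut ranges = Vec::new();
    let mut current = from;
    while current <= to {
        // Inclusive upper bound, clamped to `to` for the final partial chunk.
        let end = (current + chunk - 1).min(to);
        ranges.push((current, end));
        current = end + 1;
    }
    ranges
}
// chunk_ranges(0, 120_000, 50_000) == [(0, 49_999), (50_000, 99_999), (100_000, 120_000)],
// so every block in [from, to] is queried exactly once, with no overlap.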
+ +use alloy::{ + primitives::B256, + providers::Provider, + rpc::types::{BlockNumberOrTag, Filter, Log}, + sol_types::SolEvent, +}; +use anyhow::Context; +use boundless_povw::{deployments::Deployment, log_updater::IPovwAccounting}; + +/// Container for all event logs needed for rewards computation +#[derive(Debug)] +pub struct AllEventLogs { + pub work_logs: Vec, + pub epoch_finalized_logs: Vec, + pub stake_created_logs: Vec, + pub stake_added_logs: Vec, + pub unstake_initiated_logs: Vec, + pub unstake_completed_logs: Vec, + pub vote_delegation_change_logs: Vec, + pub reward_delegation_change_logs: Vec, + pub vote_power_logs: Vec, + pub reward_power_logs: Vec, + pub povw_claims_logs: Vec, + pub staking_claims_logs: Vec, +} + +/// Query logs in chunks to avoid hitting provider limits +pub async fn query_logs_chunked( + provider: &P, + filter: Filter, + from_block: u64, + to_block: u64, +) -> anyhow::Result> { + const BLOCK_CHUNK_SIZE: u64 = 50_000; + let mut all_logs = Vec::new(); + + let mut current_from = from_block; + while current_from <= to_block { + let current_to = (current_from + BLOCK_CHUNK_SIZE - 1).min(to_block); + + let chunk_filter = filter + .clone() + .from_block(BlockNumberOrTag::Number(current_from)) + .to_block(BlockNumberOrTag::Number(current_to)); + + let logs = provider.get_logs(&chunk_filter).await?; + all_logs.extend(logs); + + current_from = current_to + 1; + } + + Ok(all_logs) +} + +/// Fetch all event logs needed for rewards computation +pub async fn fetch_all_event_logs( + provider: &P, + deployment: &Deployment, + zkc_deployment: &boundless_zkc::deployments::Deployment, + from_block_num: u64, + current_block: u64, +) -> anyhow::Result { + tracing::info!("Fetching blockchain event data ({} blocks)...", current_block - from_block_num); + + // Batch 1: Core stake and work data (5 parallel queries) + tracing::info!("[1/2] Querying stake and work events..."); + + let work_filter = Filter::new() + .address(deployment.povw_accounting_address) + .event_signature(IPovwAccounting::WorkLogUpdated::SIGNATURE_HASH); + + let epoch_finalized_filter = Filter::new() + .address(deployment.povw_accounting_address) + .event_signature(IPovwAccounting::EpochFinalized::SIGNATURE_HASH); + + let stake_created_filter = Filter::new().address(deployment.vezkc_address).event_signature( + B256::from(alloy::primitives::keccak256("StakeCreated(uint256,address,uint256)")), + ); + + let stake_added_filter = Filter::new().address(deployment.vezkc_address).event_signature( + B256::from(alloy::primitives::keccak256("StakeAdded(uint256,address,uint256,uint256)")), + ); + + let unstake_initiated_filter = Filter::new().address(deployment.vezkc_address).event_signature( + B256::from(alloy::primitives::keccak256("UnstakeInitiated(uint256,address,uint256)")), + ); + + let ( + work_logs, + epoch_finalized_logs, + stake_created_logs, + stake_added_logs, + unstake_initiated_logs, + ) = tokio::join!( + async { + query_logs_chunked(provider, work_filter.clone(), from_block_num, current_block) + .await + .context("Failed to get work logs") + }, + async { + query_logs_chunked( + provider, + epoch_finalized_filter.clone(), + from_block_num, + current_block, + ) + .await + .context("Failed to get epoch finalized logs") + }, + async { + query_logs_chunked( + provider, + stake_created_filter.clone(), + from_block_num, + current_block, + ) + .await + .context("Failed to get stake created logs") + }, + async { + query_logs_chunked(provider, stake_added_filter.clone(), from_block_num, current_block) + .await + 
.context("Failed to get stake added logs") + }, + async { + query_logs_chunked( + provider, + unstake_initiated_filter.clone(), + from_block_num, + current_block, + ) + .await + .context("Failed to get unstake initiated logs") + } + ); + + // Batch 2: Delegation, completion, and reward claims (7 parallel queries) + tracing::info!("[2/2] Querying delegation, unstake completion, and reward claim events..."); + + let unstake_completed_filter = Filter::new().address(deployment.vezkc_address).event_signature( + B256::from(alloy::primitives::keccak256("UnstakeCompleted(uint256,address,uint256)")), + ); + + let vote_delegation_change_filter = + Filter::new().address(deployment.vezkc_address).event_signature(B256::from( + alloy::primitives::keccak256("DelegateChanged(address,address,address)"), + )); + + let reward_delegation_change_filter = + Filter::new().address(deployment.vezkc_address).event_signature(B256::from( + alloy::primitives::keccak256("RewardDelegateChanged(address,address,address)"), + )); + + let vote_power_filter = Filter::new().address(deployment.vezkc_address).event_signature( + B256::from(alloy::primitives::keccak256("DelegateVotesChanged(address,uint256,uint256)")), + ); + + let reward_power_filter = Filter::new().address(deployment.vezkc_address).event_signature( + B256::from(alloy::primitives::keccak256("DelegateRewardsChanged(address,uint256,uint256)")), + ); + + let povw_claims_filter = Filter::new().address(zkc_deployment.zkc_address).event_signature( + B256::from(alloy::primitives::keccak256("PoVWRewardsClaimed(address,uint256)")), + ); + + let staking_claims_filter = Filter::new().address(zkc_deployment.zkc_address).event_signature( + B256::from(alloy::primitives::keccak256("StakingRewardsClaimed(address,uint256)")), + ); + + let ( + unstake_completed_logs, + vote_delegation_change_logs, + reward_delegation_change_logs, + vote_power_logs, + reward_power_logs, + povw_claims_logs, + staking_claims_logs, + ) = tokio::join!( + async { + query_logs_chunked( + provider, + unstake_completed_filter.clone(), + from_block_num, + current_block, + ) + .await + .context("Failed to get unstake completed logs") + }, + async { + query_logs_chunked( + provider, + vote_delegation_change_filter.clone(), + from_block_num, + current_block, + ) + .await + .context("Failed to get vote delegation change logs") + }, + async { + query_logs_chunked( + provider, + reward_delegation_change_filter.clone(), + from_block_num, + current_block, + ) + .await + .context("Failed to get reward delegation change logs") + }, + async { + query_logs_chunked(provider, vote_power_filter.clone(), from_block_num, current_block) + .await + .context("Failed to get vote power logs") + }, + async { + query_logs_chunked(provider, reward_power_filter.clone(), from_block_num, current_block) + .await + .context("Failed to get reward power logs") + }, + async { + query_logs_chunked(provider, povw_claims_filter.clone(), from_block_num, current_block) + .await + .context("Failed to get povw claims logs") + }, + async { + query_logs_chunked( + provider, + staking_claims_filter.clone(), + from_block_num, + current_block, + ) + .await + .context("Failed to get staking claims logs") + } + ); + + tracing::info!("Event data fetched successfully"); + + Ok(AllEventLogs { + work_logs: work_logs?, + epoch_finalized_logs: epoch_finalized_logs?, + stake_created_logs: stake_created_logs?, + stake_added_logs: stake_added_logs?, + unstake_initiated_logs: unstake_initiated_logs?, + unstake_completed_logs: unstake_completed_logs?, + 
vote_delegation_change_logs: vote_delegation_change_logs?, + reward_delegation_change_logs: reward_delegation_change_logs?, + vote_power_logs: vote_power_logs?, + reward_power_logs: reward_power_logs?, + povw_claims_logs: povw_claims_logs?, + staking_claims_logs: staking_claims_logs?, + }) +} diff --git a/crates/rewards/src/lib.rs b/crates/rewards/src/lib.rs new file mode 100644 index 000000000..a4d0f0e87 --- /dev/null +++ b/crates/rewards/src/lib.rs @@ -0,0 +1,73 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Rewards calculation and event processing utilities for ZKC staking and PoVW rewards. + +// Declare modules +pub mod cache; +pub mod events; +pub mod povw; +pub mod powers; +pub mod staking; + +// Re-export commonly used types +pub use cache::{build_rewards_cache, RewardsCache}; + +pub use events::{fetch_all_event_logs, query_logs_chunked, AllEventLogs}; + +pub use povw::{ + compute_povw_rewards, compute_povw_rewards_for_epoch, EpochPoVWRewards, PoVWRewardsResult, + PoVWSummary, PoVWWorkLogIdSummary, WorkLogRewardInfo, +}; + +pub use staking::{ + // Main unified function + compute_staking_data, + // Legacy functions (for compatibility) + compute_staking_positions, + compute_staking_rewards, + EpochStakingData, + EpochStakingPositions, + EpochStakingRewards, + StakeEvent, + StakerAggregate, + StakerRewardInfo, + StakingDataResult, + StakingPosition, + StakingPositionsResult, + StakingRewardsResult, + StakingRewardsSummary, + StakingSummary, + TimestampedStakeEvent, +}; + +pub use powers::{ + compute_delegation_powers, DelegationEvent, DelegationPowers, EpochDelegationPowers, + TimestampedDelegationEvent, +}; + +/// Time range for an epoch +#[derive(Debug, Clone, Copy)] +pub struct EpochTimeRange { + pub start_time: u64, + pub end_time: u64, +} + +// Block numbers from before contract creation. +/// Mainnet starting block for event queries +pub const MAINNET_FROM_BLOCK: u64 = 23250070; +/// Sepolia starting block for event queries +pub const SEPOLIA_FROM_BLOCK: u64 = 9110040; +/// Chunk size for log queries to avoid rate limiting +pub const LOG_QUERY_CHUNK_SIZE: u64 = 2500; diff --git a/crates/rewards/src/povw.rs b/crates/rewards/src/povw.rs new file mode 100644 index 000000000..aa4b43f4f --- /dev/null +++ b/crates/rewards/src/povw.rs @@ -0,0 +1,296 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! PoVW rewards computation logic. 
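// A minimal sketch of the per-work-log reward rule that povw.rs implements
// below: emissions are split pro rata by work share, then clamped to the
// per-log cap. U256 division rounds toward zero, so the summed shares can
// slightly undershoot total emissions.
use alloy::primitives::U256;

fn capped_reward(work: U256, total_work: U256, emissions: U256, cap: U256) -> U256 {
    if total_work.is_zero() {
        return U256::ZERO; // no work recorded this epoch, nothing to distribute
    }
    let proportional = work * emissions / total_work; // pro-rata share, rounded down
    proportional.min(cap) // the cap binds whenever proportional > cap
}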
+ +use alloy::primitives::{Address, U256}; +use std::collections::HashMap; + +/// Information about a work log ID's rewards for an epoch +#[derive(Debug, Clone)] +pub struct WorkLogRewardInfo { + /// The work log ID (address) + pub work_log_id: Address, + /// Total work contributed by this work log ID in the epoch + pub work: U256, + /// Proportional share of rewards (before cap) + pub proportional_rewards: U256, + /// Actual rewards after applying cap + pub capped_rewards: U256, + /// The reward cap for this work log ID + pub reward_cap: U256, + /// Whether the rewards were capped + pub is_capped: bool, + /// Recipient address for the rewards + pub recipient_address: Address, + /// Staking amount for the work log ID + pub staking_amount: U256, +} + +/// PoVW rewards for an entire epoch +#[derive(Debug, Clone)] +pub struct EpochPoVWRewards { + /// The epoch number + pub epoch: U256, + /// Total work in the epoch + pub total_work: U256, + /// Total emissions for the epoch + pub total_emissions: U256, + /// Total capped rewards (sum of all individual capped rewards) + pub total_capped_rewards: U256, + /// Total rewards before capping (sum of all proportional rewards) + pub total_proportional_rewards: U256, + /// Epoch start time + pub epoch_start_time: u64, + /// Epoch end time + pub epoch_end_time: u64, + /// Rewards by work log ID + pub rewards_by_work_log_id: HashMap, +} + +/// Aggregated PoVW rewards for a work log across all epochs +#[derive(Debug, Clone)] +pub struct PoVWWorkLogIdSummary { + /// The work log ID + pub work_log_id: Address, + /// Total work submitted across all epochs + pub total_work_submitted: U256, + /// Total actual rewards received (after capping) + pub total_actual_rewards: U256, + /// Total uncapped rewards (before capping) + pub total_uncapped_rewards: U256, + /// Number of epochs participated in + pub epochs_participated: u64, +} + +/// Summary statistics for PoVW rewards across all epochs +#[derive(Debug, Clone)] +pub struct PoVWSummary { + /// Total number of epochs with work + pub total_epochs_with_work: usize, + /// Total unique work log IDs + pub total_unique_work_log_ids: usize, + /// Total work across all epochs + pub total_work_all_time: U256, + /// Total emissions across all epochs + pub total_emissions_all_time: U256, + /// Total capped rewards distributed + pub total_capped_rewards_all_time: U256, + /// Total uncapped rewards (before capping) + pub total_uncapped_rewards_all_time: U256, +} + +/// Result of PoVW rewards computation across all epochs +#[derive(Debug, Clone)] +pub struct PoVWRewardsResult { + /// Rewards by epoch + pub epoch_rewards: Vec, + /// Aggregated rewards by work log ID + pub summary_by_work_log_id: HashMap, + /// Summary statistics + pub summary: PoVWSummary, +} + +/// Compute PoVW rewards for a specific epoch from pre-processed cached data +#[allow(clippy::too_many_arguments)] +pub fn compute_povw_rewards_for_epoch( + epoch: U256, + current_epoch: U256, + work_by_work_log_by_epoch: &HashMap<(Address, u64), U256>, + work_recipients_by_epoch: &HashMap<(Address, u64), Address>, + total_work_by_epoch: &HashMap, + pending_epoch_total_work: U256, + povw_emissions_by_epoch: &HashMap, + reward_caps: &HashMap<(Address, u64), U256>, + staking_amounts_by_epoch: &HashMap<(Address, u64), U256>, + epoch_time_ranges: &HashMap, +) -> anyhow::Result { + let epoch_u64 = epoch.to::(); + + // Get emissions for the epoch from cache + let povw_emissions = povw_emissions_by_epoch + .get(&epoch_u64) + .copied() + .ok_or_else(|| 
anyhow::anyhow!("Emissions not found for epoch {}", epoch_u64))?; + + // Get epoch time range + let epoch_time_range = epoch_time_ranges + .get(&epoch_u64) + .ok_or_else(|| anyhow::anyhow!("Epoch time range not found for epoch {}", epoch_u64))?; + let epoch_start_time = epoch_time_range.start_time; + let epoch_end_time = epoch_time_range.end_time; + + // Determine if this is the current epoch + let is_current_epoch = epoch == current_epoch; + + // Get total work for the epoch + let total_work = if is_current_epoch { + // For current epoch, use pending epoch total work + pending_epoch_total_work + } else { + // For past epochs, get from cached total work + total_work_by_epoch.get(&epoch_u64).copied().unwrap_or(U256::ZERO) + }; + + // Get work by work_log_id for this epoch from cache + let mut work_by_work_log_id: HashMap = HashMap::new(); + for ((work_log_id, work_epoch), work) in work_by_work_log_by_epoch { + if *work_epoch == epoch_u64 { + work_by_work_log_id.insert(*work_log_id, *work); + } + } + + // Compute rewards for each work log ID + let mut rewards_by_work_log_id = HashMap::new(); + let mut total_proportional_rewards = U256::ZERO; + let mut total_capped_rewards = U256::ZERO; + + for (work_log_id, work) in work_by_work_log_id { + let proportional_rewards = + if total_work > U256::ZERO { work * povw_emissions / total_work } else { U256::ZERO }; + + // Get reward cap from cache + let reward_cap = reward_caps.get(&(work_log_id, epoch_u64)).copied().ok_or_else(|| { + anyhow::anyhow!( + "Reward cap not found for work log {:?} in epoch {}", + work_log_id, + epoch_u64 + ) + })?; + + // Apply cap + let capped_rewards = proportional_rewards.min(reward_cap); + let is_capped = capped_rewards < proportional_rewards; + + // Get staking amount from cache for this epoch + let staking_amount = + staking_amounts_by_epoch.get(&(work_log_id, epoch_u64)).copied().unwrap_or(U256::ZERO); + + // Get the actual recipient from cache + let recipient_address = + work_recipients_by_epoch.get(&(work_log_id, epoch_u64)).copied().unwrap_or(work_log_id); + + // Track totals + total_proportional_rewards += proportional_rewards; + total_capped_rewards += capped_rewards; + + rewards_by_work_log_id.insert( + work_log_id, + WorkLogRewardInfo { + work_log_id, + work, + proportional_rewards, + capped_rewards, + reward_cap, + is_capped, + recipient_address, + staking_amount, + }, + ); + } + + Ok(EpochPoVWRewards { + epoch, + total_work, + total_emissions: povw_emissions, + total_capped_rewards, + total_proportional_rewards, + epoch_start_time, + epoch_end_time, + rewards_by_work_log_id, + }) +} + +/// Compute PoVW rewards for all epochs and generate aggregates +#[allow(clippy::too_many_arguments)] +pub fn compute_povw_rewards( + current_epoch: u64, + processing_end_epoch: u64, + work_by_work_log_by_epoch: &HashMap<(Address, u64), U256>, + work_recipients_by_epoch: &HashMap<(Address, u64), Address>, + total_work_by_epoch: &HashMap, + pending_epoch_total_work: U256, + povw_emissions_by_epoch: &HashMap, + reward_caps: &HashMap<(Address, u64), U256>, + staking_amounts_by_epoch: &HashMap<(Address, u64), U256>, + epoch_time_ranges: &HashMap, +) -> anyhow::Result { + let mut epoch_rewards = Vec::new(); + let mut aggregates_by_work_log: HashMap = HashMap::new(); + + // Statistics for summary + let mut total_epochs_with_work = 0; + let mut total_work_all_time = U256::ZERO; + let mut total_emissions_all_time = U256::ZERO; + let mut total_capped_rewards_all_time = U256::ZERO; + let mut total_uncapped_rewards_all_time = U256::ZERO; 
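// Note on the loop below: epochs are replayed from 0 through
// processing_end_epoch, so the per-work-log aggregates accumulate across every
// epoch, while epochs_participated counts only epochs where that log submitted
// nonzero work. Because each epoch's proportional shares round down in U256
// division, the all-time capped total can sit slightly below all-time
// emissions even when no cap binds.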
+ + // Process each epoch from 0 to processing_end_epoch + for epoch_num in 0..=processing_end_epoch { + let epoch_result = compute_povw_rewards_for_epoch( + U256::from(epoch_num), + U256::from(current_epoch), + work_by_work_log_by_epoch, + work_recipients_by_epoch, + total_work_by_epoch, + pending_epoch_total_work, + povw_emissions_by_epoch, + reward_caps, + staking_amounts_by_epoch, + epoch_time_ranges, + )?; + + // Update summary statistics + if epoch_result.total_work > U256::ZERO { + total_epochs_with_work += 1; + } + total_work_all_time += epoch_result.total_work; + total_emissions_all_time += epoch_result.total_emissions; + total_capped_rewards_all_time += epoch_result.total_capped_rewards; + total_uncapped_rewards_all_time += epoch_result.total_proportional_rewards; + + // Update aggregates for each work log ID in this epoch + for (work_log_id, info) in &epoch_result.rewards_by_work_log_id { + let entry = aggregates_by_work_log.entry(*work_log_id).or_insert_with(|| { + PoVWWorkLogIdSummary { + work_log_id: *work_log_id, + total_work_submitted: U256::ZERO, + total_actual_rewards: U256::ZERO, + total_uncapped_rewards: U256::ZERO, + epochs_participated: 0, + } + }); + + entry.total_work_submitted += info.work; + entry.total_actual_rewards += info.capped_rewards; + entry.total_uncapped_rewards += info.proportional_rewards; + if info.work > U256::ZERO { + entry.epochs_participated += 1; + } + } + + epoch_rewards.push(epoch_result); + } + + let summary = PoVWSummary { + total_epochs_with_work, + total_unique_work_log_ids: aggregates_by_work_log.len(), + total_work_all_time, + total_emissions_all_time, + total_capped_rewards_all_time, + total_uncapped_rewards_all_time, + }; + + Ok(PoVWRewardsResult { epoch_rewards, summary_by_work_log_id: aggregates_by_work_log, summary }) +} diff --git a/crates/rewards/src/powers.rs b/crates/rewards/src/powers.rs new file mode 100644 index 000000000..c1b93fbf3 --- /dev/null +++ b/crates/rewards/src/powers.rs @@ -0,0 +1,186 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Voting and reward delegation power tracking. + +use alloy::primitives::{Address, U256}; +use std::collections::{HashMap, HashSet}; + +/// Delegation powers for voting and rewards +#[derive(Debug, Clone)] +pub struct DelegationPowers { + /// Voting power held + pub vote_power: U256, + /// Reward power held + pub reward_power: U256, + /// Addresses that have delegated voting to this address + pub vote_delegators: Vec
<Address>, + /// Addresses that have delegated rewards to this address + pub reward_delegators: Vec<Address>
, +} + +/// Delegation powers for all addresses at a specific epoch +#[derive(Debug, Clone)] +pub struct EpochDelegationPowers { + /// The epoch number + pub epoch: u64, + /// Delegation powers by address + pub powers: HashMap, +} + +// Event types for delegation processing +#[derive(Debug, Clone)] +pub enum DelegationEvent { + VoteDelegationChange { delegator: Address, new_delegate: Address }, + RewardDelegationChange { delegator: Address, new_delegate: Address }, + VotePowerChange { delegate: Address, new_votes: U256 }, + RewardPowerChange { delegate: Address, new_rewards: U256 }, +} + +#[derive(Debug, Clone)] +pub struct TimestampedDelegationEvent { + pub event: DelegationEvent, + pub timestamp: u64, + pub block_number: u64, + pub transaction_index: u64, + pub log_index: u64, + pub epoch: u64, +} + +/// Compute delegation powers from pre-processed timestamped events +pub fn compute_delegation_powers( + timestamped_events: &[TimestampedDelegationEvent], + _current_epoch: u64, + processing_end_epoch: u64, +) -> anyhow::Result> { + // Track current state + let mut current_vote_powers: HashMap = HashMap::new(); + let mut current_reward_powers: HashMap = HashMap::new(); + let mut current_vote_delegations: HashMap = HashMap::new(); // delegator -> delegate + let mut current_reward_delegations: HashMap = HashMap::new(); // delegator -> delegate + let mut epoch_states: HashMap> = HashMap::new(); + let mut last_epoch: Option = None; + + for event in timestamped_events { + // Capture state at epoch boundaries + if last_epoch.is_some() && last_epoch != Some(event.epoch) { + if let Some(last) = last_epoch { + for epoch in last..event.epoch { + let epoch_powers = build_epoch_delegation_powers( + ¤t_vote_powers, + ¤t_reward_powers, + ¤t_vote_delegations, + ¤t_reward_delegations, + ); + epoch_states.insert(epoch, epoch_powers); + } + } + } + + // Apply the event + match &event.event { + DelegationEvent::VoteDelegationChange { delegator, new_delegate } => { + if *delegator == *new_delegate { + current_vote_delegations.remove(delegator); + } else { + current_vote_delegations.insert(*delegator, *new_delegate); + } + } + DelegationEvent::RewardDelegationChange { delegator, new_delegate } => { + if *delegator == *new_delegate { + current_reward_delegations.remove(delegator); + } else { + current_reward_delegations.insert(*delegator, *new_delegate); + } + } + DelegationEvent::VotePowerChange { delegate, new_votes } => { + if *new_votes > U256::ZERO { + current_vote_powers.insert(*delegate, *new_votes); + } else { + current_vote_powers.remove(delegate); + } + } + DelegationEvent::RewardPowerChange { delegate, new_rewards } => { + if *new_rewards > U256::ZERO { + current_reward_powers.insert(*delegate, *new_rewards); + } else { + current_reward_powers.remove(delegate); + } + } + } + + last_epoch = Some(event.epoch); + } + + // Capture final state for remaining epochs + if let Some(last) = last_epoch { + for epoch in last..=processing_end_epoch { + let epoch_powers = build_epoch_delegation_powers( + ¤t_vote_powers, + ¤t_reward_powers, + ¤t_vote_delegations, + ¤t_reward_delegations, + ); + epoch_states.insert(epoch, epoch_powers); + } + } + + // Convert to Vec + let mut result: Vec = epoch_states + .into_iter() + .map(|(epoch, powers)| EpochDelegationPowers { epoch, powers }) + .collect(); + + result.sort_by_key(|e| e.epoch); + + Ok(result) +} + +fn build_epoch_delegation_powers( + vote_powers: &HashMap, + reward_powers: &HashMap, + vote_delegations: &HashMap, + reward_delegations: &HashMap, +) -> HashMap { + 
let mut epoch_powers = HashMap::new(); + + // Get all delegates that have either vote or reward power + let all_delegates: HashSet<Address>
= + vote_powers.keys().chain(reward_powers.keys()).copied().collect(); + + for delegate in all_delegates { + let vote_power = vote_powers.get(&delegate).copied().unwrap_or(U256::ZERO); + let reward_power = reward_powers.get(&delegate).copied().unwrap_or(U256::ZERO); + + // Find delegators for this delegate + let vote_delegators: Vec<Address>
= vote_delegations + .iter() + .filter(|(_, &del)| del == delegate) + .map(|(delegator, _)| *delegator) + .collect(); + + let reward_delegators: Vec<Address>
= reward_delegations + .iter() + .filter(|(_, &del)| del == delegate) + .map(|(delegator, _)| *delegator) + .collect(); + + epoch_powers.insert( + delegate, + DelegationPowers { vote_power, reward_power, vote_delegators, reward_delegators }, + ); + } + + epoch_powers +} diff --git a/crates/rewards/src/staking.rs b/crates/rewards/src/staking.rs new file mode 100644 index 000000000..bddf0196f --- /dev/null +++ b/crates/rewards/src/staking.rs @@ -0,0 +1,560 @@ +// Copyright 2025 RISC Zero, Inc. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! Staking position tracking and rewards computation. + +use alloy::primitives::{Address, U256}; +use std::collections::{HashMap, HashSet}; + +/// Types of staking events from the blockchain +#[derive(Debug, Clone)] +pub enum StakeEvent { + Created { owner: Address, amount: U256 }, + Added { owner: Address, new_total: U256 }, + UnstakeInitiated { owner: Address }, + UnstakeCompleted { owner: Address }, + RewardDelegateChanged { delegator: Address, new_delegate: Address }, + VoteDelegateChanged { delegator: Address, new_delegate: Address }, +} + +/// Staking event with block timestamp and epoch information +#[derive(Debug, Clone)] +pub struct TimestampedStakeEvent { + pub block_number: u64, + pub block_timestamp: u64, + pub transaction_index: u64, + pub log_index: u64, + pub epoch: u64, + pub event: StakeEvent, +} + +/// Represents a staking position with delegation information +#[derive(Debug, Clone)] +pub struct StakingPosition { + /// Amount of tokens staked + pub staked_amount: U256, + /// Whether the stake is being withdrawn + pub is_withdrawing: bool, + /// Address to whom rewards are delegated + pub rewards_delegated_to: Option
<Address>, + /// Address to whom voting power is delegated + pub votes_delegated_to: Option<Address>
, + /// Rewards generated by this position (regardless of delegation) + pub rewards_generated: U256, +} + +/// Information about rewards actually received by an address in an epoch +/// This represents the actual recipient of rewards (could be via delegation) +#[derive(Debug, Clone)] +pub struct StakerRewardInfo { + /// Address that received the rewards + pub staker_address: Address, + /// Staking power (veZKC balance) that generated these rewards + pub staking_power: U256, + /// Rewards actually received by this address + pub rewards_earned: U256, + /// Percentage of total rewards received + pub percentage: f64, +} + +/// Complete staking data for a single epoch (positions + rewards) +#[derive(Debug, Clone)] +pub struct EpochStakingData { + /// The epoch number + pub epoch: u64, + /// Staking positions by staker address (tracks who owns positions and rewards generated) + pub positions_by_staker: HashMap, + /// Total staked amount in this epoch + pub total_staked: U256, + /// Number of active stakers + pub num_stakers: usize, + /// Number of stakers in withdrawal + pub num_withdrawing: usize, + /// Total staking emissions for this epoch + pub total_staking_emissions: U256, + /// Total staking power in this epoch + pub total_staking_power: U256, + /// Rewards by recipient address (tracks who actually received rewards) + pub rewards_by_address: HashMap, + /// Number of participants who earned rewards + pub num_reward_recipients: usize, +} + +/// Aggregate data for a single staker across all epochs +#[derive(Debug, Clone)] +pub struct StakerAggregate { + /// Staker address + pub staker_address: Address, + /// Current staked amount (latest epoch) + pub current_staked: U256, + /// Whether currently withdrawing + pub is_withdrawing: bool, + /// Current rewards delegate + pub rewards_delegated_to: Option
<Address>, + /// Current votes delegate + pub votes_delegated_to: Option<Address>
, + /// Total rewards generated by positions owned by this address + pub total_rewards_generated: U256, + /// Total rewards actually received by this address (from own positions or delegations) + pub total_rewards_earned: U256, + /// Number of epochs participated in + pub epochs_participated: u64, +} + +/// Summary statistics across all epochs +#[derive(Debug, Clone)] +pub struct StakingSummary { + /// Total staked amount in the latest epoch + pub current_total_staked: U256, + /// Total unique stakers ever + pub total_unique_stakers: usize, + /// Current number of active stakers + pub current_active_stakers: usize, + /// Current number of stakers withdrawing + pub current_withdrawing: usize, + /// Total staking emissions across all epochs + pub total_staking_emissions_all_time: U256, + /// Total unique reward recipients across all epochs + pub total_unique_reward_recipients: usize, +} + +/// Complete result of staking data computation +#[derive(Debug, Clone)] +pub struct StakingDataResult { + /// Staking data by epoch + pub epochs: Vec, + /// Aggregate data by staker + pub staker_aggregates: HashMap, + /// Summary statistics + pub summary: StakingSummary, +} + +/// Compute all staking data (positions and rewards) for all epochs +pub fn compute_staking_data( + current_epoch: u64, + processing_end_epoch: u64, + timestamped_stake_events: &[TimestampedStakeEvent], + staking_emissions_by_epoch: &HashMap, + staking_power_by_address_by_epoch: &HashMap<(Address, u64), U256>, + total_staking_power_by_epoch: &HashMap, +) -> anyhow::Result { + // First compute positions for all epochs + let positions_by_epoch = + compute_positions(timestamped_stake_events, current_epoch, processing_end_epoch)?; + + // Now compute rewards and combine with positions + let mut epochs = Vec::new(); + let mut staker_aggregates: HashMap = HashMap::new(); + + // Track statistics + let mut all_stakers_ever = HashSet::new(); + let mut all_reward_recipients_ever = HashSet::new(); + let mut total_staking_emissions_all_time = U256::ZERO; + + for mut epoch_positions in positions_by_epoch { + let epoch = epoch_positions.epoch; + + // Get emissions for this epoch + let staking_emissions = + staking_emissions_by_epoch.get(&epoch).copied().unwrap_or(U256::ZERO); + + // Get total staking power + let total_staking_power = + total_staking_power_by_epoch.get(&epoch).copied().unwrap_or(U256::ZERO); + + // Compute rewards for this epoch + let mut rewards_by_recipient = HashMap::new(); + + if staking_emissions > U256::ZERO { + // First: Calculate rewards GENERATED by each position based on staked amount + // This shows the value created by each position regardless of delegation + if epoch_positions.total_staked > U256::ZERO { + for position in epoch_positions.positions.values_mut() { + if position.staked_amount > U256::ZERO { + // Rewards generated based on position's stake proportion + position.rewards_generated = (position.staked_amount * staking_emissions) + / epoch_positions.total_staked; + } + } + } + + // Second: Determine who RECEIVES the rewards using staking power + // Staking power accounts for delegations (delegator has 0, delegate has sum) + if total_staking_power > U256::ZERO { + for ((staker_address, staker_epoch), staking_power) in + staking_power_by_address_by_epoch + { + if *staker_epoch == epoch && *staking_power > U256::ZERO { + // Calculate rewards to receive based on staking power + let rewards_to_receive = + (*staking_power * staking_emissions) / total_staking_power; + + // Find who actually receives these rewards + // 
Note: The staker with staking_power might be receiving delegated rewards + let recipient_info = rewards_by_recipient + .entry(*staker_address) + .or_insert_with(|| StakerRewardInfo { + staker_address: *staker_address, + staking_power: U256::ZERO, + rewards_earned: U256::ZERO, + percentage: 0.0, + }); + recipient_info.staking_power += staking_power; + recipient_info.rewards_earned += rewards_to_receive; + + all_reward_recipients_ever.insert(*staker_address); + } + } + + // Calculate percentages for recipients + for recipient_info in rewards_by_recipient.values_mut() { + recipient_info.percentage = (recipient_info.staking_power * U256::from(10000) + / total_staking_power) + .to::() as f64 + / 100.0; + } + } + } + + // Track all stakers from positions + for address in epoch_positions.positions.keys() { + all_stakers_ever.insert(*address); + } + + // Update aggregates + for (address, position) in &epoch_positions.positions { + let aggregate = staker_aggregates.entry(*address).or_insert_with(|| StakerAggregate { + staker_address: *address, + current_staked: U256::ZERO, + is_withdrawing: false, + rewards_delegated_to: None, + votes_delegated_to: None, + total_rewards_generated: U256::ZERO, + total_rewards_earned: U256::ZERO, + epochs_participated: 0, + }); + + // Update with latest position data (since we process epochs in order) + aggregate.current_staked = position.staked_amount; + aggregate.is_withdrawing = position.is_withdrawing; + aggregate.rewards_delegated_to = position.rewards_delegated_to; + aggregate.votes_delegated_to = position.votes_delegated_to; + aggregate.total_rewards_generated += position.rewards_generated; + aggregate.epochs_participated += 1; + } + + // Update aggregates from reward recipients (tracks who actually received rewards) + for (recipient_address, reward_info) in &rewards_by_recipient { + // Ensure recipient has an aggregate entry (they may only be receiving delegated rewards) + let aggregate = + staker_aggregates.entry(*recipient_address).or_insert_with(|| StakerAggregate { + staker_address: *recipient_address, + current_staked: U256::ZERO, + is_withdrawing: false, + rewards_delegated_to: None, + votes_delegated_to: None, + total_rewards_generated: U256::ZERO, + total_rewards_earned: U256::ZERO, + epochs_participated: 0, + }); + + // Add earned rewards (these are rewards actually received) + aggregate.total_rewards_earned += reward_info.rewards_earned; + } + + total_staking_emissions_all_time += staking_emissions; + + // Create combined epoch data + let num_reward_recipients = rewards_by_recipient.len(); + epochs.push(EpochStakingData { + epoch, + positions_by_staker: epoch_positions.positions, + total_staked: epoch_positions.total_staked, + num_stakers: epoch_positions.num_stakers, + num_withdrawing: epoch_positions.num_withdrawing, + total_staking_emissions: staking_emissions, + total_staking_power, + rewards_by_address: rewards_by_recipient, + num_reward_recipients, + }); + } + + // Get latest epoch data for current stats + let latest_epoch = epochs.last().ok_or_else(|| anyhow::anyhow!("No epoch data computed"))?; + + let summary = StakingSummary { + current_total_staked: latest_epoch.total_staked, + total_unique_stakers: all_stakers_ever.len(), + current_active_stakers: latest_epoch.num_stakers, + current_withdrawing: latest_epoch.num_withdrawing, + total_staking_emissions_all_time, + total_unique_reward_recipients: all_reward_recipients_ever.len(), + }; + + Ok(StakingDataResult { epochs, staker_aggregates, summary }) +} + +/// Structure for position 
computation +#[derive(Debug, Clone)] +pub struct EpochStakingPositions { + pub epoch: u64, + pub positions: HashMap, + pub total_staked: U256, + pub num_stakers: usize, + pub num_withdrawing: usize, +} + +/// Compute staking positions from timestamped events +fn compute_positions( + timestamped_events: &[TimestampedStakeEvent], + _current_epoch: u64, + processing_end_epoch: u64, +) -> anyhow::Result> { + // Tracking current state + let mut current_stakes: HashMap = HashMap::new(); + let mut current_withdrawing: HashMap = HashMap::new(); + let mut current_vote_delegations: HashMap = HashMap::new(); + let mut current_reward_delegations: HashMap = HashMap::new(); + + // Results by epoch + let mut epoch_positions = Vec::new(); + let mut last_processed_epoch = None; + + // Process events in chronological order + for event in timestamped_events { + // Check if we've moved to a new epoch + if last_processed_epoch.is_some() && Some(event.epoch) != last_processed_epoch { + // Save snapshot for the previous epoch before moving on + if let Some(prev_epoch) = last_processed_epoch { + epoch_positions.push(create_epoch_snapshot( + prev_epoch, + ¤t_stakes, + ¤t_withdrawing, + ¤t_vote_delegations, + ¤t_reward_delegations, + )); + } + } + + // Apply the event to update current state + match &event.event { + StakeEvent::Created { owner, amount } => { + current_stakes.insert(*owner, *amount); + current_withdrawing.insert(*owner, false); + } + StakeEvent::Added { owner, new_total } => { + current_stakes.insert(*owner, *new_total); + } + StakeEvent::UnstakeInitiated { owner } => { + current_withdrawing.insert(*owner, true); + } + StakeEvent::UnstakeCompleted { owner } => { + current_stakes.remove(owner); + current_withdrawing.remove(owner); + current_vote_delegations.remove(owner); + current_reward_delegations.remove(owner); + } + StakeEvent::RewardDelegateChanged { delegator, new_delegate } => { + if *new_delegate == Address::ZERO { + current_reward_delegations.remove(delegator); + } else { + current_reward_delegations.insert(*delegator, *new_delegate); + } + } + StakeEvent::VoteDelegateChanged { delegator, new_delegate } => { + if *new_delegate == Address::ZERO { + current_vote_delegations.remove(delegator); + } else { + current_vote_delegations.insert(*delegator, *new_delegate); + } + } + } + + last_processed_epoch = Some(event.epoch); + } + + // Process any remaining epochs up to processing_end_epoch + for epoch in last_processed_epoch.unwrap_or(0)..=processing_end_epoch { + if epoch_positions.iter().any(|e| e.epoch == epoch) { + continue; // Already processed + } + + epoch_positions.push(create_epoch_snapshot( + epoch, + ¤t_stakes, + ¤t_withdrawing, + ¤t_vote_delegations, + ¤t_reward_delegations, + )); + } + + // Sort by epoch to ensure chronological order + epoch_positions.sort_by_key(|e| e.epoch); + + Ok(epoch_positions) +} + +fn create_epoch_snapshot( + epoch: u64, + stakes: &HashMap, + withdrawing: &HashMap, + vote_delegations: &HashMap, + reward_delegations: &HashMap, +) -> EpochStakingPositions { + let mut positions = HashMap::new(); + let mut total_staked = U256::ZERO; + let mut num_withdrawing = 0; + + for (address, amount) in stakes { + let is_withdrawing = withdrawing.get(address).copied().unwrap_or(false); + if is_withdrawing { + num_withdrawing += 1; + } + + positions.insert( + *address, + StakingPosition { + staked_amount: *amount, + is_withdrawing, + rewards_delegated_to: reward_delegations.get(address).copied(), + votes_delegated_to: vote_delegations.get(address).copied(), + 
rewards_generated: U256::ZERO, // Will be calculated later when rewards are computed + }, + ); + + total_staked += amount; + } + + let num_stakers = positions.len(); + EpochStakingPositions { epoch, positions, total_staked, num_stakers, num_withdrawing } +} + +// ============================================================================ +// Compatibility Functions (for gradual migration) +// ============================================================================ + +/// Legacy: Compute only staking positions (without rewards) +pub fn compute_staking_positions( + timestamped_events: &[TimestampedStakeEvent], + current_epoch: u64, + processing_end_epoch: u64, +) -> anyhow::Result { + let positions = compute_positions(timestamped_events, current_epoch, processing_end_epoch)?; + + // Convert to legacy format + let epoch_positions = positions + .into_iter() + .map(|p| EpochStakingPositions { + epoch: p.epoch, + positions: p.positions, + total_staked: p.total_staked, + num_stakers: p.num_stakers, + num_withdrawing: p.num_withdrawing, + }) + .collect::>(); + + // Get latest for summary + let latest = epoch_positions.last().ok_or_else(|| anyhow::anyhow!("No epoch data"))?; + + // Count unique stakers + let mut all_stakers = HashSet::new(); + for epoch in &epoch_positions { + for address in epoch.positions.keys() { + all_stakers.insert(*address); + } + } + + let summary = StakingSummary { + current_total_staked: latest.total_staked, + total_unique_stakers: all_stakers.len(), + current_active_stakers: latest.num_stakers, + current_withdrawing: latest.num_withdrawing, + total_staking_emissions_all_time: U256::ZERO, // Not computed in legacy + total_unique_reward_recipients: 0, // Not computed in legacy + }; + + Ok(StakingPositionsResult { epoch_positions, summary }) +} + +/// Legacy result structure (kept for compatibility) +#[derive(Debug, Clone)] +pub struct StakingPositionsResult { + pub epoch_positions: Vec, + pub summary: StakingSummary, +} + +pub fn compute_staking_rewards( + current_epoch: u64, + processing_end_epoch: u64, + staking_emissions_by_epoch: &HashMap, + staking_power_by_address_by_epoch: &HashMap<(Address, u64), U256>, + total_staking_power_by_epoch: &HashMap, +) -> anyhow::Result { + // Use the new unified function internally + let data = compute_staking_data( + current_epoch, + processing_end_epoch, + &[], // No events needed for just rewards + staking_emissions_by_epoch, + staking_power_by_address_by_epoch, + total_staking_power_by_epoch, + )?; + + // Convert to legacy format + let epoch_rewards: Vec = data + .epochs + .into_iter() + .map(|e| EpochStakingRewards { + epoch: e.epoch, + total_staking_emissions: e.total_staking_emissions, + total_staking_power: e.total_staking_power, + rewards_by_staker: e.rewards_by_address, + num_participants: e.num_reward_recipients, + }) + .collect(); + + let total_epochs = epoch_rewards.len(); + let summary = StakingRewardsSummary { + total_epochs_with_rewards: total_epochs, + total_unique_stakers: data.summary.total_unique_reward_recipients, + total_staking_emissions_all_time: data.summary.total_staking_emissions_all_time, + }; + + Ok(StakingRewardsResult { epoch_rewards, summary }) +} + +/// Legacy: Rewards for a single epoch +#[derive(Debug, Clone)] +pub struct EpochStakingRewards { + pub epoch: u64, + pub total_staking_emissions: U256, + pub total_staking_power: U256, + pub rewards_by_staker: HashMap, + pub num_participants: usize, +} + +/// Legacy: Summary for rewards only +#[derive(Debug, Clone)] +pub struct StakingRewardsSummary { + 
pub total_epochs_with_rewards: usize, + pub total_unique_stakers: usize, + pub total_staking_emissions_all_time: U256, +} + +/// Legacy: Result structure for rewards +#[derive(Debug, Clone)] +pub struct StakingRewardsResult { + pub epoch_rewards: Vec, + pub summary: StakingRewardsSummary, +} diff --git a/crates/zkc/build.rs b/crates/zkc/build.rs index 889b65f1c..2a9100bb3 100644 --- a/crates/zkc/build.rs +++ b/crates/zkc/build.rs @@ -7,7 +7,8 @@ mod build_contracts { use std::{env, fs, path::Path}; // Contract interface files to copy to the artifacts folder - const ZKC_INTERFACE_FILES: [&str; 3] = ["IStaking.sol", "IRewards.sol", "IZKC.sol"]; + const ZKC_INTERFACE_FILES: [&str; 4] = + ["IStaking.sol", "IRewards.sol", "IZKC.sol", "IVotes.sol"]; const INTERFACE_FILES: [&str; 1] = ["IStakingRewards.sol"]; /// Copy contract interface files from contracts/src/povw to src/contracts/artifacts diff --git a/crates/zkc/src/contracts/artifacts/IVotes.sol b/crates/zkc/src/contracts/artifacts/IVotes.sol new file mode 100644 index 000000000..15b76565f --- /dev/null +++ b/crates/zkc/src/contracts/artifacts/IVotes.sol @@ -0,0 +1,14 @@ +// SPDX-License-Identifier: MIT +pragma solidity 0.8.26; + +import {IVotes as OZIVotes} from "@openzeppelin/contracts/interfaces/IERC5805.sol"; + +/// @title IVotes +/// @notice Interface that extends OpenZeppelin's IVotes interface with custom errors +/// @dev This allows us to extend the standard IVotes interface in the future if needed +interface IVotes is OZIVotes { + // Custom errors + error CannotDelegateVotesWhileWithdrawing(); + + // This interface extends OpenZeppelin's IVotes interface +} diff --git a/crates/zkc/src/contracts/mod.rs b/crates/zkc/src/contracts/mod.rs index 12510fc4e..df16a3a63 100644 --- a/crates/zkc/src/contracts/mod.rs +++ b/crates/zkc/src/contracts/mod.rs @@ -33,6 +33,11 @@ alloy::sol!( "src/contracts/artifacts/IStakingRewards.sol" ); +alloy::sol!( + #![sol(rpc, all_derives)] + "src/contracts/artifacts/IVotes.sol" +); + pub fn extract_tx_log( receipt: &TransactionReceipt, ) -> Result, anyhow::Error> { diff --git a/dockerfiles/indexer.dockerfile b/dockerfiles/market-indexer.dockerfile similarity index 91% rename from dockerfiles/indexer.dockerfile rename to dockerfiles/market-indexer.dockerfile index 3970c6d11..b3c7c830d 100644 --- a/dockerfiles/indexer.dockerfile +++ b/dockerfiles/market-indexer.dockerfile @@ -60,10 +60,10 @@ COPY foundry.toml . SHELL ["/bin/bash", "-c"] -RUN cargo build --release --bin boundless-indexer +RUN cargo build --release --bin market-indexer FROM init AS runtime -COPY --from=builder /src/target/release/boundless-indexer /app/boundless-indexer +COPY --from=builder /src/target/release/market-indexer /app/market-indexer -ENTRYPOINT ["/app/boundless-indexer"] +ENTRYPOINT ["/app/market-indexer"] diff --git a/dockerfiles/rewards-indexer.dockerfile b/dockerfiles/rewards-indexer.dockerfile new file mode 100644 index 000000000..73597f6f8 --- /dev/null +++ b/dockerfiles/rewards-indexer.dockerfile @@ -0,0 +1,67 @@ +# Build stage +FROM rust:1.88.0-bookworm AS init + +RUN apt-get -qq update && \ + apt-get install -y -q clang + +SHELL ["/bin/bash", "-c"] + +RUN cargo install cargo-chef +ARG CACHE_DATE=2025-07-17 # update this date to force rebuild +# The rewards indexer doesn't need r0vm to run, but cargo chef pulls in dev-dependencies +# which require it. See https://github.com/LukeMathWalker/cargo-chef/issues/114 +# +# Github token can be provided as a secret with the name githubTokenSecret. 
Useful +# for shared build environments where Github rate limiting is an issue. +RUN --mount=type=secret,id=githubTokenSecret,target=/run/secrets/githubTokenSecret \ + if [ -f /run/secrets/githubTokenSecret ]; then \ + GITHUB_TOKEN=$(cat /run/secrets/githubTokenSecret) curl -L https://risczero.com/install | bash && \ + GITHUB_TOKEN=$(cat /run/secrets/githubTokenSecret) PATH="$PATH:/root/.risc0/bin" rzup install; \ + else \ + curl -L https://risczero.com/install | bash && \ + PATH="$PATH:/root/.risc0/bin" rzup install; \ + fi + +FROM init AS planner + +WORKDIR /src + +COPY Cargo.toml . +COPY Cargo.lock . +COPY crates/ ./crates/ +COPY rust-toolchain.toml . +COPY contracts/ ./contracts/ +COPY documentation/ ./documentation/ +COPY lib/ ./lib/ +COPY remappings.txt . +COPY foundry.toml . + +RUN cargo chef prepare --recipe-path recipe.json + +FROM init AS builder + +WORKDIR /src + +COPY --from=planner /src/recipe.json /src/recipe.json + +RUN cargo chef cook --release --recipe-path recipe.json + +COPY Cargo.toml . +COPY Cargo.lock . +COPY crates/ ./crates/ +COPY rust-toolchain.toml . +COPY contracts/ ./contracts/ +COPY documentation/ ./documentation/ +COPY lib/ ./lib/ +COPY remappings.txt . +COPY foundry.toml . + +SHELL ["/bin/bash", "-c"] + +RUN cargo build --release --bin rewards-indexer + +FROM init AS runtime + +COPY --from=builder /src/target/release/rewards-indexer /app/rewards-indexer + +ENTRYPOINT ["/app/rewards-indexer"] diff --git a/infra/builder/configure_builder.sh b/infra/builder/configure_builder.sh index 1ce479032..8d87e479f 100755 --- a/infra/builder/configure_builder.sh +++ b/infra/builder/configure_builder.sh @@ -1,5 +1,5 @@ #!/bin/bash -set -euo pipefail +set -xeuo pipefail # Check if the SSH key is added to the SSH agent if ! ssh-add -l | grep -q "id_ed25519_dev_docker_builder"; then @@ -15,7 +15,7 @@ else fi INSTANCE_ID=$(aws ec2 describe-instances \ - --filters "Name=tag:Name,Values=builder-local" \ + --filters "Name=tag:Name,Values=builder-local-v2" \ --query "Reservations[*].Instances[*].InstanceId" \ --output text) @@ -32,13 +32,13 @@ if [ -n "$INSTANCE_ID" ]; then echo "Instance $INSTANCE_ID is already running." fi else - echo "No instance found with the tag name 'builder-local'." + echo "No instance found with the tag name 'builder-local-v2'." fi # Step 1: Get the EC2 instance DNS name echo "Fetching EC2 instance DNS..." INSTANCE_DNS=$(aws ec2 describe-instances \ - --filters "Name=tag:Name,Values=builder-local" "Name=instance-state-name,Values=running" \ + --filters "Name=tag:Name,Values=builder-local-v2" "Name=instance-state-name,Values=running" \ --query 'Reservations[*].Instances[*].PublicDnsName' \ --output text) @@ -61,10 +61,10 @@ else echo "$INSTANCE_DNS is already in known_hosts" fi -# Step 3: Remove any previously registered builder, suppressing errors +# Step 3: Remove any previously registered builder (errors shown but ignored) # Typically there are errors as it tries to ssh into the previous instance, but at this point the instance may not exist. echo "Removing any existing Docker builder named 'aws-builder' (if it exists). May take some time..." -docker buildx rm aws-builder > /dev/null 2>&1 || true +docker buildx rm aws-builder || true # Step 4: Register the new builder echo "Creating new Docker builder..." @@ -75,4 +75,4 @@ export DOCKER_REMOTE_BUILDER="aws-builder" echo "DOCKER_REMOTE_BUILDER set to 'aws-builder'" # Done -echo "Docker builder setup complete." \ No newline at end of file +echo "Docker builder setup complete." 
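The DOCKER_REMOTE_BUILDER value exported above is what the indexer stacks later pass into docker_build.Image as the builder name (see components/market-indexer.ts and components/rewards-indexer.ts further down). A condensed TypeScript sketch of that hand-off; the process.env lookup is an assumption, since the line in index.ts that actually reads the variable is outside this diff:

```typescript
import * as docker_build from '@pulumi/docker-build';

// 'aws-builder' is registered by configure_builder.sh; fall back to the
// local Docker daemon when no remote builder has been set up.
const dockerRemoteBuilder = process.env.DOCKER_REMOTE_BUILDER;

const image = new docker_build.Image('example-img', {
  context: { location: '../../' },
  platforms: ['linux/amd64'],
  push: false, // illustrative only; the real components push to ECR
  builder: dockerRemoteBuilder ? { name: dockerRemoteBuilder } : undefined,
});
```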
diff --git a/infra/builder/index.ts b/infra/builder/index.ts index 8d54ffd85..f62e8678c 100644 --- a/infra/builder/index.ts +++ b/infra/builder/index.ts @@ -8,12 +8,12 @@ const publicKey = config.requireSecret('PUBLIC_KEY'); const { bucket, keyAlias } = createPulumiState(); // Generate an SSH key pair -const sshKey = new aws.ec2.KeyPair("ssh-key", { +const sshKey = new aws.ec2.KeyPair("ssh-key-v2", { publicKey: publicKey, }); // Create a new security group for our server -const securityGroup = new aws.ec2.SecurityGroup("builder-sec", { +const securityGroup = new aws.ec2.SecurityGroup("builder-sec-v2", { description: "Enable SSH access and outbound access", ingress: [ { @@ -34,13 +34,13 @@ const securityGroup = new aws.ec2.SecurityGroup("builder-sec", { }); // Create a new EC2 instance with instance store -const serverLocal = new aws.ec2.Instance("builder-local", { +const serverLocal = new aws.ec2.Instance("builder-local-v2", { instanceType: "c6id.2xlarge", // Using c6id.2xlarge which has 16GB RAM and 237GB NVMe SSD keyName: sshKey.keyName, ami: "ami-087f352c165340ea1", // Amazon Linux 2 AMI vpcSecurityGroupIds: [securityGroup.id], tags: { - Name: "builder-local", + Name: "builder-local-v2", }, userDataReplaceOnChange: true, userData: diff --git a/infra/builder/pulumiResources.ts b/infra/builder/pulumiResources.ts index c66e86217..bece7ed65 100644 --- a/infra/builder/pulumiResources.ts +++ b/infra/builder/pulumiResources.ts @@ -1,7 +1,7 @@ import * as pulumi from "@pulumi/pulumi"; import * as aws from "@pulumi/aws"; -const BOUNDLESS_DEV_ADMIN_ROLE_ARN = "arn:aws:iam::751442549745:role/aws-reserved/sso.amazonaws.com/us-east-2/AWSReservedSSO_AWSAdministratorAccess_05b42ccedab0fe1d"; +const BOUNDLESS_DEV_ADMIN_ROLE_ARN = "arn:aws:iam::751442549745:role/aws-reserved/sso.amazonaws.com/us-west-2/AWSReservedSSO_BoundlessDevelopmentAdmin_0c1fec23b49c47ae"; // Pulumi state bucket and secret key just used for the builder. // Builder is just deployed to dev, so does not use the state bucket that we use for staging/prod. 
@@ -10,9 +10,9 @@ export const createPulumiState = (): { keyAlias: aws.kms.Alias, } => { const bucket = new aws.s3.BucketV2( - 'boundless-builder-state-bucket', - { - bucketPrefix: 'boundless-builder-state', + 'boundless-builder-state-bucket-v2', + { + bucketPrefix: 'boundless-builder-state-v2', }, { protect: true, @@ -23,28 +23,28 @@ export const createPulumiState = (): { const bucketPolicy: aws.iam.PolicyDocument = { Version: "2012-10-17", Statement: [ - { - "Effect": "Allow", - "Principal": { - "AWS": [ - BOUNDLESS_DEV_ADMIN_ROLE_ARN, - ] - }, - "Action": [ - "s3:GetObject", - "s3:ListBucket", - "s3:PutObject", - "s3:DeleteObject", - ], - "Resource": [ - pulumi.interpolate`${bucket.arn}`, - pulumi.interpolate`${bucket.arn}/*` - ] - } + { + "Effect": "Allow", + "Principal": { + "AWS": [ + BOUNDLESS_DEV_ADMIN_ROLE_ARN, + ] + }, + "Action": [ + "s3:GetObject", + "s3:ListBucket", + "s3:PutObject", + "s3:DeleteObject", + ], + "Resource": [ + pulumi.interpolate`${bucket.arn}`, + pulumi.interpolate`${bucket.arn}/*` + ] + } ] }; - new aws.s3.BucketPolicy("builder-state-bucket-policy", { + new aws.s3.BucketPolicy("builder-state-bucket-policy-v2", { bucket: bucket.id, policy: pulumi.jsonStringify(bucketPolicy), }); @@ -61,8 +61,8 @@ export const createPulumiState = (): { } ); - const keyAlias = new aws.kms.Alias('builder-secrets-key-alias', { - name: 'alias/builder-secrets-key', + const keyAlias = new aws.kms.Alias('builder-secrets-key-alias-v2', { + name: 'alias/builder-secrets-key-v2', targetKeyId: pulumiSecretsKey.keyId, }); @@ -82,7 +82,7 @@ export const createPulumiState = (): { ], }; - new aws.kms.KeyPolicy('builder-secrets-key-policy', { + new aws.kms.KeyPolicy('builder-secrets-key-policy-v2', { keyId: pulumiSecretsKey.id, policy: pulumi.jsonStringify(keyPolicyDoc), }); diff --git a/infra/indexer/Pulumi.l-prod-1.yaml b/infra/indexer/Pulumi.l-prod-1.yaml new file mode 100644 index 000000000..1356ee770 --- /dev/null +++ b/infra/indexer/Pulumi.l-prod-1.yaml @@ -0,0 +1,15 @@ +secretsprovider: awskms:///arn:aws:kms:us-west-2:968153779208:alias/pulumi-secrets-key +encryptedkey: AQICAHhnm/1/W7/xeF2uxmXOGjFzOf6jjMX+KgbMb0K+LSCbsAGxUaGCL++GQCvX/zLtljzzAAAAfjB8BgkqhkiG9w0BBwagbzBtAgEAMGgGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQM9k9K+qatJH8EMesJAgEQgDuCOpKHxoA6+LTQnfvVw5a6ELbGojJbAVAA6e5wTaWkMArFNkFiyhHacnJUQnjpAiVWKl2FDz6D4l6NkQ== +config: + aws:region: us-west-2 + indexer:DOCKER_DIR: ../../ + indexer:DOCKER_TAG: latest + indexer:START_BLOCK: "23250070" + indexer:BASE_STACK: organization/bootstrap/services-prod + indexer:POVW_ACCOUNTING_ADDRESS: 0x319bd4050b2170a7aE3Ead3E6d5AB8a5c7cFBDF8 + indexer:VEZKC_ADDRESS: 0xE8Ae8eE8ffa57F6a79B6Cbe06BAFc0b05F3ffbf4 + indexer:ZKC_ADDRESS: 0x000006c2A22ff4A44ff1f5d0F2ed65F781F55555 + indexer:RUST_LOG: "info" + indexer:CHAIN_ID: "1" + indexer:INDEXER_API_DOMAIN: indexer.eth-mainnet.boundless.network + \ No newline at end of file diff --git a/infra/indexer/Pulumi.l-prod-11155111.yaml b/infra/indexer/Pulumi.l-prod-11155111.yaml index 5501de77e..9e8d99113 100644 --- a/infra/indexer/Pulumi.l-prod-11155111.yaml +++ b/infra/indexer/Pulumi.l-prod-11155111.yaml @@ -7,6 +7,9 @@ config: indexer:CHAIN_ID: "11155111" indexer:DOCKER_DIR: ../../ indexer:DOCKER_TAG: latest + indexer:POVW_ACCOUNTING_ADDRESS: 0xC5E956732F4bA6B1973a859Cf382244db6e84D0b + indexer:VEZKC_ADDRESS: 0xc23340732038ca6C5765763180E81B395d2e9cCA + indexer:ZKC_ADDRESS: 0xb4FC69A452D09D2662BD8C3B5BB756902260aE28 indexer:GH_TOKEN_SECRET: secure: 
v1:UidEowMsyIGoV5cu:nseGsOs61VAdZAZMS+XYkfRCoj9NHPqxbvGukVhJ6avXtzy/6/Fzmo0aYQRZWLziLLEsOrstp/IS0/IyHkO7iAzaKoV4mRmojpDx3Eg1xsWpR+7oRMZD6IOWaeBmf3p6P4t3jHm5IAhfO9UutQ== indexer:PAGERDUTY_ALERTS_TOPIC_ARN: arn:aws:sns:us-west-2:968153779208:boundless-pagerduty-topic @@ -17,3 +20,4 @@ config: indexer:START_BLOCK: "9125190" indexer:ETH_RPC_URL: secure: v1:9tN91A1uwvQ5nwuo:gCEg8168h9WizyuxgTEFHWN4xr6yRrLs4uD4BJcun1LMQ+YghSaDAqOxnQCYGRs4E76W41iQHolE3rZBuqftG0WDH/eIRNTI1gY= + indexer:INDEXER_API_DOMAIN: indexer.eth-sepolia.boundless.network diff --git a/infra/indexer/Pulumi.l-staging-11155111.yaml b/infra/indexer/Pulumi.l-staging-11155111.yaml new file mode 100644 index 000000000..40117d94e --- /dev/null +++ b/infra/indexer/Pulumi.l-staging-11155111.yaml @@ -0,0 +1,12 @@ +secretsprovider: awskms:///arn:aws:kms:us-west-2:968153779208:alias/pulumi-secrets-key +encryptedkey: AQICAHhnm/1/W7/xeF2uxmXOGjFzOf6jjMX+KgbMb0K+LSCbsAFIpCALI/QCNNWmPcLuTxX9AAAAfjB8BgkqhkiG9w0BBwagbzBtAgEAMGgGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMnK86ocERMhqNKwPmAgEQgDvHWap3b3OE6kGKbObtaWi0/DRXcKml5Es7irokJKf1ZEu0qSEXwXsQjf0R3Le4n13PeRbyaRHpCg4f8w== +config: + aws:region: us-west-2 + indexer:DOCKER_DIR: ../../ + indexer:DOCKER_TAG: latest + indexer:BASE_STACK: organization/bootstrap/services-staging + indexer:POVW_ACCOUNTING_ADDRESS: 0x129aEAB812d77c102ff1AD9D76DfB55FaAE4b8D3 + indexer:VEZKC_ADDRESS: 0xB7c63161DA06eC56840465308e24C70dF65ABC56 + indexer:ZKC_ADDRESS: 0x99A52662b576f4b2D4FFBc4504331A624a7b2846 + indexer:RUST_LOG: "info" + indexer:CHAIN_ID: "11155111" diff --git a/infra/indexer/alarmConfig.ts b/infra/indexer/alarmConfig.ts index a3d0d99f0..61e6657b0 100644 --- a/infra/indexer/alarmConfig.ts +++ b/infra/indexer/alarmConfig.ts @@ -43,6 +43,10 @@ type ChainStageAlarmConfig = { export const alarmConfig: ChainStageAlarms = { + [ChainId.ETH_MAINNET]: { + [Stage.STAGING]: undefined, + [Stage.PROD]: undefined, + }, [ChainId.BASE_SEPOLIA]: { [Stage.STAGING]: { clients: [ diff --git a/infra/indexer/components/indexer-api.ts b/infra/indexer/components/indexer-api.ts new file mode 100644 index 000000000..c810ccda7 --- /dev/null +++ b/infra/indexer/components/indexer-api.ts @@ -0,0 +1,489 @@ +import * as path from 'path'; +import * as aws from '@pulumi/aws'; +import * as pulumi from '@pulumi/pulumi'; +import { createRustLambda } from './rust-lambda'; + +export interface IndexerApiArgs { + /** VPC where RDS lives */ + vpcId: pulumi.Input<string>; + /** Private subnets for Lambda to attach to */ + privSubNetIds: pulumi.Input<pulumi.Input<string>[]>; + /** RDS Url secret */ + dbUrlSecret: aws.secretsmanager.Secret; + /** RDS sg ID */ + rdsSgId: pulumi.Input<string>; + /** Indexer Security Group ID (that has access to RDS) */ + indexerSgId: pulumi.Input<string>; + /** RUST_LOG level */ + rustLogLevel: string; + /** Optional custom domain for CloudFront */ + domain?: pulumi.Input<string>; +}
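For orientation before the implementation: the component is instantiated roughly as below. The real call site is in infra/indexer/index.ts at the end of this diff; the domain is the Sepolia value from the stack config above, and the declared inputs are placeholders:

```typescript
import * as pulumi from '@pulumi/pulumi';
import { IndexerShared } from './indexer-infra';
import { IndexerApi } from './indexer-api';

// Placeholders standing in for values that index.ts pulls from the base stack.
declare const vpcId: pulumi.Output<string>;
declare const privSubNetIds: pulumi.Output<string[]>;
declare const infra: IndexerShared;

const api = new IndexerApi('indexer-api-example', {
  vpcId,
  privSubNetIds,
  dbUrlSecret: infra.dbUrlSecret,
  rdsSgId: infra.rdsSecurityGroupId,
  indexerSgId: infra.indexerSecurityGroup.id,
  rustLogLevel: 'info',
  domain: 'indexer.eth-sepolia.boundless.network', // optional CloudFront alias
});

export const apiUrl = api.cloudFrontDomain;
```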
+ +export class IndexerApi extends pulumi.ComponentResource { + public readonly lambdaFunction: aws.lambda.Function; + public readonly apiEndpoint: pulumi.Output<string>; + public readonly apiGatewayId: pulumi.Output<string>; + public readonly logGroupName: pulumi.Output<string>; + public readonly cloudFrontDomain: pulumi.Output<string>; + public readonly distributionId: pulumi.Output<string>; + + constructor( + name: string, + args: IndexerApiArgs, + opts?: pulumi.ComponentResourceOptions, + ) { + super(name, name, opts); + + const serviceName = name; + + const usEast1Provider = new aws.Provider( + `${serviceName}-us-east-1`, + { region: 'us-east-1' }, + { parent: this }, + ); + + // Create IAM role for Lambda + const role = new aws.iam.Role( + `${serviceName}-role`, + { + assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({ Service: 'lambda.amazonaws.com' }), + }, + { parent: this }, + ); + + // Attach basic execution role policy + new aws.iam.RolePolicyAttachment( + `${serviceName}-logs`, + { + role: role.name, + policyArn: aws.iam.ManagedPolicies.AWSLambdaBasicExecutionRole, + }, + { parent: this }, + ); + + // Attach VPC access policy + new aws.iam.RolePolicyAttachment( + `${serviceName}-vpc-access`, + { + role: role.name, + policyArn: aws.iam.ManagedPolicies.AWSLambdaVPCAccessExecutionRole, + }, + { parent: this }, + ); + + // Create inline policy for Secrets Manager access + const inlinePolicy = pulumi.all([args.dbUrlSecret.arn]).apply(([secretArn]) => + JSON.stringify({ + Version: '2012-10-17', + Statement: [ + { + Effect: 'Allow', + Action: ['secretsmanager:GetSecretValue'], + Resource: [secretArn], + }, + ], + }), + ); + + new aws.iam.RolePolicy( + `${serviceName}-policy`, + { + role: role.id, + policy: inlinePolicy, + }, + { parent: this }, + ); + + // Use the existing indexer security group that already has access to RDS + // This is the same security group used by the ECS tasks + + // Get database URL from secret + const dbUrl = aws.secretsmanager.getSecretVersionOutput({ + secretId: args.dbUrlSecret.id, + }).secretString; + + // Create the Lambda function + const { lambda, logGroupName } = createRustLambda(`${serviceName}-lambda`, { + projectPath: path.join(__dirname, '../../../'), + packageName: 'indexer-api', + release: true, + role: role.arn, + environmentVariables: { + DB_URL: dbUrl, + RUST_LOG: args.rustLogLevel, + }, + memorySize: 256, + timeout: 30, + vpcConfig: { + subnetIds: args.privSubNetIds, + securityGroupIds: [args.indexerSgId], + }, + }); + + this.lambdaFunction = lambda; + this.logGroupName = logGroupName; + + // Create API Gateway v2 (HTTP API) + const api = new aws.apigatewayv2.Api( + `${serviceName}-api`, + { + name: serviceName, + protocolType: 'HTTP', + corsConfiguration: { + allowOrigins: ['*'], + allowMethods: ['GET', 'OPTIONS'], + allowHeaders: ['content-type', 'x-amz-date', 'authorization', 'x-api-key', 'x-amz-security-token'], + exposeHeaders: ['x-amzn-RequestId'], + maxAge: 300, + }, + }, + { parent: this }, + ); + + this.apiGatewayId = api.id; + + // Create Lambda integration + const integration = new aws.apigatewayv2.Integration( + `${serviceName}-integration`, + { + apiId: api.id, + integrationType: 'AWS_PROXY', + integrationUri: lambda.arn, + integrationMethod: 'POST', + payloadFormatVersion: '2.0', + }, + { parent: this }, + ); + + // Create route for all paths (Lambda will handle routing internally) + new aws.apigatewayv2.Route( + `${serviceName}-route`, + { + apiId: api.id, + routeKey: '$default', + target: pulumi.interpolate`integrations/${integration.id}`, + }, + { parent: this }, + ); + + // Create deployment stage + const apiStage = new aws.apigatewayv2.Stage( + `${serviceName}-stage`, + { + apiId: api.id, + name: '$default', + autoDeploy: true, + }, + { parent: this }, + ); + + this.apiEndpoint = pulumi.interpolate`${api.apiEndpoint}`; + + // Grant API Gateway permission to invoke Lambda + new aws.lambda.Permission( + `${serviceName}-api-permission`, + { + function: lambda.name, + statementId: 'AllowAPIGatewayInvoke', + action: 'lambda:InvokeFunction', + principal: 'apigateway.amazonaws.com', + sourceArn: pulumi.interpolate`${api.executionArn}/*`, + }, + { parent: this }, + ); + + + let certificateArn: pulumi.Output<string> | undefined; + let certificateValidation: 
aws.acm.CertificateValidation | undefined; + let certificateValidationRecords: pulumi.Output<{ name: string; value: string; type: string }[]> | undefined; + let distributionAliases: pulumi.Input<pulumi.Input<string>[]> | undefined; + + if (args.domain) { + const certificate = new aws.acm.Certificate( + `${serviceName}-cert`, + { + domainName: args.domain, + validationMethod: 'DNS', + }, + { parent: this, provider: usEast1Provider }, + ); + + certificateArn = certificate.arn; + certificateValidationRecords = certificate.domainValidationOptions.apply(options => + options.map(option => ({ + name: option.resourceRecordName, + value: option.resourceRecordValue, + type: option.resourceRecordType, + })), + ); + + certificateValidation = new aws.acm.CertificateValidation( + `${serviceName}-cert-validation`, + { + certificateArn: certificate.arn, + validationRecordFqdns: certificate.domainValidationOptions.apply(options => + options.map(option => option.resourceRecordName), + ), + }, + { parent: this, provider: usEast1Provider }, + ); + + distributionAliases = [args.domain]; + } + + + // Create WAF WebACL + const webAcl = new aws.wafv2.WebAcl( + `${serviceName}-waf`, + { + name: `${serviceName}-waf`, + scope: 'CLOUDFRONT', + defaultAction: { + allow: {}, + }, + rules: [ + // Rate limiting rule + { + name: 'RateLimitRule', + priority: 1, + statement: { + rateBasedStatement: { + limit: 75, // 75 requests per 5 minutes per IP + aggregateKeyType: 'IP', + forwardedIpConfig: { + headerName: 'CF-Connecting-IP', + fallbackBehavior: 'MATCH', + }, + }, + }, + action: { + block: {}, + }, + visibilityConfig: { + sampledRequestsEnabled: true, + cloudwatchMetricsEnabled: true, + metricName: 'RateLimitRule', + }, + }, + // AWS Managed Core Rule Set + { + name: 'AWS-AWSManagedRulesCommonRuleSet', + priority: 2, + overrideAction: { + none: {}, + }, + statement: { + managedRuleGroupStatement: { + vendorName: 'AWS', + name: 'AWSManagedRulesCommonRuleSet', + }, + }, + visibilityConfig: { + sampledRequestsEnabled: true, + cloudwatchMetricsEnabled: true, + metricName: 'AWSManagedRulesCommonRuleSetMetric', + }, + }, + // AWS Managed Known Bad Inputs Rule Set + { + name: 'AWS-AWSManagedRulesKnownBadInputsRuleSet', + priority: 3, + overrideAction: { + none: {}, + }, + statement: { + managedRuleGroupStatement: { + vendorName: 'AWS', + name: 'AWSManagedRulesKnownBadInputsRuleSet', + }, + }, + visibilityConfig: { + sampledRequestsEnabled: true, + cloudwatchMetricsEnabled: true, + metricName: 'AWSManagedRulesKnownBadInputsRuleSetMetric', + }, + }, + // SQL Injection Protection + { + name: 'AWS-AWSManagedRulesSQLiRuleSet', + priority: 4, + overrideAction: { + none: {}, + }, + statement: { + managedRuleGroupStatement: { + vendorName: 'AWS', + name: 'AWSManagedRulesSQLiRuleSet', + }, + }, + visibilityConfig: { + sampledRequestsEnabled: true, + cloudwatchMetricsEnabled: true, + metricName: 'AWSManagedRulesSQLiRuleSetMetric', + }, + }, + ], + visibilityConfig: { + sampledRequestsEnabled: true, + cloudwatchMetricsEnabled: true, + metricName: `${serviceName}-waf`, + }, + }, + { parent: this, provider: usEast1Provider }, // WAF for CloudFront must be in us-east-1 + ); + + // Parse API endpoint to get domain + const apiDomain = this.apiEndpoint.apply(endpoint => { + const url = new URL(endpoint); + return url.hostname; + });
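For scale, the rate-based rule above works out to 75 requests / 300 seconds = 0.25 requests per second sustained per client IP, evaluated over WAF's rolling five-minute window. Because the rule is attached in front of CloudFront, forwardedIpConfig makes it count the CF-Connecting-IP header that CloudFront forwards, so the limit applies per end client rather than per CloudFront egress address; with fallbackBehavior: 'MATCH', requests missing that header are treated as matching the rule and are blocked.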
 + + const viewerCertificate: pulumi.Input<aws.types.input.cloudfront.DistributionViewerCertificate> = + certificateArn + ? { + acmCertificateArn: certificateArn, + sslSupportMethod: 'sni-only', + minimumProtocolVersion: 'TLSv1.2_2021', + } + : { + cloudfrontDefaultCertificate: true, + }; + + const distributionOpts: pulumi.CustomResourceOptions = { parent: this }; + if (certificateValidation) { + distributionOpts.dependsOn = [certificateValidation]; + } + + // Create CloudFront distribution + const distribution = new aws.cloudfront.Distribution( + `${serviceName}-cdn`, + { + enabled: true, + isIpv6Enabled: true, + comment: `${serviceName} API CDN`, + priceClass: 'PriceClass_100', // Use only North America and Europe edge locations + webAclId: webAcl.arn, + aliases: distributionAliases, + + origins: [{ + domainName: apiDomain, + originId: 'api', + customOriginConfig: { + httpPort: 80, + httpsPort: 443, + originProtocolPolicy: 'https-only', + originSslProtocols: ['TLSv1.2'], + }, + }], + + defaultCacheBehavior: { + targetOriginId: 'api', + viewerProtocolPolicy: 'redirect-to-https', + allowedMethods: ['GET', 'HEAD', 'OPTIONS'], + cachedMethods: ['GET', 'HEAD', 'OPTIONS'], + compress: true, + + // Cache policy for default behavior (current leaderboard) + defaultTtl: 60, // 1 minute default + minTtl: 0, // Allow immediate expiration + maxTtl: 300, // Max 5 minutes + + forwardedValues: { + queryString: true, // Forward query parameters for pagination + cookies: { + forward: 'none', + }, + headers: [], // API Gateway doesn't need special headers + }, + }, + + orderedCacheBehaviors: [ + { + // Historical epoch data - cache longer + pathPattern: '/v1/rewards/povw/leaderboard/epoch/*', + targetOriginId: 'api', + viewerProtocolPolicy: 'redirect-to-https', + allowedMethods: ['GET', 'HEAD', 'OPTIONS'], + cachedMethods: ['GET', 'HEAD', 'OPTIONS'], + compress: true, + + defaultTtl: 300, // 5 minutes default + minTtl: 60, // At least 1 minute + maxTtl: 3600, // Max 1 hour + + forwardedValues: { + queryString: true, + cookies: { + forward: 'none', + }, + headers: [], + }, + }, + ], + + restrictions: { + geoRestriction: { + restrictionType: 'none', + }, + }, + + viewerCertificate, + + customErrorResponses: [ + { + errorCode: 403, + responseCode: 403, + responsePagePath: '/error.html', + errorCachingMinTtl: 10, + }, + { + errorCode: 404, + responseCode: 404, + responsePagePath: '/error.html', + errorCachingMinTtl: 10, + }, + { + errorCode: 500, + errorCachingMinTtl: 0, // Don't cache errors + }, + { + errorCode: 502, + errorCachingMinTtl: 0, + }, + { + errorCode: 503, + errorCachingMinTtl: 0, + }, + { + errorCode: 504, + errorCachingMinTtl: 0, + }, + ], + }, + distributionOpts, // was `{ parent: this }`, which left the dependsOn on certificateValidation unused + ); + + this.cloudFrontDomain = distribution.domainName; + this.distributionId = distribution.id; + + const componentOutputs: Record<string, pulumi.Output<any>> = { + lambdaFunction: lambda.id, + apiEndpoint: this.apiEndpoint, + apiGatewayId: this.apiGatewayId, + logGroupName: this.logGroupName, + cloudFrontDomain: this.cloudFrontDomain, + distributionId: this.distributionId, + }; + + if (certificateArn) { + componentOutputs.certificateArn = certificateArn; + } + + if (certificateValidationRecords) { + componentOutputs.certificateValidationRecords = certificateValidationRecords; + } + + this.registerOutputs(componentOutputs); + } +}
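Once deployed, the API is consumed through the CloudFront domain. A hypothetical client call against the mainnet domain configured in Pulumi.l-prod-1.yaml above, using the epoch leaderboard path that the long-TTL cache behavior targets; the epoch number and response handling are illustrative, and the response schema is defined by the indexer-api crate rather than this diff:

```typescript
// Hypothetical client call; the path matches the
// /v1/rewards/povw/leaderboard/epoch/* cache behavior above.
async function fetchEpochLeaderboard(epoch: number): Promise<unknown> {
  const res = await fetch(
    `https://indexer.eth-mainnet.boundless.network/v1/rewards/povw/leaderboard/epoch/${epoch}`,
  );
  if (!res.ok) throw new Error(`leaderboard request failed: ${res.status}`);
  return res.json();
}
```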
 diff --git a/infra/indexer/components/indexer-infra.ts b/infra/indexer/components/indexer-infra.ts new file mode 100644 index 000000000..edb1d2f7f --- /dev/null +++ b/infra/indexer/components/indexer-infra.ts @@ -0,0 +1,212 @@ +import * as aws from '@pulumi/aws'; +import * as awsx from '@pulumi/awsx'; +import * as pulumi from '@pulumi/pulumi'; +import * as crypto from 'crypto'; + +export interface IndexerInfraArgs { + serviceName: string; + vpcId: pulumi.Output<string>; + privSubNetIds: pulumi.Output<string[]>; + rdsPassword: pulumi.Output<string>; +} + +export class IndexerShared extends pulumi.ComponentResource { + public readonly ecrRepository: awsx.ecr.Repository; + public readonly ecrAuthToken: pulumi.Output<aws.ecr.GetAuthorizationTokenResult>; + public readonly indexerSecurityGroup: aws.ec2.SecurityGroup; + public readonly rdsSecurityGroupId: pulumi.Output<string>; + public readonly dbUrlSecret: aws.secretsmanager.Secret; + public readonly dbUrlSecretVersion: aws.secretsmanager.SecretVersion; + public readonly secretHash: pulumi.Output<string>; + public readonly executionRole: aws.iam.Role; + public readonly taskRole: aws.iam.Role; + public readonly taskRolePolicyAttachment: aws.iam.RolePolicyAttachment; + public readonly cluster: aws.ecs.Cluster; + + constructor(name: string, args: IndexerInfraArgs, opts?: pulumi.ComponentResourceOptions) { + super('indexer:infra', name, opts); + + const { vpcId, privSubNetIds, rdsPassword } = args; + const serviceName = `${args.serviceName}-base`; + + this.ecrRepository = new awsx.ecr.Repository(`${serviceName}-repo`, { + lifecyclePolicy: { + rules: [ + { + description: 'Delete untagged images after N days', + tagStatus: 'untagged', + maximumAgeLimit: 7, + }, + ], + }, + forceDelete: true, + name: `${serviceName}-repo`, + }, { parent: this }); + + this.ecrAuthToken = aws.ecr.getAuthorizationTokenOutput({ + registryId: this.ecrRepository.repository.registryId, + }); + + this.indexerSecurityGroup = new aws.ec2.SecurityGroup(`${serviceName}-sg`, { + name: `${serviceName}-sg`, + vpcId, + egress: [ + { + fromPort: 0, + toPort: 0, + protocol: '-1', + cidrBlocks: ['0.0.0.0/0'], + ipv6CidrBlocks: ['::/0'], + }, + ], + }, { parent: this }); + + const rdsUser = 'indexer'; + const rdsPort = 5432; + const rdsDbName = 'indexerV1'; + + const dbSubnets = new aws.rds.SubnetGroup(`${serviceName}-dbsubnets`, { + subnetIds: privSubNetIds, + }, { parent: this }); + + const rdsSecurityGroup = new aws.ec2.SecurityGroup(`${serviceName}-rds`, { + name: `${serviceName}-rds`, + vpcId, + ingress: [ + { + fromPort: rdsPort, + toPort: rdsPort, + protocol: 'tcp', + securityGroups: [this.indexerSecurityGroup.id], + }, + ], + egress: [ + { + fromPort: 0, + toPort: 0, + protocol: '-1', + cidrBlocks: ['0.0.0.0/0'], + }, + ], + }, { parent: this }); + + const auroraCluster = new aws.rds.Cluster(`${serviceName}-aurora-v1`, { + engine: 'aurora-postgresql', + engineVersion: '17.4', + clusterIdentifier: `${serviceName}-aurora-v1`, + databaseName: rdsDbName, + masterUsername: rdsUser, + masterPassword: rdsPassword, + port: rdsPort, + backupRetentionPeriod: 7, + skipFinalSnapshot: true, + dbSubnetGroupName: dbSubnets.name, + vpcSecurityGroupIds: [rdsSecurityGroup.id], + storageEncrypted: true, + }, { parent: this /* protect: true */ }); + + new aws.rds.ClusterInstance(`${serviceName}-aurora-writer-1`, { + clusterIdentifier: auroraCluster.id, + engine: 'aurora-postgresql', + engineVersion: '17.4', + instanceClass: 'db.t4g.medium', + identifier: `${serviceName}-aurora-writer-v1`, + publiclyAccessible: false, + dbSubnetGroupName: dbSubnets.name, + }, { parent: this /* protect: true */ }); + + const dbUrlSecretValue = pulumi.interpolate`postgres://${rdsUser}:${rdsPassword}@${auroraCluster.endpoint}:${rdsPort}/${rdsDbName}?sslmode=require`; + this.dbUrlSecret = new aws.secretsmanager.Secret(`${serviceName}-db-url`, {}, { parent: this }); + this.dbUrlSecretVersion = new 
aws.secretsmanager.SecretVersion(`${serviceName}-db-url-ver`, { + secretId: this.dbUrlSecret.id, + secretString: dbUrlSecretValue, + }, { parent: this }); + + this.secretHash = pulumi + .all([dbUrlSecretValue, this.dbUrlSecretVersion.arn]) + .apply(([value, versionArn]) => { + const hash = crypto.createHash('sha1'); + hash.update(value); + hash.update(versionArn); + return hash.digest('hex'); + }); + + const dbSecretAccessPolicy = new aws.iam.Policy(`${serviceName}-db-url-policy`, { + policy: this.dbUrlSecret.arn.apply((secretArn): aws.iam.PolicyDocument => ({ + Version: '2012-10-17', + Statement: [ + { + Effect: 'Allow', + Action: ['secretsmanager:GetSecretValue', 'ssm:GetParameters'], + Resource: [secretArn], + }, + ], + })), + }, { parent: this }); + + this.executionRole = new aws.iam.Role(`${serviceName}-ecs-execution-role`, { + assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({ + Service: 'ecs-tasks.amazonaws.com', + }), + }, { parent: this }); + + this.ecrRepository.repository.arn.apply((repoArn) => { + new aws.iam.RolePolicy(`${serviceName}-ecs-execution-pol`, { + role: this.executionRole.id, + policy: { + Version: '2012-10-17', + Statement: [ + { + Effect: 'Allow', + // GetAuthorizationToken is an account-level AWS ECR action + // and does not support resource-level permissions. Must use '*'. + // See: https://docs.aws.amazon.com/AmazonECR/latest/userguide/security-iam-awsmanpol.html + Action: ['ecr:GetAuthorizationToken'], + Resource: '*', + }, + { + Effect: 'Allow', + Action: [ + 'ecr:BatchCheckLayerAvailability', + 'ecr:GetDownloadUrlForLayer', + 'ecr:BatchGetImage', + ], + Resource: repoArn, + }, + { + Effect: 'Allow', + Action: ['secretsmanager:GetSecretValue', 'ssm:GetParameters'], + Resource: [this.dbUrlSecret.arn], + }, + ], + }, + }, { parent: this }); + }); + + this.cluster = new aws.ecs.Cluster(`${serviceName}-cluster`, { + name: `${serviceName}-cluster`, + }, { parent: this, dependsOn: [this.executionRole, this.dbUrlSecretVersion] }); + + this.taskRole = new aws.iam.Role(`${serviceName}-task`, { + assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({ + Service: 'ecs-tasks.amazonaws.com', + }), + managedPolicyArns: [aws.iam.ManagedPolicy.AmazonECSTaskExecutionRolePolicy], + }, { parent: this }); + + this.taskRolePolicyAttachment = new aws.iam.RolePolicyAttachment(`${serviceName}-task-policy`, { + role: this.taskRole.id, + policyArn: dbSecretAccessPolicy.arn, + }, { parent: this }); + + this.rdsSecurityGroupId = rdsSecurityGroup.id; + + this.registerOutputs({ + repositoryUrl: this.ecrRepository.repository.repositoryUrl, + dbUrlSecretArn: this.dbUrlSecret.arn, + rdsSecurityGroupId: this.rdsSecurityGroupId, + taskRoleArn: this.taskRole.arn, + executionRoleArn: this.executionRole.arn, + }); + } +} diff --git a/infra/indexer/components/indexer.ts b/infra/indexer/components/indexer.ts deleted file mode 100644 index cde08d3fe..000000000 --- a/infra/indexer/components/indexer.ts +++ /dev/null @@ -1,453 +0,0 @@ -import * as fs from 'fs'; -import * as aws from '@pulumi/aws'; -import * as awsx from '@pulumi/awsx'; -import * as docker_build from '@pulumi/docker-build'; -import * as pulumi from '@pulumi/pulumi'; -import { getServiceNameV1 } from '../../util'; -import * as crypto from 'crypto'; -const SERVICE_NAME_BASE = 'indexer'; - -export class IndexerInstance extends pulumi.ComponentResource { - public readonly dbUrlSecret: aws.secretsmanager.Secret; - public readonly dbUrlSecretVersion: aws.secretsmanager.SecretVersion; - public readonly rdsSecurityGroupId: 
pulumi.Output<string>; - - constructor( - name: string, - args: { - chainId: string; - ciCacheSecret?: pulumi.Output<string>; - dockerDir: string; - dockerTag: string; - privSubNetIds: pulumi.Output<string[]>; - pubSubNetIds: pulumi.Output<string[]>; - githubTokenSecret?: pulumi.Output<string>; - boundlessAddress: string; - vpcId: pulumi.Output<string>; - rdsPassword: pulumi.Output<string>; - ethRpcUrl: pulumi.Output<string>; - boundlessAlertsTopicArns?: string[]; - startBlock: string; - serviceMetricsNamespace: string; - dockerRemoteBuilder?: string; - }, - opts?: pulumi.ComponentResourceOptions - ) { - super(name, name, opts); - - const { - ciCacheSecret, - dockerDir, - dockerTag, - privSubNetIds, - githubTokenSecret, - boundlessAddress, - vpcId, - rdsPassword, - ethRpcUrl, - startBlock, - serviceMetricsNamespace - } = args; - - const serviceName = name; - - const ecrRepository = new awsx.ecr.Repository(`${serviceName}-repo`, { - lifecyclePolicy: { - rules: [ - { - description: 'Delete untagged images after N days', - tagStatus: 'untagged', - maximumAgeLimit: 7, - }, - ], - }, - forceDelete: true, - name: `${serviceName}-repo`, - }); - - const authToken = aws.ecr.getAuthorizationTokenOutput({ - registryId: ecrRepository.repository.registryId, - }); - - // Optionally add in the gh token secret and sccache s3 creds to the build ctx - let buildSecrets = {}; - if (ciCacheSecret !== undefined) { - const cacheFileData = ciCacheSecret.apply((filePath: any) => fs.readFileSync(filePath, 'utf8')); - buildSecrets = { - ci_cache_creds: cacheFileData, - }; - } - if (githubTokenSecret !== undefined) { - buildSecrets = { - ...buildSecrets, - githubTokenSecret - } - } - - const image = new docker_build.Image(`${serviceName}-img`, { - tags: [pulumi.interpolate`${ecrRepository.repository.repositoryUrl}:${dockerTag}`], - context: { - location: dockerDir, - }, - platforms: ['linux/amd64'], - push: true, - dockerfile: { - location: `${dockerDir}/dockerfiles/indexer.dockerfile`, - }, - builder: args.dockerRemoteBuilder ? 
{ - name: args.dockerRemoteBuilder, - } : undefined, - buildArgs: { - S3_CACHE_PREFIX: 'private/boundless/rust-cache-docker-Linux-X64/sccache', - }, - secrets: buildSecrets, - cacheFrom: [ - { - registry: { - ref: pulumi.interpolate`${ecrRepository.repository.repositoryUrl}:cache`, - }, - }, - ], - cacheTo: [ - { - registry: { - mode: docker_build.CacheMode.Max, - imageManifest: true, - ociMediaTypes: true, - ref: pulumi.interpolate`${ecrRepository.repository.repositoryUrl}:cache`, - }, - }, - ], - registries: [ - { - address: ecrRepository.repository.repositoryUrl, - password: authToken.apply((authToken) => authToken.password), - username: authToken.apply((authToken) => authToken.userName), - }, - ], - }); - - const indexerSecGroup = new aws.ec2.SecurityGroup(`${serviceName}-sg`, { - name: `${serviceName}-sg`, - vpcId: vpcId, - egress: [ - { - fromPort: 0, - toPort: 0, - protocol: '-1', - cidrBlocks: ['0.0.0.0/0'], - ipv6CidrBlocks: ['::/0'], - }, - ], - }); - - const rdsUser = 'indexer'; - const rdsPort = 5432; - const rdsDbName = 'indexerV1'; - - const dbSubnets = new aws.rds.SubnetGroup(`${serviceName}-dbsubnets`, { - subnetIds: privSubNetIds, - }); - - const rdsSecurityGroup = new aws.ec2.SecurityGroup(`${serviceName}-rds`, { - name: `${serviceName}-rds`, - vpcId: vpcId, - ingress: [ - { - fromPort: rdsPort, - toPort: rdsPort, - protocol: 'tcp', - securityGroups: [indexerSecGroup.id], - }, - ], - egress: [ - { - fromPort: 0, - toPort: 0, - protocol: '-1', - cidrBlocks: ['0.0.0.0/0'], - }, - ], - }); - - const auroraCluster = new aws.rds.Cluster(`${serviceName}-aurora-v1`, { - engine: "aurora-postgresql", - engineVersion: "17.4", - clusterIdentifier: `${serviceName}-aurora-v1`, - databaseName: rdsDbName, - masterUsername: rdsUser, - masterPassword: rdsPassword, - port: rdsPort, - backupRetentionPeriod: 7, - skipFinalSnapshot: true, - dbSubnetGroupName: dbSubnets.name, - vpcSecurityGroupIds: [rdsSecurityGroup.id], - storageEncrypted: true, - }, { /** protect: true **/ }); // TODO: Re-enable protection once deployed and stable. - - const auroraWriter = new aws.rds.ClusterInstance(`${serviceName}-aurora-writer-1`, { - clusterIdentifier: auroraCluster.id, - engine: "aurora-postgresql", - engineVersion: "17.4", - instanceClass: "db.t4g.medium", - identifier: `${serviceName}-aurora-writer-v1`, - publiclyAccessible: false, - dbSubnetGroupName: dbSubnets.name, - }, - { /** protect: true **/ } // TODO: Re-enable protection once deployed and stable. 
- ); - - const dbUrlSecretValue = pulumi.interpolate`postgres://${rdsUser}:${rdsPassword}@${auroraCluster.endpoint}:${rdsPort}/${rdsDbName}?sslmode=require`; - const dbUrlSecret = new aws.secretsmanager.Secret(`${serviceName}-db-url`); - const dbUrlSecretVersion = new aws.secretsmanager.SecretVersion(`${serviceName}-db-url-ver`, { - secretId: dbUrlSecret.id, - secretString: dbUrlSecretValue, - }); - - const secretHash = pulumi - .all([dbUrlSecretValue, dbUrlSecretVersion.arn]) - .apply(([_dbUrlSecretValue, _secretVersionArn]: any[]) => { - const hash = crypto.createHash("sha1"); - hash.update(_dbUrlSecretValue); - hash.update(_secretVersionArn); - return hash.digest("hex"); - }); - - const dbSecretAccessPolicy = new aws.iam.Policy(`${serviceName}-db-url-policy`, { - policy: dbUrlSecret.arn.apply((secretArn): aws.iam.PolicyDocument => { - return { - Version: '2012-10-17', - Statement: [ - { - Effect: 'Allow', - Action: ['secretsmanager:GetSecretValue', 'ssm:GetParameters'], - Resource: [secretArn], - }, - ], - }; - }), - }); - - const executionRole = new aws.iam.Role(`${serviceName}-ecs-execution-role`, { - assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({ - Service: 'ecs-tasks.amazonaws.com', - }), - }); - - ecrRepository.repository.arn.apply(_arn => { - new aws.iam.RolePolicy(`${serviceName}-ecs-execution-pol`, { - role: executionRole.id, - policy: { - Version: '2012-10-17', - Statement: [ - { - Effect: 'Allow', - Action: [ - 'ecr:GetAuthorizationToken', - 'ecr:BatchCheckLayerAvailability', - 'ecr:GetDownloadUrlForLayer', - 'ecr:BatchGetImage', - ], - Resource: '*', - }, - { - Effect: 'Allow', - Action: [ - 'logs:CreateLogStream', - 'logs:PutLogEvents', - ], - Resource: '*', - }, - { - Effect: 'Allow', - Action: ['secretsmanager:GetSecretValue', 'ssm:GetParameters'], - Resource: [dbUrlSecret.arn], - }, - ], - }, - }); - }) - - const cluster = new aws.ecs.Cluster(`${serviceName}-cluster`, { - name: `${serviceName}-cluster`, - }); - - const serviceLogGroup = `${serviceName}-service`; - - const taskRole = new aws.iam.Role(`${serviceName}-task`, { - assumeRolePolicy: aws.iam.assumeRolePolicyForPrincipal({ - Service: 'ecs-tasks.amazonaws.com', - }), - managedPolicyArns: [aws.iam.ManagedPolicy.AmazonECSTaskExecutionRolePolicy], - }); - - const taskRolePolicy = new aws.iam.RolePolicyAttachment(`${serviceName}-task-policy`, { - role: taskRole.id, - policyArn: dbSecretAccessPolicy.arn, - }); - - const service = new awsx.ecs.FargateService(`${serviceName}-service`, { - name: `${serviceName}-service`, - cluster: cluster.arn, - networkConfiguration: { - securityGroups: [indexerSecGroup.id], - assignPublicIp: false, - subnets: privSubNetIds, - }, - desiredCount: 1, - deploymentCircuitBreaker: { - enable: false, - rollback: false, - }, - // forceDelete: true, - forceNewDeployment: true, - enableExecuteCommand: true, - taskDefinitionArgs: { - logGroup: { - args: { - name: serviceLogGroup, - retentionInDays: 0, - skipDestroy: true, - }, - }, - executionRole: { roleArn: executionRole.arn }, - taskRole: { roleArn: taskRole.arn }, - container: { - name: `${serviceName}`, - image: image.ref, - cpu: 1024, - memory: 512, - essential: true, - linuxParameters: { - initProcessEnabled: true, - }, - command: [ - '--rpc-url', - ethRpcUrl, - '--boundless-market-address', - boundlessAddress, - '--start-block', - startBlock, - '--log-json' - ], - secrets: [ - { - name: 'DATABASE_URL', - valueFrom: dbUrlSecret.arn, - }, - ], - environment: [ - { - name: 'RUST_LOG', - value: 'boundless_indexer=debug,info', - }, - 
{ - name: 'NO_COLOR', - value: '1', - }, - { - name: 'RUST_BACKTRACE', - value: '1', - }, - { - name: 'DB_POOL_SIZE', - value: '5', - }, - { - name: 'SECRET_HASH', - value: secretHash, - } - ] - }, - }, - }, { dependsOn: [taskRole, taskRolePolicy] }); - - const alarmActions = args.boundlessAlertsTopicArns ?? []; - - new aws.cloudwatch.LogMetricFilter(`${serviceName}-log-err-filter`, { - name: `${serviceName}-log-err-filter`, - logGroupName: serviceLogGroup, - metricTransformation: { - namespace: serviceMetricsNamespace, - name: `${serviceName}-log-err`, - value: '1', - defaultValue: '0', - }, - // Whitespace prevents us from alerting on SQL injection probes. - pattern: `"ERROR "`, - }, { dependsOn: [service] }); - - // Two errors within an hour triggers alarm. - new aws.cloudwatch.MetricAlarm(`${serviceName}-error-alarm`, { - name: `${serviceName}-log-err`, - metricQueries: [ - { - id: 'm1', - metric: { - namespace: serviceMetricsNamespace, - metricName: `${serviceName}-log-err`, - period: 60, - stat: 'Sum', - }, - returnData: true, - }, - ], - threshold: 1, - comparisonOperator: 'GreaterThanOrEqualToThreshold', - // Two errors within an hour triggers alarm. - evaluationPeriods: 60, - datapointsToAlarm: 2, - treatMissingData: 'notBreaching', - alarmDescription: 'Indexer log ERROR level', - actionsEnabled: true, - alarmActions, - }); - - new aws.cloudwatch.LogMetricFilter(`${serviceName}-fatal-filter`, { - name: `${serviceName}-log-fatal-filter`, - logGroupName: serviceLogGroup, - metricTransformation: { - namespace: serviceMetricsNamespace, - name: `${serviceName}-log-fatal`, - value: '1', - defaultValue: '0', - }, - pattern: 'FATAL', - }, { dependsOn: [service] }); - - new aws.cloudwatch.MetricAlarm(`${serviceName}-fatal-alarm`, { - name: `${serviceName}-log-fatal`, - metricQueries: [ - { - id: 'm1', - metric: { - namespace: serviceMetricsNamespace, - metricName: `${serviceName}-log-fatal`, - period: 60, - stat: 'Sum', - }, - returnData: true, - }, - ], - threshold: 1, - comparisonOperator: 'GreaterThanOrEqualToThreshold', - evaluationPeriods: 1, - datapointsToAlarm: 1, - treatMissingData: 'notBreaching', - alarmDescription: `Indexer ${name} FATAL (task exited)`, - actionsEnabled: true, - alarmActions, - }); - - this.dbUrlSecret = dbUrlSecret; - this.dbUrlSecretVersion = dbUrlSecretVersion; - this.rdsSecurityGroupId = rdsSecurityGroup.id; - - - this.registerOutputs({ - dbUrlSecret: this.dbUrlSecret, - dbUrlSecretVersion: this.dbUrlSecretVersion, - rdsSecurityGroupId: this.rdsSecurityGroupId - }); - } -} diff --git a/infra/indexer/components/market-indexer.ts b/infra/indexer/components/market-indexer.ts new file mode 100644 index 000000000..0b8bc9305 --- /dev/null +++ b/infra/indexer/components/market-indexer.ts @@ -0,0 +1,275 @@ +import * as fs from 'fs'; +import * as aws from '@pulumi/aws'; +import * as awsx from '@pulumi/awsx'; +import * as docker_build from '@pulumi/docker-build'; +import * as pulumi from '@pulumi/pulumi'; +import { IndexerShared } from './indexer-infra'; + +export interface MarketIndexerArgs { + infra: IndexerShared; + privSubNetIds: pulumi.Output<string[]>; + ciCacheSecret?: pulumi.Output<string>; + githubTokenSecret?: pulumi.Output<string>; + dockerDir: string; + dockerTag: string; + boundlessAddress: string; + ethRpcUrl: pulumi.Output<string>; + startBlock: string; + serviceMetricsNamespace: string; + boundlessAlertsTopicArns?: string[]; + dockerRemoteBuilder?: string; +}
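Both indexers hang off one IndexerShared instance, sharing the ECR repository, ECS cluster, security groups, and DB URL secret, and differing only in image, command-line flags, and alarms. A condensed sketch of that composition; the real values come from stack config in infra/indexer/index.ts below, and the declared inputs plus the address and start block here are placeholders:

```typescript
import * as pulumi from '@pulumi/pulumi';
import { IndexerShared } from './indexer-infra';
import { MarketIndexer } from './market-indexer';

// Placeholders for values index.ts resolves from the base stack and config.
declare const vpcId: pulumi.Output<string>;
declare const privSubNetIds: pulumi.Output<string[]>;
declare const rdsPassword: pulumi.Output<string>;
declare const ethRpcUrl: pulumi.Output<string>;

const infra = new IndexerShared('indexer', {
  serviceName: 'indexer',
  vpcId,
  privSubNetIds,
  rdsPassword,
});

new MarketIndexer('indexer', {
  infra,
  privSubNetIds,
  dockerDir: '../../',
  dockerTag: 'latest',
  boundlessAddress: '0x0000000000000000000000000000000000000000', // placeholder
  ethRpcUrl,
  startBlock: '0',                                                // placeholder
  serviceMetricsNamespace: 'Boundless/Services/indexer',
}, { parent: infra });
```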
 + +export class MarketIndexer extends pulumi.ComponentResource { + constructor(name: string, args: MarketIndexerArgs, opts?: pulumi.ComponentResourceOptions) { + super('indexer:market', name, opts); + + const { + infra, + privSubNetIds, + ciCacheSecret, + githubTokenSecret, + dockerDir, + dockerTag, + boundlessAddress, + ethRpcUrl, + startBlock, + serviceMetricsNamespace, + boundlessAlertsTopicArns, + dockerRemoteBuilder, + } = args; + + const serviceName = name; + + let buildSecrets: Record<string, pulumi.Input<string>> = {}; + if (ciCacheSecret !== undefined) { + const cacheFileData = ciCacheSecret.apply((filePath: any) => fs.readFileSync(filePath, 'utf8')); + buildSecrets = { + ci_cache_creds: cacheFileData, + }; + } + if (githubTokenSecret !== undefined) { + buildSecrets = { + ...buildSecrets, + githubTokenSecret, + }; + } + + const marketImage = new docker_build.Image(`${serviceName}-market-img`, { + tags: [pulumi.interpolate`${infra.ecrRepository.repository.repositoryUrl}:market-${dockerTag}`], + context: { + location: dockerDir, + }, + platforms: ['linux/amd64'], + push: true, + dockerfile: { + location: `${dockerDir}/dockerfiles/market-indexer.dockerfile`, + }, + builder: dockerRemoteBuilder + ? { + name: dockerRemoteBuilder, + } + : undefined, + buildArgs: { + S3_CACHE_PREFIX: 'private/boundless/rust-cache-docker-Linux-X64/sccache', + }, + secrets: buildSecrets, + cacheFrom: [ + { + registry: { + ref: pulumi.interpolate`${infra.ecrRepository.repository.repositoryUrl}:cache`, + }, + }, + ], + cacheTo: [ + { + registry: { + mode: docker_build.CacheMode.Max, + imageManifest: true, + ociMediaTypes: true, + ref: pulumi.interpolate`${infra.ecrRepository.repository.repositoryUrl}:cache`, + }, + }, + ], + registries: [ + { + address: infra.ecrRepository.repository.repositoryUrl, + password: infra.ecrAuthToken.apply((authToken) => authToken.password), + username: infra.ecrAuthToken.apply((authToken) => authToken.userName), + }, + ], + }, { parent: this }); + + const serviceLogGroup = `${serviceName}-service`; + + const marketService = new awsx.ecs.FargateService(`${serviceName}-market-service`, { + name: `${serviceName}-market-service`, + cluster: infra.cluster.arn, + networkConfiguration: { + securityGroups: [infra.indexerSecurityGroup.id], + assignPublicIp: false, + subnets: privSubNetIds, + }, + desiredCount: 1, + deploymentCircuitBreaker: { + enable: false, + rollback: false, + }, + forceNewDeployment: true, + enableExecuteCommand: true, + taskDefinitionArgs: { + logGroup: { + args: { + name: serviceLogGroup, + retentionInDays: 0, + skipDestroy: true, + }, + }, + executionRole: { roleArn: infra.executionRole.arn }, + taskRole: { roleArn: infra.taskRole.arn }, + container: { + name: `${serviceName}-market`, + image: marketImage.ref, + cpu: 1024, + memory: 512, + essential: true, + linuxParameters: { + initProcessEnabled: true, + }, + command: [ + '--rpc-url', + ethRpcUrl, + '--boundless-market-address', + boundlessAddress, + '--start-block', + startBlock, + '--log-json', + ], + secrets: [ + { + name: 'DATABASE_URL', + valueFrom: infra.dbUrlSecret.arn, + }, + ], + environment: [ + { + name: 'RUST_LOG', + value: 'boundless_indexer=debug,info', + }, + { + name: 'NO_COLOR', + value: '1', + }, + { + name: 'RUST_BACKTRACE', + value: '1', + }, + { + name: 'DB_POOL_SIZE', + value: '5', + }, + { + name: 'SECRET_HASH', + value: infra.secretHash, + }, + ], + }, + }, + }, { parent: this, dependsOn: [infra.taskRole, infra.taskRolePolicyAttachment] }); + + // Grant execution role permission to write to this service's specific log group + const region = aws.getRegionOutput().name; + const accountId = aws.getCallerIdentityOutput().accountId; + const 
logGroupArn = pulumi.interpolate`arn:aws:logs:${region}:${accountId}:log-group:${serviceLogGroup}:*`; + + new aws.iam.RolePolicy(`${serviceName}-market-logs-policy`, { + role: infra.executionRole.id, + policy: { + Version: '2012-10-17', + Statement: [ + { + Effect: 'Allow', + Action: ['logs:CreateLogStream', 'logs:PutLogEvents'], + Resource: logGroupArn, + }, + ], + }, + }, { parent: this }); + + const alarmActions = boundlessAlertsTopicArns ?? []; + + new aws.cloudwatch.LogMetricFilter(`${serviceName}-market-log-err-filter`, { + name: `${serviceName}-market-log-err-filter`, + logGroupName: serviceLogGroup, + metricTransformation: { + namespace: serviceMetricsNamespace, + name: `${serviceName}-market-log-err`, + value: '1', + defaultValue: '0', + }, + pattern: `"ERROR "`, + }, { parent: this, dependsOn: [marketService] }); + + new aws.cloudwatch.MetricAlarm(`${serviceName}-market-error-alarm`, { + name: `${serviceName}-market-log-err`, + metricQueries: [ + { + id: 'm1', + metric: { + namespace: serviceMetricsNamespace, + metricName: `${serviceName}-market-log-err`, + period: 60, + stat: 'Sum', + }, + returnData: true, + }, + ], + threshold: 1, + comparisonOperator: 'GreaterThanOrEqualToThreshold', + evaluationPeriods: 60, + datapointsToAlarm: 2, + treatMissingData: 'notBreaching', + alarmDescription: 'Market indexer log ERROR level', + actionsEnabled: true, + alarmActions, + }, { parent: this }); + + new aws.cloudwatch.LogMetricFilter(`${serviceName}-market-log-fatal-filter`, { + name: `${serviceName}-market-log-fatal-filter`, + logGroupName: serviceLogGroup, + metricTransformation: { + namespace: serviceMetricsNamespace, + name: `${serviceName}-market-log-fatal`, + value: '1', + defaultValue: '0', + }, + pattern: 'FATAL', + }, { parent: this, dependsOn: [marketService] }); + + new aws.cloudwatch.MetricAlarm(`${serviceName}-market-fatal-alarm`, { + name: `${serviceName}-market-log-fatal`, + metricQueries: [ + { + id: 'm1', + metric: { + namespace: serviceMetricsNamespace, + metricName: `${serviceName}-market-log-fatal`, + period: 60, + stat: 'Sum', + }, + returnData: true, + }, + ], + threshold: 1, + comparisonOperator: 'GreaterThanOrEqualToThreshold', + evaluationPeriods: 1, + datapointsToAlarm: 1, + treatMissingData: 'notBreaching', + alarmDescription: `Market indexer ${name} FATAL (task exited)`, + actionsEnabled: true, + alarmActions, + }, { parent: this }); + + this.registerOutputs({}); + } +} diff --git a/infra/indexer/components/monitor-lambda.ts b/infra/indexer/components/monitor-lambda.ts index 975cf3f70..31b52c32c 100644 --- a/infra/indexer/components/monitor-lambda.ts +++ b/infra/indexer/components/monitor-lambda.ts @@ -39,7 +39,7 @@ export class MonitorLambda extends pulumi.ComponentResource { ) { super(name, name, opts); - const serviceName = name; + const serviceName = `${name}-mon`; const chainId: ChainId = getChainId(args.chainId); const stage = pulumi.getStack().includes("staging") ? 
Stage.STAGING : Stage.PROD; const chainStageAlarmConfig = alarmConfig[chainId][stage]; diff --git a/infra/indexer/components/rewards-indexer.ts b/infra/indexer/components/rewards-indexer.ts new file mode 100644 index 000000000..0b4bda387 --- /dev/null +++ b/infra/indexer/components/rewards-indexer.ts @@ -0,0 +1,279 @@ +import * as fs from 'fs'; +import * as aws from '@pulumi/aws'; +import * as awsx from '@pulumi/awsx'; +import * as docker_build from '@pulumi/docker-build'; +import * as pulumi from '@pulumi/pulumi'; +import { IndexerShared } from './indexer-infra'; + +export interface RewardsIndexerArgs { + infra: IndexerShared; + privSubNetIds: pulumi.Output<string[]>; + ciCacheSecret?: pulumi.Output<string>; + githubTokenSecret?: pulumi.Output<string>; + dockerDir: string; + dockerTag: string; + ethRpcUrl: pulumi.Output<string>; + vezkcAddress: string; + zkcAddress: string; + povwAccountingAddress: string; + serviceMetricsNamespace: string; + boundlessAlertsTopicArns?: string[]; + dockerRemoteBuilder?: string; +} + +export class RewardsIndexer extends pulumi.ComponentResource { + constructor(name: string, args: RewardsIndexerArgs, opts?: pulumi.ComponentResourceOptions) { + super('indexer:rewards', name, opts); + + const { + infra, + privSubNetIds, + ciCacheSecret, + githubTokenSecret, + dockerDir, + dockerTag, + ethRpcUrl, + vezkcAddress, + zkcAddress, + povwAccountingAddress, + serviceMetricsNamespace, + boundlessAlertsTopicArns, + dockerRemoteBuilder, + } = args; + + const serviceName = name; + + let buildSecrets: Record<string, pulumi.Input<string>> = {}; + if (ciCacheSecret !== undefined) { + const cacheFileData = ciCacheSecret.apply((filePath: any) => fs.readFileSync(filePath, 'utf8')); + buildSecrets = { + ci_cache_creds: cacheFileData, + }; + } + if (githubTokenSecret !== undefined) { + buildSecrets = { + ...buildSecrets, + githubTokenSecret, + }; + } + + const rewardsImage = new docker_build.Image(`${serviceName}-rewards-img`, { + tags: [pulumi.interpolate`${infra.ecrRepository.repository.repositoryUrl}:rewards-${dockerTag}`], + context: { + location: dockerDir, + }, + platforms: ['linux/amd64'], + push: true, + dockerfile: { + location: `${dockerDir}/dockerfiles/rewards-indexer.dockerfile`, + }, + builder: dockerRemoteBuilder + ? 
{ + name: dockerRemoteBuilder, + } + : undefined, + buildArgs: { + S3_CACHE_PREFIX: `private/boundless/${serviceName}/rust-cache-docker-Linux-X64/sccache`, + }, + secrets: buildSecrets, + cacheFrom: [ + { + registry: { + ref: pulumi.interpolate`${infra.ecrRepository.repository.repositoryUrl}:rewards-cache`, + }, + }, + ], + cacheTo: [ + { + registry: { + mode: docker_build.CacheMode.Max, + imageManifest: true, + ociMediaTypes: true, + ref: pulumi.interpolate`${infra.ecrRepository.repository.repositoryUrl}:rewards-cache`, + }, + }, + ], + registries: [ + { + address: infra.ecrRepository.repository.repositoryUrl, + password: infra.ecrAuthToken.apply((authToken) => authToken.password), + username: infra.ecrAuthToken.apply((authToken) => authToken.userName), + }, + ], + }, { parent: this }); + + const rewardsServiceLogGroup = `${serviceName}-rewards-service-v2`; + + const rewardsService = new awsx.ecs.FargateService(`${serviceName}-rewards-service`, { + name: `${serviceName}-rewards-service`, + cluster: infra.cluster.arn, + networkConfiguration: { + securityGroups: [infra.indexerSecurityGroup.id], + assignPublicIp: false, + subnets: privSubNetIds, + }, + desiredCount: 1, + deploymentCircuitBreaker: { + enable: false, + rollback: false, + }, + forceNewDeployment: true, + enableExecuteCommand: true, + taskDefinitionArgs: { + logGroup: { + args: { + name: rewardsServiceLogGroup, + retentionInDays: 0, + skipDestroy: true, + }, + }, + executionRole: { roleArn: infra.executionRole.arn }, + taskRole: { roleArn: infra.taskRole.arn }, + container: { + name: `${serviceName}-rewards`, + image: rewardsImage.ref, + cpu: 512, + memory: 256, + essential: true, + linuxParameters: { + initProcessEnabled: true, + }, + command: [ + '--rpc-url', + ethRpcUrl, + '--vezkc-address', + vezkcAddress, + '--zkc-address', + zkcAddress, + '--povw-accounting-address', + povwAccountingAddress, + '--log-json', + ], + secrets: [ + { + name: 'DATABASE_URL', + valueFrom: infra.dbUrlSecret.arn, + }, + ], + environment: [ + { + name: 'RUST_LOG', + value: 'boundless_indexer=debug,info', + }, + { + name: 'NO_COLOR', + value: '1', + }, + { + name: 'RUST_BACKTRACE', + value: '1', + }, + { + name: 'DB_POOL_SIZE', + value: '3', + }, + { + name: 'SECRET_HASH', + value: infra.secretHash, + }, + ], + }, + }, + }, { parent: this, dependsOn: [infra.taskRole, infra.taskRolePolicyAttachment] }); + + // Grant execution role permission to write to this service's specific log group + const region = aws.getRegionOutput().name; + const accountId = aws.getCallerIdentityOutput().accountId; + const logGroupArn = pulumi.interpolate`arn:aws:logs:${region}:${accountId}:log-group:${rewardsServiceLogGroup}:*`; + + new aws.iam.RolePolicy(`${serviceName}-rewards-logs-policy`, { + role: infra.executionRole.id, + policy: { + Version: '2012-10-17', + Statement: [ + { + Effect: 'Allow', + Action: ['logs:CreateLogStream', 'logs:PutLogEvents'], + Resource: logGroupArn, + }, + ], + }, + }, { parent: this }); + + const alarmActions = boundlessAlertsTopicArns ?? 
[]; + + new aws.cloudwatch.LogMetricFilter(`${serviceName}-rewards-log-err-filter`, { + name: `${serviceName}-rewards-log-err-filter`, + logGroupName: rewardsServiceLogGroup, + metricTransformation: { + namespace: serviceMetricsNamespace, + name: `${serviceName}-rewards-log-err`, + value: '1', + defaultValue: '0', + }, + pattern: `"ERROR "`, + }, { parent: this, dependsOn: [rewardsService] }); + + new aws.cloudwatch.MetricAlarm(`${serviceName}-rewards-error-alarm`, { + name: `${serviceName}-rewards-log-err`, + metricQueries: [ + { + id: 'm1', + metric: { + namespace: serviceMetricsNamespace, + metricName: `${serviceName}-rewards-log-err`, + period: 60, + stat: 'Sum', + }, + returnData: true, + }, + ], + threshold: 1, + comparisonOperator: 'GreaterThanOrEqualToThreshold', + evaluationPeriods: 60, + datapointsToAlarm: 2, + treatMissingData: 'notBreaching', + alarmDescription: 'Rewards indexer log ERROR level', + actionsEnabled: true, + alarmActions, + }, { parent: this }); + + new aws.cloudwatch.LogMetricFilter(`${serviceName}-rewards-log-fatal-filter`, { + name: `${serviceName}-rewards-log-fatal-filter`, + logGroupName: rewardsServiceLogGroup, + metricTransformation: { + namespace: serviceMetricsNamespace, + name: `${serviceName}-rewards-log-fatal`, + value: '1', + defaultValue: '0', + }, + pattern: 'FATAL', + }, { parent: this, dependsOn: [rewardsService] }); + + new aws.cloudwatch.MetricAlarm(`${serviceName}-rewards-fatal-alarm`, { + name: `${serviceName}-rewards-log-fatal`, + metricQueries: [ + { + id: 'm1', + metric: { + namespace: serviceMetricsNamespace, + metricName: `${serviceName}-rewards-log-fatal`, + period: 60, + stat: 'Sum', + }, + returnData: true, + }, + ], + threshold: 1, + comparisonOperator: 'GreaterThanOrEqualToThreshold', + evaluationPeriods: 1, + datapointsToAlarm: 1, + treatMissingData: 'notBreaching', + alarmDescription: `Rewards indexer ${name} FATAL (task exited)`, + actionsEnabled: true, + alarmActions, + }, { parent: this }); + + this.registerOutputs({}); + } +} diff --git a/infra/indexer/index.ts b/infra/indexer/index.ts index 1431d04fb..e19eb960e 100644 --- a/infra/indexer/index.ts +++ b/infra/indexer/index.ts @@ -1,6 +1,9 @@ import * as pulumi from '@pulumi/pulumi'; -import { IndexerInstance } from './components/indexer'; +import { IndexerShared } from './components/indexer-infra'; +import { MarketIndexer } from './components/market-indexer'; +import { RewardsIndexer } from './components/rewards-indexer'; import { MonitorLambda } from './components/monitor-lambda'; +import { IndexerApi } from './components/indexer-api'; import { getEnvVar, getServiceNameV1 } from '../util'; require('dotenv').config(); @@ -13,27 +16,24 @@ export = () => { const ethRpcUrl = isDev ? pulumi.output(getEnvVar("ETH_RPC_URL")) : config.requireSecret('ETH_RPC_URL'); const rdsPassword = isDev ? pulumi.output(getEnvVar("RDS_PASSWORD")) : config.requireSecret('RDS_PASSWORD'); - const chainId = isDev ? 
getEnvVar("CHAIN_ID") : config.require('CHAIN_ID'); + const chainId = config.require('CHAIN_ID'); const githubTokenSecret = config.getSecret('GH_TOKEN_SECRET'); const dockerDir = config.require('DOCKER_DIR'); const dockerTag = config.require('DOCKER_TAG'); const ciCacheSecret = config.getSecret('CI_CACHE_SECRET'); - const boundlessAddress = config.require('BOUNDLESS_ADDRESS'); const baseStackName = config.require('BASE_STACK'); const boundlessAlertsTopicArn = config.get('SLACK_ALERTS_TOPIC_ARN'); const boundlessPagerdutyTopicArn = config.get('PAGERDUTY_ALERTS_TOPIC_ARN'); const alertsTopicArns = [boundlessAlertsTopicArn, boundlessPagerdutyTopicArn].filter(Boolean) as string[]; - const startBlock = config.require('START_BLOCK'); const rustLogLevel = config.get('RUST_LOG') || 'info'; const baseStack = new pulumi.StackReference(baseStackName); const vpcId = baseStack.getOutput('VPC_ID') as pulumi.Output<string>; const privSubNetIds = baseStack.getOutput('PRIVATE_SUBNET_IDS') as pulumi.Output<string[]>; - const pubSubNetIds = baseStack.getOutput('PUBLIC_SUBNET_IDS') as pulumi.Output<string[]>; - const indexerServiceName = getServiceNameV1(stackName, "indexer", chainId); const monitorServiceName = getServiceNameV1(stackName, "monitor", chainId); + const apiServiceName = getServiceNameV1(stackName, "api", chainId); // Metric namespace for service metrics, e.g. operation health of the monitor/indexer infra const serviceMetricsNamespace = `Boundless/Services/${indexerServiceName}`; @@ -41,35 +41,108 @@ // Metric namespace for market metrics, e.g. fulfillment success rate, order count, etc. const marketMetricsNamespace = `Boundless/Market/${marketName}`; - const indexer = new IndexerInstance(indexerServiceName, { - chainId, - ciCacheSecret, - dockerDir, - dockerTag, - privSubNetIds, - pubSubNetIds, - githubTokenSecret, - boundlessAddress, + const boundlessAddress = config.get('BOUNDLESS_ADDRESS'); + const startBlock = boundlessAddress ? 
config.require('START_BLOCK') : undefined; + + const vezkcAddress = config.get('VEZKC_ADDRESS'); + const zkcAddress = config.get('ZKC_ADDRESS'); + const povwAccountingAddress = config.get('POVW_ACCOUNTING_ADDRESS'); + const indexerApiDomain = config.get('INDEXER_API_DOMAIN'); + + const shouldDeployMarket = !!boundlessAddress && !!startBlock; + const shouldDeployRewards = !!vezkcAddress && !!zkcAddress && !!povwAccountingAddress; + + if (!shouldDeployMarket && !shouldDeployRewards) { + return {}; + } + + const infra = new IndexerShared(indexerServiceName, { + serviceName: indexerServiceName, vpcId, + privSubNetIds, rdsPassword, - ethRpcUrl, - boundlessAlertsTopicArns: alertsTopicArns, - startBlock, - serviceMetricsNamespace, - dockerRemoteBuilder, }); - new MonitorLambda(monitorServiceName, { - vpcId: vpcId, - privSubNetIds: privSubNetIds, - intervalMinutes: '1', - dbUrlSecret: indexer.dbUrlSecret, - rdsSgId: indexer.rdsSecurityGroupId, - chainId: chainId, - rustLogLevel: rustLogLevel, - boundlessAlertsTopicArns: alertsTopicArns, - serviceMetricsNamespace, - marketMetricsNamespace, - }, { parent: indexer, dependsOn: [indexer, indexer.dbUrlSecret, indexer.dbUrlSecretVersion] }); + let marketIndexer: MarketIndexer | undefined; + if (shouldDeployMarket && boundlessAddress && startBlock) { + marketIndexer = new MarketIndexer(indexerServiceName, { + infra, + privSubNetIds, + ciCacheSecret, + githubTokenSecret, + dockerDir, + dockerTag, + boundlessAddress, + ethRpcUrl, + startBlock, + serviceMetricsNamespace, + boundlessAlertsTopicArns: alertsTopicArns, + dockerRemoteBuilder, + }, { parent: infra }); + } + + let rewardsIndexer: RewardsIndexer | undefined; + if (shouldDeployRewards && vezkcAddress && zkcAddress && povwAccountingAddress) { + rewardsIndexer = new RewardsIndexer(indexerServiceName, { + infra, + privSubNetIds, + ciCacheSecret, + githubTokenSecret, + dockerDir, + dockerTag, + ethRpcUrl, + vezkcAddress, + zkcAddress, + povwAccountingAddress, + serviceMetricsNamespace, + boundlessAlertsTopicArns: alertsTopicArns, + dockerRemoteBuilder, + }, { parent: infra }); + } + + const sharedDependencies: pulumi.Resource[] = [infra.dbUrlSecret, infra.dbUrlSecretVersion]; + if (marketIndexer) { + sharedDependencies.push(marketIndexer); + } + if (rewardsIndexer) { + sharedDependencies.push(rewardsIndexer); + } + + if (shouldDeployMarket && marketIndexer) { + new MonitorLambda(monitorServiceName, { + vpcId: vpcId, + privSubNetIds: privSubNetIds, + intervalMinutes: '1', + dbUrlSecret: infra.dbUrlSecret, + rdsSgId: infra.rdsSecurityGroupId, + chainId: chainId, + rustLogLevel: rustLogLevel, + boundlessAlertsTopicArns: alertsTopicArns, + serviceMetricsNamespace, + marketMetricsNamespace, + }, { parent: infra, dependsOn: sharedDependencies }); + } + + + let api: IndexerApi | undefined; + if (shouldDeployRewards && rewardsIndexer) { + api = new IndexerApi(apiServiceName, { + vpcId: vpcId, + privSubNetIds: privSubNetIds, + dbUrlSecret: infra.dbUrlSecret, + rdsSgId: infra.rdsSecurityGroupId, + indexerSgId: infra.indexerSecurityGroup.id, + rustLogLevel: rustLogLevel, + domain: indexerApiDomain, + }, { parent: infra, dependsOn: sharedDependencies }); + } + + return api + ? 
{ + apiEndpoint: api.cloudFrontDomain, + apiGatewayEndpoint: api.apiEndpoint, + distributionId: api.distributionId, + } + : {}; }; diff --git a/infra/util/index.ts b/infra/util/index.ts index d9e831965..dffbdcfa4 100644 --- a/infra/util/index.ts +++ b/infra/util/index.ts @@ -1,10 +1,14 @@ export enum ChainId { + ETH_MAINNET = "1", ETH_SEPOLIA = "11155111", BASE = "8453", BASE_SEPOLIA = "84532", } export const getChainName = (chainId: string | ChainId): string => { + if (chainId === ChainId.ETH_MAINNET) { + return "Ethereum Mainnet"; + } if (chainId === ChainId.ETH_SEPOLIA) { return "Ethereum Sepolia"; } @@ -18,6 +22,9 @@ export const getChainName = (chainId: string | ChainId): string => { }; export const getChainId = (chainId: string): ChainId => { + if (chainId === "1") { + return ChainId.ETH_MAINNET; + } if (chainId === "11155111") { return ChainId.ETH_SEPOLIA; } diff --git a/justfile b/justfile index b0f28d339..afaadb121 100644 --- a/justfile +++ b/justfile @@ -31,7 +31,7 @@ test-cargo: test-cargo-root test-cargo-example test-cargo-db # Run Cargo tests for root workspace test-cargo-root: - RISC0_DEV_MODE=1 cargo test --workspace --exclude order-stream --exclude boundless-cli -- --include-ignored + RISC0_DEV_MODE=1 cargo test --workspace --exclude order-stream --exclude boundless-cli --exclude indexer-api --exclude boundless-indexer -- --include-ignored # Run Cargo tests for counter example test-cargo-example: @@ -40,12 +40,23 @@ test-cargo-example: RISC0_DEV_MODE=1 cargo test # Run database tests -test-cargo-db: +test-cargo-db: just test-db setup DATABASE_URL={{DATABASE_URL}} RISC0_DEV_MODE=1 cargo test -p order-stream -- --include-ignored DATABASE_URL={{DATABASE_URL}} RISC0_DEV_MODE=1 cargo test -p boundless-cli -- --include-ignored just test-db clean +# Run indexer integration tests (requires ETH_MAINNET_RPC_URL) +test-indexer: + #!/usr/bin/env bash + set -e + if [ -z "$ETH_MAINNET_RPC_URL" ]; then + echo "Error: ETH_MAINNET_RPC_URL environment variable must be set to a mainnet archive node that supports event querying" + exit 1 + fi + RISC0_DEV_MODE=1 cargo test -p boundless-indexer --all-targets -- --ignored --nocapture + RISC0_DEV_MODE=1 cargo test -p indexer-api --all-targets -- --ignored --nocapture + # Manage test postgres instance (setup or clean, defaults to setup) test-db action="setup": #!/usr/bin/env bash @@ -129,6 +140,7 @@ check-clippy: cargo clippy --workspace --all-targets check-docs: + cd documentation && bun install # Matches the docs-rs job in CI RUSTDOCFLAGS="--cfg docsrs -D warnings" RISC0_SKIP_BUILD=1 cargo +nightly-2025-05-09 doc -p boundless-market --all-features --no-deps diff --git a/license-check.py b/license-check.py index ea426ed9b..f87a88d2e 100755 --- a/license-check.py +++ b/license-check.py @@ -69,6 +69,8 @@ str(Path.cwd()) + "/crates/test-utils", str(Path.cwd()) + "/documentation", str(Path.cwd()) + "/examples", + str(Path.cwd()) + "/crates/lambdas", + str(Path.cwd()) + "/crates/rewards", ] def check_header(file, expected_year, lines_actual):