From 0f45ec591f484ef00020d4504112673adf0a293f Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Fri, 17 Oct 2025 10:10:27 +0200 Subject: [PATCH 01/35] feat: separate performance collection and distribution --- poetry.lock | 486 +++++++++++++----- pyproject.toml | 2 + src/main.py | 13 +- src/metrics/prometheus/basic.py | 8 + src/modules/csm/csm.py | 165 +++--- src/modules/performance_collector/__init__.py | 0 .../checkpoint.py | 105 ++-- src/modules/performance_collector/codec.py | 137 +++++ src/modules/performance_collector/db.py | 204 ++++++++ .../performance_collector/http_server.py | 116 +++++ .../performance_collector.py | 71 +++ src/modules/performance_collector/types.py | 15 + src/providers/performance/__init__.py | 0 src/providers/performance/client.py | 51 ++ src/types.py | 1 + src/variables.py | 20 + src/web3py/extensions/performance.py | 21 + src/web3py/types.py | 2 + tests/fork/conftest.py | 15 +- tests/fork/test_csm_oracle_cycle.py | 99 +--- tests/modules/csm/test_csm_module.py | 31 +- .../test_checkpoint.py | 30 +- .../performance_collector/test_codec.py | 236 +++++++++ .../test_processing_attestation.py | 2 +- 24 files changed, 1408 insertions(+), 422 deletions(-) create mode 100644 src/modules/performance_collector/__init__.py rename src/modules/{csm => performance_collector}/checkpoint.py (84%) create mode 100644 src/modules/performance_collector/codec.py create mode 100644 src/modules/performance_collector/db.py create mode 100644 src/modules/performance_collector/http_server.py create mode 100644 src/modules/performance_collector/performance_collector.py create mode 100644 src/modules/performance_collector/types.py create mode 100644 src/providers/performance/__init__.py create mode 100644 src/providers/performance/client.py create mode 100644 src/web3py/extensions/performance.py rename tests/modules/{csm => performance_collector}/test_checkpoint.py (94%) create mode 100644 tests/modules/performance_collector/test_codec.py rename tests/modules/{csm => performance_collector}/test_processing_attestation.py (98%) diff --git a/poetry.lock b/poetry.lock index c70444e90..cfc2eb48b 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,12 +1,12 @@ -# This file is automatically @generated by Poetry and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" version = "2.6.1" description = "Happy Eyeballs for asyncio" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "aiohappyeyeballs-2.6.1-py3-none-any.whl", hash = "sha256:f349ba8f4b75cb25c99c5c2d84e997e485204d2902a9597802b0371f09331fb8"}, {file = "aiohappyeyeballs-2.6.1.tar.gz", hash = "sha256:c3f9d0113123803ccadfdf3f0faa505bc78e6a72d1cc4806cbd719826e943558"}, @@ -16,9 +16,9 @@ files = [ name = "aiohttp" version = "3.12.9" description = "Async http client/server framework (asyncio)" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "aiohttp-3.12.9-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:abb01935bb606bbc080424799bfda358d38374c45a7cbbc89f9bb330deb1db26"}, {file = "aiohttp-3.12.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e2337516411cd15b7257736484dfd5101fa0e6b11ef2086b4bb6db9365373dcb"}, @@ -118,15 +118,15 @@ propcache = ">=0.2.0" yarl = ">=1.17.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns (>=3.3.0)", "brotlicffi"] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.3.0)", "brotlicffi ; platform_python_implementation != \"CPython\""] [[package]] name = "aiosignal" version = "1.3.2" description = "aiosignal: a list of registered asynchronous callbacks" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "aiosignal-1.3.2-py2.py3-none-any.whl", hash = "sha256:45cde58e409a301715980c2b01d0c28bdde3770d8290b5eb2173759d9acb31a5"}, {file = "aiosignal-1.3.2.tar.gz", hash = "sha256:a8c255c66fafb1e499c9351d0bf32ff2d8a0321595ebac3b93713656d2436f54"}, @@ -139,9 +139,9 @@ frozenlist = ">=1.1.0" name = "annotated-types" version = "0.7.0" description = "Reusable constraint types to use with typing.Annotated" -category = "main" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "annotated_types-0.7.0-py3-none-any.whl", hash = "sha256:1f02e8b43a8fbbc3f3e0d4f0f4bfc8131bcb4eebe8849b8e5c773f3a1c582a53"}, {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, @@ -151,9 +151,9 @@ files = [ name = "astroid" version = "3.3.10" description = "An abstract syntax tree for Python with inference support." 
-category = "dev" optional = false python-versions = ">=3.9.0" +groups = ["dev"] files = [ {file = "astroid-3.3.10-py3-none-any.whl", hash = "sha256:104fb9cb9b27ea95e847a94c003be03a9e039334a8ebca5ee27dafaf5c5711eb"}, {file = "astroid-3.3.10.tar.gz", hash = "sha256:c332157953060c6deb9caa57303ae0d20b0fbdb2e59b4a4f2a6ba49d0a7961ce"}, @@ -163,9 +163,9 @@ files = [ name = "asttokens" version = "3.0.0" description = "Annotate AST trees with source code positions" -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "asttokens-3.0.0-py3-none-any.whl", hash = "sha256:e3078351a059199dd5138cb1c706e6430c05eff2ff136af5eb4790f9d28932e2"}, {file = "asttokens-3.0.0.tar.gz", hash = "sha256:0dcd8baa8d62b0c1d118b399b2ddba3c4aff271d0d7a9e0d4c1681c79035bbc7"}, @@ -179,29 +179,29 @@ test = ["astroid (>=2,<4)", "pytest", "pytest-cov", "pytest-xdist"] name = "attrs" version = "25.3.0" description = "Classes Without Boilerplate" -category = "main" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "attrs-25.3.0-py3-none-any.whl", hash = "sha256:427318ce031701fea540783410126f03899a97ffc6f61596ad581ac2e40e3bc3"}, {file = "attrs-25.3.0.tar.gz", hash = "sha256:75d7cefc7fb576747b2c81b4442d4d4a1ce0900973527c011d1030fd3bf4af1b"}, ] [package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy 
(>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] [[package]] name = "base58" version = "2.1.1" description = "Base58 and Base58Check implementation." -category = "main" optional = false python-versions = ">=3.5" +groups = ["main", "dev"] files = [ {file = "base58-2.1.1-py3-none-any.whl", hash = "sha256:11a36f4d3ce51dfc1043f3218591ac4eb1ceb172919cebe05b52a5bcc8d245c2"}, {file = "base58-2.1.1.tar.gz", hash = "sha256:c5d0cb3f5b6e81e8e35da5754388ddcc6d0d14b6c6a132cb93d69ed580a7278c"}, @@ -214,9 +214,9 @@ tests = ["PyHamcrest (>=2.0.2)", "mypy", "pytest (>=4.6)", "pytest-benchmark", " name = "bitarray" version = "3.4.2" description = "efficient arrays of booleans -- C extension" -category = "main" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "bitarray-3.4.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:42b552f885c5629182928c79237b375a92bcf1bc1e725b1c8a5e8eab28ea300d"}, {file = "bitarray-3.4.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3e16d6184f349587b6a5045bcf073baf763a86273aab454485ba437d0bca82e8"}, @@ -358,9 +358,9 @@ files = [ name = "black" version = "24.10.0" description = "The uncompromising code formatter." -category = "dev" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "black-24.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e6668650ea4b685440857138e5fe40cde4d652633b1bdffc62933d0db4ed9812"}, {file = "black-24.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1c536fcf674217e87b8cc3657b81809d3c085d7bf3ef262ead700da345bfa6ea"}, @@ -399,13 +399,25 @@ d = ["aiohttp (>=3.10)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] +[[package]] +name = "blinker" +version = "1.9.0" +description = "Fast, simple object-to-object and broadcast signaling" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc"}, + {file = "blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf"}, +] + [[package]] name = "certifi" version = "2025.4.26" description = "Python package for providing Mozilla's CA Bundle." -category = "main" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "certifi-2025.4.26-py3-none-any.whl", hash = "sha256:30350364dfe371162649852c63336a15c70c6510c2ad5015b21c2345311805f3"}, {file = "certifi-2025.4.26.tar.gz", hash = "sha256:0a816057ea3cdefcef70270d2c515e4506bbc954f417fa5ade2021213bb8f0c6"}, @@ -415,9 +427,9 @@ files = [ name = "cfgv" version = "3.4.0" description = "Validate configuration and produce human readable error messages." -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "cfgv-3.4.0-py2.py3-none-any.whl", hash = "sha256:b7265b1f29fd3316bfcd2b330d63d024f2bfd8bcb8b0272f8e19a504856c48f9"}, {file = "cfgv-3.4.0.tar.gz", hash = "sha256:e52591d4c5f5dead8e0f673fb16db7949d2cfb3f7da4582893288f0ded8fe560"}, @@ -427,9 +439,9 @@ files = [ name = "charset-normalizer" version = "3.4.2" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
-category = "main" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "charset_normalizer-3.4.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:7c48ed483eb946e6c04ccbe02c6b4d1d48e51944b6db70f697e089c193404941"}, {file = "charset_normalizer-3.4.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b2d318c11350e10662026ad0eb71bb51c7812fc8590825304ae0bdd4ac283acd"}, @@ -529,9 +541,9 @@ files = [ name = "ckzg" version = "2.1.1" description = "Python bindings for C-KZG-4844" -category = "main" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "ckzg-2.1.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:4b9825a1458219e8b4b023012b8ef027ef1f47e903f9541cbca4615f80132730"}, {file = "ckzg-2.1.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e2a40a3ba65cca4b52825d26829e6f7eb464aa27a9e9efb6b8b2ce183442c741"}, @@ -639,9 +651,9 @@ files = [ name = "click" version = "8.2.1" description = "Composable command line interface toolkit" -category = "dev" optional = false python-versions = ">=3.10" +groups = ["main", "dev"] files = [ {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, @@ -654,9 +666,10 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} name = "colorama" version = "0.4.6" description = "Cross-platform colored terminal text." -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] +markers = "sys_platform == \"win32\" or platform_system == \"Windows\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, @@ -666,9 +679,9 @@ files = [ name = "conventional-pre-commit" version = "4.2.0" description = "A pre-commit hook that checks commit messages for Conventional Commits formatting." 
-category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "conventional_pre_commit-4.2.0-py3-none-any.whl", hash = "sha256:08acc2bca20bc998090653ca0de9b250e5a51b1f9b0b1bae00f424cc071616cb"}, {file = "conventional_pre_commit-4.2.0.tar.gz", hash = "sha256:6b5a2867338c58a1d14d300de68b56c17b7c8403bb122155f38639a423d21ff1"}, @@ -681,9 +694,9 @@ dev = ["black", "build", "coverage", "flake8", "pre-commit", "pytest", "setuptoo name = "coverage" version = "7.8.2" description = "Code coverage measurement for Python" -category = "dev" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "coverage-7.8.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:bd8ec21e1443fd7a447881332f7ce9d35b8fbd2849e761bb290b584535636b0a"}, {file = "coverage-7.8.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4c26c2396674816deaeae7ded0e2b42c26537280f8fe313335858ffff35019be"}, @@ -755,15 +768,16 @@ files = [ ] [package.extras] -toml = ["tomli"] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "cytoolz" version = "1.0.1" description = "Cython implementation of Toolz: High performance functional utilities" -category = "main" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] +markers = "implementation_name == \"cpython\"" files = [ {file = "cytoolz-1.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cec9af61f71fc3853eb5dca3d42eb07d1f48a4599fa502cbe92adde85f74b042"}, {file = "cytoolz-1.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:140bbd649dbda01e91add7642149a5987a7c3ccc251f2263de894b89f50b6608"}, @@ -877,9 +891,9 @@ cython = ["cython"] name = "decorator" version = "5.2.1" description = "Decorators for Humans" -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a"}, {file = "decorator-5.2.1.tar.gz", hash = "sha256:65f266143752f734b0a7cc83c46f4618af75b8c5911b00ccb61d0ac9b6da0360"}, @@ -889,9 +903,9 @@ files = [ name = "dill" version = "0.4.0" description = "serialize all of Python" -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "dill-0.4.0-py3-none-any.whl", hash = "sha256:44f54bf6412c2c8464c14e8243eb163690a9800dbe2c367330883b19c7561049"}, {file = "dill-0.4.0.tar.gz", hash = "sha256:0633f1d2df477324f53a895b02c901fb961bdbf65a17122586ea7019292cbcf0"}, @@ -905,9 +919,9 @@ profile = ["gprof2dot (>=2022.7.29)"] name = "distlib" version = "0.3.9" description = "Distribution utilities" -category = "dev" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, @@ -917,9 +931,9 @@ files = [ name = "eth-abi" version = "5.2.0" description = "eth_abi: Python utilities for working with Ethereum ABI definitions, especially encoding and decoding" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "eth_abi-5.2.0-py3-none-any.whl", hash = "sha256:17abe47560ad753f18054f5b3089fcb588f3e3a092136a416b6c1502cb7e8877"}, {file = "eth_abi-5.2.0.tar.gz", hash = "sha256:178703fa98c07d8eecd5ae569e7e8d159e493ebb6eeb534a8fe973fbc4e40ef0"}, @@ -940,9 +954,9 @@ tools = ["hypothesis (>=6.22.0,<6.108.7)"] name = 
"eth-account" version = "0.13.7" description = "eth-account: Sign Ethereum transactions and messages with local private keys" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "eth_account-0.13.7-py3-none-any.whl", hash = "sha256:39727de8c94d004ff61d10da7587509c04d2dc7eac71e04830135300bdfc6d24"}, {file = "eth_account-0.13.7.tar.gz", hash = "sha256:5853ecbcbb22e65411176f121f5f24b8afeeaf13492359d254b16d8b18c77a46"}, @@ -969,9 +983,9 @@ test = ["coverage", "hypothesis (>=6.22.0,<6.108.7)", "pytest (>=7.0.0)", "pytes name = "eth-hash" version = "0.7.1" description = "eth-hash: The Ethereum hashing function, keccak256, sometimes (erroneously) called sha3" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "eth_hash-0.7.1-py3-none-any.whl", hash = "sha256:0fb1add2adf99ef28883fd6228eb447ef519ea72933535ad1a0b28c6f65f868a"}, {file = "eth_hash-0.7.1.tar.gz", hash = "sha256:d2411a403a0b0a62e8247b4117932d900ffb4c8c64b15f92620547ca5ce46be5"}, @@ -984,16 +998,16 @@ pycryptodome = {version = ">=3.6.6,<4", optional = true, markers = "extra == \"p dev = ["build (>=0.9.0)", "bump_my_version (>=0.19.0)", "ipython", "mypy (==1.10.0)", "pre-commit (>=3.4.0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.4.0)", "sphinx (>=6.0.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx_rtd_theme (>=1.0.0)", "towncrier (>=24,<25)", "tox (>=4.0.0)", "twine", "wheel"] docs = ["sphinx (>=6.0.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx_rtd_theme (>=1.0.0)", "towncrier (>=24,<25)"] pycryptodome = ["pycryptodome (>=3.6.6,<4)"] -pysha3 = ["pysha3 (>=1.0.0,<2.0.0)", "safe-pysha3 (>=1.0.0)"] +pysha3 = ["pysha3 (>=1.0.0,<2.0.0) ; python_version < \"3.9\"", "safe-pysha3 (>=1.0.0) ; python_version >= \"3.9\""] test = ["pytest (>=7.0.0)", "pytest-xdist (>=2.4.0)"] [[package]] name = "eth-keyfile" version = "0.8.1" description = "eth-keyfile: A library for handling the encrypted keyfiles used to store ethereum private keys" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "eth_keyfile-0.8.1-py3-none-any.whl", hash = "sha256:65387378b82fe7e86d7cb9f8d98e6d639142661b2f6f490629da09fddbef6d64"}, {file = "eth_keyfile-0.8.1.tar.gz", hash = "sha256:9708bc31f386b52cca0969238ff35b1ac72bd7a7186f2a84b86110d3c973bec1"}, @@ -1013,9 +1027,9 @@ test = ["pytest (>=7.0.0)", "pytest-xdist (>=2.4.0)"] name = "eth-keys" version = "0.7.0" description = "eth-keys: Common API for Ethereum key operations" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "eth_keys-0.7.0-py3-none-any.whl", hash = "sha256:b0cdda8ffe8e5ba69c7c5ca33f153828edcace844f67aabd4542d7de38b159cf"}, {file = "eth_keys-0.7.0.tar.gz", hash = "sha256:79d24fd876201df67741de3e3fefb3f4dbcbb6ace66e47e6fe662851a4547814"}, @@ -1035,9 +1049,9 @@ test = ["asn1tools (>=0.146.2)", "eth-hash[pysha3]", "factory-boy (>=3.0.1)", "h name = "eth-rlp" version = "2.2.0" description = "eth-rlp: RLP definitions for common Ethereum objects in Python" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "eth_rlp-2.2.0-py3-none-any.whl", hash = "sha256:5692d595a741fbaef1203db6a2fedffbd2506d31455a6ad378c8449ee5985c47"}, {file = "eth_rlp-2.2.0.tar.gz", hash = "sha256:5e4b2eb1b8213e303d6a232dfe35ab8c29e2d3051b86e8d359def80cd21db83d"}, @@ -1057,9 +1071,9 @@ test = ["eth-hash[pycryptodome]", "pytest (>=7.0.0)", "pytest-xdist (>=2.4.0)"] name 
= "eth-tester" version = "0.12.1b1" description = "eth-tester: Tools for testing Ethereum applications." -category = "dev" optional = false python-versions = "<4,>=3.8" +groups = ["dev"] files = [ {file = "eth_tester-0.12.1b1-py3-none-any.whl", hash = "sha256:aa3f91960e5ce9fe74eac4a0dcb22ffada84b8e28dc11d0f0a69085a5879be60"}, {file = "eth_tester-0.12.1b1.tar.gz", hash = "sha256:7aeb3b5839fb1bc20e7f15c5e289ba95809fa41117a5ac194e8d270467982832"}, @@ -1074,19 +1088,19 @@ rlp = ">=3.0.0" semantic_version = ">=2.6.0" [package.extras] -dev = ["build (>=0.9.0)", "bump_my_version (>=0.19.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pysha3] (>=0.1.4,<1.0.0)", "ipython", "pre-commit (>=3.4.0)", "py-evm (>=0.10.0b0,<0.11.0b0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.0.0,<3)", "towncrier (>=24,<25)", "tox (>=4.0.0)", "twine", "wheel"] +dev = ["build (>=0.9.0)", "bump_my_version (>=0.19.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0) ; implementation_name == \"pypy\"", "eth-hash[pysha3] (>=0.1.4,<1.0.0) ; implementation_name == \"cpython\"", "ipython", "pre-commit (>=3.4.0)", "py-evm (>=0.10.0b0,<0.11.0b0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.0.0,<3)", "towncrier (>=24,<25)", "tox (>=4.0.0)", "twine", "wheel"] docs = ["towncrier (>=24,<25)"] -py-evm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pysha3] (>=0.1.4,<1.0.0)", "py-evm (>=0.10.0b0,<0.11.0b0)"] -pyevm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pysha3] (>=0.1.4,<1.0.0)", "py-evm (>=0.10.0b0,<0.11.0b0)"] +py-evm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0) ; implementation_name == \"pypy\"", "eth-hash[pysha3] (>=0.1.4,<1.0.0) ; implementation_name == \"cpython\"", "py-evm (>=0.10.0b0,<0.11.0b0)"] +pyevm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0) ; implementation_name == \"pypy\"", "eth-hash[pysha3] (>=0.1.4,<1.0.0) ; implementation_name == \"cpython\"", "py-evm (>=0.10.0b0,<0.11.0b0)"] test = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.0.0,<3)"] [[package]] name = "eth-typing" version = "5.2.1" description = "eth-typing: Common type annotations for ethereum python packages" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "eth_typing-5.2.1-py3-none-any.whl", hash = "sha256:b0c2812ff978267563b80e9d701f487dd926f1d376d674f3b535cfe28b665d3d"}, {file = "eth_typing-5.2.1.tar.gz", hash = "sha256:7557300dbf02a93c70fa44af352b5c4a58f94e997a0fd6797fb7d1c29d9538ee"}, @@ -1104,9 +1118,9 @@ test = ["pytest (>=7.0.0)", "pytest-xdist (>=2.4.0)"] name = "eth-utils" version = "5.3.0" description = "eth-utils: Common utility functions for python code that interacts with Ethereum" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "eth_utils-5.3.0-py3-none-any.whl", hash = "sha256:ac184883ab299d923428bbe25dae5e356979a3993e0ef695a864db0a20bc262d"}, {file = "eth_utils-5.3.0.tar.gz", hash = "sha256:1f096867ac6be895f456fa3acb26e9573ae66e753abad9208f316d24d6178156"}, @@ -1128,9 +1142,9 @@ test = ["hypothesis (>=4.43.0)", "mypy (==1.10.0)", "pytest (>=7.0.0)", "pytest- name = "execnet" version = "2.1.1" description = "execnet: rapid multi-Python deployment" -category = "main" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "execnet-2.1.1-py3-none-any.whl", hash = "sha256:26dee51f1b80cebd6d0ca8e74dd8745419761d3bef34163928cbebbdc4749fdc"}, {file = 
"execnet-2.1.1.tar.gz", hash = "sha256:5189b52c6121c24feae288166ab41b32549c7e2348652736540b9e6e7d4e72e3"}, @@ -1143,24 +1157,24 @@ testing = ["hatch", "pre-commit", "pytest", "tox"] name = "executing" version = "2.2.0" description = "Get the currently executing AST node of a frame, and other information" -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "executing-2.2.0-py2.py3-none-any.whl", hash = "sha256:11387150cad388d62750327a53d3339fad4888b39a6fe233c3afbb54ecffd3aa"}, {file = "executing-2.2.0.tar.gz", hash = "sha256:5d108c028108fe2551d1a7b2e8b713341e2cb4fc0aa7dcf966fa4327a5226755"}, ] [package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] [[package]] name = "faker" version = "37.3.0" description = "Faker is a Python package that generates fake data for you." -category = "dev" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "faker-37.3.0-py3-none-any.whl", hash = "sha256:48c94daa16a432f2d2bc803c7ff602509699fca228d13e97e379cd860a7e216e"}, {file = "faker-37.3.0.tar.gz", hash = "sha256:77b79e7a2228d57175133af0bbcdd26dc623df81db390ee52f5104d46c010f2f"}, @@ -1173,9 +1187,9 @@ tzdata = "*" name = "filelock" version = "3.18.0" description = "A platform independent file lock." -category = "dev" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "filelock-3.18.0-py3-none-any.whl", hash = "sha256:c401f4f8377c4464e6db25fff06205fd89bdd83b65eb0488ed1b160f780e21de"}, {file = "filelock-3.18.0.tar.gz", hash = "sha256:adbc88eabb99d2fec8c9c1b229b171f18afa655400173ddc653d5d01501fb9f2"}, @@ -1184,15 +1198,39 @@ files = [ [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] -typing = ["typing-extensions (>=4.12.2)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] + +[[package]] +name = "flask" +version = "3.1.2" +description = "A simple framework for building complex web applications." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c"}, + {file = "flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87"}, +] + +[package.dependencies] +blinker = ">=1.9.0" +click = ">=8.1.3" +itsdangerous = ">=2.2.0" +jinja2 = ">=3.1.2" +markupsafe = ">=2.1.1" +werkzeug = ">=3.1.0" + +[package.extras] +async = ["asgiref (>=3.2)"] +dotenv = ["python-dotenv"] [[package]] name = "frozenlist" version = "1.6.2" description = "A list-like structure which implements collections.abc.MutableSequence" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "frozenlist-1.6.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:92836b9903e52f787f4f4bfc6cf3b03cf19de4cbc09f5969e58806f876d8647f"}, {file = "frozenlist-1.6.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a3af419982432a13a997451e611ff7681a4fbf81dca04f70b08fc51106335ff0"}, @@ -1304,9 +1342,9 @@ files = [ name = "hexbytes" version = "1.3.1" description = "hexbytes: Python `bytes` subclass that decodes hex, with a readable console output" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "hexbytes-1.3.1-py3-none-any.whl", hash = "sha256:da01ff24a1a9a2b1881c4b85f0e9f9b0f51b526b379ffa23832ae7899d29c2c7"}, {file = "hexbytes-1.3.1.tar.gz", hash = "sha256:a657eebebdfe27254336f98d8af6e2236f3f83aed164b87466b6cf6c5f5a4765"}, @@ -1321,9 +1359,9 @@ test = ["eth_utils (>=2.0.0)", "hypothesis (>=3.44.24)", "pytest (>=7.0.0)", "py name = "hypothesis" version = "6.135.1" description = "A library for property-based testing" -category = "dev" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "hypothesis-6.135.1-py3-none-any.whl", hash = "sha256:14fab728bfe2409a3934e6e1ea6ae0a706d0bc78187137218a253aec7528b4c8"}, {file = "hypothesis-6.135.1.tar.gz", hash = "sha256:36eea411ef5dde5612301fcd9a293b6f2a3a5ab96488be2e23e7c5799cbd7b33"}, @@ -1334,7 +1372,7 @@ attrs = ">=22.2.0" sortedcontainers = ">=2.1.0,<3.0.0" [package.extras] -all = ["black (>=19.10b0)", "click (>=7.0)", "crosshair-tool (>=0.0.88)", "django (>=4.2)", "dpcontracts (>=0.4)", "hypothesis-crosshair (>=0.0.23)", "lark (>=0.10.1)", "libcst (>=0.3.16)", "numpy (>=1.19.3)", "pandas (>=1.1)", "pytest (>=4.6)", "python-dateutil (>=1.4)", "pytz (>=2014.1)", "redis (>=3.0.0)", "rich (>=9.0.0)", "tzdata (>=2025.2)", "watchdog (>=4.0.0)"] +all = ["black (>=19.10b0)", "click (>=7.0)", "crosshair-tool (>=0.0.88)", "django (>=4.2)", "dpcontracts (>=0.4)", "hypothesis-crosshair (>=0.0.23)", "lark (>=0.10.1)", "libcst (>=0.3.16)", "numpy (>=1.19.3)", "pandas (>=1.1)", "pytest (>=4.6)", "python-dateutil (>=1.4)", "pytz (>=2014.1)", "redis (>=3.0.0)", "rich (>=9.0.0)", "tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == \"emscripten\"", "watchdog (>=4.0.0)"] cli = ["black (>=19.10b0)", "click (>=7.0)", "rich (>=9.0.0)"] codemods = ["libcst (>=0.3.16)"] crosshair = ["crosshair-tool (>=0.0.88)", "hypothesis-crosshair (>=0.0.23)"] @@ -1349,15 +1387,15 @@ pytest = ["pytest (>=4.6)"] pytz = ["pytz (>=2014.1)"] redis = ["redis (>=3.0.0)"] watchdog = ["watchdog (>=4.0.0)"] -zoneinfo = ["tzdata (>=2025.2)"] +zoneinfo = ["tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == \"emscripten\""] [[package]] name = "identify" version = "2.6.12" description = "File identification 
library for Python" -category = "dev" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "identify-2.6.12-py2.py3-none-any.whl", hash = "sha256:ad9672d5a72e0d2ff7c5c8809b62dfa60458626352fb0eb7b55e69bdc45334a2"}, {file = "identify-2.6.12.tar.gz", hash = "sha256:d8de45749f1efb108badef65ee8386f0f7bb19a7f26185f74de6367bffbaf0e6"}, @@ -1370,9 +1408,9 @@ license = ["ukkonen"] name = "idna" version = "3.10" description = "Internationalized Domain Names in Applications (IDNA)" -category = "main" optional = false python-versions = ">=3.6" +groups = ["main", "dev"] files = [ {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, @@ -1385,9 +1423,9 @@ all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2 name = "iniconfig" version = "2.1.0" description = "brain-dead simple config-ini parsing" -category = "main" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "iniconfig-2.1.0-py3-none-any.whl", hash = "sha256:9deba5723312380e77435581c6bf4935c94cbfab9b1ed33ef8d238ea168eb760"}, {file = "iniconfig-2.1.0.tar.gz", hash = "sha256:3abbd2e30b36733fee78f9c7f7308f2d0050e88f0087fd25c2645f63c773e1c7"}, @@ -1397,9 +1435,9 @@ files = [ name = "ipfshttpclient" version = "0.7.0" description = "Python IPFS HTTP CLIENT library" -category = "dev" optional = false python-versions = ">=3.5.4,!=3.6.0,!=3.6.1,!=3.7.0,!=3.7.1" +groups = ["dev"] files = [ {file = "ipfshttpclient-0.7.0-py3-none-any.whl", hash = "sha256:161c348e91cdc194c06c8725446a51a2d758ff2cc5ea97ec98f49e2af2465405"}, {file = "ipfshttpclient-0.7.0.tar.gz", hash = "sha256:feb1033c14c3ac87ee81264176c5beefeaf386385804427160466117ccc43693"}, @@ -1413,9 +1451,9 @@ requests = ">=2.11" name = "ipython" version = "9.3.0" description = "IPython: Productive Interactive Computing" -category = "dev" optional = false python-versions = ">=3.11" +groups = ["dev"] files = [ {file = "ipython-9.3.0-py3-none-any.whl", hash = "sha256:1a0b6dd9221a1f5dddf725b57ac0cb6fddc7b5f470576231ae9162b9b3455a04"}, {file = "ipython-9.3.0.tar.gz", hash = "sha256:79eb896f9f23f50ad16c3bc205f686f6e030ad246cc309c6279a242b14afe9d8"}, @@ -1445,9 +1483,9 @@ test-extra = ["curio", "ipykernel", "ipython[test]", "jupyter_ai", "matplotlib ( name = "ipython-pygments-lexers" version = "1.1.1" description = "Defines a variety of Pygments lexers for highlighting IPython code." -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "ipython_pygments_lexers-1.1.1-py3-none-any.whl", hash = "sha256:a9462224a505ade19a605f71f8fa63c2048833ce50abc86768a0d81d876dc81c"}, {file = "ipython_pygments_lexers-1.1.1.tar.gz", hash = "sha256:09c0138009e56b6854f9535736f4171d855c8c08a563a0dcd8022f78355c7e81"}, @@ -1460,9 +1498,9 @@ pygments = "*" name = "isort" version = "6.0.1" description = "A Python utility / library to sort Python imports." 
-category = "dev" optional = false python-versions = ">=3.9.0" +groups = ["dev"] files = [ {file = "isort-6.0.1-py3-none-any.whl", hash = "sha256:2dc5d7f65c9678d94c88dfc29161a320eec67328bc97aad576874cb4be1e9615"}, {file = "isort-6.0.1.tar.gz", hash = "sha256:1cb5df28dfbc742e490c5e41bad6da41b805b0a8be7bc93cd0fb2a8a890ac450"}, @@ -1472,13 +1510,25 @@ files = [ colors = ["colorama"] plugins = ["setuptools"] +[[package]] +name = "itsdangerous" +version = "2.2.0" +description = "Safely pass data to untrusted environments and back." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"}, + {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"}, +] + [[package]] name = "jedi" version = "0.19.2" description = "An autocompletion tool for Python that can be used for text editors." -category = "dev" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "jedi-0.19.2-py2.py3-none-any.whl", hash = "sha256:a8ef22bde8490f57fe5c7681a3c83cb58874daf72b4784de3cce5b6ef6edb5b9"}, {file = "jedi-0.19.2.tar.gz", hash = "sha256:4770dc3de41bde3966b02eb84fbcf557fb33cce26ad23da12c742fb50ecb11f0"}, @@ -1492,13 +1542,31 @@ docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alab qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + [[package]] name = "json-stream" version = "2.3.3" description = "Streaming JSON encoder and decoder" -category = "main" optional = false python-versions = "<4,>=3.5" +groups = ["main"] files = [ {file = "json_stream-2.3.3-py3-none-any.whl", hash = "sha256:65f08c5031d7df145c6fe89e434b718c1574b2bb84b8a0eea974de90916a089d"}, {file = "json_stream-2.3.3.tar.gz", hash = "sha256:894444c68c331174926763e224fb34b7ed3f90749d1c165afd0f5930207534c4"}, @@ -1515,9 +1583,9 @@ requests = ["requests"] name = "json-stream-rs-tokenizer" version = "0.4.29" description = "A faster tokenizer for the json-stream Python library" -category = "main" optional = false python-versions = "<4,>=3.7" +groups = ["main"] files = [ {file = "json_stream_rs_tokenizer-0.4.29-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:52bd1f819196a6a6831c57bf1f232830ccd4cd11f7661d1385f450145eaadca6"}, {file = "json_stream_rs_tokenizer-0.4.29-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:bdd3eaa40af57cb4e3b4546940fe75e45da239eec907ce7bfa6cf4198cd29e55"}, @@ -1606,13 +1674,112 @@ files = [ benchmark = ["contexttimer (>=0.3,<0.4)", "json-stream-to-standard-types (>=0.1,<0.2)", "si-prefix (>=1.2,<2)", "tqdm (>=4.64,<5)", "typer (>=0.6,<0.7)"] test = ["json-stream (==2.3.2)", "json-stream-rs-tokenizer[benchmark]", "pytest (>7.1,<8)"] +[[package]] +name = "markupsafe" +version = "3.0.3" +description = "Safely add untrusted strings to HTML/XML markup." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"}, + {file = "markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"}, + {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"}, + {file = 
"markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"}, + {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"}, + {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = 
"sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"}, + {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"}, + {file = 
"markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"}, + {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"}, + {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, +] + [[package]] name = "matplotlib-inline" version = "0.1.7" description = "Inline Matplotlib backend for Jupyter" -category = "dev" optional = false 
python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "matplotlib_inline-0.1.7-py3-none-any.whl", hash = "sha256:df192d39a4ff8f21b1895d72e6a13f5fcc5099f00fa84384e0ea28c2cc0653ca"}, {file = "matplotlib_inline-0.1.7.tar.gz", hash = "sha256:8423b23ec666be3d16e16b60bdd8ac4e86e840ebd1dd11a30b9f117f2fa0ab90"}, @@ -1625,9 +1792,9 @@ traitlets = "*" name = "mccabe" version = "0.7.0" description = "McCabe checker, plugin for flake8" -category = "dev" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "mccabe-0.7.0-py2.py3-none-any.whl", hash = "sha256:6c2d30ab6be0e4a46919781807b4f0d834ebdd6c6e3dca0bda5a15f863427b6e"}, {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"}, @@ -1637,9 +1804,9 @@ files = [ name = "more-itertools" version = "10.7.0" description = "More routines for operating on iterables, beyond itertools" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "more_itertools-10.7.0-py3-none-any.whl", hash = "sha256:d43980384673cb07d2f7d2d918c616b30c659c089ee23953f601d6609c67510e"}, {file = "more_itertools-10.7.0.tar.gz", hash = "sha256:9fddd5403be01a94b204faadcff459ec3568cf110265d3c54323e1e866ad29d3"}, @@ -1649,9 +1816,9 @@ files = [ name = "morphys" version = "1.0" description = "Smart conversions between unicode and bytes types for common cases" -category = "main" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "morphys-1.0-py2.py3-none-any.whl", hash = "sha256:76d6dbaa4d65f597e59d332c81da786d83e4669387b9b2a750cfec74e7beec20"}, ] @@ -1660,9 +1827,9 @@ files = [ name = "multiaddr" version = "0.0.9" description = "Python implementation of jbenet's multiaddr" -category = "dev" optional = false python-versions = ">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*" +groups = ["dev"] files = [ {file = "multiaddr-0.0.9-py2.py3-none-any.whl", hash = "sha256:5c0f862cbcf19aada2a899f80ef896ddb2e85614e0c8f04dd287c06c69dac95b"}, {file = "multiaddr-0.0.9.tar.gz", hash = "sha256:30b2695189edc3d5b90f1c303abb8f02d963a3a4edf2e7178b975eb417ab0ecf"}, @@ -1678,9 +1845,9 @@ varint = "*" name = "multidict" version = "6.4.4" description = "multidict implementation" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "multidict-6.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8adee3ac041145ffe4488ea73fa0a622b464cc25340d98be76924d0cda8545ff"}, {file = "multidict-6.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:b61e98c3e2a861035aaccd207da585bdcacef65fe01d7a0d07478efac005e028"}, @@ -1792,9 +1959,9 @@ files = [ name = "mypy" version = "1.16.0" description = "Optional static typing for Python" -category = "dev" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "mypy-1.16.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7909541fef256527e5ee9c0a7e2aeed78b6cda72ba44298d1334fe7881b05c5c"}, {file = "mypy-1.16.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e71d6f0090c2256c713ed3d52711d01859c82608b5d68d4fa01a3fe30df95571"}, @@ -1846,9 +2013,9 @@ reports = ["lxml"] name = "mypy-extensions" version = "1.1.0" description = "Type system extensions for programs checked with the mypy type checker." 
-category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "mypy_extensions-1.1.0-py3-none-any.whl", hash = "sha256:1be4cccdb0f2482337c4743e60421de3a356cd97508abadd57d47403e94f5505"}, {file = "mypy_extensions-1.1.0.tar.gz", hash = "sha256:52e68efc3284861e772bbcd66823fde5ae21fd2fdb51c62a211403730b916558"}, @@ -1858,9 +2025,9 @@ files = [ name = "netaddr" version = "1.3.0" description = "A network address manipulation library for Python" -category = "dev" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "netaddr-1.3.0-py3-none-any.whl", hash = "sha256:c2c6a8ebe5554ce33b7d5b3a306b71bbb373e000bbbf2350dd5213cc56e3dbbe"}, {file = "netaddr-1.3.0.tar.gz", hash = "sha256:5c3c3d9895b551b763779ba7db7a03487dc1f8e3b385af819af341ae9ef6e48a"}, @@ -1873,9 +2040,9 @@ nicer-shell = ["ipython"] name = "nodeenv" version = "1.9.1" description = "Node.js virtual environment builder" -category = "dev" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["dev"] files = [ {file = "nodeenv-1.9.1-py2.py3-none-any.whl", hash = "sha256:ba11c9782d29c27c70ffbdda2d7415098754709be8a7056d79a737cd901155c9"}, {file = "nodeenv-1.9.1.tar.gz", hash = "sha256:6ec12890a2dab7946721edbfbcd91f3319c6ccc9aec47be7c7e6b7011ee6645f"}, @@ -1885,9 +2052,9 @@ files = [ name = "oz-merkle-tree" version = "0.1.0" description = "A Python library to generate merkle trees and merkle proofs." -category = "main" optional = false python-versions = "^3.11" +groups = ["main"] files = [] develop = false @@ -1905,9 +2072,9 @@ resolved_reference = "f4ad6e006b8daf05ce2ce255e123eb9f923d8ef8" name = "packaging" version = "25.0" description = "Core utilities for Python packages" -category = "main" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484"}, {file = "packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f"}, @@ -1917,9 +2084,9 @@ files = [ name = "parsimonious" version = "0.10.0" description = "(Soon to be) the fastest pure-Python PEG parser I could muster" -category = "main" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "parsimonious-0.10.0-py3-none-any.whl", hash = "sha256:982ab435fabe86519b57f6b35610aa4e4e977e9f02a14353edf4bbc75369fc0f"}, {file = "parsimonious-0.10.0.tar.gz", hash = "sha256:8281600da180ec8ae35427a4ab4f7b82bfec1e3d1e52f80cb60ea82b9512501c"}, @@ -1932,9 +2099,9 @@ regex = ">=2022.3.15" name = "parso" version = "0.8.4" description = "A Python Parser" -category = "dev" optional = false python-versions = ">=3.6" +groups = ["dev"] files = [ {file = "parso-0.8.4-py2.py3-none-any.whl", hash = "sha256:a418670a20291dacd2dddc80c377c5c3791378ee1e8d12bffc35420643d43f18"}, {file = "parso-0.8.4.tar.gz", hash = "sha256:eb3a7b58240fb99099a345571deecc0f9540ea5f4dd2fe14c2a99d6b281ab92d"}, @@ -1948,9 +2115,9 @@ testing = ["docopt", "pytest"] name = "pathspec" version = "0.12.1" description = "Utility library for gitignore style pattern matching of file paths." 
-category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pathspec-0.12.1-py3-none-any.whl", hash = "sha256:a0d503e138a4c123b27490a4f7beda6a01c6f288df0e4a8b79c7eb0dc7b4cc08"}, {file = "pathspec-0.12.1.tar.gz", hash = "sha256:a482d51503a1ab33b1c67a6c3813a26953dbdc71c31dacaef9a838c4e29f5712"}, @@ -1960,9 +2127,10 @@ files = [ name = "pexpect" version = "4.9.0" description = "Pexpect allows easy control of interactive console applications." -category = "dev" optional = false python-versions = "*" +groups = ["dev"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" files = [ {file = "pexpect-4.9.0-py2.py3-none-any.whl", hash = "sha256:7236d1e080e4936be2dc3e326cec0af72acf9212a7e1d060210e70a47e253523"}, {file = "pexpect-4.9.0.tar.gz", hash = "sha256:ee7d41123f3c9911050ea2c2dac107568dc43b2d3b0c7557a33212c398ead30f"}, @@ -1975,9 +2143,9 @@ ptyprocess = ">=0.5" name = "platformdirs" version = "4.3.8" description = "A small Python package for determining appropriate platform-specific dirs, e.g. a `user data dir`." -category = "dev" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "platformdirs-4.3.8-py3-none-any.whl", hash = "sha256:ff7059bb7eb1179e2685604f4aaf157cfd9535242bd23742eadc3c13542139b4"}, {file = "platformdirs-4.3.8.tar.gz", hash = "sha256:3d512d96e16bcb959a814c9f348431070822a6496326a4be0911c40b5a74c2bc"}, @@ -1992,9 +2160,9 @@ type = ["mypy (>=1.14.1)"] name = "pluggy" version = "1.6.0" description = "plugin and hook calling mechanisms for python" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "pluggy-1.6.0-py3-none-any.whl", hash = "sha256:e920276dd6813095e9377c0bc5566d94c932c33b27a3e3945d8389c374dd4746"}, {file = "pluggy-1.6.0.tar.gz", hash = "sha256:7dcc130b76258d33b90f61b658791dede3486c3e6bfb003ee5c9bfb396dd22f3"}, @@ -2008,9 +2176,9 @@ testing = ["coverage", "pytest", "pytest-benchmark"] name = "polyfactory" version = "2.21.0" description = "Mock data generation factories" -category = "dev" optional = false python-versions = "<4.0,>=3.8" +groups = ["dev"] files = [ {file = "polyfactory-2.21.0-py3-none-any.whl", hash = "sha256:9483b764756c8622313d99f375889b1c0d92f09affb05742d7bcfa2b5198d8c5"}, {file = "polyfactory-2.21.0.tar.gz", hash = "sha256:a6d8dba91b2515d744cc014b5be48835633f7ccb72519a68f8801759e5b1737a"}, @@ -2033,9 +2201,9 @@ sqlalchemy = ["sqlalchemy (>=1.4.29)"] name = "pre-commit" version = "3.8.0" description = "A framework for managing and maintaining multi-language pre-commit hooks." -category = "dev" optional = false python-versions = ">=3.9" +groups = ["dev"] files = [ {file = "pre_commit-3.8.0-py2.py3-none-any.whl", hash = "sha256:9a90a53bf82fdd8778d58085faf8d83df56e40dfe18f45b19446e26bf1b3a63f"}, {file = "pre_commit-3.8.0.tar.gz", hash = "sha256:8bb6494d4a20423842e198980c9ecf9f96607a07ea29549e180eef9ae80fe7af"}, @@ -2052,9 +2220,9 @@ virtualenv = ">=20.10.0" name = "prometheus-client" version = "0.21.1" description = "Python client for the Prometheus monitoring system." 
-category = "main" optional = false python-versions = ">=3.8" +groups = ["main"] files = [ {file = "prometheus_client-0.21.1-py3-none-any.whl", hash = "sha256:594b45c410d6f4f8888940fe80b5cc2521b305a1fafe1c58609ef715a001f301"}, {file = "prometheus_client-0.21.1.tar.gz", hash = "sha256:252505a722ac04b0456be05c05f75f45d760c2911ffc45f2a06bcaed9f3ae3fb"}, @@ -2067,9 +2235,9 @@ twisted = ["twisted"] name = "prompt-toolkit" version = "3.0.51" description = "Library for building powerful interactive command lines in Python" -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "prompt_toolkit-3.0.51-py3-none-any.whl", hash = "sha256:52742911fde84e2d423e2f9a4cf1de7d7ac4e51958f648d9540e0fb8db077b07"}, {file = "prompt_toolkit-3.0.51.tar.gz", hash = "sha256:931a162e3b27fc90c86f1b48bb1fb2c528c2761475e57c9c06de13311c7b54ed"}, @@ -2082,9 +2250,9 @@ wcwidth = "*" name = "propcache" version = "0.3.1" description = "Accelerated property cache" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "propcache-0.3.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f27785888d2fdd918bc36de8b8739f2d6c791399552333721b58193f68ea3e98"}, {file = "propcache-0.3.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4e89cde74154c7b5957f87a355bb9c8ec929c167b59c83d90654ea36aeb6180"}, @@ -2190,9 +2358,10 @@ files = [ name = "ptyprocess" version = "0.7.0" description = "Run a subprocess in a pseudo terminal" -category = "dev" optional = false python-versions = "*" +groups = ["dev"] +markers = "sys_platform != \"win32\" and sys_platform != \"emscripten\"" files = [ {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, {file = "ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, @@ -2202,9 +2371,9 @@ files = [ name = "pure-eval" version = "0.2.3" description = "Safely evaluate AST nodes without side effects" -category = "dev" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "pure_eval-0.2.3-py3-none-any.whl", hash = "sha256:1db8e35b67b3d218d818ae653e27f06c3aa420901fa7b081ca98cbedc874e0d0"}, {file = "pure_eval-0.2.3.tar.gz", hash = "sha256:5f4e983f40564c576c7c8635ae88db5956bb2229d7e9237d03b3c0b0190eaf42"}, @@ -2217,9 +2386,9 @@ tests = ["pytest"] name = "py-multibase" version = "1.0.3" description = "Multibase implementation for Python" -category = "main" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "py-multibase-1.0.3.tar.gz", hash = "sha256:d28a20efcbb61eec28f55827a0bf329c7cea80fffd933aecaea6ae8431267fe4"}, {file = "py_multibase-1.0.3-py2.py3-none-any.whl", hash = "sha256:2677c1fafcc0ae15ddb9c7f444c5becc2530b3889124fd4fa2959ddfefb8c15b"}, @@ -2234,9 +2403,9 @@ six = ">=1.10.0,<2.0" name = "py-multicodec" version = "0.2.1" description = "Multicodec implementation in Python" -category = "main" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "py-multicodec-0.2.1.tar.gz", hash = "sha256:83021ffe8c0e272d19b5b86bc5b39efa67c8e9f4735ce6cafdbc1ace767ec647"}, {file = "py_multicodec-0.2.1-py2.py3-none-any.whl", hash = "sha256:55b6bb53088a63e56c434cb11b29795e8805652bac43d50a8f2a9bcf5ca84e1f"}, @@ -2251,9 +2420,9 @@ varint = ">=1.0.2,<2.0.0" name = "py-multiformats-cid" version = "0.4.4" description = "Self-describing content-addressed identifiers for distributed systems" -category = "main" optional = false python-versions = 
">=3.6" +groups = ["main"] files = [ {file = "py_multiformats_cid-0.4.4-py2.py3-none-any.whl", hash = "sha256:83f6c3109ed56664799c7367703a68d1724b1e9bd79b45c8935c77660560d1dd"}, ] @@ -2269,9 +2438,9 @@ py-multihash = "*" name = "py-multihash" version = "2.0.1" description = "Multihash implementation in Python" -category = "main" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "py-multihash-2.0.1.tar.gz", hash = "sha256:b97511a87b7091f8b37a3d74ccb4a898e133529e7c5e431f9a27f78248a75e60"}, {file = "py_multihash-2.0.1-py2.py3-none-any.whl", hash = "sha256:c388728b3456d35cd6668b42a3d9ba32dd640493c8e93b992979668dcf2c0676"}, @@ -2287,9 +2456,9 @@ varint = ">=1.0.2,<2.0" name = "pycryptodome" version = "3.23.0" description = "Cryptographic library for Python" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +groups = ["main", "dev"] files = [ {file = "pycryptodome-3.23.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a176b79c49af27d7f6c12e4b178b0824626f40a7b9fed08f712291b6d54bf566"}, {file = "pycryptodome-3.23.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:573a0b3017e06f2cffd27d92ef22e46aa3be87a2d317a5abf7cc0e84e321bd75"}, @@ -2338,9 +2507,9 @@ files = [ name = "pydantic" version = "2.11.5" description = "Data validation using Python type hints" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "pydantic-2.11.5-py3-none-any.whl", hash = "sha256:f9c26ba06f9747749ca1e5c94d6a85cb84254577553c8785576fd38fa64dc0f7"}, {file = "pydantic-2.11.5.tar.gz", hash = "sha256:7f853db3d0ce78ce8bbb148c401c2cdd6431b3473c0cdff2755c7690952a7b7a"}, @@ -2354,15 +2523,15 @@ typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" version = "2.33.2" description = "Core functionality for Pydantic validation and serialization" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "pydantic_core-2.33.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:2b3d326aaef0c0399d9afffeb6367d5e26ddc24d351dbc9c636840ac355dc5d8"}, {file = "pydantic_core-2.33.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:0e5b2671f05ba48b94cb90ce55d8bdcaaedb8ba00cc5359f6810fc918713983d"}, @@ -2472,9 +2641,9 @@ typing-extensions = ">=4.6.0,<4.7.0 || >4.7.0" name = "pygments" version = "2.19.1" description = "Pygments is a syntax highlighting package written in Python." 
-category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "pygments-2.19.1-py3-none-any.whl", hash = "sha256:9ea1544ad55cecf4b8242fab6dd35a93bbce657034b0611ee383099054ab6d8c"}, {file = "pygments-2.19.1.tar.gz", hash = "sha256:61c16d2a8576dc0649d9f39e089b5f02bcd27fba10d8fb4dcc28173f7a45151f"}, @@ -2487,9 +2656,9 @@ windows-terminal = ["colorama (>=0.4.6)"] name = "pylint" version = "3.3.7" description = "python code static checker" -category = "dev" optional = false python-versions = ">=3.9.0" +groups = ["dev"] files = [ {file = "pylint-3.3.7-py3-none-any.whl", hash = "sha256:43860aafefce92fca4cf6b61fe199cdc5ae54ea28f9bf4cd49de267b5195803d"}, {file = "pylint-3.3.7.tar.gz", hash = "sha256:2b11de8bde49f9c5059452e0c310c079c746a0a8eeaa789e5aa966ecc23e4559"}, @@ -2498,10 +2667,7 @@ files = [ [package.dependencies] astroid = ">=3.3.8,<=3.4.0.dev0" colorama = {version = ">=0.4.5", markers = "sys_platform == \"win32\""} -dill = [ - {version = ">=0.3.6", markers = "python_version >= \"3.11\""}, - {version = ">=0.3.7", markers = "python_version >= \"3.12\""}, -] +dill = {version = ">=0.3.7", markers = "python_version >= \"3.12\""} isort = ">=4.2.5,<5.13 || >5.13,<7" mccabe = ">=0.6,<0.8" platformdirs = ">=2.2" @@ -2515,9 +2681,9 @@ testutils = ["gitpython (>3)"] name = "pytest" version = "7.4.4" description = "pytest: simple powerful testing with Python" -category = "main" optional = false python-versions = ">=3.7" +groups = ["main", "dev"] files = [ {file = "pytest-7.4.4-py3-none-any.whl", hash = "sha256:b090cdf5ed60bf4c45261be03239c2c1c22df034fbffe691abe93cd80cea01d8"}, {file = "pytest-7.4.4.tar.gz", hash = "sha256:2cf0005922c6ace4a3e2ec8b4080eb0d9753fdc93107415332f50ce9e7994280"}, @@ -2536,9 +2702,9 @@ testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "no name = "pytest-cov" version = "4.1.0" description = "Pytest plugin for measuring coverage." -category = "dev" optional = false python-versions = ">=3.7" +groups = ["dev"] files = [ {file = "pytest-cov-4.1.0.tar.gz", hash = "sha256:3904b13dfbfec47f003b8e77fd5b589cd11904a21ddf1ab38a64f204d6a10ef6"}, {file = "pytest_cov-4.1.0-py3-none-any.whl", hash = "sha256:6ba70b9e97e69fcc3fb45bfeab2d0a138fb65c4d0d6a41ef33983ad114be8c3a"}, @@ -2555,9 +2721,9 @@ testing = ["fields", "hunter", "process-tests", "pytest-xdist", "six", "virtuale name = "pytest-xdist" version = "3.7.0" description = "pytest xdist plugin for distributed testing, most importantly across multiple CPUs" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "pytest_xdist-3.7.0-py3-none-any.whl", hash = "sha256:7d3fbd255998265052435eb9daa4e99b62e6fb9cfb6efd1f858d4d8c0c7f0ca0"}, {file = "pytest_xdist-3.7.0.tar.gz", hash = "sha256:f9248c99a7c15b7d2f90715df93610353a485827bc06eefb6566d23f6400f126"}, @@ -2576,9 +2742,9 @@ testing = ["filelock"] name = "python-baseconv" version = "1.2.2" description = "Convert numbers from base 10 integers to base X strings and back again." -category = "main" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "python-baseconv-1.2.2.tar.gz", hash = "sha256:0539f8bd0464013b05ad62e0a1673f0ac9086c76b43ebf9f833053527cd9931b"}, ] @@ -2587,9 +2753,9 @@ files = [ name = "pyunormalize" version = "16.0.0" description = "Unicode normalization forms (NFC, NFKC, NFD, NFKD). A library independent of the Python core Unicode database." 
-category = "main" optional = false python-versions = ">=3.6" +groups = ["main"] files = [ {file = "pyunormalize-16.0.0-py3-none-any.whl", hash = "sha256:c647d95e5d1e2ea9a2f448d1d95d8518348df24eab5c3fd32d2b5c3300a49152"}, {file = "pyunormalize-16.0.0.tar.gz", hash = "sha256:2e1dfbb4a118154ae26f70710426a52a364b926c9191f764601f5a8cb12761f7"}, @@ -2599,9 +2765,10 @@ files = [ name = "pywin32" version = "310" description = "Python for Window Extensions" -category = "main" optional = false python-versions = "*" +groups = ["main"] +markers = "platform_system == \"Windows\"" files = [ {file = "pywin32-310-cp310-cp310-win32.whl", hash = "sha256:6dd97011efc8bf51d6793a82292419eba2c71cf8e7250cfac03bba284454abc1"}, {file = "pywin32-310-cp310-cp310-win_amd64.whl", hash = "sha256:c3e78706e4229b915a0821941a84e7ef420bf2b77e08c9dae3c76fd03fd2ae3d"}, @@ -2625,9 +2792,9 @@ files = [ name = "pyyaml" version = "6.0.2" description = "YAML parser and emitter for Python" -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "PyYAML-6.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0a9a2848a5b7feac301353437eb7d5957887edbf81d56e903999a75a3d743086"}, {file = "PyYAML-6.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:29717114e51c84ddfba879543fb232a6ed60086602313ca38cce623c1d62cfbf"}, @@ -2688,9 +2855,9 @@ files = [ name = "regex" version = "2024.11.6" description = "Alternative regular expression module, to replace re." -category = "main" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:ff590880083d60acc0433f9c3f713c51f7ac6ebb9adf889c79a261ecf541aa91"}, {file = "regex-2024.11.6-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:658f90550f38270639e83ce492f27d2c8d2cd63805c65a13a14d36ca126753f0"}, @@ -2792,9 +2959,9 @@ files = [ name = "requests" version = "2.32.3" description = "Python HTTP for Humans." -category = "main" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] files = [ {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, @@ -2814,9 +2981,9 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] name = "responses" version = "0.25.7" description = "A utility library for mocking out the `requests` Python library." 
-category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "responses-0.25.7-py3-none-any.whl", hash = "sha256:92ca17416c90fe6b35921f52179bff29332076bb32694c0df02dcac2c6bc043c"}, {file = "responses-0.25.7.tar.gz", hash = "sha256:8ebae11405d7a5df79ab6fd54277f6f2bc29b2d002d0dd2d5c632594d1ddcedb"}, @@ -2828,15 +2995,15 @@ requests = ">=2.30.0,<3.0" urllib3 = ">=1.25.10,<3.0" [package.extras] -tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli", "tomli-w", "types-PyYAML", "types-requests"] +tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli ; python_version < \"3.11\"", "tomli-w", "types-PyYAML", "types-requests"] [[package]] name = "rlp" version = "4.1.0" description = "rlp: A package for Recursive Length Prefix encoding and decoding" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main", "dev"] files = [ {file = "rlp-4.1.0-py3-none-any.whl", hash = "sha256:8eca394c579bad34ee0b937aecb96a57052ff3716e19c7a578883e767bc5da6f"}, {file = "rlp-4.1.0.tar.gz", hash = "sha256:be07564270a96f3e225e2c107db263de96b5bc1f27722d2855bd3459a08e95a9"}, @@ -2855,25 +3022,25 @@ test = ["hypothesis (>=6.22.0,<6.108.7)", "pytest (>=7.0.0)", "pytest-xdist (>=2 name = "semantic-version" version = "2.10.0" description = "A library implementing the 'SemVer' scheme." -category = "dev" optional = false python-versions = ">=2.7" +groups = ["dev"] files = [ {file = "semantic_version-2.10.0-py2.py3-none-any.whl", hash = "sha256:de78a3b8e0feda74cabc54aab2da702113e33ac9d9eb9d2389bcf1f58b7d9177"}, {file = "semantic_version-2.10.0.tar.gz", hash = "sha256:bdabb6d336998cbb378d4b9db3a4b56a1e3235701dc05ea2690d9a997ed5041c"}, ] [package.extras] -dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"] +dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1) ; python_version == \"3.4\"", "coverage", "flake8", "nose2", "readme-renderer (<25.0) ; python_version == \"3.4\"", "tox", "wheel", "zest.releaser[recommended]"] doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] name = "six" version = "1.17.0" description = "Python 2 and 3 compatibility utilities" -category = "main" optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +groups = ["main", "dev"] files = [ {file = "six-1.17.0-py2.py3-none-any.whl", hash = "sha256:4721f391ed90541fddacab5acf947aa0d3dc7d27b2e1e8eda2be8970586c3274"}, {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, @@ -2883,9 +3050,9 @@ files = [ name = "sortedcontainers" version = "2.4.0" description = "Sorted Containers -- Sorted List, Sorted Dict, Sorted Set" -category = "dev" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "sortedcontainers-2.4.0-py2.py3-none-any.whl", hash = "sha256:a163dcaede0f1c021485e957a39245190e74249897e2ae4b2aa38595db237ee0"}, {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, @@ -2895,9 +3062,9 @@ files = [ name = "stack-data" version = "0.6.3" description = "Extract data from python stack frames and tracebacks for informative displays" -category = "dev" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "stack_data-0.6.3-py3-none-any.whl", hash = 
"sha256:d5558e0c25a4cb0853cddad3d77da9891a08cb85dd9f9f91b9f8cd66e511e695"}, {file = "stack_data-0.6.3.tar.gz", hash = "sha256:836a778de4fec4dcd1dcd89ed8abff8a221f58308462e1c4aa2a3cf30148f0b9"}, @@ -2915,9 +3082,9 @@ tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] name = "timeout-decorator" version = "0.5.0" description = "Timeout decorator" -category = "main" optional = false python-versions = "*" +groups = ["main"] files = [ {file = "timeout-decorator-0.5.0.tar.gz", hash = "sha256:6a2f2f58db1c5b24a2cc79de6345760377ad8bdc13813f5265f6c3e63d16b3d7"}, ] @@ -2926,9 +3093,9 @@ files = [ name = "tomlkit" version = "0.13.3" description = "Style preserving TOML library" -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0"}, {file = "tomlkit-0.13.3.tar.gz", hash = "sha256:430cf247ee57df2b94ee3fbe588e71d362a941ebb545dec29b53961d61add2a1"}, @@ -2938,9 +3105,10 @@ files = [ name = "toolz" version = "1.0.0" description = "List processing tools and functional utilities" -category = "main" optional = false python-versions = ">=3.8" +groups = ["main", "dev"] +markers = "implementation_name == \"pypy\" or implementation_name == \"cpython\"" files = [ {file = "toolz-1.0.0-py3-none-any.whl", hash = "sha256:292c8f1c4e7516bf9086f8850935c799a874039c8bcf959d47b600e4c44a6236"}, {file = "toolz-1.0.0.tar.gz", hash = "sha256:2c86e3d9a04798ac556793bced838816296a2f085017664e4995cb40a1047a02"}, @@ -2950,9 +3118,9 @@ files = [ name = "traitlets" version = "5.14.3" description = "Traitlets Python configuration system" -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "traitlets-5.14.3-py3-none-any.whl", hash = "sha256:b74e89e397b1ed28cc831db7aea759ba6640cb3de13090ca145426688ff1ac4f"}, {file = "traitlets-5.14.3.tar.gz", hash = "sha256:9ed0579d3502c94b4b3732ac120375cda96f923114522847de4b3bb98b96b6b7"}, @@ -2966,9 +3134,9 @@ test = ["argcomplete (>=3.0.3)", "mypy (>=1.7.0)", "pre-commit", "pytest (>=7.0, name = "types-requests" version = "2.32.0.20250602" description = "Typing stubs for requests" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "types_requests-2.32.0.20250602-py3-none-any.whl", hash = "sha256:f4f335f87779b47ce10b8b8597b409130299f6971ead27fead4fe7ba6ea3e726"}, {file = "types_requests-2.32.0.20250602.tar.gz", hash = "sha256:ee603aeefec42051195ae62ca7667cd909a2f8128fdf8aad9e8a5219ecfab3bf"}, @@ -2981,9 +3149,9 @@ urllib3 = ">=2" name = "types-setuptools" version = "67.8.0.0" description = "Typing stubs for setuptools" -category = "dev" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "types-setuptools-67.8.0.0.tar.gz", hash = "sha256:95c9ed61871d6c0e258433373a4e1753c0a7c3627a46f4d4058c7b5a08ab844f"}, {file = "types_setuptools-67.8.0.0-py3-none-any.whl", hash = "sha256:6df73340d96b238a4188b7b7668814b37e8018168aef1eef94a3b1872e3f60ff"}, @@ -2993,9 +3161,9 @@ files = [ name = "types-urllib3" version = "1.26.25.14" description = "Typing stubs for urllib3" -category = "dev" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "types-urllib3-1.26.25.14.tar.gz", hash = "sha256:229b7f577c951b8c1b92c1bc2b2fdb0b49847bd2af6d1cc2a2e3dd340f3bda8f"}, {file = "types_urllib3-1.26.25.14-py3-none-any.whl", hash = "sha256:9683bbb7fb72e32bfe9d2be6e04875fbe1b3eeec3cbb4ea231435aa7fd6b4f0e"}, @@ -3005,9 
+3173,9 @@ files = [ name = "typing-extensions" version = "4.14.0" description = "Backported and Experimental Type Hints for Python 3.9+" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "typing_extensions-4.14.0-py3-none-any.whl", hash = "sha256:a1514509136dd0b477638fc68d6a91497af5076466ad0fa6c338e44e359944af"}, {file = "typing_extensions-4.14.0.tar.gz", hash = "sha256:8676b788e32f02ab42d9e7c61324048ae4c6d844a399eebace3d4979d75ceef4"}, @@ -3017,9 +3185,9 @@ files = [ name = "typing-inspection" version = "0.4.1" description = "Runtime typing introspection tools" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "typing_inspection-0.4.1-py3-none-any.whl", hash = "sha256:389055682238f53b04f7badcb49b989835495a96700ced5dab2d8feae4b26f51"}, {file = "typing_inspection-0.4.1.tar.gz", hash = "sha256:6ae134cc0203c33377d43188d4064e9b357dba58cff3185f22924610e70a9d28"}, @@ -3032,9 +3200,9 @@ typing-extensions = ">=4.12.0" name = "tzdata" version = "2025.2" description = "Provider of IANA time zone data" -category = "dev" optional = false python-versions = ">=2" +groups = ["dev"] files = [ {file = "tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8"}, {file = "tzdata-2025.2.tar.gz", hash = "sha256:b60a638fcc0daffadf82fe0f57e53d06bdec2f36c4df66280ae79bce6bd6f2b9"}, @@ -3044,16 +3212,16 @@ files = [ name = "urllib3" version = "2.4.0" description = "HTTP library with thread-safe connection pooling, file post, and more." -category = "main" optional = false python-versions = ">=3.9" +groups = ["main", "dev"] files = [ {file = "urllib3-2.4.0-py3-none-any.whl", hash = "sha256:4e16665048960a0900c702d4a66415956a584919c03361cac9f1df5c5dd7e813"}, {file = "urllib3-2.4.0.tar.gz", hash = "sha256:414bc6535b787febd7567804cc015fee39daab8ad86268f1310a9250697de466"}, ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -3062,9 +3230,9 @@ zstd = ["zstandard (>=0.18.0)"] name = "varint" version = "1.0.2" description = "Simple python varint implementation" -category = "main" optional = false python-versions = "*" +groups = ["main", "dev"] files = [ {file = "varint-1.0.2.tar.gz", hash = "sha256:a6ecc02377ac5ee9d65a6a8ad45c9ff1dac8ccee19400a5950fb51d594214ca5"}, ] @@ -3073,9 +3241,9 @@ files = [ name = "virtualenv" version = "20.31.2" description = "Virtual Python Environment builder" -category = "dev" optional = false python-versions = ">=3.8" +groups = ["dev"] files = [ {file = "virtualenv-20.31.2-py3-none-any.whl", hash = "sha256:36efd0d9650ee985f0cad72065001e66d49a6f24eb44d98980f630686243cf11"}, {file = "virtualenv-20.31.2.tar.gz", hash = "sha256:e10c0a9d02835e592521be48b332b6caee6887f332c111aa79a09b9e79efc2af"}, @@ -3088,15 +3256,31 @@ platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly 
(>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] + +[[package]] +name = "waitress" +version = "3.0.2" +description = "Waitress WSGI server" +optional = false +python-versions = ">=3.9.0" +groups = ["main"] +files = [ + {file = "waitress-3.0.2-py3-none-any.whl", hash = "sha256:c56d67fd6e87c2ee598b76abdd4e96cfad1f24cacdea5078d382b1f9d7b5ed2e"}, + {file = "waitress-3.0.2.tar.gz", hash = "sha256:682aaaf2af0c44ada4abfb70ded36393f0e307f4ab9456a215ce0020baefc31f"}, +] + +[package.extras] +docs = ["Sphinx (>=1.8.1)", "docutils", "pylons-sphinx-themes (>=1.0.9)"] +testing = ["coverage (>=7.6.0)", "pytest", "pytest-cov"] [[package]] name = "wcwidth" version = "0.2.13" description = "Measures the displayed width of unicode strings in a terminal" -category = "dev" optional = false python-versions = "*" +groups = ["dev"] files = [ {file = "wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859"}, {file = "wcwidth-0.2.13.tar.gz", hash = "sha256:72ea0c06399eb286d978fdedb6923a9eb47e1c486ce63e9b4e64fc18303972b5"}, @@ -3106,9 +3290,9 @@ files = [ name = "web3" version = "7.12.0" description = "web3: A Python library for interacting with Ethereum" -category = "main" optional = false python-versions = "<4,>=3.8" +groups = ["main"] files = [ {file = "web3-7.12.0-py3-none-any.whl", hash = "sha256:c7e2b9c1db5a379ef53b45fe8a19bdc2d47ad262039fbf6675794bc40f74bf06"}, {file = "web3-7.12.0.tar.gz", hash = "sha256:08fbe79a2e2503c9820132ebad24ba0372831588cabac5f467999c97ace7dda3"}, @@ -3140,9 +3324,9 @@ tester = ["eth-tester[py-evm] (>=0.13.0b1,<0.14.0b1)", "py-geth (>=5.1.0)"] name = "web3-multi-provider" version = "2.2.1" description = "Web3py provider that makes it easy to switch between different blockchain nodes to make sure application will be be online if main blockchain node will be unavailable." 
-category = "main" optional = false python-versions = "<4,>=3.12" +groups = ["main"] files = [ {file = "web3_multi_provider-2.2.1-py3-none-any.whl", hash = "sha256:7c9096a52ff7fc331546367656e351f75b9ba79f1013d9205412557bde98024e"}, {file = "web3_multi_provider-2.2.1.tar.gz", hash = "sha256:ba6c68db3a2832c443e81d3cf620c9bf89039d810934f5a1085719a2629a1f0d"}, @@ -3159,9 +3343,9 @@ metrics = ["prometheus-client (>=0.21.1,<0.22.0)"] name = "websockets" version = "15.0.1" description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:d63efaa0cd96cf0c5fe4d581521d9fa87744540d4bc999ae6e08595a1014b45b"}, {file = "websockets-15.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:ac60e3b188ec7574cb761b08d50fcedf9d77f1530352db4eef1707fe9dee7205"}, @@ -3234,13 +3418,31 @@ files = [ {file = "websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee"}, ] +[[package]] +name = "werkzeug" +version = "3.1.3" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"}, + {file = "werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + [[package]] name = "yarl" version = "1.20.0" description = "Yet another URL library" -category = "main" optional = false python-versions = ">=3.9" +groups = ["main"] files = [ {file = "yarl-1.20.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:f1f6670b9ae3daedb325fa55fbe31c22c8228f6e0b513772c2e1c623caa6ab22"}, {file = "yarl-1.20.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:85a231fa250dfa3308f3c7896cc007a47bc76e9e8e8595c20b7426cac4884c62"}, @@ -3354,6 +3556,6 @@ multidict = ">=4.0" propcache = ">=0.2.1" [metadata] -lock-version = "2.0" +lock-version = "2.1" python-versions = "^3.12" -content-hash = "64f864ebb5829acdcae25318b0a5d03217ba50234ae48019e00f037b08fb26af" +content-hash = "a37a3db132adce24d9e2ff274c4fdc3b4cbd16c0b38e118c8961e0c9e5722a1d" diff --git a/pyproject.toml b/pyproject.toml index c97138c05..6b733e681 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -28,6 +28,8 @@ web3-multi-provider = { version = "^2.2.1", extras = ["metrics"] } json-stream = "^2.3.2" oz-merkle-tree = { git = "https://github.com/lidofinance/oz-merkle-tree" } py-multiformats-cid = "^0.4.4" +flask = "^3.0.0" +waitress = "^3.0.2" [tool.poetry.group.dev.dependencies] base58 = "^2.1.1" diff --git a/src/main.py b/src/main.py index 44c39ea5b..a49652ef3 100644 --- a/src/main.py +++ b/src/main.py @@ -13,6 +13,7 @@ from src.modules.checks.checks_module import ChecksModule from src.modules.csm.csm import CSOracle from src.modules.ejector.ejector import Ejector +from src.modules.performance_collector.performance_collector import PerformanceCollector from src.providers.ipfs import IPFSProvider, Kubo, MultiIPFSProvider, Pinata, PublicIPFS from src.types import OracleModule from src.utils.build import get_build_info @@ -27,6 +28,7 @@ LidoValidatorsProvider, TransactionUtils, ) +from src.web3py.extensions.performance import PerformanceClientModule from src.web3py.types import Web3 logger = 
logging.getLogger(__name__) @@ -73,6 +75,9 @@ def main(module_name: OracleModule): retries=variables.HTTP_REQUEST_RETRY_COUNT_IPFS, ) + logger.info({'msg': 'Initialize Performance Collector client.'}) + performance = PerformanceClientModule(variables.PERFORMANCE_COLLECTOR_URI) + logger.info({'msg': 'Check configured providers.'}) if Version(kac.get_status().appVersion) < constants.ALLOWED_KAPI_VERSION: raise IncompatibleException(f'Incompatible KAPI version. Required >= {constants.ALLOWED_KAPI_VERSION}.') @@ -87,12 +92,13 @@ def main(module_name: OracleModule): 'cc': lambda: cc, # type: ignore[dict-item] 'kac': lambda: kac, # type: ignore[dict-item] 'ipfs': lambda: ipfs, # type: ignore[dict-item] + 'performance': lambda: performance, # type: ignore[dict-item] }) logger.info({'msg': 'Initialize prometheus metrics.'}) init_metrics() - instance: Accounting | Ejector | CSOracle + instance: Accounting | Ejector | CSOracle | PerformanceCollector if module_name == OracleModule.ACCOUNTING: logger.info({'msg': 'Initialize Accounting module.'}) instance = Accounting(web3) @@ -102,10 +108,13 @@ def main(module_name: OracleModule): elif module_name == OracleModule.CSM: logger.info({'msg': 'Initialize CSM performance oracle module.'}) instance = CSOracle(web3) + elif module_name == OracleModule.PERFORMANCE_COLLECTOR: + instance = PerformanceCollector(web3) else: raise ValueError(f'Unexpected arg: {module_name=}.') - instance.check_contract_configs() + if module_name != OracleModule.PERFORMANCE_COLLECTOR: + instance.check_contract_configs() if variables.DAEMON: instance.run_as_daemon() diff --git a/src/metrics/prometheus/basic.py b/src/metrics/prometheus/basic.py index f09ba1ebd..644e86cef 100644 --- a/src/metrics/prometheus/basic.py +++ b/src/metrics/prometheus/basic.py @@ -68,6 +68,14 @@ class Status(Enum): buckets=requests_buckets, ) +PERFORMANCE_REQUESTS_DURATION = Histogram( + 'performance_requests_duration', + 'Duration of requests to Performance Collector API', + ['endpoint', 'code', 'domain'], + namespace=PROMETHEUS_PREFIX, + buckets=requests_buckets, +) + KEYS_API_REQUESTS_DURATION = Histogram( 'keys_api_requests_duration', 'Duration of requests to Keys API', diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index bcc18b401..d9ad1d8f2 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -9,7 +9,6 @@ CSM_CURRENT_FRAME_RANGE_R_EPOCH, ) from src.metrics.prometheus.duration_meter import duration_meter -from src.modules.csm.checkpoint import FrameCheckpointProcessor, FrameCheckpointsIterator, MinStepIsNotReached from src.modules.csm.distribution import Distribution, DistributionResult, StrikesValidator from src.modules.csm.helpers.last_report import LastReport from src.modules.csm.log import FramePerfLog @@ -27,8 +26,11 @@ EpochNumber, ReferenceBlockStamp, SlotNumber, + ValidatorIndex, ) from src.utils.cache import global_lru_cache as lru_cache +from src.utils.range import sequence +from src.utils.validator_state import is_active_validator from src.utils.web3converter import Web3Converter from src.web3py.extensions.lido_validators import NodeOperatorId from src.web3py.types import Web3 @@ -69,24 +71,46 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute if not self._check_compatability(last_finalized_blockstamp): return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH - collected = self.collect_data(last_finalized_blockstamp) - if not collected: - logger.info( - {"msg": "Data required for the report is not fully collected yet. 
Waiting for the next finalized epoch"} - ) - return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH - report_blockstamp = self.get_blockstamp_for_report(last_finalized_blockstamp) if not report_blockstamp: return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH + collected = self.collect_data(report_blockstamp) + if not collected: + return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH + self.process_report(report_blockstamp) return ModuleExecuteDelay.NEXT_SLOT + @duration_meter() + def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool: + logger.info({"msg": "Collecting data for the report"}) + + converter = self.converter(blockstamp) + + l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) + logger.info({"msg": f"Epochs range for performance data collect: [{l_epoch};{r_epoch}]"}) + + self.state.migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame) + self.state.log_progress() + + if not self.state.is_fulfilled: + for l_epoch_, r_epoch_ in self.state.frames: + is_data_range_available = self.w3.performance.is_range_available( + l_epoch_, r_epoch_ + ) + if not is_data_range_available: + logger.warning({"msg": f"Performance data range is not available yet for [{l_epoch_};{r_epoch_}] frame"}) + return False + self.fulfill_state() + + return self.state.is_fulfilled + @lru_cache(maxsize=1) @duration_meter() def build_report(self, blockstamp: ReferenceBlockStamp) -> tuple: - self.validate_state(blockstamp) + l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) + self.state.validate(l_epoch, r_epoch) last_report = self._get_last_report(blockstamp) rewards_tree_root, rewards_cid = last_report.rewards_tree_root, last_report.rewards_tree_cid @@ -145,74 +169,43 @@ def is_reporting_allowed(self, blockstamp: ReferenceBlockStamp) -> bool: CONTRACT_ON_PAUSE.labels("csm").set(on_pause) return not on_pause - def validate_state(self, blockstamp: ReferenceBlockStamp) -> None: - # NOTE: We cannot use `r_epoch` from the `current_frame_range` call because the `blockstamp` is a - # `ReferenceBlockStamp`, hence it's a block the frame ends at. We use `ref_epoch` instead. - l_epoch, _ = self.get_epochs_range_to_process(blockstamp) - r_epoch = blockstamp.ref_epoch - - self.state.validate(l_epoch, r_epoch) - - def collect_data(self, blockstamp: BlockStamp) -> bool: - """Ongoing report data collection for the estimated reference slot""" - - logger.info({"msg": "Collecting data for the report"}) - - converter = self.converter(blockstamp) - - l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) - logger.info({"msg": f"Epochs range for performance data collect: [{l_epoch};{r_epoch}]"}) - - # NOTE: Finalized slot is the first slot of justifying epoch, so we need to take the previous. But if the first - # slot of the justifying epoch is empty, blockstamp.slot_number will point to the slot where the last finalized - # block was created. As a result, finalized_epoch in this case will be less than the actual number of the last - # finalized epoch. As a result we can have a delay in frame finalization. 
- finalized_epoch = EpochNumber(converter.get_epoch_by_slot(blockstamp.slot_number) - 1) - - report_blockstamp = self.get_blockstamp_for_report(blockstamp) - - if not report_blockstamp: - logger.info({"msg": "No report blockstamp available, using pre-computed one for collecting data"}) - - if report_blockstamp and report_blockstamp.ref_epoch != r_epoch: - logger.warning( - { - "msg": f"Epochs range has been changed, but the change is not yet observed on finalized epoch {finalized_epoch}" - } - ) - return False - - if l_epoch > finalized_epoch: - logger.info({"msg": "The starting epoch of the epochs range is not finalized yet"}) - return False - - self.state.migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame) - self.state.log_progress() - - if self.state.is_fulfilled: - logger.info({"msg": "All epochs are already processed. Nothing to collect"}) - return True - - try: - checkpoints = FrameCheckpointsIterator( - converter, - min(self.state.unprocessed_epochs), - r_epoch, - finalized_epoch, - ) - except MinStepIsNotReached: - return False - - processor = FrameCheckpointProcessor(self.w3.cc, self.state, converter, blockstamp) - - for checkpoint in checkpoints: - if self.get_epochs_range_to_process(self._receive_last_finalized_slot()) != (l_epoch, r_epoch): - logger.info({"msg": "Checkpoints were prepared for an outdated epochs range, stop processing"}) - raise ValueError("Outdated checkpoint") - processor.exec(checkpoint) - # Reset BaseOracle cycle timeout to avoid timeout errors during long checkpoints processing - self._reset_cycle_timeout() - return self.state.is_fulfilled + def fulfill_state(self): + finalized_blockstamp = self._receive_last_finalized_slot() + validators = self.w3.cc.get_validators(finalized_blockstamp) + + for l_epoch, r_epoch in self.state.frames: + for epoch in sequence(l_epoch, r_epoch): + epoch_data = self.w3.performance.get_epoch(epoch) + if epoch_data is None: + raise ValueError(f"Epoch {epoch} is missing in Performance Collector") + misses, props, syncs = epoch_data + + for validator in validators: + missed_att = validator.index in misses + included_att = validator.index not in misses + is_active = is_active_validator(validator, EpochNumber(epoch)) + if not is_active and missed_att: + raise ValueError(f"Validator {validator.index} missed attestation in epoch {epoch}, but was not active") + + self.state.save_att_duty(EpochNumber(epoch), validator.index, included=included_att) + + blocks_in_epoch = 0 + for p in props: + vid = ValidatorIndex(p.validator_index) + self.state.save_prop_duty(EpochNumber(epoch), vid, included=bool(p.is_proposed)) + blocks_in_epoch += p.is_proposed + + if blocks_in_epoch and syncs: + for rec in syncs: + vid = ValidatorIndex(rec.validator_index) + fulfilled = max(0, blocks_in_epoch - rec.missed_count) + for _ in range(fulfilled): + self.state.save_sync_duty(EpochNumber(epoch), vid, included=True) + for _ in range(rec.missed_count): + self.state.save_sync_duty(EpochNumber(epoch), vid, included=False) + + self.state.add_processed_epoch(EpochNumber(epoch)) + self.state.log_progress() def make_rewards_tree(self, shares: dict[NodeOperatorId, RewardsShares]) -> RewardsTree: if not shares: @@ -250,7 +243,7 @@ def publish_log(self, logs: list[FramePerfLog]) -> CID: return log_cid @lru_cache(maxsize=1) - def get_epochs_range_to_process(self, blockstamp: BlockStamp) -> tuple[EpochNumber, EpochNumber]: + def get_epochs_range_to_process(self, blockstamp: ReferenceBlockStamp) -> tuple[EpochNumber, EpochNumber]: converter = 
self.converter(blockstamp) far_future_initial_epoch = converter.get_epoch_by_timestamp(UINT64_MAX) @@ -258,35 +251,25 @@ def get_epochs_range_to_process(self, blockstamp: BlockStamp) -> tuple[EpochNumb raise ValueError("CSM oracle initial epoch is not set yet") l_ref_slot = last_processing_ref_slot = self.w3.csm.get_csm_last_processing_ref_slot(blockstamp) - r_ref_slot = initial_ref_slot = self.get_initial_ref_slot(blockstamp) if last_processing_ref_slot > blockstamp.slot_number: raise InconsistentData(f"{last_processing_ref_slot=} > {blockstamp.slot_number=}") # The very first report, no previous ref slot. if not last_processing_ref_slot: + initial_ref_slot = self.get_initial_ref_slot(blockstamp) l_ref_slot = SlotNumber(initial_ref_slot - converter.slots_per_frame) if l_ref_slot < 0: raise CSMError("Invalid frame configuration for the current network") - # NOTE: before the initial slot the contract can't return current frame - if blockstamp.slot_number > initial_ref_slot: - r_ref_slot = self.get_initial_or_current_frame(blockstamp).ref_slot - - # We are between reports, next report slot didn't happen yet. Predicting the next ref slot for the report - # to calculate epochs range to collect the data. - if l_ref_slot == r_ref_slot: - r_ref_slot = converter.get_epoch_last_slot( - EpochNumber(converter.get_epoch_by_slot(l_ref_slot) + converter.frame_config.epochs_per_frame) - ) - + r_ref_slot = blockstamp.slot_number if l_ref_slot < last_processing_ref_slot: raise CSMError(f"Got invalid epochs range: {l_ref_slot=} < {last_processing_ref_slot=}") if l_ref_slot >= r_ref_slot: raise CSMError(f"Got invalid epochs range {r_ref_slot=}, {l_ref_slot=}") l_epoch = converter.get_epoch_by_slot(SlotNumber(l_ref_slot + 1)) - r_epoch = converter.get_epoch_by_slot(r_ref_slot) + r_epoch = blockstamp.ref_epoch # Update Prometheus metrics CSM_CURRENT_FRAME_RANGE_L_EPOCH.set(l_epoch) diff --git a/src/modules/performance_collector/__init__.py b/src/modules/performance_collector/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/modules/csm/checkpoint.py b/src/modules/performance_collector/checkpoint.py similarity index 84% rename from src/modules/csm/checkpoint.py rename to src/modules/performance_collector/checkpoint.py index 0fb6573c1..49a80630a 100644 --- a/src/modules/csm/checkpoint.py +++ b/src/modules/performance_collector/checkpoint.py @@ -10,14 +10,20 @@ from src import variables from src.constants import SLOTS_PER_HISTORICAL_ROOT, EPOCHS_PER_SYNC_COMMITTEE_PERIOD -from src.metrics.prometheus.csm import CSM_UNPROCESSED_EPOCHS_COUNT, CSM_MIN_UNPROCESSED_EPOCH -from src.modules.csm.state import State +from src.modules.performance_collector.db import DutiesDB +from src.modules.performance_collector.types import ( + SlotBlockRoot, + AttestationCommittees, + ValidatorDuty, + SyncCommittees, + ProposeDuties, +) from src.modules.submodules.types import ZERO_HASH from src.providers.consensus.client import ConsensusClient from src.providers.consensus.types import SyncCommittee, SyncAggregate from src.utils.blockstamp import build_blockstamp from src.providers.consensus.types import BlockAttestation -from src.types import BlockRoot, BlockStamp, CommitteeIndex, EpochNumber, SlotNumber, ValidatorIndex +from src.types import BlockRoot, BlockStamp, CommitteeIndex, EpochNumber, SlotNumber from src.utils.range import sequence from src.utils.slot import get_prev_non_missed_slot from src.utils.timeit import timeit @@ -31,6 +37,8 @@ class MinStepIsNotReached(Exception): ... 
+ + class SlotOutOfRootsRange(Exception): ... @@ -40,12 +48,6 @@ class FrameCheckpoint: duty_epochs: Sequence[EpochNumber] # NOTE: max 255 elements. -@dataclass -class ValidatorDuty: - validator_index: ValidatorIndex - included: bool - - class FrameCheckpointsIterator: converter: Web3Converter @@ -56,6 +58,7 @@ class FrameCheckpointsIterator: max_available_epoch_to_check: EpochNumber # Min checkpoint step is 10 because it's a reasonable number of epochs to process at once (~1 hour) + # FIXME: frame might change while waiting for the next checkpoint MIN_CHECKPOINT_STEP = 10 # Max checkpoint step is 255 epochs because block_roots size from state is 8192 slots (256 epochs) # to check duty of every epoch, we need to check 64 slots (32 slots of duty epoch + 32 slots of next epoch). @@ -71,7 +74,7 @@ def __init__( self, converter: Web3Converter, l_epoch: EpochNumber, r_epoch: EpochNumber, finalized_epoch: EpochNumber ): if l_epoch > r_epoch: - raise ValueError("Left border epoch should be less or equal right border epoch") + raise ValueError(f"Left border epoch should be less or equal right border epoch: {l_epoch=} > {r_epoch=}") self.converter = converter self.l_epoch = l_epoch self.r_epoch = r_epoch @@ -113,11 +116,6 @@ def _is_min_step_reached(self): return False -type SlotBlockRoot = tuple[SlotNumber, BlockRoot | None] -type SyncCommittees = dict[SlotNumber, list[ValidatorDuty]] -type AttestationCommittees = dict[tuple[SlotNumber, CommitteeIndex], list[ValidatorDuty]] - - class SyncCommitteesCache(UserDict): max_size = max(2, variables.CSM_ORACLE_MAX_CONCURRENCY) @@ -135,26 +133,26 @@ class FrameCheckpointProcessor: cc: ConsensusClient converter: Web3Converter - state: State + db: DutiesDB finalized_blockstamp: BlockStamp def __init__( self, cc: ConsensusClient, - state: State, + db: DutiesDB, converter: Web3Converter, finalized_blockstamp: BlockStamp, ): self.cc = cc self.converter = converter - self.state = state + self.db = db self.finalized_blockstamp = finalized_blockstamp def exec(self, checkpoint: FrameCheckpoint) -> int: logger.info( {"msg": f"Processing checkpoint for slot {checkpoint.slot} with {len(checkpoint.duty_epochs)} epochs"} ) - unprocessed_epochs = [e for e in checkpoint.duty_epochs if e in self.state.unprocessed_epochs] + unprocessed_epochs = [e for e in checkpoint.duty_epochs if not self.db.has_epoch(int(e))] if not unprocessed_epochs: logger.info({"msg": "Nothing to process in the checkpoint"}) return 0 @@ -164,7 +162,6 @@ def exec(self, checkpoint: FrameCheckpoint) -> int: for duty_epoch in unprocessed_epochs } self._process(block_roots, checkpoint.slot, unprocessed_epochs, duty_epochs_roots) - self.state.commit() return len(unprocessed_epochs) def _get_block_roots(self, checkpoint_slot: SlotNumber): @@ -183,8 +180,7 @@ def _get_block_roots(self, checkpoint_slot: SlotNumber): # Replace duplicated roots with `None` to mark missing slots br = [ - br[i] if br[i] != ZERO_BLOCK_ROOT and (i == pivot_index or br[i] != br[i - 1]) - else None + br[i] if br[i] != ZERO_BLOCK_ROOT and (i == pivot_index or br[i] != br[i - 1]) else None for i in range(len(br)) ] if is_pivot_missing: @@ -212,7 +208,9 @@ def _select_block_roots( return duty_epoch_roots, next_epoch_roots @staticmethod - def _select_block_root_by_slot(block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber, root_slot: SlotNumber) -> BlockRoot | None: + def _select_block_root_by_slot( + block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber, root_slot: SlotNumber + ) -> BlockRoot | None: # From spec # 
https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#get_block_root_at_slot if not root_slot < checkpoint_slot <= root_slot + SLOTS_PER_HISTORICAL_ROOT: @@ -224,7 +222,7 @@ def _process( checkpoint_block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber, unprocessed_epochs: list[EpochNumber], - epochs_roots_to_check: dict[EpochNumber, tuple[list[SlotBlockRoot], list[SlotBlockRoot]]] + epochs_roots_to_check: dict[EpochNumber, tuple[list[SlotBlockRoot], list[SlotBlockRoot]]], ): executor = ThreadPoolExecutor(max_workers=variables.CSM_ORACLE_MAX_CONCURRENCY) try: @@ -234,7 +232,7 @@ def _process( checkpoint_block_roots, checkpoint_slot, duty_epoch, - *epochs_roots_to_check[duty_epoch] + *epochs_roots_to_check[duty_epoch], ) for duty_epoch in unprocessed_epochs } @@ -274,33 +272,12 @@ def _check_duties( process_attestations(attestations, att_committees) with lock: - if duty_epoch not in self.state.unprocessed_epochs: - raise ValueError(f"Epoch {duty_epoch} is not in epochs that should be processed") - for att_committee in att_committees.values(): - for att_duty in att_committee: - self.state.save_att_duty( - duty_epoch, - att_duty.validator_index, - included=att_duty.included, - ) - for sync_committee in sync_committees.values(): - for sync_duty in sync_committee: - self.state.save_sync_duty( - duty_epoch, - sync_duty.validator_index, - included=sync_duty.included, - ) - for proposer_duty in propose_duties.values(): - self.state.save_prop_duty( - duty_epoch, - proposer_duty.validator_index, - included=proposer_duty.included - ) - self.state.add_processed_epoch(duty_epoch) - self.state.log_progress() - unprocessed_epochs = self.state.unprocessed_epochs - CSM_UNPROCESSED_EPOCHS_COUNT.set(len(unprocessed_epochs)) - CSM_MIN_UNPROCESSED_EPOCH.set(min(unprocessed_epochs or {EpochNumber(-1)})) + self.db.store_epoch_from_duties( + duty_epoch, + att_committees=att_committees, + propose_duties=propose_duties, + sync_committees=sync_committees, + ) @timeit( lambda args, duration: logger.info( @@ -350,9 +327,7 @@ def _get_sync_committee(self, epoch: EpochNumber) -> SyncCommittee: logger.info({"msg": f"Preparing cached Sync Committee for [{from_epoch};{to_epoch}] chain epochs"}) state_blockstamp = build_blockstamp( get_prev_non_missed_slot( - self.cc, - self.converter.get_epoch_first_slot(epoch), - self.finalized_blockstamp.slot_number + self.cc, self.converter.get_epoch_first_slot(epoch), self.finalized_blockstamp.slot_number ) ) sync_committee = self.cc.get_sync_committee(state_blockstamp, epoch) @@ -365,11 +340,8 @@ def _get_sync_committee(self, epoch: EpochNumber) -> SyncCommittee: ) ) def _prepare_propose_duties( - self, - epoch: EpochNumber, - checkpoint_block_roots: list[BlockRoot | None], - checkpoint_slot: SlotNumber - ) -> dict[SlotNumber, ValidatorDuty]: + self, epoch: EpochNumber, checkpoint_block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber + ) -> ProposeDuties: duties = {} dependent_root = self._get_dependent_root_for_proposer_duties(epoch, checkpoint_block_roots, checkpoint_slot) proposer_duties = self.cc.get_proposer_duties(epoch, dependent_root) @@ -378,10 +350,7 @@ def _prepare_propose_duties( return duties def _get_dependent_root_for_proposer_duties( - self, - epoch: EpochNumber, - checkpoint_block_roots: list[BlockRoot | None], - checkpoint_slot: SlotNumber + self, epoch: EpochNumber, checkpoint_block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber ) -> BlockRoot: dependent_root = None dependent_slot = 
self.converter.get_epoch_last_slot(EpochNumber(epoch - 1))
@@ -394,22 +363,20 @@
                 logger.debug(
                     {
                         "msg": f"Got dependent root from state block roots for epoch {epoch}. "
-                               f"{dependent_slot=} {dependent_root=}"
+                        f"{dependent_slot=} {dependent_root=}"
                     }
                 )
                 break
             dependent_slot = SlotNumber(int(dependent_slot - 1))
         except SlotOutOfRootsRange:
             dependent_non_missed_slot = get_prev_non_missed_slot(
-                self.cc,
-                dependent_slot,
-                self.finalized_blockstamp.slot_number
+                self.cc, dependent_slot, self.finalized_blockstamp.slot_number
             ).message.slot
             dependent_root = self.cc.get_block_root(dependent_non_missed_slot).root
             logger.debug(
                 {
                     "msg": f"Got dependent root from CL for epoch {epoch}. "
-                           f"{dependent_non_missed_slot=} {dependent_root=}"
+                    f"{dependent_non_missed_slot=} {dependent_root=}"
                 }
             )
         return dependent_root
diff --git a/src/modules/performance_collector/codec.py b/src/modules/performance_collector/codec.py
new file mode 100644
index 000000000..71a7ce50a
--- /dev/null
+++ b/src/modules/performance_collector/codec.py
@@ -0,0 +1,137 @@
+import struct
+from dataclasses import dataclass
+from typing import Sequence, TypeAlias
+
+from pyroaring import BitMap
+
+# TODO: get from config
+SLOTS_PER_EPOCH = 32
+COMMITTEE_SIZE = 512
+
+
+@dataclass
+class ProposalDuty:
+    validator_index: int
+    is_proposed: bool
+
+
+class ProposalDutiesCodec:
+    # little-endian | uint64 validator_index | bool is_proposed
+    # See: https://docs.python.org/3/library/struct.html#format-characters
+    PACK_FMT = "<Q?"
+    ITEM_SIZE = struct.calcsize(PACK_FMT)
+
+    @classmethod
+    def encode(cls, proposals: Sequence[ProposalDuty]) -> bytes:
+        if len(proposals) != SLOTS_PER_EPOCH:
+            raise ValueError("Invalid proposals count")
+        items = sorted(((p.validator_index, p.is_proposed) for p in proposals), key=lambda t: t[0])
+        return b"".join(struct.pack(cls.PACK_FMT, vid, flag) for vid, flag in items)
+
+    @classmethod
+    def decode(cls, blob: bytes) -> list[ProposalDuty]:
+        out: list[ProposalDuty] = []
+        if not blob:
+            return out
+        if len(blob) % cls.ITEM_SIZE != 0:
+            raise ValueError("Invalid proposals bytes length")
+        for i in range(0, len(blob), cls.ITEM_SIZE):
+            vid, p = struct.unpack_from(cls.PACK_FMT, blob, i)
+            out.append(ProposalDuty(validator_index=int(vid), is_proposed=p))
+        return out
+
+
+@dataclass
+class SyncDuty:
+    validator_index: int
+    missed_count: int  # 0..32
+
+
+class SyncDutiesCodec:
+    # little-endian | uint64 validator_index | uint8 missed_count
+    # See: https://docs.python.org/3/library/struct.html#format-characters
+    PACK_FMT = "<QB"
+    ITEM_SIZE = struct.calcsize(PACK_FMT)
+
+    @classmethod
+    def encode(cls, syncs: Sequence[SyncDuty]) -> bytes:
+        if len(syncs) == 0:
+            raise ValueError("Invalid syncs count")
+        for s in syncs:
+            if not (0 <= int(s.missed_count) <= SLOTS_PER_EPOCH):
+                raise ValueError("missed_count out of range [0..32]")
+        items_sorted = sorted(((m.validator_index, m.missed_count) for m in syncs), key=lambda t: t[0])
+        return b"".join(struct.pack(cls.PACK_FMT, vid, cnt) for vid, cnt in items_sorted)
+
+    @classmethod
+    def decode(cls, blob: bytes) -> list[SyncDuty]:
+        out: list[SyncDuty] = []
+        if not blob:
+            return out
+        if len(blob) % cls.ITEM_SIZE != 0:
+            raise ValueError("Invalid sync misses bytes length")
+        for i in range(0, len(blob), cls.ITEM_SIZE):
+            vid, m = struct.unpack_from(cls.PACK_FMT, blob, i)
+            out.append(SyncDuty(validator_index=int(vid), missed_count=int(m)))
+        return out
+
+
+AttMissDuty: TypeAlias = int
+
+
+class AttDutiesMissCodec:
+
+    @staticmethod
+    def encode(missed: set[AttMissDuty]) -> bytes:
+        bm = BitMap(sorted(v for v in missed))
+        bm.shrink_to_fit()
+        bm.run_optimize()
+        return bm.serialize()
+
+    @staticmethod
+    def decode(blob: bytes) -> set[AttMissDuty]:
+        return set(BitMap.deserialize(blob))
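+
+
+# Illustrative round-trip for the item codecs (hypothetical validator indices,
+# not part of the on-disk format):
+#
+#   blob = ProposalDutiesCodec.encode(
+#       [ProposalDuty(validator_index=i, is_proposed=True) for i in range(SLOTS_PER_EPOCH)]
+#   )
+#   assert len(blob) == SLOTS_PER_EPOCH * ProposalDutiesCodec.ITEM_SIZE  # 32 items * 9 bytes each
+#
+#   misses = AttDutiesMissCodec.encode({5, 42})
+#   assert AttDutiesMissCodec.decode(misses) == {5, 42}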
+
+
+EpochBlob: TypeAlias = tuple[set[int], list[ProposalDuty], list[SyncDuty]]
+
+
+class EpochBlobCodec:
+    # little-endian | uint8 version | uint32 att_count | uint8 prop_count | uint16 sync_count
+    # See: https://docs.python.org/3/library/struct.html#format-characters
+    HEADER_FMT = "<BIBH"
+    HEADER_SIZE = struct.calcsize(HEADER_FMT)
+    VERSION = 1
+
+    @classmethod
+    def encode(cls, att_misses: set[AttMissDuty], proposals: Sequence[ProposalDuty], sync_misses: Sequence[SyncDuty]) -> bytes:
+        att_bytes = AttDutiesMissCodec.encode(att_misses)
+        prop_bytes = ProposalDutiesCodec.encode(proposals)
+        sync_bytes = SyncDutiesCodec.encode(sync_misses)
+        header = struct.pack(cls.HEADER_FMT, cls.VERSION, len(att_bytes), len(proposals), len(sync_misses))
+        return header + prop_bytes + sync_bytes + att_bytes
+
+    @classmethod
+    def decode(cls, blob: bytes) -> EpochBlob:
+        if len(blob) < cls.HEADER_SIZE:
+            raise ValueError(f"Epoch blob too short to decode: header size is {cls.HEADER_SIZE} but full blob size is {len(blob)}")
+        ver, att_count, prop_count, sync_count = struct.unpack_from(cls.HEADER_FMT, blob, 0)
+        if ver != cls.VERSION:
+            raise ValueError(f"Unsupported epoch blob version: {ver}")
+        props_size = int(prop_count) * ProposalDutiesCodec.ITEM_SIZE
+        sync_size = int(sync_count) * SyncDutiesCodec.ITEM_SIZE
+        expected_blob_size = cls.HEADER_SIZE + props_size + sync_size + att_count
+        if len(blob) < expected_blob_size:
+            raise ValueError(f"Epoch blob size mismatch: expected {expected_blob_size} but got {len(blob)}")
+        offset = cls.HEADER_SIZE
+        props = ProposalDutiesCodec.decode(blob[offset:(offset + props_size)])
+        offset += props_size
+        syncs = SyncDutiesCodec.decode(blob[offset:(offset + sync_size)])
+        offset += sync_size
+        att = AttDutiesMissCodec.decode(bytes(blob[offset:(offset + att_count)])) if att_count else BitMap()
+        return set(att), props, syncs
diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py
new file mode 100644
index 000000000..58f351135
--- /dev/null
+++ b/src/modules/performance_collector/db.py
@@ -0,0 +1,204 @@
+import sqlite3
+from typing import Dict, Optional, Sequence
+
+from src import variables
+from src.modules.performance_collector.codec import ProposalDuty, SyncDuty, EpochBlobCodec, AttMissDuty
+from src.modules.performance_collector.types import AttestationCommittees, ProposeDuties, SyncCommittees
+from src.types import EpochNumber
+
+
+class DutiesDB:
+    def __init__(self, path: str, *, default_num_validators: Optional[int] = None):
+        self.path = path
+        self.default_num_validators = default_num_validators
+        self._init_schema()
+
+    def _connect(self) -> sqlite3.Connection:
+        conn = sqlite3.connect(self.path, check_same_thread=False, timeout=30.0)
+        conn.execute("PRAGMA journal_mode=WAL;")
+        conn.execute("PRAGMA synchronous=NORMAL;")
+        conn.execute("PRAGMA temp_store=MEMORY;")
+        return conn
+
+    def _init_schema(self):
+        conn = self._connect()
+        cur = conn.cursor()
+        cur.execute(
+            """
+            CREATE TABLE IF NOT EXISTS duties
+            (
+                epoch INTEGER PRIMARY KEY,
+                blob  BLOB NOT NULL
+            );
+            """
+        )
+        conn.commit()
+        conn.close()
+
+    def store_epoch(
+        self,
+        epoch: EpochNumber,
+        att_misses: set[AttMissDuty],
+        proposals: Sequence[ProposalDuty] | None = None,
+        sync_misses: Sequence[SyncDuty] | None = None,
+    ) -> bytes:
+
+        blob = EpochBlobCodec.encode(att_misses, proposals, sync_misses)
+
+        conn = self._connect()
+        cur = conn.cursor()
+        cur.execute(
+            "INSERT OR REPLACE INTO duties(epoch, blob) VALUES(?, ?)",
+            (epoch, sqlite3.Binary(blob)),
+        )
+        conn.commit()
+        conn.close()
+        return blob
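+
+    # Illustrative usage sketch (hypothetical values, not taken from the oracle flow):
+    #
+    #   db = DutiesDB("duties.sqlite")
+    #   db.store_epoch(
+    #       EpochNumber(1024),
+    #       att_misses={5, 42},
+    #       proposals=[ProposalDuty(validator_index=i, is_proposed=True) for i in range(32)],
+    #       sync_misses=[SyncDuty(validator_index=7, missed_count=3)],
+    #   )
+    #   assert db.has_epoch(1024)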
+    def store_epoch_from_duties(
+        self,
+        epoch: EpochNumber,
+        att_committees: AttestationCommittees,
+        propose_duties: ProposeDuties,
+        sync_committees: SyncCommittees,
+    ) -> bytes:
+        att_misses = set()
+        for committee in att_committees.values():
+            for duty in committee:
+                if not duty.included:
+                    att_misses.add(duty.validator_index)
+
+        proposals_list: list[ProposalDuty] = []
+        for proposer_duty in propose_duties.values():
+            proposals_list.append(
+                ProposalDuty(validator_index=proposer_duty.validator_index, is_proposed=proposer_duty.included)
+            )
+
+        # FIXME: should we get it like a map?
+        sync_miss_map: Dict[int, int] = {}
+        for duties in sync_committees.values():
+            for duty in duties:
+                vid = duty.validator_index
+                sync_miss_map.setdefault(vid, 0)
+                if not duty.included:
+                    sync_miss_map[vid] += 1
+        sync_misses: list[SyncDuty] = [
+            SyncDuty(validator_index=vid, missed_count=cnt) for vid, cnt in sync_miss_map.items()
+        ]
+
+        blob = self.store_epoch(epoch, att_misses, proposals_list, sync_misses)
+
+        self._auto_prune(epoch)
+
+        return blob
+
+    def _auto_prune(self, current_epoch: int) -> None:
+        retention = int(getattr(variables, 'PERFORMANCE_COLLECTOR_RETENTION_EPOCHS', 0))
+        if retention <= 0:
+            return
+        threshold = int(current_epoch) - retention
+        if threshold <= 0:
+            return
+        conn = self._connect()
+        try:
+            cur = conn.cursor()
+            cur.execute("DELETE FROM duties WHERE epoch < ?", (threshold,))
+            conn.commit()
+        finally:
+            conn.close()
+
+    def is_range_available(self, l_epoch: int, r_epoch: int) -> bool:
+        if int(l_epoch) > int(r_epoch):
+            raise ValueError("Invalid epoch range")
+        conn = self._connect()
+        cur = conn.cursor()
+        cur.execute(
+            "SELECT COUNT(1) FROM duties WHERE epoch BETWEEN ? AND ?",
+            (int(l_epoch), int(r_epoch)),
+        )
+        (cnt,) = cur.fetchone() or (0,)
+        conn.close()
+        return int(cnt) == (r_epoch - l_epoch + 1)
+
+    def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[int]:
+        if l_epoch > r_epoch:
+            raise ValueError("Invalid epoch range")
+        conn = self._connect()
+        cur = conn.cursor()
+        cur.execute(
+            "SELECT epoch FROM duties WHERE epoch BETWEEN ? AND ? ORDER BY epoch",
+            (l_epoch, r_epoch),
+        )
+        present = [int(row[0]) for row in cur.fetchall()]
+        conn.close()
+        missing = []
+        exp = l_epoch
+        for e in present:
+            while exp < e:
+                missing.append(exp)
+                exp += 1
+            exp = e + 1
+        while exp <= r_epoch:
+            missing.append(exp)
+            exp += 1
+        return missing
+
+    def _get_entry(self, epoch: int) -> Optional[bytes]:
+        conn = self._connect()
+        cur = conn.cursor()
+        cur.execute("SELECT blob FROM duties WHERE epoch=?", (int(epoch),))
+        row = cur.fetchone()
+        conn.close()
+        if not row:
+            return None
+        return bytes(row[0])
+
+    def get_epoch_blob(self, epoch: int) -> Optional[bytes]:
+        return self._get_entry(epoch)
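+
+    # Gap-scan sketch (illustrative): with epochs {10, 11, 14} stored,
+    #     missing_epochs_in(10, 15) -> [12, 13, 15]
+    #     is_range_available(10, 11) -> True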
LIMIT 1", (int(epoch),)) + ok = cur.fetchone() is not None + conn.close() + return ok + + def min_epoch(self) -> int: + conn = self._connect() + cur = conn.cursor() + cur.execute("SELECT MIN(epoch) FROM duties") + val = int(cur.fetchone()[0] or 0) + conn.close() + return val + + def max_epoch(self) -> int: + conn = self._connect() + cur = conn.cursor() + cur.execute("SELECT MAX(epoch) FROM duties") + val = int(cur.fetchone()[0] or 0) + conn.close() + return val + + def min_unprocessed_epoch(self) -> int: + conn = self._connect() + cur = conn.cursor() + cur.execute("SELECT MIN(epoch), MAX(epoch) FROM duties") + row = cur.fetchone() + if not row or row[0] is None or row[1] is None: + conn.close() + return 0 + l_epoch, r_epoch = int(row[0]), int(row[1]) + cur.execute( + """ + SELECT MIN(t.epoch + 1) + FROM duties t + LEFT JOIN duties d2 ON d2.epoch = t.epoch + 1 + WHERE t.epoch BETWEEN ? AND ? AND d2.epoch IS NULL + """, + (l_epoch, r_epoch), + ) + (missing,) = cur.fetchone() + conn.close() + return int(missing) if missing else (r_epoch + 1) diff --git a/src/modules/performance_collector/http_server.py b/src/modules/performance_collector/http_server.py new file mode 100644 index 000000000..4eb511978 --- /dev/null +++ b/src/modules/performance_collector/http_server.py @@ -0,0 +1,116 @@ +from threading import Thread +from typing import Any, Dict, Optional + +from flask import Flask, jsonify, request +from waitress import serve +import traceback + +from src.modules.performance_collector.db import DutiesDB +from src.modules.performance_collector.codec import EpochBlobCodec +from src import variables + + +def _parse_from_to(args: Dict[str, Any]) -> Optional[tuple[int, int]]: + f = args.get("from") + t = args.get("to") + if f is None or t is None: + return None + try: + fi = int(f) + ti = int(t) + except Exception: + return None + if fi > ti: + return None + return fi, ti + + +def _create_app(db_path: str) -> Flask: + app = Flask(__name__) + app.config["DB_PATH"] = db_path + + @app.get("/health") + def health(): + return jsonify({"status": "ok"}) + + @app.get("/epochs/check") + def epochs_check(): + try: + parsed = _parse_from_to(request.args) + if not parsed: + return jsonify({"error": "Invalid or missing 'from'/'to' params"}), 400 + l, r = parsed + db = DutiesDB(app.config["DB_PATH"]) + result = db.is_range_available(l, r) + return jsonify({"result": bool(result)}) + except Exception as e: + return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 + + @app.get("/epochs/missing") + def epochs_missing(): + try: + parsed = _parse_from_to(request.args) + if not parsed: + return jsonify({"error": "Invalid or missing 'from'/'to' params"}), 400 + l, r = parsed + db = DutiesDB(app.config["DB_PATH"]) + result = db.missing_epochs_in(l, r) + return jsonify({"result": result}) + except Exception as e: + return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 + + @app.get("/epochs/blob") + def epochs_blob(): + try: + parsed = _parse_from_to(request.args) + if not parsed: + return jsonify({"error": "Invalid or missing 'from'/'to' params"}), 400 + l, r = parsed + db = DutiesDB(app.config["DB_PATH"]) + epochs: list[dict[str, Any]] = [] + for e in range(l, r + 1): + blob = db.get_epoch_blob(e) + epochs.append({ + "epoch": e, + "blob": blob.hex() if blob is not None else None, + }) + return jsonify({"result": epochs}) + except Exception as e: + return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 + + @app.get("/epochs/") + def epoch_details(epoch: int): + try: + 
db = DutiesDB(app.config["DB_PATH"]) + blob = db.get_epoch_blob(epoch) + if blob is None: + return jsonify({"error": "epoch not found", "epoch": epoch}), 404 + + misses, props, syncs = EpochBlobCodec.decode(blob) + + proposals = [ + {"validator_index": int(p.validator_index), "is_proposed": bool(p.is_proposed)} for p in props + ] + sync_misses = [ + {"validator_index": int(s.validator_index), "missed_count": int(s.missed_count)} for s in syncs + ] + + return jsonify( + { + "epoch": int(epoch), + "att_misses": list(misses), + "proposals": proposals, + "sync_misses": sync_misses, + } + ) + except Exception as e: + return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 + + return app + + +def start_performance_api_server(db_path): + host = "0.0.0.0" + app = _create_app(db_path) + t = Thread(target=lambda: serve(app, host=host, port=variables.PERFORMANCE_COLLECTOR_SERVER_API_PORT), daemon=True) + t.start() diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py new file mode 100644 index 000000000..64155a8cb --- /dev/null +++ b/src/modules/performance_collector/performance_collector.py @@ -0,0 +1,71 @@ +import logging +from typing import Optional + +from src.modules.performance_collector.checkpoint import FrameCheckpointsIterator, FrameCheckpointProcessor, MinStepIsNotReached +from src.modules.performance_collector.db import DutiesDB +from src.modules.performance_collector.http_server import start_performance_api_server +from src.modules.submodules.oracle_module import BaseModule, ModuleExecuteDelay +from src.modules.submodules.types import ChainConfig, FrameConfig +from src.types import BlockStamp, EpochNumber +from src.utils.web3converter import Web3Converter +from src import variables + +logger = logging.getLogger(__name__) + + +class PerformanceCollector(BaseModule): + """ + Continuously collects performance data from Consensus Layer into db for the given epoch range. 
+ """ + + def __init__(self, w3, db_path: Optional[str] = None): + super().__init__(w3) + db_path = db_path or str((variables.CACHE_PATH / "eth_duties.sqlite").absolute()) + self.db = DutiesDB(db_path) + logger.info({'msg': 'Initialize Performance Collector module.'}) + try: + logger.info({'msg': f'Start performance API server on port {variables.PERFORMANCE_COLLECTOR_SERVER_API_PORT}'}) + start_performance_api_server(db_path) + except Exception as e: + logger.error({'msg': 'Failed to start performance API server', 'error': repr(e)}) + raise + + def refresh_contracts(self): + return None + + def _build_converter(self) -> Web3Converter: + cc_spec = self.w3.cc.get_config_spec() + genesis = self.w3.cc.get_genesis() + chain_cfg = ChainConfig( + slots_per_epoch=cc_spec.SLOTS_PER_EPOCH, + seconds_per_slot=cc_spec.SECONDS_PER_SLOT, + genesis_time=genesis.genesis_time, + ) + # FIXME: mocked value + frame_cfg = FrameConfig(initial_epoch=0, epochs_per_frame=32, fast_lane_length_slots=0) + return Web3Converter(chain_cfg, frame_cfg) + + def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay: + converter = self._build_converter() + + start_epoch = max(self.db.min_unprocessed_epoch(), variables.PERFORMANCE_COLLECTOR_SERVER_START_EPOCH) + finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) + + try: + checkpoints = FrameCheckpointsIterator( + converter, + start_epoch, + variables.PERFORMANCE_COLLECTOR_SERVER_END_EPOCH, + finalized_epoch, + ) + except MinStepIsNotReached: + return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH + + processor = FrameCheckpointProcessor(self.w3.cc, self.db, converter, last_finalized_blockstamp) + + for checkpoint in checkpoints: + processor.exec(checkpoint) + # Reset BaseOracle cycle timeout to avoid timeout errors during long checkpoints processing + self._reset_cycle_timeout() + + return ModuleExecuteDelay.NEXT_SLOT diff --git a/src/modules/performance_collector/types.py b/src/modules/performance_collector/types.py new file mode 100644 index 000000000..7a3097a4c --- /dev/null +++ b/src/modules/performance_collector/types.py @@ -0,0 +1,15 @@ +from dataclasses import dataclass + +from src.types import SlotNumber, CommitteeIndex, BlockRoot, ValidatorIndex + + +@dataclass +class ValidatorDuty: + validator_index: ValidatorIndex + included: bool + + +type SlotBlockRoot = tuple[SlotNumber, BlockRoot | None] +type SyncCommittees = dict[SlotNumber, list[ValidatorDuty]] +type ProposeDuties = dict[SlotNumber, ValidatorDuty] +type AttestationCommittees = dict[tuple[SlotNumber, CommitteeIndex], list[ValidatorDuty]] diff --git a/src/providers/performance/__init__.py b/src/providers/performance/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/providers/performance/client.py b/src/providers/performance/client.py new file mode 100644 index 000000000..906acb4c5 --- /dev/null +++ b/src/providers/performance/client.py @@ -0,0 +1,51 @@ +from src.metrics.prometheus.basic import PERFORMANCE_REQUESTS_DURATION +from src.modules.performance_collector.codec import EpochBlobCodec, ProposalDuty, SyncDuty, EpochBlob +from src.providers.http_provider import HTTPProvider, NotOkResponse, data_is_dict + + +class PerformanceClientError(NotOkResponse): + pass + + +# TODO: dataclasses and types ??? 
+
+
+class PerformanceClient(HTTPProvider):
+    PROVIDER_EXCEPTION = PerformanceClientError
+    PROMETHEUS_HISTOGRAM = PERFORMANCE_REQUESTS_DURATION
+
+    API_EPOCHS_CHECK = 'epochs/check'
+    API_EPOCHS_MISSING = 'epochs/missing'
+    API_EPOCHS_BLOB = 'epochs/blob'
+
+    def is_range_available(self, l_epoch: int, r_epoch: int) -> bool:
+        data, _ = self._get(
+            self.API_EPOCHS_CHECK,
+            query_params={'from': int(l_epoch), 'to': int(r_epoch)},
+            retval_validator=data_is_dict,
+        )
+        return data['result']
+
+    def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[int]:
+        data, _ = self._get(
+            self.API_EPOCHS_MISSING,
+            query_params={'from': int(l_epoch), 'to': int(r_epoch)},
+            retval_validator=data_is_dict,
+        )
+        return data['result']
+
+    def get_epoch_blobs(self, l_epoch: int, r_epoch: int) -> list[dict[str, str | None]]:
+        data, _ = self._get(
+            self.API_EPOCHS_BLOB,
+            query_params={'from': int(l_epoch), 'to': int(r_epoch)},
+            retval_validator=data_is_dict,
+        )
+        return data['result']
+
+    def get_epochs(self, l_epoch: int, r_epoch: int) -> list[tuple[set[int], list[ProposalDuty], list[SyncDuty]]]:
+        epochs_data = self.get_epoch_blobs(l_epoch, r_epoch)
+        return [EpochBlobCodec.decode(bytes.fromhex(epoch_data['blob'])) for epoch_data in epochs_data]
+
+    def get_epoch(self, epoch: int) -> EpochBlob | None:
+        epochs_data = self.get_epoch_blobs(epoch, epoch)
+        blob = epochs_data[0]['blob'] if epochs_data else None
+        if blob is None:
+            return None
+        return EpochBlobCodec.decode(bytes.fromhex(blob))
diff --git a/src/types.py b/src/types.py
index 22ed30d2d..dc2b4168f 100644
--- a/src/types.py
+++ b/src/types.py
@@ -11,6 +11,7 @@ class OracleModule(StrEnum):
     EJECTOR = 'ejector'
     CHECK = 'check'
     CSM = 'csm'
+    PERFORMANCE_COLLECTOR = 'performance_collector'
 
 
 EpochNumber = NewType('EpochNumber', int)
diff --git a/src/variables.py b/src/variables.py
index 685ead3c1..ab5544365 100644
--- a/src/variables.py
+++ b/src/variables.py
@@ -11,6 +11,7 @@
 EXECUTION_CLIENT_URI: Final = os.getenv('EXECUTION_CLIENT_URI', '').split(',')
 CONSENSUS_CLIENT_URI: Final = os.getenv('CONSENSUS_CLIENT_URI', '').split(',')
 KEYS_API_URI: Final = os.getenv('KEYS_API_URI', '').split(',')
+PERFORMANCE_COLLECTOR_URI: Final = os.getenv('PERFORMANCE_COLLECTOR_URI', '').split(',')
 
 PINATA_JWT: Final = from_file_or_env('PINATA_JWT')
 KUBO_HOST: Final = os.getenv('KUBO_HOST')
@@ -67,6 +68,13 @@
     os.getenv('HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_CONSENSUS', 5)
 )
 
+# Performance Collector HTTP client variables
+HTTP_REQUEST_TIMEOUT_PERFORMANCE: Final = int(os.getenv('HTTP_REQUEST_TIMEOUT_PERFORMANCE', 60))
+HTTP_REQUEST_RETRY_COUNT_PERFORMANCE: Final = int(os.getenv('HTTP_REQUEST_RETRY_COUNT_PERFORMANCE', 3))
+HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE: Final = int(
+    os.getenv('HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE', 2)
+)
+
 HTTP_REQUEST_TIMEOUT_KEYS_API: Final = int(os.getenv('HTTP_REQUEST_TIMEOUT_KEYS_API', 120))
 HTTP_REQUEST_RETRY_COUNT_KEYS_API: Final = int(os.getenv('HTTP_REQUEST_RETRY_COUNT_KEYS_API', 5))
 HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_KEYS_API: Final = int(
@@ -85,6 +93,11 @@
 OPSGENIE_API_URL: Final[str] = os.getenv('OPSGENIE_API_URL', '')
 
 HEALTHCHECK_SERVER_PORT: Final = int(os.getenv('HEALTHCHECK_SERVER_PORT', 9010))
+PERFORMANCE_COLLECTOR_SERVER_API_PORT: Final = int(os.getenv('PERFORMANCE_COLLECTOR_SERVER_API_PORT', 9020))
+PERFORMANCE_COLLECTOR_RETENTION_EPOCHS: Final = int(os.getenv('PERFORMANCE_COLLECTOR_RETENTION_EPOCHS', 28 * 225 * 6))
+PERFORMANCE_COLLECTOR_SERVER_START_EPOCH: Final = int(os.getenv('PERFORMANCE_COLLECTOR_SERVER_START_EPOCH', 0))
+# TODO: endless?
+PERFORMANCE_COLLECTOR_SERVER_END_EPOCH: Final = int(os.getenv('PERFORMANCE_COLLECTOR_SERVER_END_EPOCH', 2 ** 64 - 1)) MAX_CYCLE_LIFETIME_IN_SECONDS: Final = int(os.getenv("MAX_CYCLE_LIFETIME_IN_SECONDS", 3000)) @@ -147,6 +160,12 @@ def raise_from_errors(errors): 'PROMETHEUS_PORT': PROMETHEUS_PORT, 'PROMETHEUS_PREFIX': PROMETHEUS_PREFIX, 'HEALTHCHECK_SERVER_PORT': HEALTHCHECK_SERVER_PORT, + 'PERFORMANCE_COLLECTOR_SERVER_API_PORT': PERFORMANCE_COLLECTOR_SERVER_API_PORT, + 'PERFORMANCE_COLLECTOR_RETENTION_EPOCHS': PERFORMANCE_COLLECTOR_RETENTION_EPOCHS, + 'PERFORMANCE_COLLECTOR_SERVER_START_EPOCH': PERFORMANCE_COLLECTOR_SERVER_START_EPOCH, + 'HTTP_REQUEST_TIMEOUT_PERFORMANCE': HTTP_REQUEST_TIMEOUT_PERFORMANCE, + 'HTTP_REQUEST_RETRY_COUNT_PERFORMANCE': HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, + 'HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE': HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE, 'MAX_CYCLE_LIFETIME_IN_SECONDS': MAX_CYCLE_LIFETIME_IN_SECONDS, 'CACHE_PATH': CACHE_PATH, }.items() @@ -156,6 +175,7 @@ def raise_from_errors(errors): 'EXECUTION_CLIENT_URI': EXECUTION_CLIENT_URI, 'CONSENSUS_CLIENT_URI': CONSENSUS_CLIENT_URI, 'KEYS_API_URI': KEYS_API_URI, + 'PERFORMANCE_COLLECTOR_URI': PERFORMANCE_COLLECTOR_URI, 'PINATA_JWT': PINATA_JWT, 'MEMBER_PRIV_KEY': MEMBER_PRIV_KEY, 'OPSGENIE_API_KEY': OPSGENIE_API_KEY, diff --git a/src/web3py/extensions/performance.py b/src/web3py/extensions/performance.py new file mode 100644 index 000000000..61a044125 --- /dev/null +++ b/src/web3py/extensions/performance.py @@ -0,0 +1,21 @@ +from web3 import Web3 +from web3.module import Module + +from src.providers.performance.client import PerformanceClient +from src.variables import ( + HTTP_REQUEST_TIMEOUT_PERFORMANCE, + HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, + HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE, +) + + +class PerformanceClientModule(PerformanceClient, Module): + def __init__(self, hosts: list[str]): + + super(PerformanceClient, self).__init__( + hosts, + HTTP_REQUEST_TIMEOUT_PERFORMANCE, + HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, + HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE, + ) + super(Module, self).__init__() diff --git a/src/web3py/types.py b/src/web3py/types.py index 7f206cd46..d389911bc 100644 --- a/src/web3py/types.py +++ b/src/web3py/types.py @@ -1,6 +1,7 @@ from web3 import Web3 as _Web3 from src.providers.ipfs import IPFSProvider +from src.providers.performance.client import PerformanceClient from src.web3py.extensions import ( CSM, ConsensusClientModule, @@ -19,3 +20,4 @@ class Web3(_Web3): kac: KeysAPIClientModule csm: CSM ipfs: IPFSProvider + performance: PerformanceClient diff --git a/tests/fork/conftest.py b/tests/fork/conftest.py index 350bf0171..6d0b10016 100644 --- a/tests/fork/conftest.py +++ b/tests/fork/conftest.py @@ -35,7 +35,9 @@ HTTP_REQUEST_TIMEOUT_CONSENSUS, ) from src.web3py.contract_tweak import tweak_w3_contracts -from src.web3py.extensions import KeysAPIClientModule, LazyCSM, LidoContracts, LidoValidatorsProvider, TransactionUtils +from src.web3py.extensions import KeysAPIClientModule, LazyCSM, LidoContracts, LidoValidatorsProvider, TransactionUtils, \ + FallbackProviderModule +from src.web3py.extensions.performance import PerformanceClientModule logger = logging.getLogger('fork_tests') @@ -173,6 +175,15 @@ def real_cl_client(): ) +@pytest.fixture +def real_el_client(): + return FallbackProviderModule( + variables.EXECUTION_CLIENT_URI, + request_kwargs={'timeout': variables.HTTP_REQUEST_TIMEOUT_EXECUTION}, + cache_allowed_requests=True, + 
) + + @pytest.fixture def real_finalized_slot(real_cl_client: ConsensusClient) -> SlotNumber: finalized_slot = real_cl_client.get_block_header('finalized').data.header.message.slot @@ -268,6 +279,7 @@ def forked_el_client(blockstamp_for_forking: BlockStamp, testrun_path: str, anvi @pytest.fixture() def web3(forked_el_client, patched_cl_client, mocked_ipfs_client): kac = KeysAPIClientModule(variables.KEYS_API_URI, forked_el_client) + performance = PerformanceClientModule(variables.PERFORMANCE_COLLECTOR_URI) forked_el_client.attach_modules( { 'lido_contracts': LidoContracts, @@ -277,6 +289,7 @@ def web3(forked_el_client, patched_cl_client, mocked_ipfs_client): 'cc': lambda: patched_cl_client, # type: ignore[dict-item] 'kac': lambda: kac, # type: ignore[dict-item] "ipfs": lambda: mocked_ipfs_client, + 'performance': lambda: performance } ) yield forked_el_client diff --git a/tests/fork/test_csm_oracle_cycle.py b/tests/fork/test_csm_oracle_cycle.py index 82a25d1eb..dcc36d5c3 100644 --- a/tests/fork/test_csm_oracle_cycle.py +++ b/tests/fork/test_csm_oracle_cycle.py @@ -1,15 +1,12 @@ -import os -import subprocess -from pathlib import Path - import pytest +from src import variables from src.modules.csm.csm import CSOracle +from src.modules.performance_collector.performance_collector import PerformanceCollector from src.modules.submodules.types import FrameConfig from src.utils.range import sequence from src.web3py.types import Web3 -from tests.fork.conftest import first_slot_of_epoch, logger -from tests.fork.utils.lock import LockedDir +from tests.fork.conftest import first_slot_of_epoch @pytest.fixture() @@ -18,89 +15,16 @@ def hash_consensus_bin(): yield f.read() -@pytest.fixture(scope='session') -def csm_repo_path(testruns_folder_path): - return Path(testruns_folder_path) / "community-staking-module" - - -@pytest.fixture(scope='session') -def prepared_csm_repo(testruns_folder_path, csm_repo_path): - - if os.environ.get("GITHUB_ACTIONS") == "true": - # CI should have the repo cloned and prepared - if os.path.exists(csm_repo_path): - return csm_repo_path - raise ValueError("No cloned community-staking-module repo found, but running in CI. 
Fix the workflow.") - - original_dir = os.getcwd() - - with LockedDir(testruns_folder_path): - if not os.path.exists(csm_repo_path / ".prepared"): - logger.info("TESTRUN Cloning community-staking-module repo") - subprocess.run( - ["git", "clone", "https://github.com/lidofinance/community-staking-module", csm_repo_path], check=True - ) - os.chdir(csm_repo_path) - subprocess.run(["git", "checkout", "develop"], check=True) - subprocess.run(["just", "deps"], check=True) - subprocess.run(["just", "build"], check=True) - subprocess.run(["touch", ".prepared"], check=True) - os.chdir(original_dir) - - return csm_repo_path - - @pytest.fixture() -def update_csm_to_v2(accounts_from_fork, forked_el_client: Web3, anvil_port: int, prepared_csm_repo: Path): - original_dir = os.getcwd() - - chain = 'mainnet' - - logger.info("TESTRUN Deploying CSM v2") - _, pks = accounts_from_fork - deployer, *_ = pks - - os.chdir(prepared_csm_repo) - - with subprocess.Popen( - ['just', '_deploy-impl', '--broadcast', '--private-key', deployer], - env={ - **os.environ, - "RPC_URL": f"http://localhost:{str(anvil_port)}", - 'CHAIN': chain, - }, - stdout=subprocess.DEVNULL, - stderr=subprocess.STDOUT, - ) as process: - process.wait() - assert process.returncode == 0, "Failed to deploy CSM v2" - logger.info("TESTRUN Deployed CSM v2") - - logger.info("TESTRUN Updating to CSM v2") - with subprocess.Popen( - ['just', "vote-upgrade"], - env={ - **os.environ, - 'CHAIN': chain, - "ANVIL_PORT": str(anvil_port), - "RPC_URL": f"http://127.0.0.1:{anvil_port}", - 'DEPLOY_CONFIG': f'./artifacts/local/upgrade-{chain}.json', - }, - stdout=subprocess.DEVNULL, - stderr=subprocess.STDOUT, - ) as process: - process.wait() - assert process.returncode == 0, "Failed to update to CSM v2" - logger.info("TESTRUN Updated to CSM v2") - - os.chdir(original_dir) - # TODO: update ABIs in `assets` folder? 
- forked_el_client.provider.make_request("anvil_autoImpersonateAccount", [True]) +def csm_module(web3: Web3): + yield CSOracle(web3) @pytest.fixture() -def csm_module(web3: Web3, update_csm_to_v2): - yield CSOracle(web3) +def performance_collector(web3: Web3, frame_config: FrameConfig): + variables.PERFORMANCE_COLLECTOR_SERVER_START_EPOCH = frame_config.initial_epoch - frame_config.epochs_per_frame + variables.PERFORMANCE_COLLECTOR_SERVER_END_EPOCH = frame_config.initial_epoch + yield PerformanceCollector(web3) @pytest.fixture @@ -135,7 +59,7 @@ def missed_initial_frame(frame_config: FrameConfig): [start_before_initial_epoch, start_after_initial_epoch, missed_initial_frame], indirect=True, ) -def test_csm_module_report(module, set_oracle_members, running_finalized_slots, account_from): +def test_csm_module_report(performance_collector, module, set_oracle_members, running_finalized_slots, account_from): assert module.report_contract.get_last_processing_ref_slot() == 0, "Last processing ref slot should be 0" members = set_oracle_members(count=2) @@ -145,6 +69,7 @@ def test_csm_module_report(module, set_oracle_members, running_finalized_slots, switch_finalized, _ = running_finalized_slots # pylint:disable=duplicate-code while switch_finalized(): + performance_collector.cycle_handler() for _, private_key in members: # NOTE: reporters using the same cache with account_from(private_key): @@ -152,6 +77,8 @@ def test_csm_module_report(module, set_oracle_members, running_finalized_slots, report_frame = module.get_initial_or_current_frame( module._receive_last_finalized_slot() # pylint: disable=protected-access ) + # NOTE: Patch the var to bypass `FrameCheckpointsIterator.MIN_CHECKPOINT_STEP` + variables.PERFORMANCE_COLLECTOR_SERVER_END_EPOCH = report_frame.ref_slot // 32 last_processing_after_report = module.w3.csm.oracle.get_last_processing_ref_slot() assert ( diff --git a/tests/modules/csm/test_csm_module.py b/tests/modules/csm/test_csm_module.py index 19ea5717c..c4760afde 100644 --- a/tests/modules/csm/test_csm_module.py +++ b/tests/modules/csm/test_csm_module.py @@ -101,17 +101,18 @@ class FrameTestParam: ), id="holesky_testnet", ), - pytest.param( - FrameTestParam( - epochs_per_frame=32, - initial_ref_slot=last_slot_of_epoch(100), - last_processing_ref_slot=0, - current_ref_slot=0, - finalized_slot=0, - expected_frame=(69, 100), - ), - id="not_yet_reached_initial_epoch", - ), + # NOTE: Impossible case in current processing + # pytest.param( + # FrameTestParam( + # epochs_per_frame=32, + # initial_ref_slot=last_slot_of_epoch(100), + # last_processing_ref_slot=0, + # current_ref_slot=0, + # finalized_slot=0, + # expected_frame=(69, 100), + # ), + # id="not_yet_reached_initial_epoch", + # ), pytest.param( FrameTestParam( epochs_per_frame=32, @@ -188,11 +189,14 @@ def test_current_frame_range(module: CSOracle, mock_chain_config: NoReturn, para ) module.get_initial_ref_slot = Mock(return_value=param.initial_ref_slot) + ref_epoch = slot_to_epoch(param.current_ref_slot) if param.expected_frame is ValueError: with pytest.raises(ValueError): - module.get_epochs_range_to_process(ReferenceBlockStampFactory.build(slot_number=param.finalized_slot)) + module.get_epochs_range_to_process( + ReferenceBlockStampFactory.build(slot_number=param.current_ref_slot, ref_epoch=ref_epoch) + ) else: - bs = ReferenceBlockStampFactory.build(slot_number=param.finalized_slot) + bs = ReferenceBlockStampFactory.build(slot_number=param.current_ref_slot, ref_epoch=ref_epoch) l_epoch, r_epoch = 
module.get_epochs_range_to_process(bs) assert (l_epoch, r_epoch) == param.expected_frame @@ -588,6 +592,7 @@ def test_build_report(module: CSOracle, param: BuildReportTestParam): @pytest.mark.unit def test_execute_module_not_collected(module: CSOracle): module._check_compatability = Mock(return_value=True) + module.get_blockstamp_for_report = Mock(return_value=Mock(slot_number=100500)) module.collect_data = Mock(return_value=False) execute_delay = module.execute_module( diff --git a/tests/modules/csm/test_checkpoint.py b/tests/modules/performance_collector/test_checkpoint.py similarity index 94% rename from tests/modules/csm/test_checkpoint.py rename to tests/modules/performance_collector/test_checkpoint.py index 52b99aaf5..c4ac4c8df 100644 --- a/tests/modules/csm/test_checkpoint.py +++ b/tests/modules/performance_collector/test_checkpoint.py @@ -4,9 +4,9 @@ import pytest -import src.modules.csm.checkpoint as checkpoint_module +import src.modules.performance_collector.checkpoint as checkpoint_module from src.constants import EPOCHS_PER_SYNC_COMMITTEE_PERIOD -from src.modules.csm.checkpoint import ( +from src.modules.performance_collector.checkpoint import ( FrameCheckpoint, FrameCheckpointProcessor, FrameCheckpointsIterator, @@ -17,7 +17,7 @@ ValidatorDuty, process_attestations, ) -from src.modules.csm.state import State +from src.modules.performance_collector.db import DutiesDB from src.modules.submodules.types import ChainConfig, FrameConfig from src.providers.consensus.client import ConsensusClient from src.providers.consensus.types import BeaconSpecResponse, BlockAttestation, SlotAttestationCommittee, SyncCommittee @@ -34,8 +34,8 @@ @pytest.fixture(autouse=True) -def no_commit(monkeypatch): - monkeypatch.setattr(State, "commit", Mock()) +def no_db_write(monkeypatch): + monkeypatch.setattr(DutiesDB, "store_epoch", Mock()) @pytest.fixture @@ -61,7 +61,7 @@ def converter(frame_config: FrameConfig, chain_config: ChainConfig) -> Web3Conve @pytest.fixture def sync_committees_cache(): - with patch('src.modules.csm.checkpoint.SYNC_COMMITTEES_CACHE', SyncCommitteesCache()) as cache: + with patch('src.modules.performance_collector.checkpoint.SYNC_COMMITTEES_CACHE', SyncCommitteesCache()) as cache: yield cache @@ -371,15 +371,13 @@ def test_check_duties_processes_epoch_with_attestations_and_sync_committee(frame sync_aggregate.sync_committee_bits = "0xff" frame_checkpoint_processor.cc.get_block_attestations_and_sync = Mock(return_value=([attestation], sync_aggregate)) - frame_checkpoint_processor.state.unprocessed_epochs = [duty_epoch] + frame_checkpoint_processor.db.has_epoch = lambda: False frame_checkpoint_processor._check_duties( checkpoint_block_roots, checkpoint_slot, duty_epoch, duty_epoch_roots, next_epoch_roots ) - frame_checkpoint_processor.state.save_att_duty.assert_called() - frame_checkpoint_processor.state.save_sync_duty.assert_called() - frame_checkpoint_processor.state.save_prop_duty.assert_called() + frame_checkpoint_processor.db.store_epoch_from_duties.assert_called() @pytest.mark.unit @@ -401,15 +399,13 @@ def test_check_duties_processes_epoch_with_no_attestations(frame_checkpoint_proc sync_aggregate.sync_committee_bits = "0x00" frame_checkpoint_processor.cc.get_block_attestations_and_sync = Mock(return_value=([], sync_aggregate)) - frame_checkpoint_processor.state.unprocessed_epochs = [duty_epoch] + frame_checkpoint_processor.db.has_epoch = lambda: False frame_checkpoint_processor._check_duties( checkpoint_block_roots, checkpoint_slot, duty_epoch, duty_epoch_roots, 
next_epoch_roots ) - assert frame_checkpoint_processor.state.save_att_duty.call_count == 0 - assert frame_checkpoint_processor.state.save_sync_duty.call_count == 2 - assert frame_checkpoint_processor.state.save_prop_duty.call_count == 2 + frame_checkpoint_processor.db.store_epoch_from_duties.assert_called() @pytest.mark.unit @@ -484,7 +480,7 @@ def test_get_sync_committee_fetches_and_caches_when_not_cached( prev_slot_response = Mock() prev_slot_response.message.slot = SlotNumber(0) prev_slot_response.message.body.execution_payload.block_hash = "0x00" - with patch('src.modules.csm.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): + with patch('src.modules.performance_collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): result = frame_checkpoint_processor._get_sync_committee(epoch) assert result.validators == sync_committee.validators @@ -508,7 +504,7 @@ def test_get_sync_committee_handles_cache_eviction( prev_slot_response = Mock() prev_slot_response.message.slot = SlotNumber(0) prev_slot_response.message.body.execution_payload.block_hash = "0x00" - with patch('src.modules.csm.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): + with patch('src.modules.performance_collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): result = frame_checkpoint_processor._get_sync_committee(epoch) assert result == sync_committee @@ -564,7 +560,7 @@ def test_get_dependent_root_for_proposer_duties_from_cl_when_slot_out_of_range(f prev_slot_response = Mock() prev_slot_response.message.slot = non_missed_slot - with patch('src.modules.csm.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): + with patch('src.modules.performance_collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): frame_checkpoint_processor.cc.get_block_root = Mock(return_value=Mock(root=checkpoint_block_roots[0])) dependent_root = frame_checkpoint_processor._get_dependent_root_for_proposer_duties( diff --git a/tests/modules/performance_collector/test_codec.py b/tests/modules/performance_collector/test_codec.py new file mode 100644 index 000000000..c58cb59b2 --- /dev/null +++ b/tests/modules/performance_collector/test_codec.py @@ -0,0 +1,236 @@ +import pytest + +from src.modules.performance_collector.codec import ( + ProposalDuty, + ProposalDutiesCodec, + SyncDuty, + SyncDutiesCodec, + AttDutiesMissCodec, + EpochBlobCodec, +) + + +def _proposals_to_tuples(items: list[ProposalDuty]) -> list[tuple[int, int]]: + return [(int(i.validator_index), int(bool(i.is_proposed))) for i in items] + + +def _syncs_to_tuples(items: list[SyncDuty]) -> list[tuple[int, int]]: + return [(int(i.validator_index), int(i.missed_count)) for i in items] + + +PROPOSALS_EXAMPLE: list[ProposalDuty] = [ + ProposalDuty(validator_index=1001, is_proposed=True), + ProposalDuty(validator_index=1023, is_proposed=False), + ProposalDuty(validator_index=1098, is_proposed=True), + ProposalDuty(validator_index=1110, is_proposed=True), + ProposalDuty(validator_index=1177, is_proposed=False), + ProposalDuty(validator_index=1205, is_proposed=True), + ProposalDuty(validator_index=1266, is_proposed=False), + ProposalDuty(validator_index=1314, is_proposed=True), + ProposalDuty(validator_index=1333, is_proposed=False), + ProposalDuty(validator_index=1402, is_proposed=True), + ProposalDuty(validator_index=1444, is_proposed=True), + ProposalDuty(validator_index=1509, is_proposed=False), + 
ProposalDuty(validator_index=1531, is_proposed=True), + ProposalDuty(validator_index=1600, is_proposed=False), + ProposalDuty(validator_index=1625, is_proposed=True), + ProposalDuty(validator_index=1702, is_proposed=True), + ProposalDuty(validator_index=1737, is_proposed=False), + ProposalDuty(validator_index=1801, is_proposed=True), + ProposalDuty(validator_index=1822, is_proposed=False), + ProposalDuty(validator_index=1905, is_proposed=True), + ProposalDuty(validator_index=1950, is_proposed=False), + ProposalDuty(validator_index=2007, is_proposed=True), + ProposalDuty(validator_index=2059, is_proposed=True), + ProposalDuty(validator_index=2103, is_proposed=False), + ProposalDuty(validator_index=2166, is_proposed=True), + ProposalDuty(validator_index=2201, is_proposed=False), + ProposalDuty(validator_index=2255, is_proposed=True), + ProposalDuty(validator_index=2311, is_proposed=False), + ProposalDuty(validator_index=2399, is_proposed=True), + ProposalDuty(validator_index=2420, is_proposed=False), + ProposalDuty(validator_index=2504, is_proposed=True), + ProposalDuty(validator_index=2570, is_proposed=False), +] + + +SYNCS_EXAMPLE: list[SyncDuty] = [ + SyncDuty(validator_index=8000, missed_count=0), + SyncDuty(validator_index=8001, missed_count=1), + SyncDuty(validator_index=8002, missed_count=2), + SyncDuty(validator_index=8003, missed_count=3), + SyncDuty(validator_index=8004, missed_count=4), + SyncDuty(validator_index=8005, missed_count=5), + SyncDuty(validator_index=8006, missed_count=6), + SyncDuty(validator_index=8007, missed_count=7), + SyncDuty(validator_index=8008, missed_count=8), + SyncDuty(validator_index=8009, missed_count=9), + SyncDuty(validator_index=8010, missed_count=10), + SyncDuty(validator_index=8011, missed_count=11), + SyncDuty(validator_index=8012, missed_count=12), + SyncDuty(validator_index=8013, missed_count=13), + SyncDuty(validator_index=8014, missed_count=14), + SyncDuty(validator_index=8015, missed_count=15), + SyncDuty(validator_index=8016, missed_count=16), + SyncDuty(validator_index=8017, missed_count=17), + SyncDuty(validator_index=8018, missed_count=18), + SyncDuty(validator_index=8019, missed_count=19), + SyncDuty(validator_index=8020, missed_count=20), + SyncDuty(validator_index=8021, missed_count=21), + SyncDuty(validator_index=8022, missed_count=22), + SyncDuty(validator_index=8023, missed_count=23), + SyncDuty(validator_index=8024, missed_count=24), + SyncDuty(validator_index=8025, missed_count=25), + SyncDuty(validator_index=8026, missed_count=26), + SyncDuty(validator_index=8027, missed_count=27), + SyncDuty(validator_index=8028, missed_count=28), + SyncDuty(validator_index=8029, missed_count=29), + SyncDuty(validator_index=8030, missed_count=30), + SyncDuty(validator_index=8031, missed_count=31), + SyncDuty(validator_index=8032, missed_count=32), + SyncDuty(validator_index=8033, missed_count=0), + SyncDuty(validator_index=8034, missed_count=2), + SyncDuty(validator_index=8035, missed_count=4), + SyncDuty(validator_index=8036, missed_count=6), + SyncDuty(validator_index=8037, missed_count=8), + SyncDuty(validator_index=8038, missed_count=10), + SyncDuty(validator_index=8039, missed_count=12), + SyncDuty(validator_index=8040, missed_count=14), + SyncDuty(validator_index=8041, missed_count=16), + SyncDuty(validator_index=8042, missed_count=18), + SyncDuty(validator_index=8043, missed_count=20), + SyncDuty(validator_index=8044, missed_count=22), + SyncDuty(validator_index=8045, missed_count=24), + SyncDuty(validator_index=8046, missed_count=26), + 
SyncDuty(validator_index=8047, missed_count=28), + SyncDuty(validator_index=8048, missed_count=30), + SyncDuty(validator_index=8049, missed_count=32), + SyncDuty(validator_index=8050, missed_count=1), + SyncDuty(validator_index=8051, missed_count=3), + SyncDuty(validator_index=8052, missed_count=5), + SyncDuty(validator_index=8053, missed_count=7), + SyncDuty(validator_index=8054, missed_count=9), + SyncDuty(validator_index=8055, missed_count=11), + SyncDuty(validator_index=8056, missed_count=13), + SyncDuty(validator_index=8057, missed_count=15), + SyncDuty(validator_index=8058, missed_count=17), + SyncDuty(validator_index=8059, missed_count=19), + SyncDuty(validator_index=8060, missed_count=21), + SyncDuty(validator_index=8061, missed_count=23), + SyncDuty(validator_index=8062, missed_count=25), + SyncDuty(validator_index=8063, missed_count=27), +] + + +ATT_MISSES_EXAMPLE: set[int] = { + 10, 17, 21, 28, 35, 41, 43, 49, 57, 60, + 66, 72, 75, 81, 86, 90, 97, 101, 108, 112, + 119, 123, 127, 130, 137, 141, 149, 152, 159, 162, + 170, 173, 177, 182, 189, 193, 197, 201, 206, 210, + 215, 219, 223, 228, 234, 239, 241, 246, 251, 257, + 260, 266, 270, 274, 279, 283, 288, 292, 297, 301, + 305, 309, 314, 318, 323, 327, 330, 336, 340, 345, +} + + +@pytest.mark.unit +def test_proposal_duties_codec_roundtrip(): + src = PROPOSALS_EXAMPLE + + blob = ProposalDutiesCodec.encode(src) + dst = ProposalDutiesCodec.decode(blob) + + # The codec sorts on encode; compare as sorted tuples + assert sorted(_proposals_to_tuples(dst)) == sorted(_proposals_to_tuples(src)) + + +@pytest.mark.unit +def test_proposal_duties_codec_empty(): + with pytest.raises(ValueError): + ProposalDutiesCodec.decode(ProposalDutiesCodec.encode([])) + + +@pytest.mark.unit +def test_sync_miss_duties_codec_roundtrip(): + src = SYNCS_EXAMPLE + + blob = SyncDutiesCodec.encode(src) + dst = SyncDutiesCodec.decode(blob) + + assert sorted(_syncs_to_tuples(dst)) == sorted(_syncs_to_tuples(src)) + + +@pytest.mark.unit +def test_sync_miss_duties_codec_empty(): + with pytest.raises(ValueError): + SyncDutiesCodec.decode(SyncDutiesCodec.encode([])) + + +@pytest.mark.unit +def test_sync_miss_duties_codec_out_of_range(): + with pytest.raises(ValueError): + SyncDutiesCodec.encode([SyncDuty(validator_index=1, missed_count=33)]) + + +@pytest.mark.unit +def test_att_duties_miss_codec_roundtrip(): + src = ATT_MISSES_EXAMPLE + blob = AttDutiesMissCodec.encode(src) + dst = AttDutiesMissCodec.decode(blob) + assert set(dst) == set(src) + + +@pytest.mark.unit +def test_att_duties_miss_codec_empty(): + AttDutiesMissCodec.decode(AttDutiesMissCodec.encode(set())) + + +@pytest.mark.unit +def test_epoch_blob_codec_roundtrip(): + att_misses = ATT_MISSES_EXAMPLE + proposals = PROPOSALS_EXAMPLE + syncs = SYNCS_EXAMPLE + + blob = EpochBlobCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) + att_decoded, proposals_decoded, syncs_decoded = EpochBlobCodec.decode(blob) + + # att_decoded may be a set (non-empty) or BitMap; normalize to set + from pyroaring import BitMap # type: ignore + if isinstance(att_decoded, BitMap): + att_decoded = set(att_decoded) # type: ignore + + assert set(att_decoded) == set(att_misses) + assert sorted(_proposals_to_tuples(proposals_decoded)) == sorted(_proposals_to_tuples(proposals)) + assert sorted(_syncs_to_tuples(syncs_decoded)) == sorted(_syncs_to_tuples(syncs)) + + +@pytest.mark.unit +def test_epoch_blob_codec_bad_version(): + att_misses = set() + proposals = PROPOSALS_EXAMPLE + syncs = SYNCS_EXAMPLE + + blob = 
EpochBlobCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) + + bad = bytes([255]) + blob[1:] + with pytest.raises(ValueError): + EpochBlobCodec.decode(bad) + + +@pytest.mark.unit +def test_epoch_blob_codec_short_header(): + with pytest.raises(ValueError): + EpochBlobCodec.decode(b"\x01\x00") + + +@pytest.mark.unit +def test_epoch_blob_codec_truncated_payload(): + att_misses = set() + proposals = PROPOSALS_EXAMPLE + syncs = SYNCS_EXAMPLE + + blob = EpochBlobCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) + bad_blob = blob[:-1] + + with pytest.raises(ValueError): + EpochBlobCodec.decode(bad_blob) diff --git a/tests/modules/csm/test_processing_attestation.py b/tests/modules/performance_collector/test_processing_attestation.py similarity index 98% rename from tests/modules/csm/test_processing_attestation.py rename to tests/modules/performance_collector/test_processing_attestation.py index 80eb036e1..9e29c2d1a 100644 --- a/tests/modules/csm/test_processing_attestation.py +++ b/tests/modules/performance_collector/test_processing_attestation.py @@ -3,7 +3,7 @@ import pytest -from src.modules.csm.checkpoint import ( +from src.modules.performance_collector.checkpoint import ( get_committee_indices, hex_bitlist_to_list, hex_bitvector_to_list, From 453effb0771a83440c026bf3c499aa607dc063b2 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 21 Oct 2025 14:24:39 +0200 Subject: [PATCH 02/35] fix: lock --- poetry.lock | 247 +++++++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 223 insertions(+), 24 deletions(-) diff --git a/poetry.lock b/poetry.lock index d9496569f..f0d5df6c1 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. 
[[package]] name = "aiohappyeyeballs" @@ -118,7 +118,7 @@ propcache = ">=0.2.0" yarl = ">=1.17.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns (>=3.3.0)", "brotlicffi"] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.3.0)", "brotlicffi ; platform_python_implementation != \"CPython\""] [[package]] name = "aiosignal" @@ -188,12 +188,12 @@ files = [ ] [package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] [[package]] name = "base58" @@ -418,6 +418,18 @@ d = ["aiohttp (>=3.10)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] +[[package]] +name = "blinker" +version = "1.9.0" +description = "Fast, simple object-to-object and broadcast signaling" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc"}, + {file = "blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf"}, +] + [[package]] name = "certifi" version = "2025.4.26" @@ -660,7 +672,7 @@ version = "8.2.1" description = 
"Composable command line interface toolkit" optional = false python-versions = ">=3.10" -groups = ["dev"] +groups = ["main", "dev"] files = [ {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, @@ -676,11 +688,11 @@ description = "Cross-platform colored terminal text." optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" groups = ["main", "dev"] +markers = "sys_platform == \"win32\" or platform_system == \"Windows\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {main = "sys_platform == \"win32\"", dev = "sys_platform == \"win32\" or platform_system == \"Windows\""} [[package]] name = "conventional-pre-commit" @@ -775,7 +787,7 @@ files = [ ] [package.extras] -toml = ["tomli"] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "cytoolz" @@ -1022,7 +1034,7 @@ pycryptodome = {version = ">=3.6.6,<4", optional = true, markers = "extra == \"p dev = ["build (>=0.9.0)", "bump_my_version (>=0.19.0)", "ipython", "mypy (==1.10.0)", "pre-commit (>=3.4.0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.4.0)", "sphinx (>=6.0.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx_rtd_theme (>=1.0.0)", "towncrier (>=24,<25)", "tox (>=4.0.0)", "twine", "wheel"] docs = ["sphinx (>=6.0.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx_rtd_theme (>=1.0.0)", "towncrier (>=24,<25)"] pycryptodome = ["pycryptodome (>=3.6.6,<4)"] -pysha3 = ["pysha3 (>=1.0.0,<2.0.0)", "safe-pysha3 (>=1.0.0)"] +pysha3 = ["pysha3 (>=1.0.0,<2.0.0) ; python_version < \"3.9\"", "safe-pysha3 (>=1.0.0) ; python_version >= \"3.9\""] test = ["pytest (>=7.0.0)", "pytest-xdist (>=2.4.0)"] [[package]] @@ -1112,10 +1124,10 @@ rlp = ">=3.0.0" semantic_version = ">=2.6.0" [package.extras] -dev = ["build (>=0.9.0)", "bump_my_version (>=0.19.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pysha3] (>=0.1.4,<1.0.0)", "ipython", "pre-commit (>=3.4.0)", "py-evm (>=0.10.0b0,<0.11.0b0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.0.0,<3)", "towncrier (>=24,<25)", "tox (>=4.0.0)", "twine", "wheel"] +dev = ["build (>=0.9.0)", "bump_my_version (>=0.19.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0) ; implementation_name == \"pypy\"", "eth-hash[pysha3] (>=0.1.4,<1.0.0) ; implementation_name == \"cpython\"", "ipython", "pre-commit (>=3.4.0)", "py-evm (>=0.10.0b0,<0.11.0b0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.0.0,<3)", "towncrier (>=24,<25)", "tox (>=4.0.0)", "twine", "wheel"] docs = ["towncrier (>=24,<25)"] -py-evm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pysha3] (>=0.1.4,<1.0.0)", "py-evm (>=0.10.0b0,<0.11.0b0)"] -pyevm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pysha3] (>=0.1.4,<1.0.0)", "py-evm (>=0.10.0b0,<0.11.0b0)"] +py-evm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0) ; implementation_name == \"pypy\"", "eth-hash[pysha3] (>=0.1.4,<1.0.0) ; implementation_name == \"cpython\"", "py-evm (>=0.10.0b0,<0.11.0b0)"] +pyevm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0) ; implementation_name == \"pypy\"", "eth-hash[pysha3] (>=0.1.4,<1.0.0) ; implementation_name == \"cpython\"", "py-evm 
(>=0.10.0b0,<0.11.0b0)"] test = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.0.0,<3)"] [[package]] @@ -1190,7 +1202,7 @@ files = [ ] [package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] [[package]] name = "faker" @@ -1222,7 +1234,31 @@ files = [ [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] -typing = ["typing-extensions (>=4.12.2)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] + +[[package]] +name = "flask" +version = "3.1.2" +description = "A simple framework for building complex web applications." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c"}, + {file = "flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87"}, +] + +[package.dependencies] +blinker = ">=1.9.0" +click = ">=8.1.3" +itsdangerous = ">=2.2.0" +jinja2 = ">=3.1.2" +markupsafe = ">=2.1.1" +werkzeug = ">=3.1.0" + +[package.extras] +async = ["asgiref (>=3.2)"] +dotenv = ["python-dotenv"] [[package]] name = "frozenlist" @@ -1372,7 +1408,7 @@ attrs = ">=22.2.0" sortedcontainers = ">=2.1.0,<3.0.0" [package.extras] -all = ["black (>=19.10b0)", "click (>=7.0)", "crosshair-tool (>=0.0.88)", "django (>=4.2)", "dpcontracts (>=0.4)", "hypothesis-crosshair (>=0.0.23)", "lark (>=0.10.1)", "libcst (>=0.3.16)", "numpy (>=1.19.3)", "pandas (>=1.1)", "pytest (>=4.6)", "python-dateutil (>=1.4)", "pytz (>=2014.1)", "redis (>=3.0.0)", "rich (>=9.0.0)", "tzdata (>=2025.2)", "watchdog (>=4.0.0)"] +all = ["black (>=19.10b0)", "click (>=7.0)", "crosshair-tool (>=0.0.88)", "django (>=4.2)", "dpcontracts (>=0.4)", "hypothesis-crosshair (>=0.0.23)", "lark (>=0.10.1)", "libcst (>=0.3.16)", "numpy (>=1.19.3)", "pandas (>=1.1)", "pytest (>=4.6)", "python-dateutil (>=1.4)", "pytz (>=2014.1)", "redis (>=3.0.0)", "rich (>=9.0.0)", "tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == \"emscripten\"", "watchdog (>=4.0.0)"] cli = ["black (>=19.10b0)", "click (>=7.0)", "rich (>=9.0.0)"] codemods = ["libcst (>=0.3.16)"] crosshair = ["crosshair-tool (>=0.0.88)", "hypothesis-crosshair (>=0.0.23)"] @@ -1387,7 +1423,7 @@ pytest = ["pytest (>=4.6)"] pytz = ["pytz (>=2014.1)"] redis = ["redis (>=3.0.0)"] watchdog = ["watchdog (>=4.0.0)"] -zoneinfo = ["tzdata (>=2025.2)"] +zoneinfo = ["tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == \"emscripten\""] [[package]] name = "identify" @@ -1510,6 +1546,18 @@ files = [ colors = ["colorama"] plugins = ["setuptools"] +[[package]] +name = "itsdangerous" +version = "2.2.0" +description = "Safely pass data to untrusted environments and back." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"}, + {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"}, +] + [[package]] name = "jedi" version = "0.19.2" @@ -1530,6 +1578,24 @@ docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alab qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] +[[package]] +name = "jinja2" +version = "3.1.6" +description = "A very fast and expressive template engine." +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, + {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + [[package]] name = "json-stream" version = "2.3.3" @@ -1644,6 +1710,105 @@ files = [ benchmark = ["contexttimer (>=0.3,<0.4)", "json-stream-to-standard-types (>=0.1,<0.2)", "si-prefix (>=1.2,<2)", "tqdm (>=4.64,<5)", "typer (>=0.6,<0.7)"] test = ["json-stream (==2.3.2)", "json-stream-rs-tokenizer[benchmark]", "pytest (>7.1,<8)"] +[[package]] +name = "markupsafe" +version = "3.0.3" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"}, + {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"}, + {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"}, + {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"}, + {file = "markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"}, + {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"}, + {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"}, + {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"}, + {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"}, + {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"}, + {file = "markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"}, + {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"}, + {file = "markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"}, + {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"}, + {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"}, + {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"}, + {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"}, + {file = 
"markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"}, + {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"}, + {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"}, + {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"}, + {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"}, + {file = "markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"}, + {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"}, + {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"}, + {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"}, + {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"}, + {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"}, + {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"}, + {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"}, + {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"}, + {file = "markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"}, + {file = "markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"}, + {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"}, + {file = "markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"}, + {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"}, + {file = 
"markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"}, + {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"}, + {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"}, + {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"}, + {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"}, + {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, +] + [[package]] name = "matplotlib-inline" version = "0.1.7" @@ -2372,7 +2537,7 @@ typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ -2833,7 +2998,7 @@ requests = ">=2.30.0,<3.0" urllib3 = ">=1.25.10,<3.0" [package.extras] -tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli", "tomli-w", "types-PyYAML", "types-requests"] +tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli ; python_version < \"3.11\"", "tomli-w", "types-PyYAML", "types-requests"] [[package]] name = "rlp" @@ -2869,7 +3034,7 @@ files = [ ] [package.extras] -dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"] +dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1) ; python_version == \"3.4\"", "coverage", "flake8", "nose2", "readme-renderer (<25.0) ; python_version == \"3.4\"", "tox", "wheel", "zest.releaser[recommended]"] doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] @@ -3086,7 +3251,7 @@ files = [ ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] @@ -3121,7 +3286,23 @@ platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", 
"pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] + +[[package]] +name = "waitress" +version = "3.0.2" +description = "Waitress WSGI server" +optional = false +python-versions = ">=3.9.0" +groups = ["main"] +files = [ + {file = "waitress-3.0.2-py3-none-any.whl", hash = "sha256:c56d67fd6e87c2ee598b76abdd4e96cfad1f24cacdea5078d382b1f9d7b5ed2e"}, + {file = "waitress-3.0.2.tar.gz", hash = "sha256:682aaaf2af0c44ada4abfb70ded36393f0e307f4ab9456a215ce0020baefc31f"}, +] + +[package.extras] +docs = ["Sphinx (>=1.8.1)", "docutils", "pylons-sphinx-themes (>=1.0.9)"] +testing = ["coverage (>=7.6.0)", "pytest", "pytest-cov"] [[package]] name = "wcwidth" @@ -3267,6 +3448,24 @@ files = [ {file = "websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee"}, ] +[[package]] +name = "werkzeug" +version = "3.1.3" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"}, + {file = "werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog (>=2.3)"] + [[package]] name = "yarl" version = "1.20.0" @@ -3389,4 +3588,4 @@ propcache = ">=0.2.1" [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "096751c6157c4a461a4864096214eaea0b974fec29d241da155d53adf0ad8c8b" +content-hash = "3660c739c071839b795775dd6c6a904b555d6d5b28c315385f9557849bb476ab" From 485fe91aabc82232d2a779a29fc4171d93d79117 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 21 Oct 2025 14:25:44 +0200 Subject: [PATCH 03/35] fix: black --- tests/fork/conftest.py | 4 +- .../performance_collector/test_checkpoint.py | 12 ++- .../performance_collector/test_codec.py | 78 +++++++++++++++++-- 3 files changed, 82 insertions(+), 12 deletions(-) diff --git a/tests/fork/conftest.py b/tests/fork/conftest.py index a4a7e69c8..a6f927a1b 100644 --- a/tests/fork/conftest.py +++ b/tests/fork/conftest.py @@ -42,7 +42,7 @@ LidoContracts, LidoValidatorsProvider, TransactionUtils, - PerformanceClientModule + PerformanceClientModule, ) logger = logging.getLogger('fork_tests') @@ -295,7 +295,7 @@ def web3(forked_el_client, patched_cl_client, mocked_ipfs_client): 'cc': lambda: patched_cl_client, # type: ignore[dict-item] 'kac': lambda: kac, # type: ignore[dict-item] "ipfs": lambda: mocked_ipfs_client, - 'performance': lambda: performance + 'performance': lambda: performance, } ) yield forked_el_client diff --git a/tests/modules/performance_collector/test_checkpoint.py b/tests/modules/performance_collector/test_checkpoint.py index c4ac4c8df..94f02ffe2 
100644 --- a/tests/modules/performance_collector/test_checkpoint.py +++ b/tests/modules/performance_collector/test_checkpoint.py @@ -480,7 +480,9 @@ def test_get_sync_committee_fetches_and_caches_when_not_cached( prev_slot_response = Mock() prev_slot_response.message.slot = SlotNumber(0) prev_slot_response.message.body.execution_payload.block_hash = "0x00" - with patch('src.modules.performance_collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): + with patch( + 'src.modules.performance_collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response) + ): result = frame_checkpoint_processor._get_sync_committee(epoch) assert result.validators == sync_committee.validators @@ -504,7 +506,9 @@ def test_get_sync_committee_handles_cache_eviction( prev_slot_response = Mock() prev_slot_response.message.slot = SlotNumber(0) prev_slot_response.message.body.execution_payload.block_hash = "0x00" - with patch('src.modules.performance_collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): + with patch( + 'src.modules.performance_collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response) + ): result = frame_checkpoint_processor._get_sync_committee(epoch) assert result == sync_committee @@ -560,7 +564,9 @@ def test_get_dependent_root_for_proposer_duties_from_cl_when_slot_out_of_range(f prev_slot_response = Mock() prev_slot_response.message.slot = non_missed_slot - with patch('src.modules.performance_collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): + with patch( + 'src.modules.performance_collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response) + ): frame_checkpoint_processor.cc.get_block_root = Mock(return_value=Mock(root=checkpoint_block_roots[0])) dependent_root = frame_checkpoint_processor._get_dependent_root_for_proposer_duties( diff --git a/tests/modules/performance_collector/test_codec.py b/tests/modules/performance_collector/test_codec.py index c58cb59b2..5ab1c9a20 100644 --- a/tests/modules/performance_collector/test_codec.py +++ b/tests/modules/performance_collector/test_codec.py @@ -123,13 +123,76 @@ def _syncs_to_tuples(items: list[SyncDuty]) -> list[tuple[int, int]]: ATT_MISSES_EXAMPLE: set[int] = { - 10, 17, 21, 28, 35, 41, 43, 49, 57, 60, - 66, 72, 75, 81, 86, 90, 97, 101, 108, 112, - 119, 123, 127, 130, 137, 141, 149, 152, 159, 162, - 170, 173, 177, 182, 189, 193, 197, 201, 206, 210, - 215, 219, 223, 228, 234, 239, 241, 246, 251, 257, - 260, 266, 270, 274, 279, 283, 288, 292, 297, 301, - 305, 309, 314, 318, 323, 327, 330, 336, 340, 345, + 10, + 17, + 21, + 28, + 35, + 41, + 43, + 49, + 57, + 60, + 66, + 72, + 75, + 81, + 86, + 90, + 97, + 101, + 108, + 112, + 119, + 123, + 127, + 130, + 137, + 141, + 149, + 152, + 159, + 162, + 170, + 173, + 177, + 182, + 189, + 193, + 197, + 201, + 206, + 210, + 215, + 219, + 223, + 228, + 234, + 239, + 241, + 246, + 251, + 257, + 260, + 266, + 270, + 274, + 279, + 283, + 288, + 292, + 297, + 301, + 305, + 309, + 314, + 318, + 323, + 327, + 330, + 336, + 340, + 345, } @@ -196,6 +259,7 @@ def test_epoch_blob_codec_roundtrip(): # att_decoded may be a set (non-empty) or BitMap; normalize to set from pyroaring import BitMap # type: ignore + if isinstance(att_decoded, BitMap): att_decoded = set(att_decoded) # type: ignore From 8d86ca74042c20a8721478b185dcb8ee1f40576a Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Wed, 22 Oct 2025 13:05:17 +0200 Subject: [PATCH 04/35] fix: already 
processed epochs

---
 src/modules/csm/csm.py            | 10 ++++++++--
 src/web3py/extensions/__init__.py |  1 +
 2 files changed, 9 insertions(+), 2 deletions(-)

diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py
index ff1294870..f3bb69435 100644
--- a/src/modules/csm/csm.py
+++ b/src/modules/csm/csm.py
@@ -176,9 +176,15 @@ def fulfill_state(self):
 
         for l_epoch, r_epoch in self.state.frames:
             for epoch in sequence(l_epoch, r_epoch):
+                if epoch not in self.state.unprocessed_epochs:
+                    logger.info({"msg": f"Epoch {epoch} is already processed"})
+                    continue
+
                 epoch_data = self.w3.performance.get_epoch(epoch)
                 if epoch_data is None:
-                    raise ValueError(f"Epoch {epoch} is missing in Performance Collector")
+                    logger.warning({"msg": f"Epoch {epoch} is missing in Performance Collector"})
+                    continue
+
                 misses, props, syncs = epoch_data
 
                 for validator in validators:
@@ -187,10 +193,10 @@ def fulfill_state(self):
                     is_active = is_active_validator(validator, EpochNumber(epoch))
                     if not is_active and missed_att:
                         raise ValueError(f"Validator {validator.index} missed attestation in epoch {epoch}, but was not active")
-
                     self.state.save_att_duty(EpochNumber(epoch), validator.index, included=included_att)
 
                 blocks_in_epoch = 0
+
                 for p in props:
                     vid = ValidatorIndex(p.validator_index)
                     self.state.save_prop_duty(EpochNumber(epoch), vid, included=bool(p.is_proposed))
diff --git a/src/web3py/extensions/__init__.py b/src/web3py/extensions/__init__.py
index 80f83141d..01ce1d1a6 100644
--- a/src/web3py/extensions/__init__.py
+++ b/src/web3py/extensions/__init__.py
@@ -6,3 +6,4 @@
 from src.web3py.extensions.fallback import FallbackProviderModule
 from src.web3py.extensions.csm import CSM, LazyCSM
 from src.web3py.extensions.ipfs import IPFS
+from src.web3py.extensions.performance import PerformanceClientModule

From 9e2c9515cd0e58aff6098758d161d35e91ef9022 Mon Sep 17 00:00:00 2001
From: vgorkavenko
Date: Thu, 23 Oct 2025 16:18:46 +0200
Subject: [PATCH 05/35] feat: ChainConverter

---
 src/modules/csm/csm.py                        |  1 +
 .../performance_collector/checkpoint.py       | 10 ++---
 .../performance_collector/http_server.py      | 15 +++++++-
 .../performance_collector.py                  | 32 ++++++++++------
 src/providers/performance/client.py           | 13 +++++--
 src/utils/web3converter.py                    | 38 +++++++++++--------
 6 files changed, 72 insertions(+), 37 deletions(-)

diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py
index f3bb69435..7a9fd2c19 100644
--- a/src/modules/csm/csm.py
+++ b/src/modules/csm/csm.py
@@ -101,6 +101,7 @@ def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool:
             )
             if not is_data_range_available:
                 logger.warning({"msg": f"Performance data range is not available yet for [{l_epoch_};{r_epoch_}] frame"})
+                # TODO: set r_epoch for FrameCheckpointsIterator softly through POST request
                 return False
 
         self.fulfill_state()
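For reference, the availability check wired in above is a single HTTP round-trip to the collector. A minimal standalone sketch of that call follows; the base URL and the "/epochs/check" path are assumptions inferred from the client constants and Flask routes elsewhere in this series, not confirmed values.

    # Hypothetical probe of the Performance Collector HTTP API (sketch only).
    # Assumed: the server listens on PERFORMANCE_COLLECTOR_SERVER_API_PORT and
    # answers GET /epochs/check?from=..&to=.. with a JSON body {"result": bool}.
    import requests

    BASE_URL = "http://localhost:8000"  # assumed port

    def is_range_available(l_epoch: int, r_epoch: int) -> bool:
        resp = requests.get(
            f"{BASE_URL}/epochs/check",
            params={"from": l_epoch, "to": r_epoch},
            timeout=10,
        )
        resp.raise_for_status()
        return bool(resp.json()["result"])

    if __name__ == "__main__":
        # Same check collect_data() performs before calling fulfill_state().
        print(is_range_available(1000, 1031))

The real client in src/providers/performance/client.py wraps the same call with provider error handling and Prometheus metrics via HTTPProvider.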
diff --git a/src/modules/performance_collector/checkpoint.py b/src/modules/performance_collector/checkpoint.py
index 49a80630a..bac9f5222 100644
--- a/src/modules/performance_collector/checkpoint.py
+++ b/src/modules/performance_collector/checkpoint.py
@@ -28,7 +28,7 @@
 from src.utils.slot import get_prev_non_missed_slot
 from src.utils.timeit import timeit
 from src.utils.types import hex_str_to_bytes
-from src.utils.web3converter import Web3Converter
+from src.utils.web3converter import ChainConverter
 
 ZERO_BLOCK_ROOT = HexBytes(ZERO_HASH).to_0x_hex()
 
@@ -49,7 +49,7 @@ class FrameCheckpoint:
 
 
 class FrameCheckpointsIterator:
-    converter: Web3Converter
+    converter: ChainConverter
 
     l_epoch: EpochNumber
     r_epoch: EpochNumber
@@ -71,7 +71,7 @@ class FrameCheckpointsIterator:
     CHECKPOINT_SLOT_DELAY_EPOCHS = 2
 
     def __init__(
-        self, converter: Web3Converter, l_epoch: EpochNumber, r_epoch: EpochNumber, finalized_epoch: EpochNumber
+        self, converter: ChainConverter, l_epoch: EpochNumber, r_epoch: EpochNumber, finalized_epoch: EpochNumber
     ):
         if l_epoch > r_epoch:
             raise ValueError(f"Left border epoch should be less or equal right border epoch: {l_epoch=} > {r_epoch=}")
@@ -131,7 +131,7 @@ def __setitem__(self, sync_committee_period: int, value: SyncCommittee):
 
 class FrameCheckpointProcessor:
     cc: ConsensusClient
-    converter: Web3Converter
+    converter: ChainConverter
     db: DutiesDB
     finalized_blockstamp: BlockStamp
 
@@ -140,7 +140,7 @@ def __init__(
         self,
         cc: ConsensusClient,
         db: DutiesDB,
-        converter: Web3Converter,
+        converter: ChainConverter,
         finalized_blockstamp: BlockStamp,
     ):
         self.cc = cc
diff --git a/src/modules/performance_collector/http_server.py b/src/modules/performance_collector/http_server.py
index 4eb511978..1165d1211 100644
--- a/src/modules/performance_collector/http_server.py
+++ b/src/modules/performance_collector/http_server.py
@@ -78,8 +78,17 @@ def epochs_blob():
         except Exception as e:
             return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500
 
-    @app.get("/epochs/<int:epoch>")
-    def epoch_details(epoch: int):
+    @app.get("/epochs/blob/<int:epoch>")
+    def epoch_blob(epoch: int):
+        try:
+            db = DutiesDB(app.config["DB_PATH"])
+            blob = db.get_epoch_blob(epoch)
+            return jsonify({"result": blob.hex() if blob is not None else None})
+        except Exception as e:
+            return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500
+
+    @app.get("/debug/epochs/<int:epoch>")
+    def debug_epoch_details(epoch: int):
         try:
             db = DutiesDB(app.config["DB_PATH"])
             blob = db.get_epoch_blob(epoch)
@@ -106,6 +115,8 @@ def epoch_details(epoch: int):
         except Exception as e:
             return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500
 
+    # TODO: POST endpoint for setting r_epoch for FrameCheckpointsIterator softly
+
     return app
 
 
diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py
index 64155a8cb..5d31c234c 100644
--- a/src/modules/performance_collector/performance_collector.py
+++ b/src/modules/performance_collector/performance_collector.py
@@ -1,13 +1,17 @@
 import logging
 from typing import Optional
 
-from src.modules.performance_collector.checkpoint import FrameCheckpointsIterator, FrameCheckpointProcessor, MinStepIsNotReached
+from src.modules.performance_collector.checkpoint import (
+    FrameCheckpointsIterator,
+    FrameCheckpointProcessor,
+    MinStepIsNotReached,
+)
 from src.modules.performance_collector.db import DutiesDB
 from src.modules.performance_collector.http_server import start_performance_api_server
 from src.modules.submodules.oracle_module import BaseModule, ModuleExecuteDelay
-from src.modules.submodules.types import ChainConfig, FrameConfig
+from src.modules.submodules.types import ChainConfig
 from src.types import BlockStamp, EpochNumber
-from src.utils.web3converter import Web3Converter
+from src.utils.web3converter import ChainConverter
 from src import variables
 
 logger = logging.getLogger(__name__)
@@ -20,20 +24,23 @@ class PerformanceCollector(BaseModule):
 
     def __init__(self, w3, db_path: Optional[str] = None):
        super().__init__(w3)
+        logger.info({'msg': 'Initialize Performance Collector module.'})
         db_path = db_path or str((variables.CACHE_PATH / "eth_duties.sqlite").absolute())
         self.db = DutiesDB(db_path)
-        logger.info({'msg': 'Initialize Performance Collector 
module.'}) try: - logger.info({'msg': f'Start performance API server on port {variables.PERFORMANCE_COLLECTOR_SERVER_API_PORT}'}) + logger.info( + {'msg': f'Start performance API server on port {variables.PERFORMANCE_COLLECTOR_SERVER_API_PORT}'} + ) start_performance_api_server(db_path) except Exception as e: logger.error({'msg': 'Failed to start performance API server', 'error': repr(e)}) raise def refresh_contracts(self): + # No need to refresh contracts for this module. There are no contracts used. return None - def _build_converter(self) -> Web3Converter: + def _build_converter(self) -> ChainConverter: cc_spec = self.w3.cc.get_config_spec() genesis = self.w3.cc.get_genesis() chain_cfg = ChainConfig( @@ -41,21 +48,24 @@ def _build_converter(self) -> Web3Converter: seconds_per_slot=cc_spec.SECONDS_PER_SLOT, genesis_time=genesis.genesis_time, ) - # FIXME: mocked value - frame_cfg = FrameConfig(initial_epoch=0, epochs_per_frame=32, fast_lane_length_slots=0) - return Web3Converter(chain_cfg, frame_cfg) + return ChainConverter(chain_cfg) def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay: converter = self._build_converter() - start_epoch = max(self.db.min_unprocessed_epoch(), variables.PERFORMANCE_COLLECTOR_SERVER_START_EPOCH) + start_epoch = EpochNumber( + max(self.db.min_unprocessed_epoch(), variables.PERFORMANCE_COLLECTOR_SERVER_START_EPOCH) + ) + end_epoch = variables.PERFORMANCE_COLLECTOR_SERVER_END_EPOCH + # TODO: adjust range by incoming POST requests + finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) try: checkpoints = FrameCheckpointsIterator( converter, start_epoch, - variables.PERFORMANCE_COLLECTOR_SERVER_END_EPOCH, + end_epoch, finalized_epoch, ) except MinStepIsNotReached: diff --git a/src/providers/performance/client.py b/src/providers/performance/client.py index 906acb4c5..dd6410136 100644 --- a/src/providers/performance/client.py +++ b/src/providers/performance/client.py @@ -42,10 +42,17 @@ def get_epoch_blobs(self, l_epoch: int, r_epoch: int) -> list[dict[str, str | No ) return data['result'] + def get_epoch_blob(self, epoch: int) -> dict[str, str | None]: + data, _ = self._get( + self.API_EPOCHS_BLOB + f"/{epoch}", + retval_validator=data_is_dict, + ) + return data['result'] + def get_epochs(self, l_epoch: int, r_epoch: int) -> list[tuple[set[int], list[ProposalDuty], list[SyncDuty]]]: epochs_data = self.get_epoch_blobs(l_epoch, r_epoch) return [EpochBlobCodec.decode(bytes.fromhex(epoch_data['blob'])) for epoch_data in epochs_data] - def get_epoch(self, epoch: int) -> EpochBlob | None: - res = self.get_epochs(epoch, epoch) - return res[0] + def get_epoch(self, epoch: int) -> tuple[set[int], list[ProposalDuty], list[SyncDuty]] | None: + blob = self.get_epoch_blob(epoch) + return EpochBlobCodec.decode(bytes.fromhex(blob['blob'])) if blob else None diff --git a/src/utils/web3converter.py b/src/utils/web3converter.py index e8706785f..99fc58f75 100644 --- a/src/utils/web3converter.py +++ b/src/utils/web3converter.py @@ -9,31 +9,43 @@ def epoch_from_slot(slot: SlotNumber, slots_per_epoch: int) -> EpochNumber: return EpochNumber(slot // slots_per_epoch) -class Web3Converter: +class ChainConverter: + chain_config: ChainConfig + + def __init__(self, chain_config: ChainConfig): + self.chain_config = chain_config + + def get_epoch_first_slot(self, epoch: EpochNumber) -> SlotNumber: + return SlotNumber(epoch * self.chain_config.slots_per_epoch) + + def get_epoch_last_slot(self, epoch: EpochNumber) 
-> SlotNumber: + return SlotNumber((epoch + 1) * self.chain_config.slots_per_epoch - 1) + + def get_epoch_by_slot(self, ref_slot: SlotNumber) -> EpochNumber: + return EpochNumber(ref_slot // self.chain_config.slots_per_epoch) + + def get_slot_by_timestamp(self, timestamp: int) -> SlotNumber: + return SlotNumber((timestamp - self.chain_config.genesis_time) // self.chain_config.seconds_per_slot) + + +class Web3Converter(ChainConverter): """ The Web3Converter class contains methods for converting between slot, epoch, and frame numbers using chain and frame settings passed as arguments when the class instance is created. Frame is the distance between two oracle reports. """ - - chain_config: ChainConfig frame_config: FrameConfig def __init__(self, chain_config: ChainConfig, frame_config: FrameConfig): - self.chain_config = chain_config + # TODO: fix SafeBorder inheritance issue in Web3Converter + super().__init__(chain_config) self.frame_config = frame_config @property def slots_per_frame(self) -> int: return self.frame_config.epochs_per_frame * self.chain_config.slots_per_epoch - def get_epoch_first_slot(self, epoch: EpochNumber) -> SlotNumber: - return SlotNumber(epoch * self.chain_config.slots_per_epoch) - - def get_epoch_last_slot(self, epoch: EpochNumber) -> SlotNumber: - return SlotNumber((epoch + 1) * self.chain_config.slots_per_epoch - 1) - def get_frame_last_slot(self, frame: FrameNumber) -> SlotNumber: return SlotNumber(self.get_frame_first_slot(FrameNumber(frame + 1)) - 1) @@ -42,16 +54,10 @@ def get_frame_first_slot(self, frame: FrameNumber) -> SlotNumber: (self.frame_config.initial_epoch + frame * self.frame_config.epochs_per_frame) * self.chain_config.slots_per_epoch ) - def get_epoch_by_slot(self, ref_slot: SlotNumber) -> EpochNumber: - return EpochNumber(ref_slot // self.chain_config.slots_per_epoch) - def get_epoch_by_timestamp(self, timestamp: int) -> EpochNumber: slot = self.get_slot_by_timestamp(timestamp) return self.get_epoch_by_slot(slot) - def get_slot_by_timestamp(self, timestamp: int) -> SlotNumber: - return SlotNumber((timestamp - self.chain_config.genesis_time) // self.chain_config.seconds_per_slot) - def get_frame_by_slot(self, slot: SlotNumber) -> FrameNumber: return self.get_frame_by_epoch(self.get_epoch_by_slot(slot)) From 6e95c017f0bf76fbc3764039bd5c45fe68a3fa88 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Mon, 27 Oct 2025 10:52:13 +0100 Subject: [PATCH 06/35] refactor: types --- .../performance_collector/checkpoint.py | 99 ++++++++++--------- src/modules/performance_collector/codec.py | 28 +++--- src/modules/performance_collector/db.py | 71 ++++--------- .../performance_collector/http_server.py | 13 +-- .../performance_collector.py | 3 +- src/modules/performance_collector/types.py | 15 --- src/providers/performance/client.py | 32 +++--- 7 files changed, 111 insertions(+), 150 deletions(-) delete mode 100644 src/modules/performance_collector/types.py diff --git a/src/modules/performance_collector/checkpoint.py b/src/modules/performance_collector/checkpoint.py index bac9f5222..6a23981ee 100644 --- a/src/modules/performance_collector/checkpoint.py +++ b/src/modules/performance_collector/checkpoint.py @@ -10,20 +10,14 @@ from src import variables from src.constants import SLOTS_PER_HISTORICAL_ROOT, EPOCHS_PER_SYNC_COMMITTEE_PERIOD +from src.modules.performance_collector.codec import ProposalDuty, SyncDuty, AttDutyMisses from src.modules.performance_collector.db import DutiesDB -from src.modules.performance_collector.types import ( - SlotBlockRoot, - 
AttestationCommittees, - ValidatorDuty, - SyncCommittees, - ProposeDuties, -) from src.modules.submodules.types import ZERO_HASH from src.providers.consensus.client import ConsensusClient from src.providers.consensus.types import SyncCommittee, SyncAggregate from src.utils.blockstamp import build_blockstamp from src.providers.consensus.types import BlockAttestation -from src.types import BlockRoot, BlockStamp, CommitteeIndex, EpochNumber, SlotNumber +from src.types import BlockRoot, BlockStamp, CommitteeIndex, EpochNumber, SlotNumber, ValidatorIndex from src.utils.range import sequence from src.utils.slot import get_prev_non_missed_slot from src.utils.timeit import timeit @@ -35,6 +29,12 @@ logger = logging.getLogger(__name__) lock = Lock() +type SlotBlockRoot = tuple[SlotNumber, BlockRoot | None] + +type AttestationCommittees = dict[tuple[SlotNumber, CommitteeIndex], list[ValidatorIndex]] + +type SyncDuties = list[SyncDuty] + class MinStepIsNotReached(Exception): ... @@ -257,9 +257,9 @@ def _check_duties( ): logger.info({"msg": f"Processing epoch {duty_epoch}"}) - att_committees = self._prepare_attestation_duties(duty_epoch) propose_duties = self._prepare_propose_duties(duty_epoch, checkpoint_block_roots, checkpoint_slot) - sync_committees = self._prepare_sync_committee_duties(duty_epoch, duty_epoch_roots) + att_committees, att_misses = self._prepare_attestation_duties(duty_epoch) + sync_duties = self._prepare_sync_committee_duties(duty_epoch) for slot, root in [*duty_epoch_roots, *next_epoch_roots]: missed_slot = root is None @@ -267,16 +267,17 @@ def _check_duties( continue attestations, sync_aggregate = self.cc.get_block_attestations_and_sync(root) if (slot, root) in duty_epoch_roots: - propose_duties[slot].included = True - process_sync(slot, sync_aggregate, sync_committees) - process_attestations(attestations, att_committees) + propose_duties[slot].is_proposed = True + process_sync(sync_aggregate, sync_duties) + process_attestations(attestations, att_committees, att_misses) with lock: - self.db.store_epoch_from_duties( + propose_duties = list(propose_duties.values()) + self.db.store_epoch( duty_epoch, - att_committees=att_committees, - propose_duties=propose_duties, - sync_committees=sync_committees, + att_misses=att_misses, + proposals=propose_duties, + syncs=sync_duties, ) @timeit( @@ -284,37 +285,26 @@ def _check_duties( {"msg": f"Attestation Committees for epoch {args.epoch} prepared in {duration:.2f} seconds"} ) ) - def _prepare_attestation_duties(self, epoch: EpochNumber) -> AttestationCommittees: - committees = {} + def _prepare_attestation_duties(self, epoch: EpochNumber) -> tuple[AttestationCommittees, AttDutyMisses]: + committees: AttestationCommittees = {} + att_misses: AttDutyMisses = set() for committee in self.cc.get_attestation_committees(self.finalized_blockstamp, epoch): - validators = [] - # Order of insertion is used to track the positions in the committees. 
- for validator_index in committee.validators: - validators.append(ValidatorDuty(validator_index, included=False)) - committees[(committee.slot, committee.index)] = validators - return committees + committees[(committee.slot, committee.index)] = committee.validators + att_misses.update(committee.validators) + return committees, att_misses @timeit( lambda args, duration: logger.info( {"msg": f"Sync Committee for epoch {args.epoch} prepared in {duration:.2f} seconds"} ) ) - def _prepare_sync_committee_duties( - self, epoch: EpochNumber, epoch_block_roots: list[SlotBlockRoot] - ) -> dict[SlotNumber, list[ValidatorDuty]]: - + def _prepare_sync_committee_duties(self, epoch: EpochNumber) -> SyncDuties: with lock: sync_committee = self._get_sync_committee(epoch) - duties = {} - for slot, root in epoch_block_roots: - missed_slot = root is None - if missed_slot: - continue - duties[slot] = [ - ValidatorDuty(validator_index=validator_index, included=False) - for validator_index in sync_committee.validators - ] + duties: SyncDuties = [] + for vid in sync_committee.validators: + duties.append(SyncDuty(vid, missed_count=0)) return duties @@ -341,12 +331,12 @@ def _get_sync_committee(self, epoch: EpochNumber) -> SyncCommittee: ) def _prepare_propose_duties( self, epoch: EpochNumber, checkpoint_block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber - ) -> ProposeDuties: + ) -> dict[SlotNumber, ProposalDuty]: duties = {} dependent_root = self._get_dependent_root_for_proposer_duties(epoch, checkpoint_block_roots, checkpoint_slot) proposer_duties = self.cc.get_proposer_duties(epoch, dependent_root) for duty in proposer_duties: - duties[duty.slot] = ValidatorDuty(validator_index=duty.validator_index, included=False) + duties[duty.slot] = ProposalDuty(duty.validator_index, is_proposed=False) return duties def _get_dependent_root_for_proposer_duties( @@ -382,26 +372,38 @@ def _get_dependent_root_for_proposer_duties( return dependent_root -def process_sync(slot: SlotNumber, sync_aggregate: SyncAggregate, committees: SyncCommittees) -> None: - committee = committees[slot] +def process_sync( + sync_aggregate: SyncAggregate, + sync_duties: list[SyncDuty] +) -> None: # Spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#syncaggregate sync_bits = hex_bitvector_to_list(sync_aggregate.sync_committee_bits) - for index_in_committee in get_set_indices(sync_bits): - committee[index_in_committee].included = True + # Go through only UNSET indexes to get misses + for index_in_committee in get_unset_indices(sync_bits): + sync_duties[index_in_committee].missed_count += 1 def process_attestations( attestations: Iterable[BlockAttestation], committees: AttestationCommittees, + misses: AttDutyMisses, ) -> None: for attestation in attestations: committee_offset = 0 att_bits = hex_bitlist_to_list(attestation.aggregation_bits) + att_slot = attestation.data.slot for committee_idx in get_committee_indices(attestation): - committee = committees.get((attestation.data.slot, committee_idx), []) + committee = committees.get((att_slot, committee_idx)) + if not committee: + # It is attestation from prev or future epoch. + # We already checked that before or check in next epoch processing. + continue att_committee_bits = att_bits[committee_offset:][: len(committee)] + # We can't get unset indices because the committee can attest partially in different blocks. + # If some part of the committee attested block X, their bits in block Y will be unset. 
for index_in_committee in get_set_indices(att_committee_bits):
-            committee[index_in_committee].included = True
+            vid = committee[index_in_committee]
+            misses.remove(vid)
         committee_offset += len(committee)
@@ -414,6 +416,11 @@ def get_set_indices(bits: Sequence[bool]) -> list[int]:
     return [i for i, bit in enumerate(bits) if bit]
 
 
+def get_unset_indices(bits: Sequence[bool]) -> list[int]:
+    """Returns indices of false values in the supplied sequence"""
+    return [i for i, bit in enumerate(bits) if not bit]
+
+
 def hex_bitvector_to_list(bitvector: str) -> list[bool]:
     bytes_ = hex_str_to_bytes(bitvector)
     return _bytes_to_bool_list(bytes_)
diff --git a/src/modules/performance_collector/codec.py b/src/modules/performance_collector/codec.py
index 71a7ce50a..96cb8a80b 100644
--- a/src/modules/performance_collector/codec.py
+++ b/src/modules/performance_collector/codec.py
@@ -4,6 +4,8 @@
 
 from pyroaring import BitMap
 
+from src.types import ValidatorIndex
+
 # TODO: get from config
 SLOTS_PER_EPOCH = 32
 COMMITTEE_SIZE = 512
@@ -76,27 +78,27 @@ def decode(cls, blob: bytes) -> list[SyncDuty]:
         return out
 
 
-AttMissDuty: TypeAlias = int
+AttDutyMisses: TypeAlias = set[ValidatorIndex]
 
 
 class AttDutiesMissCodec:
     @staticmethod
-    def encode(missed: set[AttMissDuty]) -> bytes:
-        bm = BitMap(sorted(v for v in missed))
+    def encode(misses: AttDutyMisses) -> bytes:
+        bm = BitMap(sorted(v for v in misses))
         bm.shrink_to_fit()
         bm.run_optimize()
         return bm.serialize()
 
     @staticmethod
-    def decode(blob: bytes) -> set[AttMissDuty]:
-        return set(BitMap.deserialize(blob))
+    def decode(blob: bytes) -> AttDutyMisses:
+        return set([ValidatorIndex(i) for i in BitMap.deserialize(blob)])
 
 
-EpochBlob: TypeAlias = tuple[set[int], list[ProposalDuty], list[SyncDuty]]
+EpochData: TypeAlias = tuple[AttDutyMisses, list[ProposalDuty], list[SyncDuty]]
 
 
-class EpochBlobCodec:
+class EpochDataCodec:
     # little-endian | uint8 version | uint32 att_count | uint8 prop_count | uint16 sync_count
     # See: https://docs.python.org/3/library/struct.html#format-characters
     HEADER_FMT = "<BIBH"
     HEADER_SIZE = struct.calcsize(HEADER_FMT)
     VERSION = 1
 
     @classmethod
-    def encode(cls, att_misses: set[AttMissDuty], proposals: Sequence[ProposalDuty], sync_misses: Sequence[SyncDuty]) -> bytes:
+    def encode(cls, att_misses: AttDutyMisses, proposals: list[ProposalDuty], syncs: list[SyncDuty]) -> bytes:
         att_bytes = AttDutiesMissCodec.encode(att_misses)
         prop_bytes = ProposalDutiesCodec.encode(proposals)
-        sync_bytes = SyncDutiesCodec.encode(sync_misses)
-        header = struct.pack(cls.HEADER_FMT, cls.VERSION, len(att_bytes), len(proposals), len(sync_misses))
+        sync_bytes = SyncDutiesCodec.encode(syncs)
+        header = struct.pack(cls.HEADER_FMT, cls.VERSION, len(att_bytes), len(proposals), len(syncs))
         return header + prop_bytes + sync_bytes + att_bytes
 
     @classmethod
-    def decode(cls, blob: bytes) -> EpochBlob:
+    def decode(cls, blob: bytes) -> EpochData:
         if len(blob) < cls.HEADER_SIZE:
             raise ValueError(f"Epoch blob too short to decode: header size is {cls.HEADER_SIZE} but full blob size is {len(blob)}")
         ver, att_count, prop_count, sync_count = struct.unpack_from(cls.HEADER_FMT, blob, 0)
diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py
index 58f351135..ebb04aad2 100644
--- a/src/modules/performance_collector/db.py
+++ b/src/modules/performance_collector/db.py
@@ -1,10 +1,9 @@
 import sqlite3
-from typing import Dict, Optional, Sequence
+from typing import Optional
 
 from src import variables
-from src.modules.performance_collector.codec import ProposalDuty, SyncDuty, EpochBlobCodec, AttMissDuty
-from src.modules.performance_collector.types import AttestationCommittees, ProposeDuties, SyncCommittees
-from src.types import EpochNumber
+from src.modules.performance_collector.codec import ProposalDuty, SyncDuty, EpochDataCodec, AttDutyMisses
+from 
src.types import EpochNumber, ValidatorIndex class DutiesDB: @@ -38,60 +37,26 @@ def _init_schema(self): def store_epoch( self, epoch: EpochNumber, - att_misses: set[AttMissDuty], - proposals: Sequence[ProposalDuty] | None = None, - sync_misses: Sequence[SyncDuty] | None = None, + att_misses: AttDutyMisses, + proposals: list[ProposalDuty], + syncs: list[SyncDuty], ) -> bytes: + blob = EpochDataCodec.encode(att_misses, proposals, syncs) + self._store_blob(epoch, blob) + self._auto_prune(epoch) + return blob - blob = EpochBlobCodec.encode(att_misses, proposals, sync_misses) - + def _store_blob(self, epoch: int, blob: bytes) -> None: conn = self._connect() cur = conn.cursor() - cur.execute( - "INSERT OR REPLACE INTO duties(epoch, blob) VALUES(?, ?)", - (epoch, sqlite3.Binary(blob)), - ) - conn.commit() - conn.close() - return blob - - def store_epoch_from_duties( - self, - epoch: EpochNumber, - att_committees: AttestationCommittees, - propose_duties: ProposeDuties, - sync_committees: SyncCommittees, - ) -> bytes: - att_misses = set() - for committee in att_committees.values(): - for duty in committee: - if not duty.included: - att_misses.add(duty.validator_index) - - proposals_list: list[ProposalDuty] = [] - for proposer_duty in propose_duties.values(): - proposals_list.append( - ProposalDuty(validator_index=proposer_duty.validator_index, is_proposed=proposer_duty.included) + try: + cur.execute( + "INSERT OR REPLACE INTO duties(epoch, blob) VALUES(?, ?)", + (epoch, sqlite3.Binary(blob)), ) - - # FIXME: should we get it like a map? - sync_miss_map: Dict[int, int] = {} - for duties in sync_committees.values(): - for duty in duties: - vid = duty.validator_index - if sync_miss_map.get(vid) is None: - sync_miss_map[duty.validator_index] = 0 - if not duty.included: - sync_miss_map[vid] += 1 - sync_misses: list[SyncDuty] = [ - SyncDuty(validator_index=vid, missed_count=cnt) for vid, cnt in sync_miss_map.items() - ] - - blob = self.store_epoch(epoch, att_misses, proposals_list, sync_misses) - - self._auto_prune(epoch) - - return blob + conn.commit() + finally: + conn.close() def _auto_prune(self, current_epoch: int) -> None: retention = int(getattr(variables, 'PERFORMANCE_COLLECTOR_RETENTION_EPOCHS', 0)) diff --git a/src/modules/performance_collector/http_server.py b/src/modules/performance_collector/http_server.py index 1165d1211..791b95a4a 100644 --- a/src/modules/performance_collector/http_server.py +++ b/src/modules/performance_collector/http_server.py @@ -6,7 +6,7 @@ import traceback from src.modules.performance_collector.db import DutiesDB -from src.modules.performance_collector.codec import EpochBlobCodec +from src.modules.performance_collector.codec import EpochDataCodec from src import variables @@ -67,13 +67,10 @@ def epochs_blob(): return jsonify({"error": "Invalid or missing 'from'/'to' params"}), 400 l, r = parsed db = DutiesDB(app.config["DB_PATH"]) - epochs: list[dict[str, Any]] = [] + epochs: list[str | None] = [] for e in range(l, r + 1): blob = db.get_epoch_blob(e) - epochs.append({ - "epoch": e, - "blob": blob.hex() if blob is not None else None, - }) + epochs.append(blob.hex() if blob is not None else None) return jsonify({"result": epochs}) except Exception as e: return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 @@ -95,7 +92,7 @@ def debug_epoch_details(epoch: int): if blob is None: return jsonify({"error": "epoch not found", "epoch": epoch}), 404 - misses, props, syncs = EpochBlobCodec.decode(blob) + misses, props, syncs = EpochDataCodec.decode(blob) 
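+        # EpochDataCodec.decode returns (attestation misses, proposal duties, sync duties).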
proposals = [ {"validator_index": int(p.validator_index), "is_proposed": bool(p.is_proposed)} for p in props @@ -115,7 +112,7 @@ def debug_epoch_details(epoch: int): except Exception as e: return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 - # TODO: POST endpoint for setting r_epoch for FrameCheckpointsIterator softly + # TODO: POST endpoint for setting l_epoch and r_epoch for FrameCheckpointsIterator return app diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index 5d31c234c..2056a7d85 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -53,8 +53,9 @@ def _build_converter(self) -> ChainConverter: def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay: converter = self._build_converter() + db_min_unprocessed_epoch = self.db.min_unprocessed_epoch() start_epoch = EpochNumber( - max(self.db.min_unprocessed_epoch(), variables.PERFORMANCE_COLLECTOR_SERVER_START_EPOCH) + max(db_min_unprocessed_epoch, variables.PERFORMANCE_COLLECTOR_SERVER_START_EPOCH) ) end_epoch = variables.PERFORMANCE_COLLECTOR_SERVER_END_EPOCH # TODO: adjust range by incoming POST requests diff --git a/src/modules/performance_collector/types.py b/src/modules/performance_collector/types.py deleted file mode 100644 index 7a3097a4c..000000000 --- a/src/modules/performance_collector/types.py +++ /dev/null @@ -1,15 +0,0 @@ -from dataclasses import dataclass - -from src.types import SlotNumber, CommitteeIndex, BlockRoot, ValidatorIndex - - -@dataclass -class ValidatorDuty: - validator_index: ValidatorIndex - included: bool - - -type SlotBlockRoot = tuple[SlotNumber, BlockRoot | None] -type SyncCommittees = dict[SlotNumber, list[ValidatorDuty]] -type ProposeDuties = dict[SlotNumber, ValidatorDuty] -type AttestationCommittees = dict[tuple[SlotNumber, CommitteeIndex], list[ValidatorDuty]] diff --git a/src/providers/performance/client.py b/src/providers/performance/client.py index dd6410136..1d4e29f0e 100644 --- a/src/providers/performance/client.py +++ b/src/providers/performance/client.py @@ -1,15 +1,15 @@ +from eth_typing import HexStr + from src.metrics.prometheus.basic import PERFORMANCE_REQUESTS_DURATION -from src.modules.performance_collector.codec import EpochBlobCodec, ProposalDuty, SyncDuty, EpochBlob +from src.modules.performance_collector.codec import EpochDataCodec, EpochData from src.providers.http_provider import HTTPProvider, NotOkResponse, data_is_dict +from src.types import EpochNumber class PerformanceClientError(NotOkResponse): pass -# TODO: dataclasses and types ??? 
-
-
 class PerformanceClient(HTTPProvider):
     PROVIDER_EXCEPTION = PerformanceClientError
     PROMETHEUS_HISTOGRAM = PERFORMANCE_REQUESTS_DURATION
@@ -21,38 +21,42 @@ class PerformanceClient(HTTPProvider):
     def is_range_available(self, l_epoch: int, r_epoch: int) -> bool:
         data, _ = self._get(
             self.API_EPOCHS_CHECK,
-            query_params={'from': int(l_epoch), 'to': int(r_epoch)},
+            query_params={'from': l_epoch, 'to': r_epoch},
             retval_validator=data_is_dict,
         )
         return data['result']
 
-    def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[int]:
+    def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[EpochNumber]:
         data, _ = self._get(
             self.API_EPOCHS_MISSING,
-            query_params={'from': int(l_epoch), 'to': int(r_epoch)},
+            query_params={'from': l_epoch, 'to': r_epoch},
             retval_validator=data_is_dict,
         )
         return data['result']
 
-    def get_epoch_blobs(self, l_epoch: int, r_epoch: int) -> list[dict[str, str | None]]:
+    def get_epoch_blobs(self, l_epoch: int, r_epoch: int) -> list[HexStr | None]:
         data, _ = self._get(
             self.API_EPOCHS_BLOB,
-            query_params={'from': int(l_epoch), 'to': int(r_epoch)},
+            query_params={'from': l_epoch, 'to': r_epoch},
             retval_validator=data_is_dict,
         )
         return data['result']
 
-    def get_epoch_blob(self, epoch: int) -> dict[str, str | None]:
+    def get_epoch_blob(self, epoch: int) -> HexStr | None:
         data, _ = self._get(
             self.API_EPOCHS_BLOB + f"/{epoch}",
             retval_validator=data_is_dict,
         )
         return data['result']
 
-    def get_epochs(self, l_epoch: int, r_epoch: int) -> list[tuple[set[int], list[ProposalDuty], list[SyncDuty]]]:
+    def get_epochs(self, l_epoch: int, r_epoch: int) -> list[EpochData | None]:
         epochs_data = self.get_epoch_blobs(l_epoch, r_epoch)
-        return [EpochDataCodec.decode(bytes.fromhex(epoch_data['blob'])) for epoch_data in epochs_data]
+        return [
+            EpochDataCodec.decode(bytes.fromhex(blob)) if blob else None
+            for blob in epochs_data
+        ]
 
-    def get_epoch(self, epoch: int) -> tuple[set[int], list[ProposalDuty], list[SyncDuty]] | None:
+    def get_epoch(self, epoch: int) -> EpochData | None:
         blob = self.get_epoch_blob(epoch)
-        return EpochDataCodec.decode(bytes.fromhex(blob['blob'])) if blob else None
+        return EpochDataCodec.decode(bytes.fromhex(blob)) if blob else None

From 46decb10f6a5f66f16a1e9d9ec6ed64969c8fe01 Mon Sep 17 00:00:00 2001
From: vgorkavenko
Date: Mon, 27 Oct 2025 12:07:57 +0100
Subject: [PATCH 07/35] feat: better logging

---
 src/modules/csm/csm.py                        | 37 ++++++++++++++++-
 .../performance_collector/checkpoint.py       | 41 +++++++++++++++++--
 .../performance_collector.py                  | 22 +++++++++-
 src/modules/submodules/consensus.py           |  2 +-
 src/utils/web3converter.py                    |  4 +-
 5 files changed, 98 insertions(+), 8 deletions(-)

diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py
index 7a9fd2c19..54a24b058 100644
--- a/src/modules/csm/csm.py
+++ b/src/modules/csm/csm.py
@@ -96,6 +96,11 @@ def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool:
 
         if not self.state.is_fulfilled:
             for l_epoch_, r_epoch_ in self.state.frames:
+                logger.info({
+                    "msg": "Requesting performance data availability check",
+                    "start_epoch": l_epoch_,
+                    "end_epoch": r_epoch_
+                })
                 is_data_range_available = self.w3.performance.is_range_available(
                     l_epoch_, r_epoch_
                 )
@@ -103,6 +108,12 @@ def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool:
                 logger.warning({"msg": f"Performance data range is not available yet for [{l_epoch_};{r_epoch_}] frame"})
                 # TODO: set r_epoch for FrameCheckpointsIterator softly through POST request
                 return False
+            else:
+                logger.info({
+                    
"msg": "Performance data range is available", + "start_epoch": l_epoch_, + "end_epoch": r_epoch_ + }) self.fulfill_state() return self.state.is_fulfilled @@ -175,18 +186,42 @@ def fulfill_state(self): finalized_blockstamp = self._receive_last_finalized_slot() validators = self.w3.cc.get_validators(finalized_blockstamp) + logger.info({ + "msg": "Starting state fulfillment", + "total_frames": len(self.state.frames), + "total_validators": len(validators) + }) + for l_epoch, r_epoch in self.state.frames: + logger.info({ + "msg": "Processing frame", + "start_epoch": l_epoch, + "end_epoch": r_epoch, + "total_epochs": r_epoch - l_epoch + 1 + }) + for epoch in sequence(l_epoch, r_epoch): if epoch not in self.state.unprocessed_epochs: - logger.info({"msg": f"Epoch {epoch} is already processed"}) + logger.debug({"msg": f"Epoch {epoch} is already processed"}) continue + logger.info({ + "msg": "Requesting performance data from collector", + "epoch": epoch + }) epoch_data = self.w3.performance.get_epoch(epoch) if epoch_data is None: logger.warning({"msg": f"Epoch {epoch} is missing in Performance Collector"}) continue misses, props, syncs = epoch_data + logger.info({ + "msg": "Performance data received", + "epoch": epoch, + "misses_count": len(misses), + "proposals_count": len(props), + "sync_duties_count": len(syncs) + }) for validator in validators: missed_att = validator.index in misses diff --git a/src/modules/performance_collector/checkpoint.py b/src/modules/performance_collector/checkpoint.py index 6a23981ee..b3e6e7682 100644 --- a/src/modules/performance_collector/checkpoint.py +++ b/src/modules/performance_collector/checkpoint.py @@ -156,12 +156,26 @@ def exec(self, checkpoint: FrameCheckpoint) -> int: if not unprocessed_epochs: logger.info({"msg": "Nothing to process in the checkpoint"}) return 0 + + logger.info({ + 'msg': 'Starting epochs batch processing', + 'unprocessed_epochs_count': len(unprocessed_epochs), + 'checkpoint_slot': checkpoint.slot + }) + block_roots = self._get_block_roots(checkpoint.slot) duty_epochs_roots = { duty_epoch: self._select_block_roots(block_roots, duty_epoch, checkpoint.slot) for duty_epoch in unprocessed_epochs } self._process(block_roots, checkpoint.slot, unprocessed_epochs, duty_epochs_roots) + + logger.info({ + 'msg': 'All epochs processing completed', + 'processed_epochs': len(unprocessed_epochs), + 'checkpoint_slot': checkpoint.slot + }) + return len(unprocessed_epochs) def _get_block_roots(self, checkpoint_slot: SlotNumber): @@ -186,6 +200,14 @@ def _get_block_roots(self, checkpoint_slot: SlotNumber): if is_pivot_missing: br[pivot_index] = None + logger.debug({ + 'msg': 'Block roots analysis', + 'total_roots': len(br), + 'missing_roots_count': br.count(None), + 'pivot_index': pivot_index, + 'is_pivot_missing': is_pivot_missing + }) + return br def _select_block_roots( @@ -311,7 +333,19 @@ def _prepare_sync_committee_duties(self, epoch: EpochNumber) -> SyncDuties: def _get_sync_committee(self, epoch: EpochNumber) -> SyncCommittee: sync_committee_period = epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD if cached_sync_committee := SYNC_COMMITTEES_CACHE.get(sync_committee_period): + logger.debug({ + 'msg': 'Sync committee cache hit', + 'period': sync_committee_period, + 'cache_size': len(SYNC_COMMITTEES_CACHE) + }) return cached_sync_committee + + logger.debug({ + 'msg': 'Sync committee cache miss', + 'period': sync_committee_period, + 'cache_size': len(SYNC_COMMITTEES_CACHE) + }) + from_epoch = EpochNumber(epoch - epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD) to_epoch 
= EpochNumber(from_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1) logger.info({"msg": f"Preparing cached Sync Committee for [{from_epoch};{to_epoch}] chain epochs"}) @@ -399,11 +433,12 @@ def process_attestations( # We already checked that before or check in next epoch processing. continue att_committee_bits = att_bits[committee_offset:][: len(committee)] - # We can't get unset indices because the committee can attest partially in different blocks. - # If some part of the committee attested block X, their bits in block Y will be unset. + # We can't use unset indices because the committee can attest partially in different blocks. + # If some part of the committee attested block X, their bits in block Y might be unset. for index_in_committee in get_set_indices(att_committee_bits): vid = committee[index_in_committee] - misses.remove(vid) + if vid in misses: + misses.remove(vid) committee_offset += len(committee) diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index 2056a7d85..cbcfc9a22 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -62,6 +62,14 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) + logger.info({ + 'msg': 'Starting epoch range processing', + 'start_epoch': start_epoch, + 'end_epoch': end_epoch, + 'finalized_epoch': finalized_epoch, + 'db_min_unprocessed_epoch': db_min_unprocessed_epoch + }) + try: checkpoints = FrameCheckpointsIterator( converter, @@ -74,9 +82,21 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute processor = FrameCheckpointProcessor(self.w3.cc, self.db, converter, last_finalized_blockstamp) + checkpoint_count = 0 for checkpoint in checkpoints: - processor.exec(checkpoint) + processed_epochs = processor.exec(checkpoint) + checkpoint_count += 1 + logger.info({ + 'msg': 'Checkpoint processing completed', + 'checkpoint_slot': checkpoint.slot, + 'processed_epochs': processed_epochs + }) # Reset BaseOracle cycle timeout to avoid timeout errors during long checkpoints processing self._reset_cycle_timeout() + logger.info({ + 'msg': 'All checkpoints processing completed', + 'total_checkpoints_processed': checkpoint_count + }) + return ModuleExecuteDelay.NEXT_SLOT diff --git a/src/modules/submodules/consensus.py b/src/modules/submodules/consensus.py index d75bb1bc9..276524d53 100644 --- a/src/modules/submodules/consensus.py +++ b/src/modules/submodules/consensus.py @@ -483,7 +483,7 @@ def _get_web3_converter(self, blockstamp: BlockStamp) -> Web3Converter: @lru_cache(maxsize=1) def get_frame_number_by_slot(self, blockstamp: ReferenceBlockStamp) -> FrameNumber: converter = self._get_web3_converter(blockstamp) - frame_number = converter.get_frame_by_slot(blockstamp.ref_slot) + frame_number = converter.get_frame_by_slot(SlotNumber(blockstamp.ref_slot + 1)) logger.info({ "msg": "Get current frame from blockstamp", "frame": frame_number, diff --git a/src/utils/web3converter.py b/src/utils/web3converter.py index 99fc58f75..def531363 100644 --- a/src/utils/web3converter.py +++ b/src/utils/web3converter.py @@ -21,8 +21,8 @@ def get_epoch_first_slot(self, epoch: EpochNumber) -> SlotNumber: def get_epoch_last_slot(self, epoch: EpochNumber) -> SlotNumber: return SlotNumber((epoch + 1) * self.chain_config.slots_per_epoch - 1) - def 
get_epoch_by_slot(self, ref_slot: SlotNumber) -> EpochNumber: - return EpochNumber(ref_slot // self.chain_config.slots_per_epoch) + def get_epoch_by_slot(self, slot: SlotNumber) -> EpochNumber: + return EpochNumber(slot // self.chain_config.slots_per_epoch) def get_slot_by_timestamp(self, timestamp: int) -> SlotNumber: return SlotNumber((timestamp - self.chain_config.genesis_time) // self.chain_config.seconds_per_slot) From 39d50d28314040e25586ffebbb8657059e891d5c Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Mon, 27 Oct 2025 15:09:01 +0100 Subject: [PATCH 08/35] feat: additional validation --- src/constants.py | 1 + src/modules/performance_collector/checkpoint.py | 6 +++++- src/modules/performance_collector/codec.py | 16 +++------------- src/modules/performance_collector/db.py | 2 +- 4 files changed, 10 insertions(+), 15 deletions(-) diff --git a/src/constants.py b/src/constants.py index f385b4b0b..dddb2c0ec 100644 --- a/src/constants.py +++ b/src/constants.py @@ -33,6 +33,7 @@ # https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#time-parameters SLOTS_PER_HISTORICAL_ROOT = 2**13 # 8192 # https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#sync-committee +SYNC_COMMITTEE_SIZE = 512 EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256 # https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#domain-types DOMAIN_DEPOSIT_TYPE = bytes.fromhex("03000000") # 0x03000000 diff --git a/src/modules/performance_collector/checkpoint.py b/src/modules/performance_collector/checkpoint.py index b3e6e7682..a50a84fe9 100644 --- a/src/modules/performance_collector/checkpoint.py +++ b/src/modules/performance_collector/checkpoint.py @@ -9,7 +9,7 @@ from hexbytes import HexBytes from src import variables -from src.constants import SLOTS_PER_HISTORICAL_ROOT, EPOCHS_PER_SYNC_COMMITTEE_PERIOD +from src.constants import SLOTS_PER_HISTORICAL_ROOT, EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SYNC_COMMITTEE_SIZE from src.modules.performance_collector.codec import ProposalDuty, SyncDuty, AttDutyMisses from src.modules.performance_collector.db import DutiesDB from src.modules.submodules.types import ZERO_HASH @@ -295,6 +295,10 @@ def _check_duties( with lock: propose_duties = list(propose_duties.values()) + if len(propose_duties) > self.converter.chain_config.slots_per_epoch: + raise ValueError(f"Invalid number of propose duties prepared in epoch {duty_epoch}") + if len(sync_duties) > SYNC_COMMITTEE_SIZE: + raise ValueError(f"Invalid number of sync duties prepared in epoch {duty_epoch}") self.db.store_epoch( duty_epoch, att_misses=att_misses, diff --git a/src/modules/performance_collector/codec.py b/src/modules/performance_collector/codec.py index 96cb8a80b..330d0c4d1 100644 --- a/src/modules/performance_collector/codec.py +++ b/src/modules/performance_collector/codec.py @@ -1,16 +1,11 @@ import struct from dataclasses import dataclass -from typing import Sequence, TypeAlias +from typing import TypeAlias from pyroaring import BitMap from src.types import ValidatorIndex -# TODO: get from config -SLOTS_PER_EPOCH = 32 -COMMITTEE_SIZE = 512 - - @dataclass class ProposalDuty: validator_index: int @@ -24,9 +19,7 @@ class ProposalDutiesCodec: ITEM_SIZE = struct.calcsize(PACK_FMT) @classmethod - def encode(cls, proposals: Sequence[ProposalDuty]) -> bytes: - if len(proposals) != SLOTS_PER_EPOCH: - raise ValueError("Invalid proposals count") + def encode(cls, proposals: list[ProposalDuty]) -> bytes: items = sorted(((p.validator_index, p.is_proposed) for p in 
proposals), key=lambda t: t[0]) return b"".join(struct.pack(cls.PACK_FMT, vid, flag) for vid, flag in items) @@ -56,12 +49,9 @@ class SyncDutiesCodec: ITEM_SIZE = struct.calcsize(PACK_FMT) @classmethod - def encode(cls, syncs: Sequence[SyncDuty]) -> bytes: + def encode(cls, syncs: list[SyncDuty]) -> bytes: if len(syncs) == 0: raise ValueError("Invalid syncs count") - for s in syncs: - if not (0 <= int(s.missed_count) <= SLOTS_PER_EPOCH): - raise ValueError("missed_count out of range [0..32]") items_sorted = sorted(((m.validator_index, m.missed_count) for m in syncs), key=lambda t: t[0]) return b"".join(struct.pack(cls.PACK_FMT, vid, cnt) for vid, cnt in items_sorted) diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py index ebb04aad2..b314d879b 100644 --- a/src/modules/performance_collector/db.py +++ b/src/modules/performance_collector/db.py @@ -3,7 +3,7 @@ from src import variables from src.modules.performance_collector.codec import ProposalDuty, SyncDuty, EpochDataCodec, AttDutyMisses -from src.types import EpochNumber, ValidatorIndex +from src.types import EpochNumber class DutiesDB: From d689979d0245f9fadcbd145b78e8e4260e56dbb2 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 28 Oct 2025 10:47:19 +0100 Subject: [PATCH 09/35] refactor: db --- src/modules/performance_collector/db.py | 157 ++++++++++-------------- 1 file changed, 68 insertions(+), 89 deletions(-) diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py index b314d879b..77dcf6575 100644 --- a/src/modules/performance_collector/db.py +++ b/src/modules/performance_collector/db.py @@ -1,4 +1,5 @@ import sqlite3 +from contextlib import contextmanager from typing import Optional from src import variables @@ -7,32 +8,36 @@ class DutiesDB: - def __init__(self, path: str, *, default_num_validators: Optional[int] = None): - self.path = path - self.default_num_validators = default_num_validators - self._init_schema() - - def _connect(self) -> sqlite3.Connection: - conn = sqlite3.connect(self.path, check_same_thread=False, timeout=30.0) - conn.execute("PRAGMA journal_mode=WAL;") - conn.execute("PRAGMA synchronous=NORMAL;") - conn.execute("PRAGMA temp_store=MEMORY;") - return conn - - def _init_schema(self): - conn = self._connect() - cur = conn.cursor() - cur.execute( + def __init__(self, path: str): + self._path = path + self._conn = sqlite3.connect(self._path, check_same_thread=False, timeout=30.0) # TODO: Timeout? 
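A quick aside on the connection setup and PRAGMA tuning applied in the lines that follow. This is a standalone sketch with a throwaway database path, not part of the patch itself:

```python
import sqlite3

# Illustrative sketch of the DutiesDB connection tuning below (the path is
# an assumption). WAL lets concurrent readers coexist with the single writer,
# synchronous=NORMAL trades a little durability for fewer fsyncs (safe with
# WAL), and temp_store=MEMORY keeps temporary tables and indices in RAM.
conn = sqlite3.connect("duties.db", check_same_thread=False, timeout=30.0)
conn.execute("PRAGMA journal_mode=WAL;")
conn.execute("PRAGMA synchronous=NORMAL;")
conn.execute("PRAGMA temp_store=MEMORY;")

# Each setting can be read back to confirm it took effect.
assert conn.execute("PRAGMA journal_mode;").fetchone()[0] == "wal"
assert conn.execute("PRAGMA synchronous;").fetchone()[0] == 1  # 1 == NORMAL
assert conn.execute("PRAGMA temp_store;").fetchone()[0] == 2   # 2 == MEMORY
conn.close()
```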
+ # Optimize SQLite for performance: WAL mode for concurrent access, + # normal sync for speed/safety balance, memory temp storage + self._conn.execute("PRAGMA journal_mode=WAL;") + self._conn.execute("PRAGMA synchronous=NORMAL;") + self._conn.execute("PRAGMA temp_store=MEMORY;") + self._conn.execute( """ CREATE TABLE IF NOT EXISTS duties ( epoch INTEGER PRIMARY KEY, - blob BLOB NOT NULL + blob BLOB NOT NULL ); """ ) - conn.commit() - conn.close() + self._conn.commit() + + def __del__(self): + if self._conn: + self._conn.close() + self._conn = None + + @contextmanager + def connection(self): + try: + yield self._conn.cursor() + finally: + self._conn.commit() def store_epoch( self, @@ -47,56 +52,41 @@ def store_epoch( return blob def _store_blob(self, epoch: int, blob: bytes) -> None: - conn = self._connect() - cur = conn.cursor() - try: + with self.connection() as cur: cur.execute( "INSERT OR REPLACE INTO duties(epoch, blob) VALUES(?, ?)", (epoch, sqlite3.Binary(blob)), ) - conn.commit() - finally: - conn.close() def _auto_prune(self, current_epoch: int) -> None: - retention = int(getattr(variables, 'PERFORMANCE_COLLECTOR_RETENTION_EPOCHS', 0)) - if retention <= 0: + if variables.PERFORMANCE_COLLECTOR_RETENTION_EPOCHS <= 0: return - threshold = int(current_epoch) - retention + threshold = int(current_epoch) - variables.PERFORMANCE_COLLECTOR_RETENTION_EPOCHS if threshold <= 0: return - conn = self._connect() - try: - cur = conn.cursor() + with self.connection() as cur: cur.execute("DELETE FROM duties WHERE epoch < ?", (threshold,)) - conn.commit() - finally: - conn.close() def is_range_available(self, l_epoch: int, r_epoch: int) -> bool: if int(l_epoch) > int(r_epoch): raise ValueError("Invalid epoch range") - conn = self._connect() - cur = conn.cursor() - cur.execute( - "SELECT COUNT(1) FROM duties WHERE epoch BETWEEN ? AND ?", - (int(l_epoch), int(r_epoch)), - ) - (cnt,) = cur.fetchone() or (0,) - conn.close() + with self.connection() as cur: + cur.execute( + "SELECT COUNT(1) FROM duties WHERE epoch BETWEEN ? AND ?", + (int(l_epoch), int(r_epoch)), + ) + (cnt,) = cur.fetchone() or (0,) return int(cnt) == (r_epoch - l_epoch + 1) def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[int]: if l_epoch > r_epoch: raise ValueError("Invalid epoch range") - conn = self._connect() - cur = conn.cursor() - cur.execute( - "SELECT epoch FROM duties WHERE epoch BETWEEN ? AND ? ORDER BY epoch", - (l_epoch, r_epoch), - ) - present = [int(row[0]) for row in cur.fetchall()] - conn.close() + with self.connection() as cur: + cur.execute( + "SELECT epoch FROM duties WHERE epoch BETWEEN ? AND ? ORDER BY epoch", + (l_epoch, r_epoch), + ) + present = [int(row[0]) for row in cur.fetchall()] missing = [] exp = l_epoch for e in present: @@ -110,11 +100,9 @@ def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[int]: return missing def _get_entry(self, epoch: int) -> Optional[bytes]: - conn = self._connect() - cur = conn.cursor() - cur.execute("SELECT blob FROM duties WHERE epoch=?", (int(epoch),)) - row = cur.fetchone() - conn.close() + with self.connection() as cur: + cur.execute("SELECT blob FROM duties WHERE epoch=?", (int(epoch),)) + row = cur.fetchone() if not row: return None return bytes(row[0]) @@ -123,47 +111,38 @@ def get_epoch_blob(self, epoch: int) -> Optional[bytes]: return self._get_entry(epoch) def has_epoch(self, epoch: int) -> bool: - conn = self._connect() - cur = conn.cursor() - cur.execute("SELECT 1 FROM duties WHERE epoch=? 
LIMIT 1", (int(epoch),)) - ok = cur.fetchone() is not None - conn.close() + with self.connection() as cur: + cur.execute("SELECT 1 FROM duties WHERE epoch=? LIMIT 1", (int(epoch),)) + ok = cur.fetchone() is not None return ok def min_epoch(self) -> int: - conn = self._connect() - cur = conn.cursor() - cur.execute("SELECT MIN(epoch) FROM duties") - val = int(cur.fetchone()[0] or 0) - conn.close() + with self.connection() as cur: + cur.execute("SELECT MIN(epoch) FROM duties") + val = int(cur.fetchone()[0] or 0) return val def max_epoch(self) -> int: - conn = self._connect() - cur = conn.cursor() - cur.execute("SELECT MAX(epoch) FROM duties") - val = int(cur.fetchone()[0] or 0) - conn.close() + with self.connection() as cur: + cur.execute("SELECT MAX(epoch) FROM duties") + val = int(cur.fetchone()[0] or 0) return val def min_unprocessed_epoch(self) -> int: - conn = self._connect() - cur = conn.cursor() - cur.execute("SELECT MIN(epoch), MAX(epoch) FROM duties") - row = cur.fetchone() - if not row or row[0] is None or row[1] is None: - conn.close() - return 0 - l_epoch, r_epoch = int(row[0]), int(row[1]) - cur.execute( - """ - SELECT MIN(t.epoch + 1) - FROM duties t - LEFT JOIN duties d2 ON d2.epoch = t.epoch + 1 - WHERE t.epoch BETWEEN ? AND ? AND d2.epoch IS NULL - """, - (l_epoch, r_epoch), - ) - (missing,) = cur.fetchone() - conn.close() + with self.connection() as cur: + cur.execute("SELECT MIN(epoch), MAX(epoch) FROM duties") + row = cur.fetchone() + if not row or row[0] is None or row[1] is None: + return 0 + l_epoch, r_epoch = int(row[0]), int(row[1]) + cur.execute( + """ + SELECT MIN(t.epoch + 1) + FROM duties t + LEFT JOIN duties d2 ON d2.epoch = t.epoch + 1 + WHERE t.epoch BETWEEN ? AND ? AND d2.epoch IS NULL + """, + (l_epoch, r_epoch), + ) + (missing,) = cur.fetchone() return int(missing) if missing else (r_epoch + 1) From fa5ed3cce8720b7c11a4756a74b881c22c078f04 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Wed, 29 Oct 2025 11:27:46 +0100 Subject: [PATCH 10/35] feat: `epochs_demand` --- src/modules/csm/csm.py | 52 +++++++++++--- .../performance_collector/checkpoint.py | 1 + src/modules/performance_collector/db.py | 72 +++++++++++++++---- .../performance_collector/http_server.py | 29 +++++++- .../performance_collector.py | 60 ++++++++++++++-- src/providers/http_provider.py | 13 +++- src/providers/performance/client.py | 16 +++++ src/variables.py | 4 -- tests/fork/test_csm_oracle_cycle.py | 3 - .../performance_collector/test_codec.py | 16 ++--- 10 files changed, 221 insertions(+), 45 deletions(-) diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index 54a24b058..c9e4ab8e2 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -71,6 +71,8 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute if not self._check_compatability(last_finalized_blockstamp): return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH + self.send_epochs_to_collect_demand(last_finalized_blockstamp) + report_blockstamp = self.get_blockstamp_for_report(last_finalized_blockstamp) if not report_blockstamp: return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH @@ -82,6 +84,20 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute self.process_report(report_blockstamp) return ModuleExecuteDelay.NEXT_SLOT + def send_epochs_to_collect_demand(self, blockstamp: BlockStamp): + consumer = self.__class__.__name__ + l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) + current_demands = self.w3.performance.get_epochs_demand() + current_demand = 
current_demands.get(consumer, (-1, -1)) + curr_l_epoch, curr_r_epoch = EpochNumber(current_demand[0]), EpochNumber(current_demand[1]) + if (curr_l_epoch, curr_r_epoch) != (l_epoch, r_epoch): + logger.info({ + "msg": f"Updating epochs demand for {consumer} for Performance Collector", + "old": (curr_l_epoch, curr_r_epoch), + "new": (l_epoch, r_epoch) + }) + self.w3.performance.post_epochs_demand(consumer, l_epoch, r_epoch) + @duration_meter() def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool: logger.info({"msg": "Collecting data for the report"}) @@ -89,7 +105,7 @@ def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool: converter = self.converter(blockstamp) l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) - logger.info({"msg": f"Epochs range for performance data collect: [{l_epoch};{r_epoch}]"}) + logger.info({"msg": f"Epochs range for performance data collection: [{l_epoch};{r_epoch}]"}) self.state.migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame) self.state.log_progress() @@ -105,8 +121,11 @@ def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool: l_epoch_, r_epoch_ ) if not is_data_range_available: - logger.warning({"msg": f"Performance data range is not available yet for [{l_epoch_};{r_epoch_}] frame"}) - # TODO: set r_epoch r_epoch for FrameCheckpointsIterator softly through POST request + logger.warning({ + "msg": f"Performance data range is not available yet", + "start_epoch": l_epoch_, + "end_epoch": r_epoch_ + }) return False else: logger.info({ @@ -121,7 +140,8 @@ def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool: @lru_cache(maxsize=1) @duration_meter() def build_report(self, blockstamp: ReferenceBlockStamp) -> tuple: - l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) + l_epoch, _ = self.get_epochs_range_to_process(blockstamp) + r_epoch = blockstamp.ref_epoch self.state.validate(l_epoch, r_epoch) last_report = self._get_last_report(blockstamp) @@ -286,7 +306,7 @@ def publish_log(self, logs: list[FramePerfLog]) -> CID: return log_cid @lru_cache(maxsize=1) - def get_epochs_range_to_process(self, blockstamp: ReferenceBlockStamp) -> tuple[EpochNumber, EpochNumber]: + def get_epochs_range_to_process(self, blockstamp: BlockStamp) -> tuple[EpochNumber, EpochNumber]: converter = self.converter(blockstamp) far_future_initial_epoch = converter.get_epoch_by_timestamp(UINT64_MAX) @@ -294,30 +314,46 @@ def get_epochs_range_to_process(self, blockstamp: ReferenceBlockStamp) -> tuple[ raise ValueError("CSM oracle initial epoch is not set yet") l_ref_slot = last_processing_ref_slot = self.w3.csm.get_csm_last_processing_ref_slot(blockstamp) + r_ref_slot = initial_ref_slot = self.get_initial_ref_slot(blockstamp) if last_processing_ref_slot > blockstamp.slot_number: raise InconsistentData(f"{last_processing_ref_slot=} > {blockstamp.slot_number=}") # The very first report, no previous ref slot. if not last_processing_ref_slot: - initial_ref_slot = self.get_initial_ref_slot(blockstamp) l_ref_slot = SlotNumber(initial_ref_slot - converter.slots_per_frame) if l_ref_slot < 0: raise CSMError("Invalid frame configuration for the current network") - r_ref_slot = blockstamp.slot_number + # NOTE: before the initial slot the contract can't return current frame + if blockstamp.slot_number > initial_ref_slot: + r_ref_slot = self.get_initial_or_current_frame(blockstamp).ref_slot + + # We are between reports, next report slot didn't happen yet. 
Predicting the next ref slot for the report + # to calculate epochs range to collect the data. + if l_ref_slot == r_ref_slot: + r_ref_slot = converter.get_epoch_last_slot( + EpochNumber(converter.get_epoch_by_slot(l_ref_slot) + converter.frame_config.epochs_per_frame) + ) + if l_ref_slot < last_processing_ref_slot: raise CSMError(f"Got invalid epochs range: {l_ref_slot=} < {last_processing_ref_slot=}") if l_ref_slot >= r_ref_slot: raise CSMError(f"Got invalid epochs range {r_ref_slot=}, {l_ref_slot=}") l_epoch = converter.get_epoch_by_slot(SlotNumber(l_ref_slot + 1)) - r_epoch = blockstamp.ref_epoch + r_epoch = converter.get_epoch_by_slot(r_ref_slot) # Update Prometheus metrics CSM_CURRENT_FRAME_RANGE_L_EPOCH.set(l_epoch) CSM_CURRENT_FRAME_RANGE_R_EPOCH.set(r_epoch) + logger.info({ + "msg": "Epochs range for the report", + "l_epoch": l_epoch, + "r_epoch": r_epoch + }) + return l_epoch, r_epoch def converter(self, blockstamp: BlockStamp) -> Web3Converter: diff --git a/src/modules/performance_collector/checkpoint.py b/src/modules/performance_collector/checkpoint.py index a50a84fe9..813c93a39 100644 --- a/src/modules/performance_collector/checkpoint.py +++ b/src/modules/performance_collector/checkpoint.py @@ -299,6 +299,7 @@ def _check_duties( raise ValueError(f"Invalid number of propose duties prepared in epoch {duty_epoch}") if len(sync_duties) > SYNC_COMMITTEE_SIZE: raise ValueError(f"Invalid number of sync duties prepared in epoch {duty_epoch}") + # TODO: log progress with remaining time? self.db.store_epoch( duty_epoch, att_misses=att_misses, diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py index 77dcf6575..895456337 100644 --- a/src/modules/performance_collector/db.py +++ b/src/modules/performance_collector/db.py @@ -25,6 +25,16 @@ def __init__(self, path: str): ); """ ) + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS epochs_demand + ( + consumer STRING PRIMARY KEY, + l_epoch INTEGER, + r_epoch INTEGER + ) + """ + ) self._conn.commit() def __del__(self): @@ -39,6 +49,13 @@ def connection(self): finally: self._conn.commit() + def store_demand(self, consumer: str, l_epoch: int, r_epoch: int) -> None: + with self.connection() as cur: + cur.execute( + "INSERT OR REPLACE INTO epochs_demand(consumer, l_epoch, r_epoch) VALUES(?, ?, ?)", + (consumer, l_epoch, r_epoch), + ) + def store_epoch( self, epoch: EpochNumber, @@ -65,6 +82,7 @@ def _auto_prune(self, current_epoch: int) -> None: if threshold <= 0: return with self.connection() as cur: + # TODO: logging? cur.execute("DELETE FROM duties WHERE epoch < ?", (threshold,)) def is_range_available(self, l_epoch: int, r_epoch: int) -> bool: @@ -128,21 +146,49 @@ def max_epoch(self) -> int: val = int(cur.fetchone()[0] or 0) return val - def min_unprocessed_epoch(self) -> int: + def min_unprocessed_epoch(self, l_epoch: int, r_epoch: int) -> int | None: with self.connection() as cur: - cur.execute("SELECT MIN(epoch), MAX(epoch) FROM duties") - row = cur.fetchone() - if not row or row[0] is None or row[1] is None: - return 0 - l_epoch, r_epoch = int(row[0]), int(row[1]) + cur.execute( + "SELECT COUNT(*) FROM duties WHERE epoch BETWEEN ? AND ?", + (l_epoch, r_epoch), + ) + (count,) = cur.fetchone() + expected_count = r_epoch - l_epoch + 1 + + if count >= expected_count: + # No gaps in the requested range + return None + + cur.execute("SELECT 1 FROM duties WHERE epoch = ? 
LIMIT 1", (l_epoch,)) + if cur.fetchone() is None: + return l_epoch + + # Find first gap in the requested range cur.execute( """ - SELECT MIN(t.epoch + 1) - FROM duties t - LEFT JOIN duties d2 ON d2.epoch = t.epoch + 1 - WHERE t.epoch BETWEEN ? AND ? AND d2.epoch IS NULL + SELECT epoch + 1 as missing_epoch + FROM ( + SELECT + epoch, + LAG(epoch, 1, epoch - 1) OVER (ORDER BY epoch) as prev_epoch + FROM duties + WHERE epoch BETWEEN ? AND ? + ORDER BY epoch + ) + WHERE epoch - prev_epoch > 1 + LIMIT 1 """, - (l_epoch, r_epoch), + (l_epoch, r_epoch) ) - (missing,) = cur.fetchone() - return int(missing) if missing else (r_epoch + 1) + + result = cur.fetchone() + return result[0] if result else None + + def epochs_demand(self) -> dict[str, tuple[int, int]]: + data = {} + with self.connection() as cur: + cur.execute("SELECT consumer, l_epoch, r_epoch FROM epochs_demand") + demands = cur.fetchall() + for consumer, l_epoch, r_epoch in demands: + data[consumer] = (int(l_epoch), int(r_epoch)) + return data diff --git a/src/modules/performance_collector/http_server.py b/src/modules/performance_collector/http_server.py index 791b95a4a..ba6e4c013 100644 --- a/src/modules/performance_collector/http_server.py +++ b/src/modules/performance_collector/http_server.py @@ -112,7 +112,34 @@ def debug_epoch_details(epoch: int): except Exception as e: return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 - # TODO: POST endpoint for setting l_epoch and r_epoch for FrameCheckpointsIterator + @app.post("/epochs/demand") + def set_epochs_demand(): + try: + data = request.get_json() + if not data or "consumer" not in data or "l_epoch" not in data or "r_epoch" not in data: + return jsonify({"error": "Missing 'consumer' or 'l_epoch' or 'r_epoch' in request body"}), 400 + + consumer = data["consumer"] + l_epoch = data["l_epoch"] + r_epoch = data["r_epoch"] + + if not isinstance(l_epoch, int) or not isinstance(r_epoch, int) or l_epoch > r_epoch: + return jsonify({"error": "'l_epoch' and 'r_epoch' must be integers, and 'l_epoch' <= 'r_epoch'"}), 400 + + db = DutiesDB(app.config["DB_PATH"]) + db.store_demand(consumer, l_epoch, r_epoch) + + return jsonify({"status": "ok", "consumer": consumer, "l_epoch": l_epoch, "r_epoch": r_epoch}) + except Exception as e: + return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 + + @app.get("/epochs/demand") + def get_epochs_demand(): + try: + db = DutiesDB(app.config["DB_PATH"]) + return jsonify({"result": db.epochs_demand()}) + except Exception as e: + return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 return app diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index cbcfc9a22..f6e346280 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -53,12 +53,19 @@ def _build_converter(self) -> ChainConverter: def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay: converter = self._build_converter() - db_min_unprocessed_epoch = self.db.min_unprocessed_epoch() - start_epoch = EpochNumber( - max(db_min_unprocessed_epoch, variables.PERFORMANCE_COLLECTOR_SERVER_START_EPOCH) - ) - end_epoch = variables.PERFORMANCE_COLLECTOR_SERVER_END_EPOCH - # TODO: adjust range by incoming POST requests + epochs_range = self.define_epochs_to_process_range() + if not epochs_range: + return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH + start_epoch, end_epoch = 
epochs_range + + db_min_unprocessed_epoch_in_range = self.db.min_unprocessed_epoch(start_epoch, end_epoch) + logger.info({ + "msg": "Adjust collecting data range by already processed epochs from DB", + "start_epoch": start_epoch, + "end_epoch": end_epoch, + "db_min_unprocessed_epoch_in_range": db_min_unprocessed_epoch_in_range + }) + start_epoch = max(start_epoch, EpochNumber(db_min_unprocessed_epoch_in_range or 0)) finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) @@ -67,7 +74,6 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute 'start_epoch': start_epoch, 'end_epoch': end_epoch, 'finalized_epoch': finalized_epoch, - 'db_min_unprocessed_epoch': db_min_unprocessed_epoch }) try: @@ -84,6 +90,16 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute checkpoint_count = 0 for checkpoint in checkpoints: + # Check if new epochs demand is found during processing + new_epochs_range = self.define_epochs_to_process_range() + if new_epochs_range: + new_start_epoch, new_end_epoch = new_epochs_range + if new_start_epoch != start_epoch or new_end_epoch != end_epoch: + logger.info({ + "msg": "New epochs range to process is found, stopping current epochs range processing" + }) + return ModuleExecuteDelay.NEXT_SLOT + processed_epochs = processor.exec(checkpoint) checkpoint_count += 1 logger.info({ @@ -100,3 +116,33 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute }) return ModuleExecuteDelay.NEXT_SLOT + + def define_epochs_to_process_range(self) -> tuple[EpochNumber, EpochNumber] | None: + start_epoch = end_epoch = None + + epochs_demand = self.db.epochs_demand() + for consumer, (l_epoch, r_epoch) in epochs_demand.items(): + logger.info({ + "msg": "Epochs demand is found", + "consumer": consumer, + "l_epoch": l_epoch, + "r_epoch": r_epoch + }) + satisfied = self.db.is_range_available(l_epoch, r_epoch) + if satisfied: + logger.info({ + "msg": "Epochs demand is already satisfied, skipping", + "start_epoch": l_epoch, + "end_epoch": r_epoch + }) + continue + # To collect little data range first + # TODO: might be issue. 
need to check with finalized epoch + start_epoch = max(start_epoch, l_epoch) if start_epoch else l_epoch + end_epoch = min(end_epoch, r_epoch) if end_epoch else r_epoch + + if not start_epoch and not end_epoch: + logger.info({'msg': 'No epochs demand to process, waiting for any next demand'}) + return None + + return start_epoch, end_epoch diff --git a/src/providers/http_provider.py b/src/providers/http_provider.py index 4af9c347f..90d8819cf 100644 --- a/src/providers/http_provider.py +++ b/src/providers/http_provider.py @@ -8,7 +8,7 @@ from json_stream import requests as json_stream_requests # type: ignore from json_stream.base import TransientStreamingJSONObject # type: ignore from prometheus_client import Histogram -from requests import JSONDecodeError, Session +from requests import JSONDecodeError, Session, Response from requests.adapters import HTTPAdapter from urllib3 import Retry @@ -96,6 +96,17 @@ def _urljoin(host, url): host += '/' return urljoin(host, url) + def _post(self, endpoint: str, data: dict) -> dict: + # TODO: proper implementation + for host in self.hosts: + resp = self.session.post( + self._urljoin(host, endpoint), + json=data, + timeout=self.request_timeout, + ) + return resp.json() + raise ValueError("No hosts provided") + def _get( self, endpoint: str, diff --git a/src/providers/performance/client.py b/src/providers/performance/client.py index 1d4e29f0e..250f5f593 100644 --- a/src/providers/performance/client.py +++ b/src/providers/performance/client.py @@ -17,6 +17,7 @@ class PerformanceClient(HTTPProvider): API_EPOCHS_CHECK = 'epochs/check' API_EPOCHS_MISSING = 'epochs/missing' API_EPOCHS_BLOB = 'epochs/blob' + API_EPOCHS_DEMAND = 'epochs/demand' def is_range_available(self, l_epoch: int, r_epoch: int) -> bool: data, _ = self._get( @@ -60,3 +61,18 @@ def get_epochs(self, l_epoch: int, r_epoch: int) -> list[EpochData]: def get_epoch(self, epoch: int) -> EpochData | None: blob = self.get_epoch_blob(epoch) return EpochDataCodec.decode(bytes.fromhex(blob)) if blob else None + + def get_epochs_demand(self) -> dict[str, tuple[EpochNumber, EpochNumber]]: + data, _ = self._get( + self.API_EPOCHS_DEMAND, + retval_validator=data_is_dict, + ) + return data['result'] + + def post_epochs_demand(self, consumer: str, l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: + # TODO: proper implementation + resp = self._post( + self.API_EPOCHS_DEMAND, + data={'consumer': consumer, 'l_epoch': l_epoch, 'r_epoch': r_epoch}, + ) + diff --git a/src/variables.py b/src/variables.py index 4049374e3..6695e6b2c 100644 --- a/src/variables.py +++ b/src/variables.py @@ -105,9 +105,6 @@ HEALTHCHECK_SERVER_PORT: Final = int(os.getenv('HEALTHCHECK_SERVER_PORT', 9010)) PERFORMANCE_COLLECTOR_SERVER_API_PORT: Final = int(os.getenv('PERFORMANCE_COLLECTOR_SERVER_API_PORT', 9020)) PERFORMANCE_COLLECTOR_RETENTION_EPOCHS: Final = int(os.getenv('PERFORMANCE_COLLECTOR_RETENTION_EPOCHS', 28 * 225 * 6)) -PERFORMANCE_COLLECTOR_SERVER_START_EPOCH: Final = int(os.getenv('PERFORMANCE_COLLECTOR_SERVER_START_EPOCH', 0)) -# TODO: endless? 
-PERFORMANCE_COLLECTOR_SERVER_END_EPOCH: Final = int(os.getenv('PERFORMANCE_COLLECTOR_SERVER_END_EPOCH', 2 ** 64 - 1)) MAX_CYCLE_LIFETIME_IN_SECONDS: Final = int(os.getenv("MAX_CYCLE_LIFETIME_IN_SECONDS", 3000)) @@ -175,7 +172,6 @@ def raise_from_errors(errors): 'HEALTHCHECK_SERVER_PORT': HEALTHCHECK_SERVER_PORT, 'PERFORMANCE_COLLECTOR_SERVER_API_PORT': PERFORMANCE_COLLECTOR_SERVER_API_PORT, 'PERFORMANCE_COLLECTOR_RETENTION_EPOCHS': PERFORMANCE_COLLECTOR_RETENTION_EPOCHS, - 'PERFORMANCE_COLLECTOR_SERVER_START_EPOCH': PERFORMANCE_COLLECTOR_SERVER_START_EPOCH, 'HTTP_REQUEST_TIMEOUT_PERFORMANCE': HTTP_REQUEST_TIMEOUT_PERFORMANCE, 'HTTP_REQUEST_RETRY_COUNT_PERFORMANCE': HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, 'HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE': HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE, diff --git a/tests/fork/test_csm_oracle_cycle.py b/tests/fork/test_csm_oracle_cycle.py index dcc36d5c3..2732772d8 100644 --- a/tests/fork/test_csm_oracle_cycle.py +++ b/tests/fork/test_csm_oracle_cycle.py @@ -22,8 +22,6 @@ def csm_module(web3: Web3): @pytest.fixture() def performance_collector(web3: Web3, frame_config: FrameConfig): - variables.PERFORMANCE_COLLECTOR_SERVER_START_EPOCH = frame_config.initial_epoch - frame_config.epochs_per_frame - variables.PERFORMANCE_COLLECTOR_SERVER_END_EPOCH = frame_config.initial_epoch yield PerformanceCollector(web3) @@ -78,7 +76,6 @@ def test_csm_module_report(performance_collector, module, set_oracle_members, ru module._receive_last_finalized_slot() # pylint: disable=protected-access ) # NOTE: Patch the var to bypass `FrameCheckpointsIterator.MIN_CHECKPOINT_STEP` - variables.PERFORMANCE_COLLECTOR_SERVER_END_EPOCH = report_frame.ref_slot // 32 last_processing_after_report = module.w3.csm.oracle.get_last_processing_ref_slot() assert ( diff --git a/tests/modules/performance_collector/test_codec.py b/tests/modules/performance_collector/test_codec.py index 5ab1c9a20..46e553b13 100644 --- a/tests/modules/performance_collector/test_codec.py +++ b/tests/modules/performance_collector/test_codec.py @@ -6,7 +6,7 @@ SyncDuty, SyncDutiesCodec, AttDutiesMissCodec, - EpochBlobCodec, + EpochDataCodec, ) @@ -254,8 +254,8 @@ def test_epoch_blob_codec_roundtrip(): proposals = PROPOSALS_EXAMPLE syncs = SYNCS_EXAMPLE - blob = EpochBlobCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) - att_decoded, proposals_decoded, syncs_decoded = EpochBlobCodec.decode(blob) + blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) + att_decoded, proposals_decoded, syncs_decoded = EpochDataCodec.decode(blob) # att_decoded may be a set (non-empty) or BitMap; normalize to set from pyroaring import BitMap # type: ignore @@ -274,17 +274,17 @@ def test_epoch_blob_codec_bad_version(): proposals = PROPOSALS_EXAMPLE syncs = SYNCS_EXAMPLE - blob = EpochBlobCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) + blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) bad = bytes([255]) + blob[1:] with pytest.raises(ValueError): - EpochBlobCodec.decode(bad) + EpochDataCodec.decode(bad) @pytest.mark.unit def test_epoch_blob_codec_short_header(): with pytest.raises(ValueError): - EpochBlobCodec.decode(b"\x01\x00") + EpochDataCodec.decode(b"\x01\x00") @pytest.mark.unit @@ -293,8 +293,8 @@ def test_epoch_blob_codec_truncated_payload(): proposals = PROPOSALS_EXAMPLE syncs = SYNCS_EXAMPLE - blob = EpochBlobCodec.encode(att_misses=att_misses, proposals=proposals, 
sync_misses=syncs) + blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) bad_blob = blob[:-1] with pytest.raises(ValueError): - EpochBlobCodec.decode(bad_blob) + EpochDataCodec.decode(bad_blob) From a5d07f769200a96af7736ba53182bf6adc292dab Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 4 Nov 2025 10:14:02 +0100 Subject: [PATCH 11/35] fix: `missing_epochs_in` --- src/modules/performance_collector/db.py | 51 ++----------------------- 1 file changed, 4 insertions(+), 47 deletions(-) diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py index 895456337..b5109784b 100644 --- a/src/modules/performance_collector/db.py +++ b/src/modules/performance_collector/db.py @@ -5,6 +5,7 @@ from src import variables from src.modules.performance_collector.codec import ProposalDuty, SyncDuty, EpochDataCodec, AttDutyMisses from src.types import EpochNumber +from src.utils.range import sequence class DutiesDB: @@ -106,15 +107,9 @@ def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[int]: ) present = [int(row[0]) for row in cur.fetchall()] missing = [] - exp = l_epoch - for e in present: - while exp < e: - missing.append(exp) - exp += 1 - exp = e + 1 - while exp <= r_epoch: - missing.append(exp) - exp += 1 + for epoch in sequence(l_epoch, r_epoch): + if epoch not in present: + missing.append(epoch) return missing def _get_entry(self, epoch: int) -> Optional[bytes]: @@ -146,44 +141,6 @@ def max_epoch(self) -> int: val = int(cur.fetchone()[0] or 0) return val - def min_unprocessed_epoch(self, l_epoch: int, r_epoch: int) -> int | None: - with self.connection() as cur: - cur.execute( - "SELECT COUNT(*) FROM duties WHERE epoch BETWEEN ? AND ?", - (l_epoch, r_epoch), - ) - (count,) = cur.fetchone() - expected_count = r_epoch - l_epoch + 1 - - if count >= expected_count: - # No gaps in the requested range - return None - - cur.execute("SELECT 1 FROM duties WHERE epoch = ? LIMIT 1", (l_epoch,)) - if cur.fetchone() is None: - return l_epoch - - # Find first gap in the requested range - cur.execute( - """ - SELECT epoch + 1 as missing_epoch - FROM ( - SELECT - epoch, - LAG(epoch, 1, epoch - 1) OVER (ORDER BY epoch) as prev_epoch - FROM duties - WHERE epoch BETWEEN ? AND ? 
- ORDER BY epoch - ) - WHERE epoch - prev_epoch > 1 - LIMIT 1 - """, - (l_epoch, r_epoch) - ) - - result = cur.fetchone() - return result[0] if result else None - def epochs_demand(self) -> dict[str, tuple[int, int]]: data = {} with self.connection() as cur: From 16539430c5ae53bf5195ed8b4af9190ec1ea5983 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 4 Nov 2025 15:29:48 +0100 Subject: [PATCH 12/35] fix: logic, logging --- src/modules/csm/csm.py | 8 +- .../performance_collector.py | 108 ++++++++++++------ 2 files changed, 73 insertions(+), 43 deletions(-) diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index c9e4ab8e2..3f06b6e64 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -104,7 +104,8 @@ def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool: converter = self.converter(blockstamp) - l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) + l_epoch, _ = self.get_epochs_range_to_process(blockstamp) + r_epoch = blockstamp.ref_epoch logger.info({"msg": f"Epochs range for performance data collection: [{l_epoch};{r_epoch}]"}) self.state.migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame) @@ -112,11 +113,6 @@ def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool: if not self.state.is_fulfilled: for l_epoch_, r_epoch_ in self.state.frames: - logger.info({ - "msg": "Requesting performance data availability check", - "start_epoch": l_epoch_, - "end_epoch": r_epoch_ - }) is_data_range_available = self.w3.performance.is_range_available( l_epoch_, r_epoch_ ) diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index f6e346280..c8b7c5f2b 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -11,6 +11,7 @@ from src.modules.submodules.oracle_module import BaseModule, ModuleExecuteDelay from src.modules.submodules.types import ChainConfig from src.types import BlockStamp, EpochNumber +from src.utils.range import sequence from src.utils.web3converter import ChainConverter from src import variables @@ -53,33 +54,29 @@ def _build_converter(self) -> ChainConverter: def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay: converter = self._build_converter() - epochs_range = self.define_epochs_to_process_range() + finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) + + epochs_range = self.define_epochs_to_process_range(finalized_epoch) if not epochs_range: return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH start_epoch, end_epoch = epochs_range - db_min_unprocessed_epoch_in_range = self.db.min_unprocessed_epoch(start_epoch, end_epoch) - logger.info({ - "msg": "Adjust collecting data range by already processed epochs from DB", - "start_epoch": start_epoch, - "end_epoch": end_epoch, - "db_min_unprocessed_epoch_in_range": db_min_unprocessed_epoch_in_range - }) - start_epoch = max(start_epoch, EpochNumber(db_min_unprocessed_epoch_in_range or 0)) - - finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) + min_unprocessed_epoch = min(self.db.missing_epochs_in(start_epoch, end_epoch), default=None) + if not min_unprocessed_epoch: + raise ValueError("There should be at least one epoch to process.") logger.info({ 'msg': 'Starting epoch range processing', - 'start_epoch': start_epoch, - 'end_epoch': end_epoch, - 'finalized_epoch': finalized_epoch, + 
"start_epoch": start_epoch, + "end_epoch": end_epoch, + "min_unprocessed_epoch": min_unprocessed_epoch, + "finalized_epoch": finalized_epoch }) try: checkpoints = FrameCheckpointsIterator( converter, - start_epoch, + EpochNumber(min_unprocessed_epoch), end_epoch, finalized_epoch, ) @@ -90,8 +87,9 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute checkpoint_count = 0 for checkpoint in checkpoints: - # Check if new epochs demand is found during processing - new_epochs_range = self.define_epochs_to_process_range() + curr_finalized_slot = self._receive_last_finalized_slot() + curr_finalized_epoch = EpochNumber(converter.get_epoch_by_slot(curr_finalized_slot.slot_number) - 1) + new_epochs_range = self.define_epochs_to_process_range(curr_finalized_epoch, log=False) if new_epochs_range: new_start_epoch, new_end_epoch = new_epochs_range if new_start_epoch != start_epoch or new_end_epoch != end_epoch: @@ -117,32 +115,68 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute return ModuleExecuteDelay.NEXT_SLOT - def define_epochs_to_process_range(self) -> tuple[EpochNumber, EpochNumber] | None: - start_epoch = end_epoch = None - + def define_epochs_to_process_range(self, finalized_epoch: EpochNumber, log=True) -> tuple[EpochNumber, EpochNumber] | None: + unsatisfied_demands = [] epochs_demand = self.db.epochs_demand() for consumer, (l_epoch, r_epoch) in epochs_demand.items(): - logger.info({ - "msg": "Epochs demand is found", - "consumer": consumer, - "l_epoch": l_epoch, - "r_epoch": r_epoch - }) - satisfied = self.db.is_range_available(l_epoch, r_epoch) - if satisfied: + if log: logger.info({ - "msg": "Epochs demand is already satisfied, skipping", - "start_epoch": l_epoch, - "end_epoch": r_epoch + "msg": "Epochs demand is found", + "consumer": consumer, + "l_epoch": l_epoch, + "r_epoch": r_epoch }) + satisfied = self.db.is_range_available(l_epoch, r_epoch) + if satisfied: + if log: + logger.info({ + "msg": "Epochs demand is already satisfied, skipping", + "start_epoch": l_epoch, + "end_epoch": r_epoch + }) continue - # To collect little data range first - # TODO: might be issue. 
need to check with finalized epoch - start_epoch = max(start_epoch, l_epoch) if start_epoch else l_epoch - end_epoch = min(end_epoch, r_epoch) if end_epoch else r_epoch + unsatisfied_demands.append((consumer, l_epoch, r_epoch)) - if not start_epoch and not end_epoch: - logger.info({'msg': 'No epochs demand to process, waiting for any next demand'}) + if not unsatisfied_demands: + if log: + logger.info({'msg': 'No epochs demand to process, waiting for any next demand'}) return None + faced_deadline = [] + for consumer, l_epoch, r_epoch in unsatisfied_demands: + if finalized_epoch >= r_epoch: + if log: + logger.warning({ + "msg": "Epochs demand is passed deadline due to current finalized epoch", + "consumer": consumer, + "l_epoch": l_epoch, + "r_epoch": r_epoch, + "finalized_epoch": finalized_epoch + }) + faced_deadline.append((consumer, l_epoch, r_epoch)) + + def missing_epochs(_, l_epoch_, r_epoch_): + return self.db.missing_epochs_in(l_epoch_, r_epoch_) + + if not faced_deadline: + unsatisfied_demands.sort( + # Demand with the largest count of unprocessed epochs goes first + key=lambda demand: (-1 * len(missing_epochs(*demand))) + ) + consumer, start_epoch, end_epoch = unsatisfied_demands[0] + else: + faced_deadline.sort( + # Demand with the least count of unprocessed epochs goes first + key=lambda demand: len(missing_epochs(*demand)) + ) + consumer, start_epoch, end_epoch = faced_deadline[0] + + if log: + logger.info({ + "msg": "Epochs demand is chosen to process", + "consumer": consumer, + "start_epoch": start_epoch, + "end_epoch": end_epoch, + }) + return start_epoch, end_epoch From aa871b37931c1d94b49e2028ef59f5b957ebc091 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 4 Nov 2025 16:20:48 +0100 Subject: [PATCH 13/35] fix: csm.execute_module --- src/modules/csm/csm.py | 46 +++++++++++++++++++----------------------- 1 file changed, 21 insertions(+), 25 deletions(-) diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index 3f06b6e64..e007bd925 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -71,63 +71,59 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute if not self._check_compatability(last_finalized_blockstamp): return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH - self.send_epochs_to_collect_demand(last_finalized_blockstamp) + self.set_epochs_range_to_collect(last_finalized_blockstamp) report_blockstamp = self.get_blockstamp_for_report(last_finalized_blockstamp) if not report_blockstamp: return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH - collected = self.collect_data(report_blockstamp) + collected = self.collect_data() if not collected: return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH self.process_report(report_blockstamp) return ModuleExecuteDelay.NEXT_SLOT - def send_epochs_to_collect_demand(self, blockstamp: BlockStamp): + @duration_meter() + def set_epochs_range_to_collect(self, blockstamp: BlockStamp): consumer = self.__class__.__name__ + converter = self.converter(blockstamp) + l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) + self.state.migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame) + self.state.log_progress() + current_demands = self.w3.performance.get_epochs_demand() - current_demand = current_demands.get(consumer, (-1, -1)) - curr_l_epoch, curr_r_epoch = EpochNumber(current_demand[0]), EpochNumber(current_demand[1]) - if (curr_l_epoch, curr_r_epoch) != (l_epoch, r_epoch): + current_demand = current_demands.get(consumer) + if current_demand != (l_epoch, r_epoch): logger.info({ - 
"msg": f"Updating epochs demand for {consumer} for Performance Collector", - "old": (curr_l_epoch, curr_r_epoch), + "msg": f"Updating {consumer} epochs demand for Performance Collector", + "old": current_demand, "new": (l_epoch, r_epoch) }) self.w3.performance.post_epochs_demand(consumer, l_epoch, r_epoch) @duration_meter() - def collect_data(self, blockstamp: ReferenceBlockStamp) -> bool: - logger.info({"msg": "Collecting data for the report"}) - - converter = self.converter(blockstamp) - - l_epoch, _ = self.get_epochs_range_to_process(blockstamp) - r_epoch = blockstamp.ref_epoch - logger.info({"msg": f"Epochs range for performance data collection: [{l_epoch};{r_epoch}]"}) - - self.state.migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame) - self.state.log_progress() + def collect_data(self) -> bool: + logger.info({"msg": "Collecting data for the report from Performance Collector"}) if not self.state.is_fulfilled: - for l_epoch_, r_epoch_ in self.state.frames: + for l_epoch, r_epoch in self.state.frames: is_data_range_available = self.w3.performance.is_range_available( - l_epoch_, r_epoch_ + l_epoch, r_epoch ) if not is_data_range_available: logger.warning({ "msg": f"Performance data range is not available yet", - "start_epoch": l_epoch_, - "end_epoch": r_epoch_ + "start_epoch": l_epoch, + "end_epoch": r_epoch }) return False else: logger.info({ "msg": "Performance data range is available", - "start_epoch": l_epoch_, - "end_epoch": r_epoch_ + "start_epoch": l_epoch, + "end_epoch": r_epoch }) self.fulfill_state() From 2a7019948cc12009882439768484c5de1fb1400f Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 4 Nov 2025 16:22:01 +0100 Subject: [PATCH 14/35] feat: add `_post` for http_provider --- src/providers/http_provider.py | 127 +++++++++++++++++++++++++--- src/providers/performance/client.py | 11 +-- 2 files changed, 120 insertions(+), 18 deletions(-) diff --git a/src/providers/http_provider.py b/src/providers/http_provider.py index 90d8819cf..493161e9e 100644 --- a/src/providers/http_provider.py +++ b/src/providers/http_provider.py @@ -96,17 +96,6 @@ def _urljoin(host, url): host += '/' return urljoin(host, url) - def _post(self, endpoint: str, data: dict) -> dict: - # TODO: proper implementation - for host in self.hosts: - resp = self.session.post( - self._urljoin(host, endpoint), - json=data, - timeout=self.request_timeout, - ) - return resp.json() - raise ValueError("No hosts provided") - def _get( self, endpoint: str, @@ -117,7 +106,7 @@ def _get( stream: bool = False, ) -> tuple[Any, dict]: """ - Get plain or streamed request with fallbacks + Plain or streamed GET request with fallbacks Returns (data, meta) or raises exception force_raise - function that returns an Exception if it should be thrown immediately. 
@@ -163,7 +152,7 @@ def _get_without_fallbacks( retval_validator: ReturnValueValidator = data_is_any, ) -> tuple[Any, dict]: """ - Simple get request without fallbacks + Simple GET request without fallbacks Returns (data, meta) or raises an exception """ complete_endpoint = endpoint.format(*path_params) if path_params else endpoint @@ -227,6 +216,118 @@ def _get_without_fallbacks( retval_validator(data, meta, endpoint=endpoint) return data, meta + def _post( + self, + endpoint: str, + path_params: Sequence[str | int] | None = None, + query_params: dict | None = None, + body_data: dict | None = None, + force_raise: Callable[..., Exception | None] = lambda _: None, + retval_validator: ReturnValueValidator = data_is_any, + ) -> tuple[dict, dict]: + """ + Plain POST request with fallbacks + Returns (data, meta) or raises exception + + force_raise - function that returns an Exception if it should be thrown immediately. + Sometimes NotOk response from first provider is the response that we are expecting. + """ + errors: list[Exception] = [] + + for host in self.hosts: + try: + return self._post_without_fallbacks( + host, + endpoint, + path_params, + query_params, + body_data, + retval_validator=retval_validator, + ) + except Exception as e: # pylint: disable=W0703 + errors.append(e) + + # Check if exception should be raised immediately + if to_force_raise := force_raise(errors): + raise to_force_raise from e + + logger.warning( + { + 'msg': f'[{self.__class__.__name__}] Host [{urlparse(host).netloc}] responded with error', + 'error': str(e), + 'provider': urlparse(host).netloc, + } + ) + + # Raise error from last provider. + raise errors[-1] + + def _post_without_fallbacks( + self, + host: str, + endpoint: str, + path_params: Sequence[str | int] | None = None, + query_params: dict | None = None, + body_data: dict | None = None, + retval_validator: ReturnValueValidator = data_is_any, + ) -> tuple[dict, dict]: + """ + Simple POST request without fallbacks + Returns (data, meta) or raises an exception + """ + complete_endpoint = endpoint.format(*path_params) if path_params else endpoint + + with self.PROMETHEUS_HISTOGRAM.time() as t: + try: + response = self.session.post( + self._urljoin(host, complete_endpoint if path_params else endpoint), + params=query_params, + json=body_data, + timeout=self.request_timeout, + ) + except Exception as error: + logger.error({'msg': str(error)}) + t.labels( + endpoint=endpoint, + code=0, + domain=urlparse(host).netloc, + ) + raise self.PROVIDER_EXCEPTION(status=0, text='Response error.') from error + + t.labels( + endpoint=endpoint, + code=response.status_code, + domain=urlparse(host).netloc, + ) + + if response.status_code != HTTPStatus.OK: + response_fail_msg = ( + f'Response from {complete_endpoint} [{response.status_code}]' + f' with text: "{str(response.text)}" returned.' 
+                )
+                logger.debug({'msg': response_fail_msg})
+                raise self.PROVIDER_EXCEPTION(response_fail_msg, status=response.status_code, text=response.text)
+
+            try:
+                json_response = response.json()
+            except JSONDecodeError as error:
+                response_fail_msg = (
+                    f'Failed to decode JSON response from {complete_endpoint} with text: "{str(response.text)}"'
+                )
+                logger.debug({'msg': response_fail_msg})
+                raise self.PROVIDER_EXCEPTION(status=0, text='JSON decode error.') from error
+
+            try:
+                data = json_response["data"]
+                del json_response["data"]
+                meta = json_response
+            except KeyError:
+                data = json_response
+                meta = {}
+
+            retval_validator(data, meta, endpoint=endpoint)
+            return data, meta
+
     def get_all_providers(self) -> list[str]:
         return self.hosts

diff --git a/src/providers/performance/client.py
index 250f5f593..e0c5975ac 100644
--- a/src/providers/performance/client.py
+++ b/src/providers/performance/client.py
@@ -67,12 +67,13 @@ def get_epochs_demand(self) -> dict[str, tuple[EpochNumber, EpochNumber]]:
         data, _ = self._get(
             self.API_EPOCHS_DEMAND,
             retval_validator=data_is_dict,
         )
-        return data['result']
+        return {
+            consumer: (EpochNumber(demand[0]), EpochNumber(demand[1]))
+            for consumer, demand in data['result'].items()
+        }

     def post_epochs_demand(self, consumer: str, l_epoch: EpochNumber, r_epoch: EpochNumber) -> None:
-        # TODO: proper implementation
-        resp = self._post(
+        self._post(
             self.API_EPOCHS_DEMAND,
-            data={'consumer': consumer, 'l_epoch': l_epoch, 'r_epoch': r_epoch},
+            body_data={'consumer': consumer, 'l_epoch': l_epoch, 'r_epoch': r_epoch},
         )
-

From db1026707016c697e9474337cd7c837389ab9a0d Mon Sep 17 00:00:00 2001
From: vgorkavenko
Date: Tue, 4 Nov 2025 16:58:19 +0100
Subject: [PATCH 15/35] fix: SafeBorder inheritance issue

---
 src/services/safe_border.py | 6 ------
 src/utils/web3converter.py  | 1 -
 2 files changed, 7 deletions(-)

diff --git a/src/services/safe_border.py
index ffe668392..47ed5fcc9 100644
--- a/src/services/safe_border.py
+++ b/src/services/safe_border.py
@@ -30,10 +30,7 @@ class SafeBorder(Web3Converter):
    2. Negative rebase border
    3.

Associated slashing border """ - chain_config: ChainConfig - frame_config: FrameConfig blockstamp: ReferenceBlockStamp - converter: Web3Converter def __init__( self, @@ -48,10 +45,7 @@ def __init__( self.lido_contracts = w3.lido_contracts self.blockstamp = blockstamp - self.chain_config = chain_config - self.frame_config = frame_config - self.converter = Web3Converter(chain_config, frame_config) self._retrieve_constants() def _retrieve_constants(self): diff --git a/src/utils/web3converter.py b/src/utils/web3converter.py index def531363..58a705e97 100644 --- a/src/utils/web3converter.py +++ b/src/utils/web3converter.py @@ -38,7 +38,6 @@ class Web3Converter(ChainConverter): frame_config: FrameConfig def __init__(self, chain_config: ChainConfig, frame_config: FrameConfig): - # TODO: fix SafeBorder inheritance issue in Web3Converter super().__init__(chain_config) self.frame_config = frame_config From 0925e5f369bcf06dd9fbeaf1ed45f527cca43a52 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 4 Nov 2025 16:58:48 +0100 Subject: [PATCH 16/35] fix: remove TODOs --- src/modules/performance_collector/checkpoint.py | 1 - src/modules/performance_collector/db.py | 1 - 2 files changed, 2 deletions(-) diff --git a/src/modules/performance_collector/checkpoint.py b/src/modules/performance_collector/checkpoint.py index 813c93a39..a50a84fe9 100644 --- a/src/modules/performance_collector/checkpoint.py +++ b/src/modules/performance_collector/checkpoint.py @@ -299,7 +299,6 @@ def _check_duties( raise ValueError(f"Invalid number of propose duties prepared in epoch {duty_epoch}") if len(sync_duties) > SYNC_COMMITTEE_SIZE: raise ValueError(f"Invalid number of sync duties prepared in epoch {duty_epoch}") - # TODO: log progress with remaining time? self.db.store_epoch( duty_epoch, att_misses=att_misses, diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py index b5109784b..14dd419d4 100644 --- a/src/modules/performance_collector/db.py +++ b/src/modules/performance_collector/db.py @@ -83,7 +83,6 @@ def _auto_prune(self, current_epoch: int) -> None: if threshold <= 0: return with self.connection() as cur: - # TODO: logging? cur.execute("DELETE FROM duties WHERE epoch < ?", (threshold,)) def is_range_available(self, l_epoch: int, r_epoch: int) -> bool: From 84b2e4760a0b02fa3abc812a5fe91ed28552ace1 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 4 Nov 2025 17:02:59 +0100 Subject: [PATCH 17/35] feat: add `PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT` --- src/modules/performance_collector/db.py | 8 +++++--- src/variables.py | 6 ++++-- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py index 14dd419d4..84d42904e 100644 --- a/src/modules/performance_collector/db.py +++ b/src/modules/performance_collector/db.py @@ -11,7 +11,9 @@ class DutiesDB: def __init__(self, path: str): self._path = path - self._conn = sqlite3.connect(self._path, check_same_thread=False, timeout=30.0) # TODO: Timeout? 
+ self._conn = sqlite3.connect( + self._path, check_same_thread=False, timeout=variables.PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT + ) # Optimize SQLite for performance: WAL mode for concurrent access, # normal sync for speed/safety balance, memory temp storage self._conn.execute("PRAGMA journal_mode=WAL;") @@ -77,9 +79,9 @@ def _store_blob(self, epoch: int, blob: bytes) -> None: ) def _auto_prune(self, current_epoch: int) -> None: - if variables.PERFORMANCE_COLLECTOR_RETENTION_EPOCHS <= 0: + if variables.PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS <= 0: return - threshold = int(current_epoch) - variables.PERFORMANCE_COLLECTOR_RETENTION_EPOCHS + threshold = int(current_epoch) - variables.PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS if threshold <= 0: return with self.connection() as cur: diff --git a/src/variables.py b/src/variables.py index 6695e6b2c..01c695d3b 100644 --- a/src/variables.py +++ b/src/variables.py @@ -104,7 +104,8 @@ HEALTHCHECK_SERVER_PORT: Final = int(os.getenv('HEALTHCHECK_SERVER_PORT', 9010)) PERFORMANCE_COLLECTOR_SERVER_API_PORT: Final = int(os.getenv('PERFORMANCE_COLLECTOR_SERVER_API_PORT', 9020)) -PERFORMANCE_COLLECTOR_RETENTION_EPOCHS: Final = int(os.getenv('PERFORMANCE_COLLECTOR_RETENTION_EPOCHS', 28 * 225 * 6)) +PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS', 28 * 225 * 6)) +PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT', 30)) MAX_CYCLE_LIFETIME_IN_SECONDS: Final = int(os.getenv("MAX_CYCLE_LIFETIME_IN_SECONDS", 3000)) @@ -171,7 +172,8 @@ def raise_from_errors(errors): 'PROMETHEUS_PREFIX': PROMETHEUS_PREFIX, 'HEALTHCHECK_SERVER_PORT': HEALTHCHECK_SERVER_PORT, 'PERFORMANCE_COLLECTOR_SERVER_API_PORT': PERFORMANCE_COLLECTOR_SERVER_API_PORT, - 'PERFORMANCE_COLLECTOR_RETENTION_EPOCHS': PERFORMANCE_COLLECTOR_RETENTION_EPOCHS, + 'PERFORMANCE_COLLECTOR_RETENTION_EPOCHS': PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS, + 'PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT': PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT, 'HTTP_REQUEST_TIMEOUT_PERFORMANCE': HTTP_REQUEST_TIMEOUT_PERFORMANCE, 'HTTP_REQUEST_RETRY_COUNT_PERFORMANCE': HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, 'HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE': HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE, From b08865a5b2d1a4b9d589cfcc0ad3a83e9c2275b0 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Fri, 7 Nov 2025 15:17:58 +0100 Subject: [PATCH 18/35] feat: use finalized epoch if no demands --- .../performance_collector/checkpoint.py | 1 - .../performance_collector.py | 43 +++++++++++-------- src/variables.py | 2 +- 3 files changed, 27 insertions(+), 19 deletions(-) diff --git a/src/modules/performance_collector/checkpoint.py b/src/modules/performance_collector/checkpoint.py index a50a84fe9..d35b4446e 100644 --- a/src/modules/performance_collector/checkpoint.py +++ b/src/modules/performance_collector/checkpoint.py @@ -58,7 +58,6 @@ class FrameCheckpointsIterator: max_available_epoch_to_check: EpochNumber # Min checkpoint step is 10 because it's a reasonable number of epochs to process at once (~1 hour) - # FIXME: frame might change while waiting for the next checkpoint MIN_CHECKPOINT_STEP = 10 # Max checkpoint step is 255 epochs because block_roots size from state is 8192 slots (256 epochs) # to check duty of every epoch, we need to check 64 slots (32 slots of duty epoch + 32 slots of next epoch). 
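The step bounds referenced in the comments above follow directly from the size of the beacon state's block_roots buffer. A short worked example of the arithmetic (constant names mirror src/constants.py; the computation itself is illustrative):

```python
# Worked arithmetic for the checkpoint step bounds described above.
SLOTS_PER_HISTORICAL_ROOT = 2**13  # 8192 block roots kept in a beacon state
SLOTS_PER_EPOCH = 32

# Checking one duty epoch requires 64 slots: the epoch's own 32 slots plus
# the following epoch's 32 slots, where late attestations may be included.
slots_per_duty_epoch_check = 2 * SLOTS_PER_EPOCH  # 64

epochs_covered_by_roots = SLOTS_PER_HISTORICAL_ROOT // SLOTS_PER_EPOCH  # 256
# One trailing epoch is reserved for the "next epoch" slots of the last duty
# epoch in the batch, which caps the checkpoint step at 255 epochs.
max_checkpoint_step = epochs_covered_by_roots - 1
assert max_checkpoint_step == 255
```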
diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index c8b7c5f2b..2cd9cfa4b 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -56,10 +56,14 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) - epochs_range = self.define_epochs_to_process_range(finalized_epoch) - if not epochs_range: - return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH - start_epoch, end_epoch = epochs_range + epochs_range_demand = self.define_epochs_to_process_range(finalized_epoch) + if epochs_range_demand: + start_epoch, end_epoch = epochs_range_demand + else: + logger.info({'msg': 'No epochs demand to process. Default epochs range is used.'}) + gap = FrameCheckpointsIterator.MIN_CHECKPOINT_STEP + FrameCheckpointsIterator.CHECKPOINT_SLOT_DELAY_EPOCHS + start_epoch = self.db.max_epoch() or max(0, finalized_epoch - gap) + end_epoch = finalized_epoch min_unprocessed_epoch = min(self.db.missing_epochs_in(start_epoch, end_epoch), default=None) if not min_unprocessed_epoch: @@ -87,17 +91,6 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute checkpoint_count = 0 for checkpoint in checkpoints: - curr_finalized_slot = self._receive_last_finalized_slot() - curr_finalized_epoch = EpochNumber(converter.get_epoch_by_slot(curr_finalized_slot.slot_number) - 1) - new_epochs_range = self.define_epochs_to_process_range(curr_finalized_epoch, log=False) - if new_epochs_range: - new_start_epoch, new_end_epoch = new_epochs_range - if new_start_epoch != start_epoch or new_end_epoch != end_epoch: - logger.info({ - "msg": "New epochs range to process is found, stopping current epochs range processing" - }) - return ModuleExecuteDelay.NEXT_SLOT - processed_epochs = processor.exec(checkpoint) checkpoint_count += 1 logger.info({ @@ -108,6 +101,12 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute # Reset BaseOracle cycle timeout to avoid timeout errors during long checkpoints processing self._reset_cycle_timeout() + if self.new_epochs_range_demand_appeared(converter, start_epoch, end_epoch): + logger.info({ + "msg": "New epochs range to process is found, stopping current epochs range processing" + }) + return ModuleExecuteDelay.NEXT_SLOT + logger.info({ 'msg': 'All checkpoints processing completed', 'total_checkpoints_processed': checkpoint_count @@ -115,6 +114,18 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute return ModuleExecuteDelay.NEXT_SLOT + def new_epochs_range_demand_appeared( + self, converter: ChainConverter, start_epoch: EpochNumber, end_epoch: EpochNumber + ) -> bool: + curr_finalized_slot = self._receive_last_finalized_slot() + curr_finalized_epoch = EpochNumber(converter.get_epoch_by_slot(curr_finalized_slot.slot_number) - 1) + new_epochs_range = self.define_epochs_to_process_range(curr_finalized_epoch, log=False) + if new_epochs_range: + new_start_epoch, new_end_epoch = new_epochs_range + if new_start_epoch != start_epoch or new_end_epoch != end_epoch: + return True + return False + def define_epochs_to_process_range(self, finalized_epoch: EpochNumber, log=True) -> tuple[EpochNumber, EpochNumber] | None: unsatisfied_demands = [] epochs_demand = self.db.epochs_demand() @@ -138,8 +149,6 @@ def define_epochs_to_process_range(self, 
finalized_epoch: EpochNumber, log=True) unsatisfied_demands.append((consumer, l_epoch, r_epoch)) if not unsatisfied_demands: - if log: - logger.info({'msg': 'No epochs demand to process, waiting for any next demand'}) return None faced_deadline = [] diff --git a/src/variables.py b/src/variables.py index 01c695d3b..3b1f6fbcb 100644 --- a/src/variables.py +++ b/src/variables.py @@ -172,7 +172,7 @@ def raise_from_errors(errors): 'PROMETHEUS_PREFIX': PROMETHEUS_PREFIX, 'HEALTHCHECK_SERVER_PORT': HEALTHCHECK_SERVER_PORT, 'PERFORMANCE_COLLECTOR_SERVER_API_PORT': PERFORMANCE_COLLECTOR_SERVER_API_PORT, - 'PERFORMANCE_COLLECTOR_RETENTION_EPOCHS': PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS, + 'PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS': PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS, 'PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT': PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT, 'HTTP_REQUEST_TIMEOUT_PERFORMANCE': HTTP_REQUEST_TIMEOUT_PERFORMANCE, 'HTTP_REQUEST_RETRY_COUNT_PERFORMANCE': HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, From baf8c83c88b03bf171e738a0f5c619dcd4c324dc Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Mon, 10 Nov 2025 11:50:20 +0100 Subject: [PATCH 19/35] fix: `validate_state` --- src/modules/csm/csm.py | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index e007bd925..0f7d70295 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -132,9 +132,7 @@ def collect_data(self) -> bool: @lru_cache(maxsize=1) @duration_meter() def build_report(self, blockstamp: ReferenceBlockStamp) -> tuple: - l_epoch, _ = self.get_epochs_range_to_process(blockstamp) - r_epoch = blockstamp.ref_epoch - self.state.validate(l_epoch, r_epoch) + self.validate_state(blockstamp) last_report = self._get_last_report(blockstamp) rewards_tree_root, rewards_cid = last_report.rewards_tree_root, last_report.rewards_tree_cid @@ -194,6 +192,14 @@ def is_reporting_allowed(self, blockstamp: ReferenceBlockStamp) -> bool: CONTRACT_ON_PAUSE.labels("csm").set(on_pause) return not on_pause + def validate_state(self, blockstamp: ReferenceBlockStamp) -> None: + # NOTE: We cannot use `r_epoch` from the `current_frame_range` call because the `blockstamp` is a + # `ReferenceBlockStamp`, hence it's a block the frame ends at. We use `ref_epoch` instead. 
+ l_epoch, _ = self.get_epochs_range_to_process(blockstamp) + r_epoch = blockstamp.ref_epoch + + self.state.validate(l_epoch, r_epoch) + def fulfill_state(self): finalized_blockstamp = self._receive_last_finalized_slot() validators = self.w3.cc.get_validators(finalized_blockstamp) From e72cff25fc46e223fbaf84d06a5ff6eb6b9b6a19 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Mon, 10 Nov 2025 11:50:44 +0100 Subject: [PATCH 20/35] WIP: `test_csm_module.py` --- tests/modules/csm/test_csm_module.py | 257 ++++++++++++++------------- 1 file changed, 132 insertions(+), 125 deletions(-) diff --git a/tests/modules/csm/test_csm_module.py b/tests/modules/csm/test_csm_module.py index f82b6b518..36a3eb2a7 100644 --- a/tests/modules/csm/test_csm_module.py +++ b/tests/modules/csm/test_csm_module.py @@ -223,128 +223,130 @@ class CollectDataTestParam: expected_result: bool | Exception -@pytest.mark.parametrize( - "param", - [ - pytest.param( - CollectDataTestParam( - collect_blockstamp=Mock(slot_number=64), - collect_frame_range=Mock(return_value=(0, 1)), - report_blockstamp=Mock(ref_epoch=3), - state=Mock(), - expected_msg="Epochs range has been changed, but the change is not yet observed on finalized epoch 1", - expected_result=False, - ), - id="frame_changed_forward", - ), - pytest.param( - CollectDataTestParam( - collect_blockstamp=Mock(slot_number=64), - collect_frame_range=Mock(return_value=(0, 2)), - report_blockstamp=Mock(ref_epoch=1), - state=Mock(), - expected_msg="Epochs range has been changed, but the change is not yet observed on finalized epoch 1", - expected_result=False, - ), - id="frame_changed_backward", - ), - pytest.param( - CollectDataTestParam( - collect_blockstamp=Mock(slot_number=32), - collect_frame_range=Mock(return_value=(1, 2)), - report_blockstamp=Mock(ref_epoch=2), - state=Mock(), - expected_msg="The starting epoch of the epochs range is not finalized yet", - expected_result=False, - ), - id="starting_epoch_not_finalized", - ), - pytest.param( - CollectDataTestParam( - collect_blockstamp=Mock(slot_number=32), - collect_frame_range=Mock(return_value=(0, 2)), - report_blockstamp=Mock(ref_epoch=2), - state=Mock( - migrate=Mock(), - log_status=Mock(), - is_fulfilled=True, - ), - expected_msg="All epochs are already processed. 
Nothing to collect", - expected_result=True, - ), - id="state_fulfilled", - ), - pytest.param( - CollectDataTestParam( - collect_blockstamp=Mock(slot_number=320), - collect_frame_range=Mock(return_value=(0, 100)), - report_blockstamp=Mock(ref_epoch=100), - state=Mock( - migrate=Mock(), - log_status=Mock(), - unprocessed_epochs=[5], - is_fulfilled=False, - ), - expected_msg="Minimum checkpoint step is not reached, current delay is 2 epochs", - expected_result=False, - ), - id="min_step_not_reached", - ), - ], -) -@pytest.mark.unit -def test_collect_data( - module: CSOracle, - param: CollectDataTestParam, - mock_chain_config: NoReturn, - mock_frame_config: NoReturn, - caplog, - monkeypatch, -): - module.w3 = Mock() - module._receive_last_finalized_slot = Mock() - module.state = param.state - module.get_epochs_range_to_process = param.collect_frame_range - module.get_blockstamp_for_report = Mock(return_value=param.report_blockstamp) - - with caplog.at_level(logging.DEBUG): - if isinstance(param.expected_result, Exception): - with pytest.raises(type(param.expected_result)): - module.collect_data(blockstamp=param.collect_blockstamp) - else: - collected = module.collect_data(blockstamp=param.collect_blockstamp) - assert collected == param.expected_result - - msg = list(filter(lambda log: param.expected_msg in log, caplog.messages)) - assert len(msg), f"Expected message '{param.expected_msg}' not found in logs" - - -@pytest.mark.unit -def test_collect_data_outdated_checkpoint( - module: CSOracle, mock_chain_config: NoReturn, mock_frame_config: NoReturn, caplog -): - module.w3 = Mock() - module._receive_last_finalized_slot = Mock() - module.state = Mock( - migrate=Mock(), - log_status=Mock(), - unprocessed_epochs=list(range(0, 101)), - is_fulfilled=False, - ) - module.get_epochs_range_to_process = Mock(side_effect=[(0, 100), (50, 150)]) - module.get_blockstamp_for_report = Mock(return_value=Mock(ref_epoch=100)) - - with caplog.at_level(logging.DEBUG): - with pytest.raises(ValueError): - module.collect_data(blockstamp=Mock(slot_number=640)) - - msg = list( - filter( - lambda log: "Checkpoints were prepared for an outdated epochs range, stop processing" in log, - caplog.messages, - ) - ) - assert len(msg), "Expected message not found in logs" +# TODO: move to performance collector tests +# @pytest.mark.parametrize( +# "param", +# [ +# pytest.param( +# CollectDataTestParam( +# collect_blockstamp=Mock(slot_number=64), +# collect_frame_range=Mock(return_value=(0, 1)), +# report_blockstamp=Mock(ref_epoch=3), +# state=Mock(), +# expected_msg="Epochs range has been changed, but the change is not yet observed on finalized epoch 1", +# expected_result=False, +# ), +# id="frame_changed_forward", +# ), +# pytest.param( +# CollectDataTestParam( +# collect_blockstamp=Mock(slot_number=64), +# collect_frame_range=Mock(return_value=(0, 2)), +# report_blockstamp=Mock(ref_epoch=1), +# state=Mock(), +# expected_msg="Epochs range has been changed, but the change is not yet observed on finalized epoch 1", +# expected_result=False, +# ), +# id="frame_changed_backward", +# ), +# pytest.param( +# CollectDataTestParam( +# collect_blockstamp=Mock(slot_number=32), +# collect_frame_range=Mock(return_value=(1, 2)), +# report_blockstamp=Mock(ref_epoch=2), +# state=Mock(), +# expected_msg="The starting epoch of the epochs range is not finalized yet", +# expected_result=False, +# ), +# id="starting_epoch_not_finalized", +# ), +# pytest.param( +# CollectDataTestParam( +# collect_blockstamp=Mock(slot_number=32), +# 
collect_frame_range=Mock(return_value=(0, 2)), +# report_blockstamp=Mock(ref_epoch=2), +# state=Mock( +# migrate=Mock(), +# log_status=Mock(), +# is_fulfilled=True, +# ), +# expected_msg="All epochs are already processed. Nothing to collect", +# expected_result=True, +# ), +# id="state_fulfilled", +# ), +# pytest.param( +# CollectDataTestParam( +# collect_blockstamp=Mock(slot_number=320), +# collect_frame_range=Mock(return_value=(0, 100)), +# report_blockstamp=Mock(ref_epoch=100), +# state=Mock( +# migrate=Mock(), +# log_status=Mock(), +# unprocessed_epochs=[5], +# is_fulfilled=False, +# ), +# expected_msg="Minimum checkpoint step is not reached, current delay is 2 epochs", +# expected_result=False, +# ), +# id="min_step_not_reached", +# ), +# ], +# ) +# @pytest.mark.unit +# def test_collect_data( +# module: CSOracle, +# param: CollectDataTestParam, +# mock_chain_config: NoReturn, +# mock_frame_config: NoReturn, +# caplog, +# monkeypatch, +# ): +# module.w3 = Mock() +# module._receive_last_finalized_slot = Mock() +# module.state = param.state +# module.get_epochs_range_to_process = param.collect_frame_range +# module.get_blockstamp_for_report = Mock(return_value=param.report_blockstamp) +# +# with caplog.at_level(logging.DEBUG): +# if isinstance(param.expected_result, Exception): +# with pytest.raises(type(param.expected_result)): +# module.collect_data(blockstamp=param.collect_blockstamp) +# else: +# collected = module.collect_data(blockstamp=param.collect_blockstamp) +# assert collected == param.expected_result +# +# msg = list(filter(lambda log: param.expected_msg in log, caplog.messages)) +# assert len(msg), f"Expected message '{param.expected_msg}' not found in logs" +# +# +# +# @pytest.mark.unit +# def test_collect_data_outdated_checkpoint( +# module: CSOracle, mock_chain_config: NoReturn, mock_frame_config: NoReturn, caplog +# ): +# module.w3 = Mock() +# module._receive_last_finalized_slot = Mock() +# module.state = Mock( +# migrate=Mock(), +# log_status=Mock(), +# unprocessed_epochs=list(range(0, 101)), +# is_fulfilled=False, +# ) +# module.get_epochs_range_to_process = Mock(side_effect=[(0, 100), (50, 150)]) +# module.get_blockstamp_for_report = Mock(return_value=Mock(ref_epoch=100)) +# +# with caplog.at_level(logging.DEBUG): +# with pytest.raises(ValueError): +# module.collect_data(blockstamp=Mock(slot_number=640)) +# +# msg = list( +# filter( +# lambda log: "Checkpoints were prepared for an outdated epochs range, stop processing" in log, +# caplog.messages, +# ) +# ) +# assert len(msg), "Expected message not found in logs" @pytest.mark.unit @@ -358,15 +360,17 @@ def test_collect_data_fulfilled_state( migrate=Mock(), log_status=Mock(), unprocessed_epochs=list(range(0, 101)), + frames=[[0, 100]], ) type(module.state).is_fulfilled = PropertyMock(side_effect=[False, True]) module.get_epochs_range_to_process = Mock(return_value=(0, 100)) module.get_blockstamp_for_report = Mock(return_value=Mock(ref_epoch=100)) + module.w3.performance.is_range_available = Mock(return_value=True) + module.fulfill_state = Mock() with caplog.at_level(logging.DEBUG): - with patch('src.modules.csm.csm.FrameCheckpointProcessor.exec', return_value=None): - collected = module.collect_data(blockstamp=Mock(slot_number=640)) - assert collected is True + collected = module.collect_data() + assert collected is True # assert that it is not early return from function msg = list(filter(lambda log: "All epochs are already processed. 
Nothing to collect" in log, caplog.messages)) @@ -593,6 +597,7 @@ def test_build_report(module: CSOracle, param: BuildReportTestParam): def test_execute_module_not_collected(module: CSOracle): module._check_compatability = Mock(return_value=True) module.get_blockstamp_for_report = Mock(return_value=Mock(slot_number=100500)) + module.set_epochs_range_to_collect = Mock() module.collect_data = Mock(return_value=False) execute_delay = module.execute_module( @@ -616,6 +621,7 @@ def test_execute_module_skips_collecting_if_forward_compatible(module: CSOracle) @pytest.mark.unit def test_execute_module_no_report_blockstamp(module: CSOracle): module._check_compatability = Mock(return_value=True) + module.set_epochs_range_to_collect = Mock() module.collect_data = Mock(return_value=True) module.get_blockstamp_for_report = Mock(return_value=None) @@ -627,6 +633,7 @@ def test_execute_module_no_report_blockstamp(module: CSOracle): @pytest.mark.unit def test_execute_module_processed(module: CSOracle): + module.set_epochs_range_to_collect = Mock() module.collect_data = Mock(return_value=True) module.get_blockstamp_for_report = Mock(return_value=Mock(slot_number=100500)) module.process_report = Mock() From 1d2fb60f83aa204d81525b57e6bad85b9576eeb6 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Mon, 10 Nov 2025 16:14:15 +0100 Subject: [PATCH 21/35] WIP: `test_csm_module.py` --- poetry.lock | 100 +++- pyproject.toml | 1 + src/modules/csm/csm.py | 2 +- src/modules/performance_collector/codec.py | 1 + tests/modules/csm/test_csm_module.py | 504 ++++++++++++++------- 5 files changed, 446 insertions(+), 162 deletions(-) diff --git a/poetry.lock b/poetry.lock index f0d5df6c1..f13a04dbe 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.1.3 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -2130,7 +2130,7 @@ eth-hash = {version = "^0.7.0", extras = ["pycryptodome"]} [package.source] type = "git" url = "https://github.com/lidofinance/oz-merkle-tree" -reference = "HEAD" +reference = "f4ad6e006b8daf05ce2ce255e123eb9f923d8ef8" resolved_reference = "f4ad6e006b8daf05ce2ce255e123eb9f923d8ef8" [[package]] @@ -2691,6 +2691,100 @@ tomlkit = ">=0.10.1" spelling = ["pyenchant (>=3.2,<4.0)"] testutils = ["gitpython (>3)"] +[[package]] +name = "pyroaring" +version = "1.0.3" +description = "Library for handling efficiently sorted integer sets." 
+optional = false +python-versions = "*" +groups = ["main"] +files = [ + {file = "pyroaring-1.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c10e4cfbe203a578c78808406af491e3615d5e46cf69a7709050243346cd68bc"}, + {file = "pyroaring-1.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cc329c62e504f2531c4008240f31736bcd2dee4339071f1eac0648068e6d17fa"}, + {file = "pyroaring-1.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c7fb6ddf6ef31148f0939bc5c26b681d63df301ee1e372525012dd7bfe4a30a"}, + {file = "pyroaring-1.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd18446832ea04a7d33bd6b78270b0be14eabcda5937af3428d6cb3d2bf98e54"}, + {file = "pyroaring-1.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f0cbc766df2a24e28f23d69b66bbec64e691799219fd82c2f2236f03fc88e2e"}, + {file = "pyroaring-1.0.3-cp310-cp310-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96a51e96f8f473381615f0f852f7238ad0a47f28e4a35e9f082468c5cfe4e9c3"}, + {file = "pyroaring-1.0.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:654af38b1f1c9bdc27b4f6d331fc5d91599df96e72a6df1886f4d95eea60ab29"}, + {file = "pyroaring-1.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6721036afa31c07bdcbb4fcafa166660cf9c2eac695dcd495f8778549fa55899"}, + {file = "pyroaring-1.0.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:0caa10f20329d09233fac6550b2adce4d9f173f748a9a9a5ea3b7033827dfe2d"}, + {file = "pyroaring-1.0.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f109be8af937e85c52cb920d3fd120db52b172f59460852d2e3d2e3d13a4f52a"}, + {file = "pyroaring-1.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ddc80bfcd313c7c524a2742d263e73cae088b6a611b77dcc46fa90c306f6dace"}, + {file = "pyroaring-1.0.3-cp310-cp310-win32.whl", hash = "sha256:5a183f5ec069757fe5b60e37f7c6fa8a53178eacf0d76601b739e2890edee036"}, + {file = "pyroaring-1.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:051bd9a66ce855a1143faa2b879ea6c6ca2905209e172ce9eedf79834897c730"}, + {file = "pyroaring-1.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:3043ff5c85375310ca3cd3e01944e03026e0ec07885e52dfabcfcd9dc303867f"}, + {file = "pyroaring-1.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:755cdac1f9a1b7b5c621e570d4f6dbcf3b8e4a1e35a66f976104ecb35dce4ed2"}, + {file = "pyroaring-1.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ebab073db620f26f0ba11e13fa2f35e3b1298209fba47b6bc8cb6f0e2c9627f9"}, + {file = "pyroaring-1.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:684fb8dffe19bdb7f91897c65eac6eee23b1e46043c47eb24288f28a1170fe04"}, + {file = "pyroaring-1.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:678d31fc24e82945a1bfb14816c77823983382ffea76985d494782aa2f058427"}, + {file = "pyroaring-1.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d815f624e0285db3669f673d1725cb754b120ec70d0032d7c7166103a96c96d"}, + {file = "pyroaring-1.0.3-cp311-cp311-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:57fd5b80dacb8e888402b6b7508a734c6a527063e4e24e882ff2e0fd90721ada"}, + {file = "pyroaring-1.0.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab26a7a45a0bb46c00394d1a60a9f2d57c220f84586e30d59b39784b0f94aee6"}, + {file = "pyroaring-1.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9232f3f606315d59049c128154100fd05008d5c5c211e48b21848cd41ee64d26"}, + 
{file = "pyroaring-1.0.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f34b44b3ec3df97b978799f2901fefb2a48d367496fd1cde3cc5fe8b3bc13510"}, + {file = "pyroaring-1.0.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:25a83ec6bac3106568bd3fdd316f0fee52aa0be8c72da565ad02b10ae7905924"}, + {file = "pyroaring-1.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c17d4ec53b5b6b333d9a9515051213a691293ada785dc8c025d3641482597ed3"}, + {file = "pyroaring-1.0.3-cp311-cp311-win32.whl", hash = "sha256:d54024459ace600f1d1ffbc6dc3c60eb47cca3b678701f06148f59e10f6f8d7b"}, + {file = "pyroaring-1.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:c28750148ef579a7447a8cb60b39e5943e03f8c29bce8f2788728f6f23d1887a"}, + {file = "pyroaring-1.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:535d8deccbd8db2c6bf38629243e9646756905574a742b2a72ff51d6461d616c"}, + {file = "pyroaring-1.0.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:add3e4c78eb590a76526ecce8d1566eecdd5822e351c36b3697997f4a80ed808"}, + {file = "pyroaring-1.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ebaffe846cf4ba4f00ce6b8a9f39613f24e2d09447e77be4fa6e898bc36451b6"}, + {file = "pyroaring-1.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9459f27498f97d08031a34a5ead230b77eb0ab3cc3d85b7f54faa2fd548acd6"}, + {file = "pyroaring-1.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2b2eb8bd1c35c772994889be9f7dda09477475d7aa1e2af9ab4ef18619326f6"}, + {file = "pyroaring-1.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d31f4c1c906f1af14ce61a3959d04a14a64c594f8a768399146a45bbd341f21f"}, + {file = "pyroaring-1.0.3-cp312-cp312-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53be988fc86698d56c11049bfe5113a2f6990adb1fa2782b29636509808b6aa7"}, + {file = "pyroaring-1.0.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7df84d223424523b19a23781f4246cc247fd6d821e1bc0853c2f25669136f7d0"}, + {file = "pyroaring-1.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:34a781f1f9766897f63ef18be129827340ae37764015b83fdcff1efb9e29136d"}, + {file = "pyroaring-1.0.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:1f414343b4ed0756734328cdf2a91022fc54503769e3f8d79bd0b672ea815a16"}, + {file = "pyroaring-1.0.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d16ae185c72dc64f76335dbe53e53a892e78115adc92194957d1b7ef74d230b9"}, + {file = "pyroaring-1.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f888447bf22dde7759108bfe6dfbeb6bbb61b14948de9c4cb6843c4dd57e2215"}, + {file = "pyroaring-1.0.3-cp312-cp312-win32.whl", hash = "sha256:fbbdc44c51a0a3efd7be3dbe04466278ce098fcd101aa1905849319042159770"}, + {file = "pyroaring-1.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:3b217c4b3ad953b4c759a0d2f9bd95316f0c345b9f7adb49e6ded7a1f5106bd4"}, + {file = "pyroaring-1.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:e6bcf838564c21bab8fe6c2748b4990d4cd90612d8c470c04889def7bb5114ea"}, + {file = "pyroaring-1.0.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:20bc947054b197d1baa76cd05d70b8e04f95b82e698266e2f8f2f4b36d764477"}, + {file = "pyroaring-1.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ba5909b4c66bb85cab345e2f3a87e5ce671509c94b8c9823d8db64e107cbe854"}, + {file = "pyroaring-1.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b744746ba5da27fad760067f12633f5d384db6a1e65648d00244ceacbbd87731"}, + {file = 
"pyroaring-1.0.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b16c2a2791a5a09c4b59c0e1069ac1c877d0df25cae3155579c7eac8844676e"}, + {file = "pyroaring-1.0.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7f68dfcf8d01177267f4bc06c4960fe8e39577470d1b52c9af8b61a72ca8767"}, + {file = "pyroaring-1.0.3-cp313-cp313-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:dba4e4700030182a981a3c887aa73887697145fc9ffb192f908aa59b718fbbdd"}, + {file = "pyroaring-1.0.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e26dd1dc1edba02288902914bdb559e53e346e9155defa43c31fcab831b55342"}, + {file = "pyroaring-1.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6eb98d2cacfc6d51c6a69893f04075e07b3df761eac71ba162c43b9b4c4452ad"}, + {file = "pyroaring-1.0.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a967e9eddb9485cbdd95d6371e3dada67880844d836c0283d3b11efe9225d1b7"}, + {file = "pyroaring-1.0.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b12ef7f992ba7be865f91c7c098fd8ac6c413563aaa14d5b1e2bcb8cb43a4614"}, + {file = "pyroaring-1.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:82ca5be174b85c40be7b00bc6bf39b2931a1b4a465f3af17ec6b9c48e9aa6fe0"}, + {file = "pyroaring-1.0.3-cp313-cp313-win32.whl", hash = "sha256:f758c681e63ffe74b20423695e71f0410920f41b075cee679ffb5bc2bf38440b"}, + {file = "pyroaring-1.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:428c3bb384fe4c483feb5cf7aa3aef1621fb0a5c4f3d391da67b2c4a43f08a10"}, + {file = "pyroaring-1.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:9c0c856e8aa5606e8aed5f30201286e404fdc9093f81fefe82d2e79e67472bb2"}, + {file = "pyroaring-1.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6321a95b5b2ba69aa32e920dd1aa7f8fc4fac55b75981978aa4f2378724dee27"}, + {file = "pyroaring-1.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:054eb6ef04ff9d2ed3ddd18ae21e5e51e02d0f8cdd7e5cb948648f77ddb04ea2"}, + {file = "pyroaring-1.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4d064aea3827e63eb60294ae3e6623e29613f5c8844869646d06f3735a425dd9"}, + {file = "pyroaring-1.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c84d5b17ef628c3956d9a79c2f78c5bea7dda6f7aeb01f34671034d2650b9efb"}, + {file = "pyroaring-1.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8038f7dd25eb83c277b8e0ea14c5e61f085cc76bd0c6b9f6679f1770e33541ec"}, + {file = "pyroaring-1.0.3-cp38-cp38-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:defc508ef7acaf58d07e603c55feda6742c4034f5262cfd616f92cc3adbc2815"}, + {file = "pyroaring-1.0.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd0831326971b0ffa08ccce79abe7c2450d5d9254804d855e23a8ba31f70351a"}, + {file = "pyroaring-1.0.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03e063329481396cbb70f1ce8b8ca0f01d74a45ee9d908b6645b0282b23832b0"}, + {file = "pyroaring-1.0.3-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:7a1b1c82d2da0bedc7c22d4047bd62544ef0e25c6be86ccf4b9d1ccc38876ee8"}, + {file = "pyroaring-1.0.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:dd7f9e5b7366b8f9bafca2a0fcf83fa534a00cc12d4ca01e301d8662bcdb805c"}, + {file = "pyroaring-1.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a5a1db84e0952805223a7bf77eae58384b700a6b9affc53fb9772dddf868c712"}, + {file = "pyroaring-1.0.3-cp38-cp38-win32.whl", hash = 
"sha256:54cb0c2bddd330e22099773c4681aca90847265afe56a9201a92c1a758494261"}, + {file = "pyroaring-1.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:47d985293f861df1f2b03b41cef4fd3249c1c9608081750bcf3153051c2312d0"}, + {file = "pyroaring-1.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d46eb5db78b673d8d8ca83651a1cce1e15eec5a922f2951b1f61014463b72af5"}, + {file = "pyroaring-1.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ce202452de2b58bffa3eb02e27c681eefcfb54e27f8ef85b5c93ebaada50f3f3"}, + {file = "pyroaring-1.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:531b6ae56989b61742dde1b64fedc5537acc046cf04a333548322366c1bf3922"}, + {file = "pyroaring-1.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3035db9459bd8635a0145b4a9e3102869d621cb0b3648051115f06d31ffd1976"}, + {file = "pyroaring-1.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c8fb6b0ad0e8db1b9559b2da180b103b48adddf0e4f24404269e2a3b5db268d"}, + {file = "pyroaring-1.0.3-cp39-cp39-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8d5df95d9511bc83048da9348c7ab1c20f97ff4d95faf27ee1fdf2e8a96e200e"}, + {file = "pyroaring-1.0.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65d2d81e5aed7698fab23058db70fb2b65fad221090be037a0af498569109915"}, + {file = "pyroaring-1.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e195636034a0b62ec0e5325ed2f610f39cc8955ace3f47a5bc7f484159f02341"}, + {file = "pyroaring-1.0.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:bb7f2561e3ec26c3c869458431cbcba6b83f7e925b024460c136dbb5fadf3b31"}, + {file = "pyroaring-1.0.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8e996939de01f448eb9448d91b47ab60bff0555c2a80d5c12a8405814072cd35"}, + {file = "pyroaring-1.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c656d62d0cf96ede0edc4e7d392889238777bdf88b32afd5d51c3cab016c29a0"}, + {file = "pyroaring-1.0.3-cp39-cp39-win32.whl", hash = "sha256:a7a7d14822c64841ae64e98309697e1631ebadba55ded33daa7cd16d1b487d11"}, + {file = "pyroaring-1.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:a86b88adbe0531b75f94f87279a6d4ee68e63335e29bbdab4400a05704fc2587"}, + {file = "pyroaring-1.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:1ed2e9c7af46052466b5fa0392fe540331474718d97b9756cefa23233bfdb3ea"}, + {file = "pyroaring-1.0.3.tar.gz", hash = "sha256:cd7392d1c010c9e41c11c62cd0610c8852e7e9698b1f7f6c2fcdefe50e7ef6da"}, +] + [[package]] name = "pytest" version = "7.4.4" @@ -3588,4 +3682,4 @@ propcache = ">=0.2.1" [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "3660c739c071839b795775dd6c6a904b555d6d5b28c315385f9557849bb476ab" +content-hash = "921c985f7c0f2dca304da6b3e3252a2f7a78c2dfe5d2a3dd18feb4e4dcb7e2cc" diff --git a/pyproject.toml b/pyproject.toml index 48ae9b1c8..5dff2f7f5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,6 +32,7 @@ protobuf="^6.31.1" dag-cbor="^0.3.3" flask = "^3.0.0" waitress = "^3.0.2" +pyroaring = "^1.0.3" [tool.poetry.group.dev.dependencies] base58 = "^2.1.1" diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index 0f7d70295..dbb3a1f33 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -256,7 +256,7 @@ def fulfill_state(self): self.state.save_prop_duty(EpochNumber(epoch), vid, included=bool(p.is_proposed)) blocks_in_epoch += p.is_proposed - if blocks_in_epoch and syncs: + if blocks_in_epoch: for rec in syncs: vid = ValidatorIndex(rec.validator_index) fulfilled = max(0, blocks_in_epoch - 
rec.missed_count) diff --git a/src/modules/performance_collector/codec.py b/src/modules/performance_collector/codec.py index 330d0c4d1..d0c21c8f4 100644 --- a/src/modules/performance_collector/codec.py +++ b/src/modules/performance_collector/codec.py @@ -6,6 +6,7 @@ from src.types import ValidatorIndex + @dataclass class ProposalDuty: validator_index: int diff --git a/tests/modules/csm/test_csm_module.py b/tests/modules/csm/test_csm_module.py index 36a3eb2a7..c29c03ccf 100644 --- a/tests/modules/csm/test_csm_module.py +++ b/tests/modules/csm/test_csm_module.py @@ -2,21 +2,25 @@ from collections import defaultdict from dataclasses import dataclass from typing import Literal, NoReturn, Type -from unittest.mock import Mock, PropertyMock, patch +from unittest.mock import Mock, PropertyMock, call, patch import pytest from hexbytes import HexBytes from src.constants import UINT64_MAX -from src.modules.csm.csm import CSOracle, LastReport +from src.modules.csm.csm import CSMError, CSOracle, LastReport from src.modules.csm.distribution import Distribution +from src.modules.csm.log import FramePerfLog from src.modules.csm.state import State from src.modules.csm.tree import RewardsTree, StrikesTree from src.modules.csm.types import StrikesList +from src.modules.performance_collector.codec import ProposalDuty, SyncDuty from src.modules.submodules.oracle_module import ModuleExecuteDelay from src.modules.submodules.types import ZERO_HASH, CurrentFrame +from src.providers.consensus.types import Validator, ValidatorState +from src.providers.execution.exceptions import InconsistentData from src.providers.ipfs import CID -from src.types import NodeOperatorId, SlotNumber, FrameNumber +from src.types import EpochNumber, FrameNumber, Gwei, NodeOperatorId, SlotNumber, ValidatorIndex from src.utils.types import hex_str_to_bytes from src.web3py.types import Web3 from tests.factory.blockstamp import ReferenceBlockStampFactory @@ -49,6 +53,23 @@ def slot_to_epoch(slot: int) -> int: return slot // 32 +def make_validator(index: int, activation_epoch: int = 0, exit_epoch: int = 100) -> Validator: + return Validator( + index=ValidatorIndex(index), + balance=Gwei(0), + validator=ValidatorState( + pubkey=f"0x{index:02x}", + withdrawal_credentials="0x00", + effective_balance=Gwei(0), + slashed=False, + activation_eligibility_epoch=EpochNumber(activation_epoch), + activation_epoch=EpochNumber(activation_epoch), + exit_epoch=EpochNumber(exit_epoch), + withdrawable_epoch=EpochNumber(exit_epoch + 1), + ), + ) + + @pytest.fixture() def mock_chain_config(module: CSOracle): module.get_chain_config = Mock( @@ -73,7 +94,7 @@ class FrameTestParam: last_processing_ref_slot: int current_ref_slot: int finalized_slot: int - expected_frame: tuple[int, int] | Type[ValueError] + expected_frame: tuple[int, int] | Type[Exception] @pytest.mark.parametrize( @@ -168,6 +189,28 @@ class FrameTestParam: ), id="initial_epoch_moved_forward_with_missed_frame", ), + pytest.param( + FrameTestParam( + epochs_per_frame=32, + initial_ref_slot=last_slot_of_epoch(10), + last_processing_ref_slot=last_slot_of_epoch(20), + current_ref_slot=last_slot_of_epoch(15), + finalized_slot=last_slot_of_epoch(15), + expected_frame=InconsistentData, + ), + id="last_processing_ref_slot_in_future", + ), + pytest.param( + FrameTestParam( + epochs_per_frame=4, + initial_ref_slot=last_slot_of_epoch(1), + last_processing_ref_slot=0, + current_ref_slot=last_slot_of_epoch(1), + finalized_slot=last_slot_of_epoch(1), + expected_frame=CSMError, + ), + id="negative_first_frame", + 
), ], ) @pytest.mark.unit @@ -190,8 +233,8 @@ def test_current_frame_range(module: CSOracle, mock_chain_config: NoReturn, para module.get_initial_ref_slot = Mock(return_value=param.initial_ref_slot) ref_epoch = slot_to_epoch(param.current_ref_slot) - if param.expected_frame is ValueError: - with pytest.raises(ValueError): + if isinstance(param.expected_frame, type) and issubclass(param.expected_frame, Exception): + with pytest.raises(param.expected_frame): module.get_epochs_range_to_process( ReferenceBlockStampFactory.build(slot_number=param.current_ref_slot, ref_epoch=ref_epoch) ) @@ -202,6 +245,45 @@ def test_current_frame_range(module: CSOracle, mock_chain_config: NoReturn, para assert (l_epoch, r_epoch) == param.expected_frame +@pytest.mark.unit +def test_set_epochs_range_to_collect_posts_new_demand(module: CSOracle, mock_chain_config: NoReturn): + blockstamp = ReferenceBlockStampFactory.build() + module.state = Mock(migrate=Mock(), log_progress=Mock()) + converter = Mock() + converter.frame_config = Mock(epochs_per_frame=4) + module.converter = Mock(return_value=converter) + module.get_epochs_range_to_process = Mock(return_value=(10, 20)) + module.w3 = Mock() + module.w3.performance.get_epochs_demand = Mock(return_value={}) + module.w3.performance.post_epochs_demand = Mock() + + module.set_epochs_range_to_collect(blockstamp) + + module.state.migrate.assert_called_once_with(10, 20, 4) + module.state.log_progress.assert_called_once() + module.w3.performance.get_epochs_demand.assert_called_once() + module.w3.performance.post_epochs_demand.assert_called_once_with("CSOracle", 10, 20) + + +@pytest.mark.unit +def test_set_epochs_range_to_collect_skips_post_when_demand_same(module: CSOracle, mock_chain_config: NoReturn): + blockstamp = ReferenceBlockStampFactory.build() + module.state = Mock(migrate=Mock(), log_progress=Mock()) + converter = Mock() + converter.frame_config = Mock(epochs_per_frame=4) + module.converter = Mock(return_value=converter) + module.get_epochs_range_to_process = Mock(return_value=(10, 20)) + module.w3 = Mock() + module.w3.performance.get_epochs_demand = Mock(return_value={"CSOracle": (10, 20)}) + module.w3.performance.post_epochs_demand = Mock() + + module.set_epochs_range_to_collect(blockstamp) + + module.state.migrate.assert_called_once_with(10, 20, 4) + module.state.log_progress.assert_called_once() + module.w3.performance.post_epochs_demand.assert_not_called() + + @pytest.fixture() def mock_frame_config(module: CSOracle): module.get_frame_config = Mock( @@ -214,167 +296,273 @@ def mock_frame_config(module: CSOracle): @dataclass(frozen=True) -class CollectDataTestParam: - collect_blockstamp: Mock - collect_frame_range: Mock - report_blockstamp: Mock - state: Mock - expected_msg: str - expected_result: bool | Exception - - -# TODO: move to performance collector tests -# @pytest.mark.parametrize( -# "param", -# [ -# pytest.param( -# CollectDataTestParam( -# collect_blockstamp=Mock(slot_number=64), -# collect_frame_range=Mock(return_value=(0, 1)), -# report_blockstamp=Mock(ref_epoch=3), -# state=Mock(), -# expected_msg="Epochs range has been changed, but the change is not yet observed on finalized epoch 1", -# expected_result=False, -# ), -# id="frame_changed_forward", -# ), -# pytest.param( -# CollectDataTestParam( -# collect_blockstamp=Mock(slot_number=64), -# collect_frame_range=Mock(return_value=(0, 2)), -# report_blockstamp=Mock(ref_epoch=1), -# state=Mock(), -# expected_msg="Epochs range has been changed, but the change is not yet observed on finalized epoch 1", 
-# expected_result=False, -# ), -# id="frame_changed_backward", -# ), -# pytest.param( -# CollectDataTestParam( -# collect_blockstamp=Mock(slot_number=32), -# collect_frame_range=Mock(return_value=(1, 2)), -# report_blockstamp=Mock(ref_epoch=2), -# state=Mock(), -# expected_msg="The starting epoch of the epochs range is not finalized yet", -# expected_result=False, -# ), -# id="starting_epoch_not_finalized", -# ), -# pytest.param( -# CollectDataTestParam( -# collect_blockstamp=Mock(slot_number=32), -# collect_frame_range=Mock(return_value=(0, 2)), -# report_blockstamp=Mock(ref_epoch=2), -# state=Mock( -# migrate=Mock(), -# log_status=Mock(), -# is_fulfilled=True, -# ), -# expected_msg="All epochs are already processed. Nothing to collect", -# expected_result=True, -# ), -# id="state_fulfilled", -# ), -# pytest.param( -# CollectDataTestParam( -# collect_blockstamp=Mock(slot_number=320), -# collect_frame_range=Mock(return_value=(0, 100)), -# report_blockstamp=Mock(ref_epoch=100), -# state=Mock( -# migrate=Mock(), -# log_status=Mock(), -# unprocessed_epochs=[5], -# is_fulfilled=False, -# ), -# expected_msg="Minimum checkpoint step is not reached, current delay is 2 epochs", -# expected_result=False, -# ), -# id="min_step_not_reached", -# ), -# ], -# ) -# @pytest.mark.unit -# def test_collect_data( -# module: CSOracle, -# param: CollectDataTestParam, -# mock_chain_config: NoReturn, -# mock_frame_config: NoReturn, -# caplog, -# monkeypatch, -# ): -# module.w3 = Mock() -# module._receive_last_finalized_slot = Mock() -# module.state = param.state -# module.get_epochs_range_to_process = param.collect_frame_range -# module.get_blockstamp_for_report = Mock(return_value=param.report_blockstamp) -# -# with caplog.at_level(logging.DEBUG): -# if isinstance(param.expected_result, Exception): -# with pytest.raises(type(param.expected_result)): -# module.collect_data(blockstamp=param.collect_blockstamp) -# else: -# collected = module.collect_data(blockstamp=param.collect_blockstamp) -# assert collected == param.expected_result -# -# msg = list(filter(lambda log: param.expected_msg in log, caplog.messages)) -# assert len(msg), f"Expected message '{param.expected_msg}' not found in logs" -# -# -# -# @pytest.mark.unit -# def test_collect_data_outdated_checkpoint( -# module: CSOracle, mock_chain_config: NoReturn, mock_frame_config: NoReturn, caplog -# ): -# module.w3 = Mock() -# module._receive_last_finalized_slot = Mock() -# module.state = Mock( -# migrate=Mock(), -# log_status=Mock(), -# unprocessed_epochs=list(range(0, 101)), -# is_fulfilled=False, -# ) -# module.get_epochs_range_to_process = Mock(side_effect=[(0, 100), (50, 150)]) -# module.get_blockstamp_for_report = Mock(return_value=Mock(ref_epoch=100)) -# -# with caplog.at_level(logging.DEBUG): -# with pytest.raises(ValueError): -# module.collect_data(blockstamp=Mock(slot_number=640)) -# -# msg = list( -# filter( -# lambda log: "Checkpoints were prepared for an outdated epochs range, stop processing" in log, -# caplog.messages, -# ) -# ) -# assert len(msg), "Expected message not found in logs" +class CollectDataCase: + frames: list[tuple[int, int]] + range_available: bool + is_fulfilled_side_effect: list[bool] + expected_result: bool + expect_fulfill_call: bool + expect_range_call: tuple[int, int] + check_no_completed_msg: bool +@pytest.mark.parametrize( + "case", + [ + pytest.param( + CollectDataCase( + frames=[(10, 12)], + range_available=False, + is_fulfilled_side_effect=[False], + expected_result=False, + expect_fulfill_call=False, + 
expect_range_call=(10, 12), + check_no_completed_msg=False, + ), + id="range_not_available", + ), + pytest.param( + CollectDataCase( + frames=[(10, 12)], + range_available=True, + is_fulfilled_side_effect=[False, True], + expected_result=True, + expect_fulfill_call=True, + expect_range_call=(10, 12), + check_no_completed_msg=False, + ), + id="range_available", + ), + pytest.param( + CollectDataCase( + frames=[(0, 100)], + range_available=True, + is_fulfilled_side_effect=[False, True], + expected_result=True, + expect_fulfill_call=True, + expect_range_call=(0, 100), + check_no_completed_msg=True, + ), + id="fulfilled_state", + ), + ], +) @pytest.mark.unit -def test_collect_data_fulfilled_state( - module: CSOracle, mock_chain_config: NoReturn, mock_frame_config: NoReturn, caplog +def test_collect_data_handles_range_availability( + module: CSOracle, mock_chain_config: NoReturn, mock_frame_config: NoReturn, caplog, case: CollectDataCase ): module.w3 = Mock() - module._reset_cycle_timeout = Mock() - module._receive_last_finalized_slot = Mock() - module.state = Mock( - migrate=Mock(), - log_status=Mock(), - unprocessed_epochs=list(range(0, 101)), - frames=[[0, 100]], - ) - type(module.state).is_fulfilled = PropertyMock(side_effect=[False, True]) - module.get_epochs_range_to_process = Mock(return_value=(0, 100)) - module.get_blockstamp_for_report = Mock(return_value=Mock(ref_epoch=100)) - module.w3.performance.is_range_available = Mock(return_value=True) + module.w3.performance.is_range_available = Mock(return_value=case.range_available) module.fulfill_state = Mock() + state = Mock(frames=case.frames) + type(state).is_fulfilled = PropertyMock(side_effect=case.is_fulfilled_side_effect) + module.state = state with caplog.at_level(logging.DEBUG): - collected = module.collect_data() - assert collected is True + result = module.collect_data() + + assert result is case.expected_result + module.w3.performance.is_range_available.assert_called_once_with(*case.expect_range_call) + if case.expect_fulfill_call: + module.fulfill_state.assert_called_once() + else: + module.fulfill_state.assert_not_called() + + if case.check_no_completed_msg: + assert "All epochs are already processed. 
Nothing to collect" not in caplog.messages + + +@pytest.mark.parametrize( + "epoch_data_missing", [pytest.param(False, id="duties_recorded"), pytest.param(True, id="epoch_missing")] +) +@pytest.mark.unit +def test_fulfill_state_handles_epoch_data(module: CSOracle, epoch_data_missing: bool): + module._receive_last_finalized_slot = Mock(return_value="finalized") + validator_a = make_validator(0, activation_epoch=0, exit_epoch=10) + validator_b = make_validator(1, activation_epoch=0, exit_epoch=10) + module.w3 = Mock() + module.w3.cc.get_validators = Mock(return_value=[validator_a, validator_b]) + + if epoch_data_missing: + module.w3.performance.get_epoch = Mock(return_value=None) + frames = [(0, 0)] + unprocessed = {0} + else: + module.w3.performance.get_epoch = Mock( + side_effect=[ + ( + {validator_a.index}, + [ + ProposalDuty(validator_index=int(validator_a.index), is_proposed=True), + ProposalDuty(validator_index=int(validator_b.index), is_proposed=False), + ], + [ + SyncDuty(validator_index=int(validator_a.index), missed_count=0), + SyncDuty(validator_index=int(validator_b.index), missed_count=1), + ], + ), + ( + set(), + [ + ProposalDuty(validator_index=int(validator_b.index), is_proposed=True), + ], + [ + SyncDuty(validator_index=int(validator_a.index), missed_count=2), + SyncDuty(validator_index=int(validator_b.index), missed_count=3), + ], + ), + ] + ) + frames = [(0, 1)] + unprocessed = {0, 1} + + state = Mock() + state.frames = frames + state.unprocessed_epochs = unprocessed + state.save_att_duty = Mock() + state.save_prop_duty = Mock() + state.save_sync_duty = Mock() + state.add_processed_epoch = Mock() + state.log_progress = Mock() + module.state = state + + module.fulfill_state() + + module._receive_last_finalized_slot.assert_called_once() + module.w3.cc.get_validators.assert_called_once_with("finalized") + + if epoch_data_missing: + module.w3.performance.get_epoch.assert_called_once_with(0) + state.save_att_duty.assert_not_called() + state.save_prop_duty.assert_not_called() + state.save_sync_duty.assert_not_called() + state.add_processed_epoch.assert_not_called() + state.log_progress.assert_not_called() + else: + module.w3.performance.get_epoch.assert_has_calls([call(0), call(1)]) + assert state.save_att_duty.call_args_list == [ + call(EpochNumber(0), validator_a.index, included=False), + call(EpochNumber(0), validator_b.index, included=True), + call(EpochNumber(1), validator_a.index, included=True), + call(EpochNumber(1), validator_b.index, included=True), + ] + assert state.save_prop_duty.call_args_list == [ + call(EpochNumber(0), ValidatorIndex(int(validator_a.index)), included=True), + call(EpochNumber(0), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=True), + ] + assert state.save_sync_duty.call_args_list == [ + call(EpochNumber(0), ValidatorIndex(int(validator_a.index)), included=True), + call(EpochNumber(0), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_a.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_a.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), + ] + assert state.add_processed_epoch.call_args_list == [ + call(EpochNumber(0)), + call(EpochNumber(1)), + ] + assert 
state.log_progress.call_count == 2 + + +@pytest.mark.unit +def test_fulfill_state_raises_on_inactive_missed_attestation(module: CSOracle): + inactive_validator = make_validator(5, activation_epoch=10, exit_epoch=20) + module._receive_last_finalized_slot = Mock(return_value="finalized") + module.w3 = Mock() + module.w3.cc.get_validators = Mock(return_value=[inactive_validator]) + module.w3.performance.get_epoch = Mock(return_value=({inactive_validator.index}, [], [])) + state = Mock() + state.frames = [(0, 0)] + state.unprocessed_epochs = {0} + state.save_att_duty = Mock() + state.save_prop_duty = Mock() + state.save_sync_duty = Mock() + state.add_processed_epoch = Mock() + state.log_progress = Mock() + module.state = state + + with pytest.raises(ValueError, match="not active"): + module.fulfill_state() + + module.w3.performance.get_epoch.assert_called_once_with(0) + state.save_att_duty.assert_not_called() + state.add_processed_epoch.assert_not_called() + + +@pytest.mark.unit +def test_validate_state_uses_ref_epoch(module: CSOracle): + blockstamp = ReferenceBlockStampFactory.build(ref_epoch=123) + module.get_epochs_range_to_process = Mock(return_value=(5, 10)) + module.state = Mock(validate=Mock()) + + module.validate_state(blockstamp) + + module.get_epochs_range_to_process.assert_called_once_with(blockstamp) + module.state.validate.assert_called_once_with(5, 123) + + +@pytest.mark.parametrize( + "last_ref_slot,current_ref_slot,expected", + [ + pytest.param(64, 64, True, id="already_submitted"), + pytest.param(32, 64, False, id="pending_submission"), + ], +) +@pytest.mark.unit +def test_is_main_data_submitted(module: CSOracle, last_ref_slot: int, current_ref_slot: int, expected: bool): + blockstamp = ReferenceBlockStampFactory.build() + module.w3 = Mock() + module.w3.csm.get_csm_last_processing_ref_slot = Mock(return_value=SlotNumber(last_ref_slot)) + module.get_initial_or_current_frame = Mock( + return_value=CurrentFrame( + ref_slot=SlotNumber(current_ref_slot), + report_processing_deadline_slot=SlotNumber(0), + ) + ) + + assert module.is_main_data_submitted(blockstamp) is expected + + +@pytest.mark.parametrize("submitted", [True, False]) +@pytest.mark.unit +def test_is_contract_reportable_relies_on_is_main_data_submitted(module: CSOracle, submitted: bool): + module.is_main_data_submitted = Mock(return_value=submitted) + + result = module.is_contract_reportable(ReferenceBlockStampFactory.build()) + + module.is_main_data_submitted.assert_called_once() + assert result is (not submitted) + + +@pytest.mark.unit +def test_publish_tree_uploads_encoded_tree(module: CSOracle): + tree = Mock() + tree.encode.return_value = b"tree" + module.w3 = Mock() + module.w3.ipfs.publish = Mock(return_value=CID("QmTree")) + + cid = module.publish_tree(tree) + + module.w3.ipfs.publish.assert_called_once_with(b"tree") + assert cid == CID("QmTree") + + +@pytest.mark.unit +def test_publish_log_uploads_encoded_log(module: CSOracle, monkeypatch: pytest.MonkeyPatch): + logs = [Mock(spec=FramePerfLog)] + encode_mock = Mock(return_value=b"log") + monkeypatch.setattr("src.modules.csm.csm.FramePerfLog.encode", encode_mock) + module.w3 = Mock() + module.w3.ipfs.publish = Mock(return_value=CID("QmLog")) + + cid = module.publish_log(logs) - # assert that it is not early return from function - msg = list(filter(lambda log: "All epochs are already processed. 
Nothing to collect" in log, caplog.messages)) - assert len(msg) == 0, "Unexpected message found in logs" + encode_mock.assert_called_once_with(logs) + module.w3.ipfs.publish.assert_called_once_with(b"log") + assert cid == CID("QmLog") @dataclass(frozen=True) From e9758b0b86969bfb2dc8dcf837a07b5009dec6c4 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Mon, 10 Nov 2025 16:47:12 +0100 Subject: [PATCH 22/35] fix: linter --- src/modules/csm/csm.py | 13 +- src/modules/performance_collector/codec.py | 4 +- .../performance_collector/http_server.py | 199 ++++++++++-------- .../performance_collector.py | 1 - src/providers/http_provider.py | 2 +- src/web3py/extensions/performance.py | 1 - 6 files changed, 118 insertions(+), 102 deletions(-) diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index dbb3a1f33..5b37cee71 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -114,17 +114,16 @@ def collect_data(self) -> bool: ) if not is_data_range_available: logger.warning({ - "msg": f"Performance data range is not available yet", + "msg": "Performance data range is not available yet", "start_epoch": l_epoch, "end_epoch": r_epoch }) return False - else: - logger.info({ - "msg": "Performance data range is available", - "start_epoch": l_epoch, - "end_epoch": r_epoch - }) + logger.info({ + "msg": "Performance data range is available", + "start_epoch": l_epoch, + "end_epoch": r_epoch + }) self.fulfill_state() return self.state.is_fulfilled diff --git a/src/modules/performance_collector/codec.py b/src/modules/performance_collector/codec.py index d0c21c8f4..0b36dd1f8 100644 --- a/src/modules/performance_collector/codec.py +++ b/src/modules/performance_collector/codec.py @@ -83,7 +83,9 @@ def encode(misses: AttDutyMisses) -> bytes: @staticmethod def decode(blob: bytes) -> AttDutyMisses: - return set([ValidatorIndex(i) for i in BitMap.deserialize(blob)]) + # Non-iterable value BitMap.deserialize(blob) is used in an iterating context, + # but it IS iterable. 
+ return {ValidatorIndex(i) for i in BitMap.deserialize(blob)} # pylint: disable=E1133 EpochData: TypeAlias = tuple[AttDutyMisses, list[ProposalDuty], list[SyncDuty]] diff --git a/src/modules/performance_collector/http_server.py b/src/modules/performance_collector/http_server.py index ba6e4c013..c24887609 100644 --- a/src/modules/performance_collector/http_server.py +++ b/src/modules/performance_collector/http_server.py @@ -1,3 +1,4 @@ +from functools import wraps from threading import Thread from typing import Any, Dict, Optional @@ -15,11 +16,8 @@ def _parse_from_to(args: Dict[str, Any]) -> Optional[tuple[int, int]]: t = args.get("to") if f is None or t is None: return None - try: - fi = int(f) - ti = int(t) - except Exception: - return None + fi = int(f) + ti = int(t) if fi > ti: return None return fi, ti @@ -29,119 +27,138 @@ def _create_app(db_path: str) -> Flask: app = Flask(__name__) app.config["DB_PATH"] = db_path + _register_health_route(app) + _register_epoch_range_routes(app) + _register_epoch_blob_routes(app) + _register_debug_routes(app) + _register_demand_routes(app) + + return app + + +def _register_health_route(app: Flask) -> None: @app.get("/health") def health(): return jsonify({"status": "ok"}) + +def _register_epoch_range_routes(app: Flask) -> None: @app.get("/epochs/check") + @_with_error_handling def epochs_check(): - try: - parsed = _parse_from_to(request.args) - if not parsed: - return jsonify({"error": "Invalid or missing 'from'/'to' params"}), 400 - l, r = parsed - db = DutiesDB(app.config["DB_PATH"]) - result = db.is_range_available(l, r) - return jsonify({"result": bool(result)}) - except Exception as e: - return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 + l_epoch, r_epoch = _require_epoch_range(request.args) + db = _db(app) + return jsonify({"result": bool(db.is_range_available(l_epoch, r_epoch))}) @app.get("/epochs/missing") + @_with_error_handling def epochs_missing(): - try: - parsed = _parse_from_to(request.args) - if not parsed: - return jsonify({"error": "Invalid or missing 'from'/'to' params"}), 400 - l, r = parsed - db = DutiesDB(app.config["DB_PATH"]) - result = db.missing_epochs_in(l, r) - return jsonify({"result": result}) - except Exception as e: - return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 - + l_epoch, r_epoch = _require_epoch_range(request.args) + db = _db(app) + return jsonify({"result": db.missing_epochs_in(l_epoch, r_epoch)}) + + +def _register_epoch_blob_routes(app: Flask) -> None: @app.get("/epochs/blob") + @_with_error_handling def epochs_blob(): - try: - parsed = _parse_from_to(request.args) - if not parsed: - return jsonify({"error": "Invalid or missing 'from'/'to' params"}), 400 - l, r = parsed - db = DutiesDB(app.config["DB_PATH"]) - epochs: list[str | None] = [] - for e in range(l, r + 1): - blob = db.get_epoch_blob(e) - epochs.append(blob.hex() if blob is not None else None) - return jsonify({"result": epochs}) - except Exception as e: - return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 + l_epoch, r_epoch = _require_epoch_range(request.args) + db = _db(app) + epochs: list[str | None] = [] + for epoch in range(l_epoch, r_epoch + 1): + blob = db.get_epoch_blob(epoch) + epochs.append(blob.hex() if blob is not None else None) + return jsonify({"result": epochs}) @app.get("/epochs/blob/") + @_with_error_handling def epoch_blob(epoch: int): - try: - db = DutiesDB(app.config["DB_PATH"]) - blob = db.get_epoch_blob(epoch) - return jsonify({"result": blob.hex() if blob 
-        except Exception as e:
-            return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500
+        db = _db(app)
+        blob = db.get_epoch_blob(epoch)
+        return jsonify({"result": blob.hex() if blob is not None else None})
+
+
+def _register_debug_routes(app: Flask) -> None:
     @app.get("/debug/epochs/<int:epoch>")
+    @_with_error_handling
     def debug_epoch_details(epoch: int):
-        try:
-            db = DutiesDB(app.config["DB_PATH"])
-            blob = db.get_epoch_blob(epoch)
-            if blob is None:
-                return jsonify({"error": "epoch not found", "epoch": epoch}), 404
-
-            misses, props, syncs = EpochDataCodec.decode(blob)
-
-            proposals = [
-                {"validator_index": int(p.validator_index), "is_proposed": bool(p.is_proposed)} for p in props
-            ]
-            sync_misses = [
-                {"validator_index": int(s.validator_index), "missed_count": int(s.missed_count)} for s in syncs
-            ]
-
-            return jsonify(
-                {
-                    "epoch": int(epoch),
-                    "att_misses": list(misses),
-                    "proposals": proposals,
-                    "sync_misses": sync_misses,
-                }
-            )
-        except Exception as e:
-            return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500
+        db = _db(app)
+        blob = db.get_epoch_blob(epoch)
+        if blob is None:
+            return jsonify({"error": "epoch not found", "epoch": epoch}), 404

-    @app.post("/epochs/demand")
-    def set_epochs_demand():
-        try:
-            data = request.get_json()
-            if not data or "consumer" not in data or "l_epoch" not in data or "r_epoch" not in data:
-                return jsonify({"error": "Missing 'consumer' or 'l_epoch' or 'r_epoch' in request body"}), 400
+        misses, props, syncs = EpochDataCodec.decode(blob)
+
+        proposals = [{"validator_index": int(p.validator_index), "is_proposed": bool(p.is_proposed)} for p in props]
+        sync_misses = [
+            {"validator_index": int(s.validator_index), "missed_count": int(s.missed_count)} for s in syncs
+        ]

-            consumer = data["consumer"]
-            l_epoch = data["l_epoch"]
-            r_epoch = data["r_epoch"]
+        return jsonify(
+            {
+                "epoch": int(epoch),
+                "att_misses": list(misses),
+                "proposals": proposals,
+                "sync_misses": sync_misses,
+            }
+        )

-            if not isinstance(l_epoch, int) or not isinstance(r_epoch, int) or l_epoch > r_epoch:
-                return jsonify({"error": "'l_epoch' and 'r_epoch' must be integers, and 'l_epoch' <= 'r_epoch'"}), 400

-            db = DutiesDB(app.config["DB_PATH"])
-            db.store_demand(consumer, l_epoch, r_epoch)
+def _register_demand_routes(app: Flask) -> None:
+    @app.post("/epochs/demand")
+    @_with_error_handling
+    def set_epochs_demand():
+        data = _require_json(request.get_json(), {"consumer", "l_epoch", "r_epoch"})
+        _validate_epoch_bounds(data["l_epoch"], data["r_epoch"])

-            return jsonify({"status": "ok", "consumer": consumer, "l_epoch": l_epoch, "r_epoch": r_epoch})
-        except Exception as e:
-            return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500
+        db = _db(app)
+        db.store_demand(data["consumer"], data["l_epoch"], data["r_epoch"])
+
+        return jsonify({"status": "ok", "consumer": data["consumer"], "l_epoch": data["l_epoch"], "r_epoch": data["r_epoch"]})

     @app.get("/epochs/demand")
+    @_with_error_handling
     def get_epochs_demand():
+        db = _db(app)
+        return jsonify({"result": db.epochs_demand()})
+
+
+def _db(app: Flask) -> DutiesDB:
+    return DutiesDB(app.config["DB_PATH"])
+
+
+def _require_epoch_range(args: Dict[str, Any]) -> tuple[int, int]:
+    parsed = _parse_from_to(args)
+    if not parsed:
+        raise ValueError("Invalid or missing 'from'/'to' params")
+    return parsed
+
+
+def _require_json(data: Optional[Dict[str, Any]], required: set[str]) -> Dict[str, Any]:
+    if not data:
+        raise ValueError(f"Missing JSON body or required fields: 
{', '.join(sorted(required))}") + missing = required.difference(data) + if missing: + raise ValueError(f"Missing required fields: {', '.join(sorted(missing))}") + return data + + +def _validate_epoch_bounds(l_epoch: Any, r_epoch: Any) -> None: + if not isinstance(l_epoch, int) or not isinstance(r_epoch, int) or l_epoch > r_epoch: + raise ValueError("'l_epoch' and 'r_epoch' must be integers, and 'l_epoch' <= 'r_epoch'") + + +def _with_error_handling(func): + @wraps(func) + def wrapper(*args, **kwargs): try: - db = DutiesDB(app.config["DB_PATH"]) - return jsonify({"result": db.epochs_demand()}) - except Exception as e: - return jsonify({"error": repr(e), "trace": traceback.format_exc()}), 500 + return func(*args, **kwargs) + except ValueError as exc: + return jsonify({"error": str(exc)}), 400 + except Exception as exc: # pylint: disable=broad-exception-caught + return jsonify({"error": repr(exc), "trace": traceback.format_exc()}), 500 - return app + return wrapper def start_performance_api_server(db_path): diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index 2cd9cfa4b..b8dbd1fd2 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -11,7 +11,6 @@ from src.modules.submodules.oracle_module import BaseModule, ModuleExecuteDelay from src.modules.submodules.types import ChainConfig from src.types import BlockStamp, EpochNumber -from src.utils.range import sequence from src.utils.web3converter import ChainConverter from src import variables diff --git a/src/providers/http_provider.py b/src/providers/http_provider.py index 493161e9e..72418187e 100644 --- a/src/providers/http_provider.py +++ b/src/providers/http_provider.py @@ -8,7 +8,7 @@ from json_stream import requests as json_stream_requests # type: ignore from json_stream.base import TransientStreamingJSONObject # type: ignore from prometheus_client import Histogram -from requests import JSONDecodeError, Session, Response +from requests import JSONDecodeError, Session from requests.adapters import HTTPAdapter from urllib3 import Retry diff --git a/src/web3py/extensions/performance.py b/src/web3py/extensions/performance.py index 61a044125..708c14503 100644 --- a/src/web3py/extensions/performance.py +++ b/src/web3py/extensions/performance.py @@ -1,4 +1,3 @@ -from web3 import Web3 from web3.module import Module from src.providers.performance.client import PerformanceClient From f932b99cc5d49458e418b4e1078a9e9a5ca60c88 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 11 Nov 2025 14:14:41 +0100 Subject: [PATCH 23/35] fix: `define_epochs_to_process_range` --- .../performance_collector/checkpoint.py | 28 +-- src/modules/performance_collector/db.py | 16 ++ .../performance_collector.py | 177 ++++++++---------- 3 files changed, 98 insertions(+), 123 deletions(-) diff --git a/src/modules/performance_collector/checkpoint.py b/src/modules/performance_collector/checkpoint.py index d35b4446e..651f86208 100644 --- a/src/modules/performance_collector/checkpoint.py +++ b/src/modules/performance_collector/checkpoint.py @@ -36,9 +36,6 @@ type SyncDuties = list[SyncDuty] -class MinStepIsNotReached(Exception): ... - - class SlotOutOfRootsRange(Exception): ... 
@@ -57,8 +54,6 @@ class FrameCheckpointsIterator: # Max available epoch to process according to the finalized epoch max_available_epoch_to_check: EpochNumber - # Min checkpoint step is 10 because it's a reasonable number of epochs to process at once (~1 hour) - MIN_CHECKPOINT_STEP = 10 # Max checkpoint step is 255 epochs because block_roots size from state is 8192 slots (256 epochs) # to check duty of every epoch, we need to check 64 slots (32 slots of duty epoch + 32 slots of next epoch). # In the end we got 255 committees and 8192 block_roots to check them for every checkpoint. @@ -82,8 +77,11 @@ def __init__( self.r_epoch, EpochNumber(finalized_epoch - self.CHECKPOINT_SLOT_DELAY_EPOCHS) ) - if self.r_epoch > self.max_available_epoch_to_check and not self._is_min_step_reached(): - raise MinStepIsNotReached() + if self.l_epoch > self.max_available_epoch_to_check: + raise ValueError(f"Left border epoch is greater than max available epoch to check: {l_epoch=} > {self.max_available_epoch_to_check=}") + + if self.r_epoch > self.max_available_epoch_to_check: + raise ValueError(f"Right border epoch is greater than max available epoch to check: {r_epoch=} > {self.max_available_epoch_to_check=}") def __iter__(self): for checkpoint_epochs in batched( @@ -98,22 +96,6 @@ def __iter__(self): ) yield FrameCheckpoint(checkpoint_slot, checkpoint_epochs) - def _is_min_step_reached(self): - # NOTE: processing delay can be negative - # if the finalized epoch is less than next epoch to check (l_epoch) - processing_delay = self.max_available_epoch_to_check - self.l_epoch - if processing_delay >= self.MIN_CHECKPOINT_STEP: - return True - logger.info( - { - "msg": f"Minimum checkpoint step is not reached, current delay is {processing_delay} epochs", - "max_available_epoch_to_check": self.max_available_epoch_to_check, - "l_epoch": self.l_epoch, - "r_epoch": self.r_epoch, - } - ) - return False - class SyncCommitteesCache(UserDict): diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py index 84d42904e..64a0546da 100644 --- a/src/modules/performance_collector/db.py +++ b/src/modules/performance_collector/db.py @@ -38,6 +38,15 @@ def __init__(self, path: str): ) """ ) + self._conn.execute( + """ + CREATE TABLE IF NOT EXISTS epochs_demand_nonce + ( + value INTEGER NOT NULL + ) + """ + ) + self._conn.execute("INSERT INTO epochs_demand_nonce (value) VALUES (0);") self._conn.commit() def __del__(self): @@ -58,6 +67,7 @@ def store_demand(self, consumer: str, l_epoch: int, r_epoch: int) -> None: "INSERT OR REPLACE INTO epochs_demand(consumer, l_epoch, r_epoch) VALUES(?, ?, ?)", (consumer, l_epoch, r_epoch), ) + cur.execute("UPDATE epochs_demand_nonce SET value = value + 1") def store_epoch( self, @@ -150,3 +160,9 @@ def epochs_demand(self) -> dict[str, tuple[int, int]]: for consumer, l_epoch, r_epoch in demands: data[consumer] = (int(l_epoch), int(r_epoch)) return data + + def epochs_demand_nonce(self) -> int: + with self.connection() as cur: + cur.execute("SELECT value FROM epochs_demand_nonce LIMIT 1") + val = int(cur.fetchone()[0] or 0) + return val diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index b8dbd1fd2..e0756e00b 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -1,10 +1,9 @@ import logging -from typing import Optional +from typing import Optional, Final from 
src.modules.performance_collector.checkpoint import ( FrameCheckpointsIterator, FrameCheckpointProcessor, - MinStepIsNotReached, ) from src.modules.performance_collector.db import DutiesDB from src.modules.performance_collector.http_server import start_performance_api_server @@ -21,6 +20,9 @@ class PerformanceCollector(BaseModule): """ Continuously collects performance data from Consensus Layer into db for the given epoch range. """ + DEFAULT_EPOCHS_STEP_TO_COLLECT: Final = 10 + + last_epochs_demand_nonce: int = 0 def __init__(self, w3, db_path: Optional[str] = None): super().__init__(w3) @@ -35,6 +37,7 @@ def __init__(self, w3, db_path: Optional[str] = None): except Exception as e: logger.error({'msg': 'Failed to start performance API server', 'error': repr(e)}) raise + self.last_epochs_demand_nonce = self.db.epochs_demand_nonce() def refresh_contracts(self): # No need to refresh contracts for this module. There are no contracts used. @@ -56,36 +59,16 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) epochs_range_demand = self.define_epochs_to_process_range(finalized_epoch) - if epochs_range_demand: - start_epoch, end_epoch = epochs_range_demand - else: - logger.info({'msg': 'No epochs demand to process. Default epochs range is used.'}) - gap = FrameCheckpointsIterator.MIN_CHECKPOINT_STEP + FrameCheckpointsIterator.CHECKPOINT_SLOT_DELAY_EPOCHS - start_epoch = self.db.max_epoch() or max(0, finalized_epoch - gap) - end_epoch = finalized_epoch - - min_unprocessed_epoch = min(self.db.missing_epochs_in(start_epoch, end_epoch), default=None) - if not min_unprocessed_epoch: - raise ValueError("There should be at least one epoch to process.") - - logger.info({ - 'msg': 'Starting epoch range processing', - "start_epoch": start_epoch, - "end_epoch": end_epoch, - "min_unprocessed_epoch": min_unprocessed_epoch, - "finalized_epoch": finalized_epoch - }) - - try: - checkpoints = FrameCheckpointsIterator( - converter, - EpochNumber(min_unprocessed_epoch), - end_epoch, - finalized_epoch, - ) - except MinStepIsNotReached: - return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH - + if not epochs_range_demand: + return ModuleExecuteDelay.NEXT_SLOT + start_epoch, end_epoch = epochs_range_demand + + checkpoints = FrameCheckpointsIterator( + converter, + start_epoch, + end_epoch, + finalized_epoch, + ) processor = FrameCheckpointProcessor(self.w3.cc, self.db, converter, last_finalized_blockstamp) checkpoint_count = 0 @@ -100,10 +83,8 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute # Reset BaseOracle cycle timeout to avoid timeout errors during long checkpoints processing self._reset_cycle_timeout() - if self.new_epochs_range_demand_appeared(converter, start_epoch, end_epoch): - logger.info({ - "msg": "New epochs range to process is found, stopping current epochs range processing" - }) + if self.new_epochs_range_demand_appeared(): + logger.info({"msg": "New epochs demand is found during processing"}) return ModuleExecuteDelay.NEXT_SLOT logger.info({ @@ -113,78 +94,74 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute return ModuleExecuteDelay.NEXT_SLOT - def new_epochs_range_demand_appeared( - self, converter: ChainConverter, start_epoch: EpochNumber, end_epoch: EpochNumber - ) -> bool: - curr_finalized_slot = self._receive_last_finalized_slot() - curr_finalized_epoch = 
EpochNumber(converter.get_epoch_by_slot(curr_finalized_slot.slot_number) - 1) - new_epochs_range = self.define_epochs_to_process_range(curr_finalized_epoch, log=False) - if new_epochs_range: - new_start_epoch, new_end_epoch = new_epochs_range - if new_start_epoch != start_epoch or new_end_epoch != end_epoch: - return True - return False + def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[EpochNumber, EpochNumber] | None: + max_available_epoch_to_check = max(0, finalized_epoch - FrameCheckpointsIterator.CHECKPOINT_SLOT_DELAY_EPOCHS) + start_epoch = EpochNumber(max(0, max_available_epoch_to_check - self.DEFAULT_EPOCHS_STEP_TO_COLLECT)) + end_epoch = EpochNumber(max_available_epoch_to_check) + + min_epoch_in_db = self.db.min_epoch() + max_epoch_in_db = self.db.max_epoch() + if not min_epoch_in_db and not max_epoch_in_db: + logger.info({ + "msg": "Empty Performance Collector DB. Start with the default range calculation", + "start_epoch": start_epoch, + "end_epoch": end_epoch + }) + return start_epoch, end_epoch + + gap = self.db.missing_epochs_in(min_epoch_in_db, max_epoch_in_db) + if gap: + start_epoch = min(gap) + else: + # Start from the next epoch after the last epoch in the DB. + start_epoch = max_epoch_in_db + 1 - def define_epochs_to_process_range(self, finalized_epoch: EpochNumber, log=True) -> tuple[EpochNumber, EpochNumber] | None: - unsatisfied_demands = [] epochs_demand = self.db.epochs_demand() + if not epochs_demand: + logger.info({"msg": "No epochs demand found"}) for consumer, (l_epoch, r_epoch) in epochs_demand.items(): - if log: - logger.info({ - "msg": "Epochs demand is found", - "consumer": consumer, - "l_epoch": l_epoch, - "r_epoch": r_epoch - }) satisfied = self.db.is_range_available(l_epoch, r_epoch) if satisfied: - if log: - logger.info({ - "msg": "Epochs demand is already satisfied, skipping", - "start_epoch": l_epoch, - "end_epoch": r_epoch - }) + logger.info({ + "msg": "Satisfied epochs demand", "consumer": consumer, "l_epoch": l_epoch, "r_epoch": r_epoch + }) continue - unsatisfied_demands.append((consumer, l_epoch, r_epoch)) - - if not unsatisfied_demands: - return None - - faced_deadline = [] - for consumer, l_epoch, r_epoch in unsatisfied_demands: - if finalized_epoch >= r_epoch: - if log: - logger.warning({ - "msg": "Epochs demand is passed deadline due to current finalized epoch", - "consumer": consumer, - "l_epoch": l_epoch, - "r_epoch": r_epoch, - "finalized_epoch": finalized_epoch - }) - faced_deadline.append((consumer, l_epoch, r_epoch)) - - def missing_epochs(_, l_epoch_, r_epoch_): - return self.db.missing_epochs_in(l_epoch_, r_epoch_) - - if not faced_deadline: - unsatisfied_demands.sort( - # Demand with the largest count of unprocessed epochs goes first - key=lambda demand: (-1 * len(missing_epochs(*demand))) - ) - consumer, start_epoch, end_epoch = unsatisfied_demands[0] - else: - faced_deadline.sort( - # Demand with the least count of unprocessed epochs goes first - key=lambda demand: len(missing_epochs(*demand)) - ) - consumer, start_epoch, end_epoch = faced_deadline[0] + logger.info({ + "msg": "Unsatisfied epochs demand", "consumer": consumer, "l_epoch": l_epoch, "r_epoch": r_epoch + }) + if l_epoch < min_epoch_in_db: + start_epoch = min(start_epoch, l_epoch) + if r_epoch > max_epoch_in_db: + end_epoch = min(end_epoch, r_epoch) - if log: + log_meta_info = { + "start_epoch": start_epoch, + "end_epoch": end_epoch, + "finalized_epoch": finalized_epoch, + "max_available_epoch_to_check": max_available_epoch_to_check, + 
"min_epoch_in_db": min_epoch_in_db, + "max_epoch_in_db": max_epoch_in_db, + "gap_in_db_len": len(gap) if gap else None + } + + if start_epoch > max_available_epoch_to_check: logger.info({ - "msg": "Epochs demand is chosen to process", - "consumer": consumer, - "start_epoch": start_epoch, - "end_epoch": end_epoch, + "msg": "No available to process epochs range demand yet", + **log_meta_info }) + return None + + logger.info({ + "msg": "Epochs range to process is determined", + **log_meta_info + }) return start_epoch, end_epoch + + def new_epochs_range_demand_appeared(self) -> bool: + db_epochs_demand_nonce = self.db.epochs_demand_nonce() + nonce_changed = self.last_epochs_demand_nonce != db_epochs_demand_nonce + if nonce_changed: + self.last_epochs_demand_nonce = db_epochs_demand_nonce + return True + return False From 13330ea3630027880a50e7d2e130994adec302da Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 11 Nov 2025 15:46:10 +0100 Subject: [PATCH 24/35] fix: `define_epochs_to_process_range`. Simple AI tests --- .../performance_collector.py | 16 +- .../test_performance_collector.py | 395 ++++++++++++++++++ 2 files changed, 406 insertions(+), 5 deletions(-) create mode 100644 tests/modules/performance_collector/test_performance_collector.py diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index e0756e00b..378674c14 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -95,7 +95,11 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute return ModuleExecuteDelay.NEXT_SLOT def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[EpochNumber, EpochNumber] | None: - max_available_epoch_to_check = max(0, finalized_epoch - FrameCheckpointsIterator.CHECKPOINT_SLOT_DELAY_EPOCHS) + max_available_epoch_to_check = finalized_epoch - FrameCheckpointsIterator.CHECKPOINT_SLOT_DELAY_EPOCHS + if max_available_epoch_to_check < 0: + logger.info({"msg": "No available epochs to process yet"}) + return None + start_epoch = EpochNumber(max(0, max_available_epoch_to_check - self.DEFAULT_EPOCHS_STEP_TO_COLLECT)) end_epoch = EpochNumber(max_available_epoch_to_check) @@ -109,6 +113,11 @@ def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[ }) return start_epoch, end_epoch + if max_available_epoch_to_check < min_epoch_in_db: + raise ValueError( + "Max available epoch to check is lower than the minimum epoch in the DB. 
CL node is not synced" + ) + gap = self.db.missing_epochs_in(min_epoch_in_db, max_epoch_in_db) if gap: start_epoch = min(gap) @@ -129,10 +138,7 @@ def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[ logger.info({ "msg": "Unsatisfied epochs demand", "consumer": consumer, "l_epoch": l_epoch, "r_epoch": r_epoch }) - if l_epoch < min_epoch_in_db: - start_epoch = min(start_epoch, l_epoch) - if r_epoch > max_epoch_in_db: - end_epoch = min(end_epoch, r_epoch) + start_epoch = min(start_epoch, l_epoch) log_meta_info = { "start_epoch": start_epoch, diff --git a/tests/modules/performance_collector/test_performance_collector.py b/tests/modules/performance_collector/test_performance_collector.py new file mode 100644 index 000000000..b8b619cb7 --- /dev/null +++ b/tests/modules/performance_collector/test_performance_collector.py @@ -0,0 +1,395 @@ +import pytest +from unittest.mock import Mock, patch + +from src.modules.performance_collector.performance_collector import PerformanceCollector +from src.modules.performance_collector.db import DutiesDB +from src.types import EpochNumber + + +@pytest.fixture +def mock_w3(): + """Mock Web3 instance""" + return Mock() + + +@pytest.fixture +def mock_db(): + """Mock DutiesDB instance""" + return Mock(spec=DutiesDB) + + +@pytest.fixture +def performance_collector(mock_w3, mock_db): + """Create PerformanceCollector instance with mocked dependencies""" + from pathlib import Path + mock_cache_path = Path('/tmp') + + with patch('src.modules.performance_collector.performance_collector.DutiesDB', return_value=mock_db), \ + patch('src.modules.performance_collector.performance_collector.start_performance_api_server'), \ + patch('src.modules.performance_collector.performance_collector.variables.CACHE_PATH', mock_cache_path), \ + patch('src.modules.performance_collector.performance_collector.variables.PERFORMANCE_COLLECTOR_SERVER_API_PORT', 8080): + collector = PerformanceCollector(mock_w3) + collector.db = mock_db + return collector + + +class TestDefineEpochsToProcessRange: + """Test cases for define_epochs_to_process_range method""" + + @pytest.mark.unit + def test_empty_db_default_range(self, performance_collector, mock_db): + """Test when database is empty - should return default range""" + finalized_epoch = EpochNumber(100) + + # Setup empty DB + mock_db.min_epoch.return_value = None + mock_db.max_epoch.return_value = None + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Expected calculations: + # max_available_epoch_to_check = max(0, 100 - 2) = 98 + # start_epoch = max(0, 98 - 10) = 88 + # end_epoch = 98 + assert result == (EpochNumber(88), EpochNumber(98)) + + @pytest.mark.unit + def test_empty_db_with_low_finalized_epoch(self, performance_collector, mock_db): + """Test when finalized epoch is low and DB is empty""" + finalized_epoch = EpochNumber(5) + + # Setup empty DB + mock_db.min_epoch.return_value = None + mock_db.max_epoch.return_value = None + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Expected calculations: + # max_available_epoch_to_check = max(0, 5 - 2) = 3 + # start_epoch = max(0, 3 - 10) = 0 + # end_epoch = 3 + assert result == (EpochNumber(0), EpochNumber(3)) + + @pytest.mark.unit + def test_db_with_gap_in_range(self, performance_collector, mock_db): + """Test when there's a gap in the database""" + finalized_epoch = EpochNumber(100) + + # Setup DB with gap + 
mock_db.min_epoch.return_value = 10 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [50, 51, 52] # Gap in the middle + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from the first missing epoch + assert result[0] == EpochNumber(50) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + + @pytest.mark.unit + def test_db_without_gap_continuous_collection(self, performance_collector, mock_db): + """Test when DB has no gaps - should collect next epochs""" + finalized_epoch = EpochNumber(100) + + # Setup DB without gaps + mock_db.min_epoch.return_value = 10 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] # No gaps + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from next epoch after max + # start_epoch = 90 + 1 = 91 + assert result[0] == EpochNumber(91) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + + @pytest.mark.unit + def test_unsatisfied_epochs_demand_before_db_range(self, performance_collector, mock_db): + """Test when there's unsatisfied demand before existing DB range""" + finalized_epoch = EpochNumber(100) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup epochs demand before DB range + mock_db.epochs_demand.return_value = { + 'consumer1': (20, 30) # Demand before min_epoch_in_db + } + mock_db.is_range_available.return_value = False # Unsatisfied demand + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from the earliest demand + assert result[0] == EpochNumber(20) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + + @pytest.mark.unit + def test_unsatisfied_epochs_demand_after_db_range(self, performance_collector, mock_db): + """Test when there's unsatisfied demand after existing DB range""" + finalized_epoch = EpochNumber(200) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup epochs demand after DB range + mock_db.epochs_demand.return_value = { + 'consumer1': (95, 105) # Demand after max_epoch_in_db + } + mock_db.is_range_available.return_value = False # Unsatisfied demand + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from next epoch after max DB epoch + assert result[0] == EpochNumber(91) + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_satisfied_epochs_demand_ignored(self, performance_collector, mock_db): + """Test that satisfied epochs demand is ignored""" + finalized_epoch = EpochNumber(100) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup satisfied epochs demand + mock_db.epochs_demand.return_value = { + 'consumer1': (60, 70) # Demand within DB range + } + mock_db.is_range_available.return_value = True # Satisfied demand + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from next epoch after max (ignoring satisfied demand) + assert result[0] == EpochNumber(91) + # End epoch should be max_available = max(0, 
100 - 2) = 98 + assert result[1] == EpochNumber(98) + + @pytest.mark.unit + def test_multiple_unsatisfied_demands(self, performance_collector, mock_db): + """Test with multiple unsatisfied demands""" + finalized_epoch = EpochNumber(200) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup multiple unsatisfied demands + mock_db.epochs_demand.return_value = { + 'consumer1': (20, 30), # Before DB range + 'consumer2': (95, 105), # After DB range + 'consumer3': (60, 70), # Within DB range (satisfied) + } + + def mock_is_range_available(l_epoch, r_epoch): + if l_epoch == 60 and r_epoch == 70: + return True # Satisfied + return False # Unsatisfied + + mock_db.is_range_available.side_effect = mock_is_range_available + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should take the minimum of unsatisfied demands for start + # start_epoch = min(91, 20) = 20 (91 from DB continuation, 20 from demand) + assert result[0] == EpochNumber(20) + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_very_low_finalized_epoch(self, performance_collector, mock_db): + """Test with very low finalized epoch (edge case)""" + finalized_epoch = EpochNumber(1) + + # Setup empty DB + mock_db.min_epoch.return_value = None + mock_db.max_epoch.return_value = None + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # max_available_epoch_to_check = (1 - 2) = -1 + assert result is None + + @pytest.mark.unit + def test_no_epochs_demand_logged(self, performance_collector, mock_db, caplog): + """Test logging when no epochs demand is found""" + finalized_epoch = EpochNumber(100) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + mock_db.epochs_demand.return_value = {} # No demand + + with caplog.at_level('INFO'): + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + assert "No epochs demand found" in caplog.text + assert result is not None + + @pytest.mark.unit + def test_complex_scenario_with_gap_and_demand(self, performance_collector, mock_db): + """Test complex scenario with both gaps and unsatisfied demand""" + finalized_epoch = EpochNumber(200) + + # Setup DB with gap + mock_db.min_epoch.return_value = 30 + mock_db.max_epoch.return_value = 150 + mock_db.missing_epochs_in.return_value = [100, 101, 102] # Gap in DB + + # Setup unsatisfied demand + mock_db.epochs_demand.return_value = { + 'consumer1': (10, 20), # Before DB range + } + mock_db.is_range_available.return_value = False # Unsatisfied + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from gap (100) vs demand (10) -> min(100, 10) = 10 + assert result[0] == EpochNumber(10) + # End epoch should be max_available = max(0, 200 - 2) = 198 + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_finalized_epoch_zero(self, performance_collector, mock_db): + """Test with zero finalized epoch (edge case)""" + finalized_epoch = EpochNumber(0) + + # Setup empty DB + mock_db.min_epoch.return_value = None + mock_db.max_epoch.return_value = None + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # max_available_epoch_to_check = -2 + assert result is None + + @pytest.mark.unit + def 
test_epochs_demand_exactly_at_db_boundaries(self, performance_collector, mock_db): + """Test epochs demand exactly at database boundaries""" + finalized_epoch = EpochNumber(200) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup demand exactly at boundaries + mock_db.epochs_demand.return_value = { + 'consumer1': (50, 90), # Exactly the DB range + } + mock_db.is_range_available.return_value = True # Satisfied demand + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should ignore satisfied demand and continue from max + 1 + assert result[0] == EpochNumber(91) + # End epoch should be max_available = max(0, 200 - 2) = 198 + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_negative_start_epoch_calculation(self, performance_collector, mock_db): + """Test when calculation would result in negative start epoch""" + finalized_epoch = EpochNumber(5) # Very low + + # Setup DB that would lead to high start epoch + mock_db.min_epoch.return_value = 100 + mock_db.max_epoch.return_value = 200 + mock_db.missing_epochs_in.return_value = [] + mock_db.epochs_demand.return_value = {} + + with pytest.raises(ValueError): + # Finalized epoch is lower than min_epoch_in_db + performance_collector.define_epochs_to_process_range(finalized_epoch) + + @pytest.mark.unit + def test_overlapping_epochs_demands(self, performance_collector, mock_db): + """Test with overlapping epochs demands""" + finalized_epoch = EpochNumber(200) + + # Setup DB + mock_db.min_epoch.return_value = 80 + mock_db.max_epoch.return_value = 120 + mock_db.missing_epochs_in.return_value = [] + + # Setup overlapping demands + mock_db.epochs_demand.return_value = { + 'consumer1': (40, 60), # Before DB range + 'consumer2': (50, 70), # Overlapping with consumer1 + 'consumer3': (140, 160), # After DB range + } + mock_db.is_range_available.return_value = False # All unsatisfied + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should take the earliest start (40) and appropriate end + assert result[0] == EpochNumber(40) + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_empty_epochs_demand_dict(self, performance_collector, mock_db): + """Test with explicitly empty epochs demand dictionary""" + finalized_epoch = EpochNumber(100) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + mock_db.epochs_demand.return_value = {} # Explicitly empty + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should proceed with normal DB continuation logic + assert result[0] == EpochNumber(91) + assert result[1] == EpochNumber(98) # max_available = 98 + + @pytest.mark.unit + def test_gap_at_beginning_of_db_range(self, performance_collector, mock_db): + """Test when gap is at the very beginning of DB range""" + finalized_epoch = EpochNumber(100) + + # Setup DB with gap at the beginning + mock_db.min_epoch.return_value = 10 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [10, 11, 12] # Gap at beginning + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from the first missing epoch + assert result[0] == EpochNumber(10) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + + 
@pytest.mark.unit + def test_gap_at_end_of_db_range(self, performance_collector, mock_db): + """Test when gap is at the very end of DB range""" + finalized_epoch = EpochNumber(100) + + # Setup DB with gap at the end + mock_db.min_epoch.return_value = 10 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [88, 89, 90] # Gap at end + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from the first missing epoch + assert result[0] == EpochNumber(88) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + From 84ff68e206a07dda33f2ca3b71ac8f20b85a8d65 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Wed, 12 Nov 2025 15:12:33 +0100 Subject: [PATCH 25/35] fix: remove `DEFAULT_EPOCHS_STEP_TO_COLLECT` --- .../performance_collector/performance_collector.py | 5 ++--- .../performance_collector/test_performance_collector.py | 8 ++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index 378674c14..20605467b 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -20,8 +20,6 @@ class PerformanceCollector(BaseModule): """ Continuously collects performance data from Consensus Layer into db for the given epoch range. """ - DEFAULT_EPOCHS_STEP_TO_COLLECT: Final = 10 - last_epochs_demand_nonce: int = 0 def __init__(self, w3, db_path: Optional[str] = None): @@ -56,6 +54,7 @@ def _build_converter(self) -> ChainConverter: def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay: converter = self._build_converter() + # TODO: return comment about finalized_epoch finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) epochs_range_demand = self.define_epochs_to_process_range(finalized_epoch) @@ -100,7 +99,7 @@ def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[ logger.info({"msg": "No available epochs to process yet"}) return None - start_epoch = EpochNumber(max(0, max_available_epoch_to_check - self.DEFAULT_EPOCHS_STEP_TO_COLLECT)) + start_epoch = EpochNumber(max_available_epoch_to_check) end_epoch = EpochNumber(max_available_epoch_to_check) min_epoch_in_db = self.db.min_epoch() diff --git a/tests/modules/performance_collector/test_performance_collector.py b/tests/modules/performance_collector/test_performance_collector.py index b8b619cb7..0c583ef59 100644 --- a/tests/modules/performance_collector/test_performance_collector.py +++ b/tests/modules/performance_collector/test_performance_collector.py @@ -50,9 +50,9 @@ def test_empty_db_default_range(self, performance_collector, mock_db): # Expected calculations: # max_available_epoch_to_check = max(0, 100 - 2) = 98 - # start_epoch = max(0, 98 - 10) = 88 + # start_epoch = 98 # end_epoch = 98 - assert result == (EpochNumber(88), EpochNumber(98)) + assert result == (EpochNumber(98), EpochNumber(98)) @pytest.mark.unit def test_empty_db_with_low_finalized_epoch(self, performance_collector, mock_db): @@ -68,9 +68,9 @@ def test_empty_db_with_low_finalized_epoch(self, performance_collector, mock_db) # Expected calculations: # max_available_epoch_to_check = max(0, 5 - 2) = 3 - # start_epoch = max(0, 3 - 10) = 0 + # start_epoch = 3 # end_epoch = 3 - assert result == (EpochNumber(0), 
EpochNumber(3)) + assert result == (EpochNumber(3), EpochNumber(3)) @pytest.mark.unit def test_db_with_gap_in_range(self, performance_collector, mock_db): From 6adb0aeb9b7074c4c31b3e89c80db697afd7c12c Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Fri, 14 Nov 2025 11:42:35 +0100 Subject: [PATCH 26/35] fix: review --- poetry.lock | 96 +----- pyproject.toml | 1 - src/modules/csm/csm.py | 3 +- .../performance_collector/checkpoint.py | 25 +- src/modules/performance_collector/codec.py | 22 +- src/modules/performance_collector/db.py | 133 ++++---- .../performance_collector.py | 76 ++--- src/utils/serializable_set.py | 164 ++++++++++ .../performance_collector/test_codec.py | 18 +- tests/utils/test_serializable_set.py | 293 ++++++++++++++++++ 10 files changed, 579 insertions(+), 252 deletions(-) create mode 100644 src/utils/serializable_set.py create mode 100644 tests/utils/test_serializable_set.py diff --git a/poetry.lock b/poetry.lock index f13a04dbe..84b6d1358 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2691,100 +2691,6 @@ tomlkit = ">=0.10.1" spelling = ["pyenchant (>=3.2,<4.0)"] testutils = ["gitpython (>3)"] -[[package]] -name = "pyroaring" -version = "1.0.3" -description = "Library for handling efficiently sorted integer sets." -optional = false -python-versions = "*" -groups = ["main"] -files = [ - {file = "pyroaring-1.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:c10e4cfbe203a578c78808406af491e3615d5e46cf69a7709050243346cd68bc"}, - {file = "pyroaring-1.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:cc329c62e504f2531c4008240f31736bcd2dee4339071f1eac0648068e6d17fa"}, - {file = "pyroaring-1.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8c7fb6ddf6ef31148f0939bc5c26b681d63df301ee1e372525012dd7bfe4a30a"}, - {file = "pyroaring-1.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd18446832ea04a7d33bd6b78270b0be14eabcda5937af3428d6cb3d2bf98e54"}, - {file = "pyroaring-1.0.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f0cbc766df2a24e28f23d69b66bbec64e691799219fd82c2f2236f03fc88e2e"}, - {file = "pyroaring-1.0.3-cp310-cp310-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:96a51e96f8f473381615f0f852f7238ad0a47f28e4a35e9f082468c5cfe4e9c3"}, - {file = "pyroaring-1.0.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:654af38b1f1c9bdc27b4f6d331fc5d91599df96e72a6df1886f4d95eea60ab29"}, - {file = "pyroaring-1.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:6721036afa31c07bdcbb4fcafa166660cf9c2eac695dcd495f8778549fa55899"}, - {file = "pyroaring-1.0.3-cp310-cp310-musllinux_1_2_armv7l.whl", hash = "sha256:0caa10f20329d09233fac6550b2adce4d9f173f748a9a9a5ea3b7033827dfe2d"}, - {file = "pyroaring-1.0.3-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:f109be8af937e85c52cb920d3fd120db52b172f59460852d2e3d2e3d13a4f52a"}, - {file = "pyroaring-1.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:ddc80bfcd313c7c524a2742d263e73cae088b6a611b77dcc46fa90c306f6dace"}, - {file = "pyroaring-1.0.3-cp310-cp310-win32.whl", hash = "sha256:5a183f5ec069757fe5b60e37f7c6fa8a53178eacf0d76601b739e2890edee036"}, - {file = "pyroaring-1.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:051bd9a66ce855a1143faa2b879ea6c6ca2905209e172ce9eedf79834897c730"}, - {file = "pyroaring-1.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:3043ff5c85375310ca3cd3e01944e03026e0ec07885e52dfabcfcd9dc303867f"}, - {file = 
"pyroaring-1.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:755cdac1f9a1b7b5c621e570d4f6dbcf3b8e4a1e35a66f976104ecb35dce4ed2"}, - {file = "pyroaring-1.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ebab073db620f26f0ba11e13fa2f35e3b1298209fba47b6bc8cb6f0e2c9627f9"}, - {file = "pyroaring-1.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:684fb8dffe19bdb7f91897c65eac6eee23b1e46043c47eb24288f28a1170fe04"}, - {file = "pyroaring-1.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:678d31fc24e82945a1bfb14816c77823983382ffea76985d494782aa2f058427"}, - {file = "pyroaring-1.0.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7d815f624e0285db3669f673d1725cb754b120ec70d0032d7c7166103a96c96d"}, - {file = "pyroaring-1.0.3-cp311-cp311-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:57fd5b80dacb8e888402b6b7508a734c6a527063e4e24e882ff2e0fd90721ada"}, - {file = "pyroaring-1.0.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ab26a7a45a0bb46c00394d1a60a9f2d57c220f84586e30d59b39784b0f94aee6"}, - {file = "pyroaring-1.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:9232f3f606315d59049c128154100fd05008d5c5c211e48b21848cd41ee64d26"}, - {file = "pyroaring-1.0.3-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:f34b44b3ec3df97b978799f2901fefb2a48d367496fd1cde3cc5fe8b3bc13510"}, - {file = "pyroaring-1.0.3-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:25a83ec6bac3106568bd3fdd316f0fee52aa0be8c72da565ad02b10ae7905924"}, - {file = "pyroaring-1.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:c17d4ec53b5b6b333d9a9515051213a691293ada785dc8c025d3641482597ed3"}, - {file = "pyroaring-1.0.3-cp311-cp311-win32.whl", hash = "sha256:d54024459ace600f1d1ffbc6dc3c60eb47cca3b678701f06148f59e10f6f8d7b"}, - {file = "pyroaring-1.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:c28750148ef579a7447a8cb60b39e5943e03f8c29bce8f2788728f6f23d1887a"}, - {file = "pyroaring-1.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:535d8deccbd8db2c6bf38629243e9646756905574a742b2a72ff51d6461d616c"}, - {file = "pyroaring-1.0.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:add3e4c78eb590a76526ecce8d1566eecdd5822e351c36b3697997f4a80ed808"}, - {file = "pyroaring-1.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:ebaffe846cf4ba4f00ce6b8a9f39613f24e2d09447e77be4fa6e898bc36451b6"}, - {file = "pyroaring-1.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:a9459f27498f97d08031a34a5ead230b77eb0ab3cc3d85b7f54faa2fd548acd6"}, - {file = "pyroaring-1.0.3-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2b2eb8bd1c35c772994889be9f7dda09477475d7aa1e2af9ab4ef18619326f6"}, - {file = "pyroaring-1.0.3-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d31f4c1c906f1af14ce61a3959d04a14a64c594f8a768399146a45bbd341f21f"}, - {file = "pyroaring-1.0.3-cp312-cp312-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:53be988fc86698d56c11049bfe5113a2f6990adb1fa2782b29636509808b6aa7"}, - {file = "pyroaring-1.0.3-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7df84d223424523b19a23781f4246cc247fd6d821e1bc0853c2f25669136f7d0"}, - {file = "pyroaring-1.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:34a781f1f9766897f63ef18be129827340ae37764015b83fdcff1efb9e29136d"}, - {file = "pyroaring-1.0.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = 
"sha256:1f414343b4ed0756734328cdf2a91022fc54503769e3f8d79bd0b672ea815a16"}, - {file = "pyroaring-1.0.3-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:d16ae185c72dc64f76335dbe53e53a892e78115adc92194957d1b7ef74d230b9"}, - {file = "pyroaring-1.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:f888447bf22dde7759108bfe6dfbeb6bbb61b14948de9c4cb6843c4dd57e2215"}, - {file = "pyroaring-1.0.3-cp312-cp312-win32.whl", hash = "sha256:fbbdc44c51a0a3efd7be3dbe04466278ce098fcd101aa1905849319042159770"}, - {file = "pyroaring-1.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:3b217c4b3ad953b4c759a0d2f9bd95316f0c345b9f7adb49e6ded7a1f5106bd4"}, - {file = "pyroaring-1.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:e6bcf838564c21bab8fe6c2748b4990d4cd90612d8c470c04889def7bb5114ea"}, - {file = "pyroaring-1.0.3-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:20bc947054b197d1baa76cd05d70b8e04f95b82e698266e2f8f2f4b36d764477"}, - {file = "pyroaring-1.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ba5909b4c66bb85cab345e2f3a87e5ce671509c94b8c9823d8db64e107cbe854"}, - {file = "pyroaring-1.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:b744746ba5da27fad760067f12633f5d384db6a1e65648d00244ceacbbd87731"}, - {file = "pyroaring-1.0.3-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b16c2a2791a5a09c4b59c0e1069ac1c877d0df25cae3155579c7eac8844676e"}, - {file = "pyroaring-1.0.3-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e7f68dfcf8d01177267f4bc06c4960fe8e39577470d1b52c9af8b61a72ca8767"}, - {file = "pyroaring-1.0.3-cp313-cp313-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:dba4e4700030182a981a3c887aa73887697145fc9ffb192f908aa59b718fbbdd"}, - {file = "pyroaring-1.0.3-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e26dd1dc1edba02288902914bdb559e53e346e9155defa43c31fcab831b55342"}, - {file = "pyroaring-1.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6eb98d2cacfc6d51c6a69893f04075e07b3df761eac71ba162c43b9b4c4452ad"}, - {file = "pyroaring-1.0.3-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:a967e9eddb9485cbdd95d6371e3dada67880844d836c0283d3b11efe9225d1b7"}, - {file = "pyroaring-1.0.3-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:b12ef7f992ba7be865f91c7c098fd8ac6c413563aaa14d5b1e2bcb8cb43a4614"}, - {file = "pyroaring-1.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:82ca5be174b85c40be7b00bc6bf39b2931a1b4a465f3af17ec6b9c48e9aa6fe0"}, - {file = "pyroaring-1.0.3-cp313-cp313-win32.whl", hash = "sha256:f758c681e63ffe74b20423695e71f0410920f41b075cee679ffb5bc2bf38440b"}, - {file = "pyroaring-1.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:428c3bb384fe4c483feb5cf7aa3aef1621fb0a5c4f3d391da67b2c4a43f08a10"}, - {file = "pyroaring-1.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:9c0c856e8aa5606e8aed5f30201286e404fdc9093f81fefe82d2e79e67472bb2"}, - {file = "pyroaring-1.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6321a95b5b2ba69aa32e920dd1aa7f8fc4fac55b75981978aa4f2378724dee27"}, - {file = "pyroaring-1.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:054eb6ef04ff9d2ed3ddd18ae21e5e51e02d0f8cdd7e5cb948648f77ddb04ea2"}, - {file = "pyroaring-1.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4d064aea3827e63eb60294ae3e6623e29613f5c8844869646d06f3735a425dd9"}, - {file = "pyroaring-1.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:c84d5b17ef628c3956d9a79c2f78c5bea7dda6f7aeb01f34671034d2650b9efb"}, - {file = "pyroaring-1.0.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8038f7dd25eb83c277b8e0ea14c5e61f085cc76bd0c6b9f6679f1770e33541ec"}, - {file = "pyroaring-1.0.3-cp38-cp38-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:defc508ef7acaf58d07e603c55feda6742c4034f5262cfd616f92cc3adbc2815"}, - {file = "pyroaring-1.0.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd0831326971b0ffa08ccce79abe7c2450d5d9254804d855e23a8ba31f70351a"}, - {file = "pyroaring-1.0.3-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:03e063329481396cbb70f1ce8b8ca0f01d74a45ee9d908b6645b0282b23832b0"}, - {file = "pyroaring-1.0.3-cp38-cp38-musllinux_1_2_armv7l.whl", hash = "sha256:7a1b1c82d2da0bedc7c22d4047bd62544ef0e25c6be86ccf4b9d1ccc38876ee8"}, - {file = "pyroaring-1.0.3-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:dd7f9e5b7366b8f9bafca2a0fcf83fa534a00cc12d4ca01e301d8662bcdb805c"}, - {file = "pyroaring-1.0.3-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:a5a1db84e0952805223a7bf77eae58384b700a6b9affc53fb9772dddf868c712"}, - {file = "pyroaring-1.0.3-cp38-cp38-win32.whl", hash = "sha256:54cb0c2bddd330e22099773c4681aca90847265afe56a9201a92c1a758494261"}, - {file = "pyroaring-1.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:47d985293f861df1f2b03b41cef4fd3249c1c9608081750bcf3153051c2312d0"}, - {file = "pyroaring-1.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:d46eb5db78b673d8d8ca83651a1cce1e15eec5a922f2951b1f61014463b72af5"}, - {file = "pyroaring-1.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ce202452de2b58bffa3eb02e27c681eefcfb54e27f8ef85b5c93ebaada50f3f3"}, - {file = "pyroaring-1.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:531b6ae56989b61742dde1b64fedc5537acc046cf04a333548322366c1bf3922"}, - {file = "pyroaring-1.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3035db9459bd8635a0145b4a9e3102869d621cb0b3648051115f06d31ffd1976"}, - {file = "pyroaring-1.0.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7c8fb6b0ad0e8db1b9559b2da180b103b48adddf0e4f24404269e2a3b5db268d"}, - {file = "pyroaring-1.0.3-cp39-cp39-manylinux_2_24_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:8d5df95d9511bc83048da9348c7ab1c20f97ff4d95faf27ee1fdf2e8a96e200e"}, - {file = "pyroaring-1.0.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:65d2d81e5aed7698fab23058db70fb2b65fad221090be037a0af498569109915"}, - {file = "pyroaring-1.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:e195636034a0b62ec0e5325ed2f610f39cc8955ace3f47a5bc7f484159f02341"}, - {file = "pyroaring-1.0.3-cp39-cp39-musllinux_1_2_armv7l.whl", hash = "sha256:bb7f2561e3ec26c3c869458431cbcba6b83f7e925b024460c136dbb5fadf3b31"}, - {file = "pyroaring-1.0.3-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:8e996939de01f448eb9448d91b47ab60bff0555c2a80d5c12a8405814072cd35"}, - {file = "pyroaring-1.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:c656d62d0cf96ede0edc4e7d392889238777bdf88b32afd5d51c3cab016c29a0"}, - {file = "pyroaring-1.0.3-cp39-cp39-win32.whl", hash = "sha256:a7a7d14822c64841ae64e98309697e1631ebadba55ded33daa7cd16d1b487d11"}, - {file = "pyroaring-1.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:a86b88adbe0531b75f94f87279a6d4ee68e63335e29bbdab4400a05704fc2587"}, - {file = "pyroaring-1.0.3-cp39-cp39-win_arm64.whl", hash = 
"sha256:1ed2e9c7af46052466b5fa0392fe540331474718d97b9756cefa23233bfdb3ea"}, - {file = "pyroaring-1.0.3.tar.gz", hash = "sha256:cd7392d1c010c9e41c11c62cd0610c8852e7e9698b1f7f6c2fcdefe50e7ef6da"}, -] - [[package]] name = "pytest" version = "7.4.4" @@ -3682,4 +3588,4 @@ propcache = ">=0.2.1" [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "921c985f7c0f2dca304da6b3e3252a2f7a78c2dfe5d2a3dd18feb4e4dcb7e2cc" +content-hash = "3660c739c071839b795775dd6c6a904b555d6d5b28c315385f9557849bb476ab" diff --git a/pyproject.toml b/pyproject.toml index 5dff2f7f5..48ae9b1c8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -32,7 +32,6 @@ protobuf="^6.31.1" dag-cbor="^0.3.3" flask = "^3.0.0" waitress = "^3.0.2" -pyroaring = "^1.0.3" [tool.poetry.group.dev.dependencies] base58 = "^2.1.1" diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index 5b37cee71..1adcbb748 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -228,8 +228,7 @@ def fulfill_state(self): }) epoch_data = self.w3.performance.get_epoch(epoch) if epoch_data is None: - logger.warning({"msg": f"Epoch {epoch} is missing in Performance Collector"}) - continue + raise ValueError(f"Epoch {epoch} is missing in Performance Collector") misses, props, syncs = epoch_data logger.info({ diff --git a/src/modules/performance_collector/checkpoint.py b/src/modules/performance_collector/checkpoint.py index 651f86208..44a53adbd 100644 --- a/src/modules/performance_collector/checkpoint.py +++ b/src/modules/performance_collector/checkpoint.py @@ -242,7 +242,7 @@ def _process( for future in as_completed(futures): future.result() except Exception as e: - logger.error({"msg": "Error processing epochs in threads", "error": repr(e)}) + logger.error({"msg": "Error processing epochs in threads", "error": str(e)}) raise SystemExit(1) from e finally: logger.info({"msg": "Shutting down the executor"}) @@ -274,18 +274,17 @@ def _check_duties( process_sync(sync_aggregate, sync_duties) process_attestations(attestations, att_committees, att_misses) - with lock: - propose_duties = list(propose_duties.values()) - if len(propose_duties) > self.converter.chain_config.slots_per_epoch: - raise ValueError(f"Invalid number of propose duties prepared in epoch {duty_epoch}") - if len(sync_duties) > SYNC_COMMITTEE_SIZE: - raise ValueError(f"Invalid number of sync duties prepared in epoch {duty_epoch}") - self.db.store_epoch( - duty_epoch, - att_misses=att_misses, - proposals=propose_duties, - syncs=sync_duties, - ) + propose_duties = list(propose_duties.values()) + if len(propose_duties) > self.converter.chain_config.slots_per_epoch: + raise ValueError(f"Invalid number of propose duties prepared in epoch {duty_epoch}") + if len(sync_duties) > SYNC_COMMITTEE_SIZE: + raise ValueError(f"Invalid number of sync duties prepared in epoch {duty_epoch}") + self.db.store_epoch( + duty_epoch, + att_misses=att_misses, + proposals=propose_duties, + syncs=sync_duties, + ) @timeit( lambda args, duration: logger.info( diff --git a/src/modules/performance_collector/codec.py b/src/modules/performance_collector/codec.py index 0b36dd1f8..3775693a1 100644 --- a/src/modules/performance_collector/codec.py +++ b/src/modules/performance_collector/codec.py @@ -2,9 +2,8 @@ from dataclasses import dataclass from typing import TypeAlias -from pyroaring import BitMap - from src.types import ValidatorIndex +from src.utils.serializable_set import SerializableSet @dataclass @@ -21,6 +20,8 @@ class ProposalDutiesCodec: @classmethod def encode(cls, proposals: 
list[ProposalDuty]) -> bytes:
+        if len(proposals) == 0:
+            raise ValueError("Invalid proposals count")
         items = sorted(((p.validator_index, p.is_proposed) for p in proposals), key=lambda t: t[0])
         return b"".join(struct.pack(cls.PACK_FMT, vid, flag) for vid, flag in items)

@@ -76,16 +77,15 @@ class AttDutiesMissCodec:

     @staticmethod
     def encode(misses: AttDutyMisses) -> bytes:
-        bm = BitMap(sorted(v for v in misses))
-        bm.shrink_to_fit()
-        bm.run_optimize()
+        bm = SerializableSet(misses)
         return bm.serialize()

     @staticmethod
     def decode(blob: bytes) -> AttDutyMisses:
-        # Non-iterable value BitMap.deserialize(blob) is used in an iterating context,
-        # but it IS iterable.
-        return {ValidatorIndex(i) for i in BitMap.deserialize(blob)}  # pylint: disable=E1133
+        if not blob:
+            return set()
+        bm = SerializableSet.deserialize(blob)
+        return {ValidatorIndex(i) for i in bm}


 EpochData: TypeAlias = tuple[AttDutyMisses, list[ProposalDuty], list[SyncDuty]]
@@ -101,7 +101,7 @@ class EpochDataCodec:
     @classmethod
     def encode(
         cls,
-        att_misses: set[ValidatorIndex],
+        att_misses: AttDutyMisses,
         proposals: list[ProposalDuty],
         syncs: list[SyncDuty],
     ) -> bytes:
@@ -128,5 +128,5 @@ def decode(cls, blob: bytes) -> EpochData:
         offset += props_size
         syncs = SyncDutiesCodec.decode(blob[offset:(offset + sync_size)])
         offset += sync_size
-        att = AttDutiesMissCodec.decode(bytes(blob[offset:(offset + att_count)])) if att_count else BitMap()
-        return set(att), props, syncs
+        att = AttDutiesMissCodec.decode(bytes(blob[offset:(offset + att_count)])) if att_count else set()
+        return att, props, syncs
diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py
index 64a0546da..cc263383b 100644
--- a/src/modules/performance_collector/db.py
+++ b/src/modules/performance_collector/db.py
@@ -1,4 +1,5 @@
 import sqlite3
+from time import time
 from contextlib import contextmanager
 from typing import Optional

@@ -11,63 +12,55 @@ class DutiesDB:

     def __init__(self, path: str):
         self._path = path
-        self._conn = sqlite3.connect(
+        self.migrate()
+        # Check SQLite thread safety.
+ # Doc: https://docs.python.org/3/library/sqlite3.html#sqlite3.threadsafety + assert sqlite3.threadsafety > 0, "SQLite is not compiled with thread safety" + + @contextmanager + def cursor(self): + conn = sqlite3.connect( self._path, check_same_thread=False, timeout=variables.PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT ) - # Optimize SQLite for performance: WAL mode for concurrent access, - # normal sync for speed/safety balance, memory temp storage - self._conn.execute("PRAGMA journal_mode=WAL;") - self._conn.execute("PRAGMA synchronous=NORMAL;") - self._conn.execute("PRAGMA temp_store=MEMORY;") - self._conn.execute( - """ - CREATE TABLE IF NOT EXISTS duties - ( - epoch INTEGER PRIMARY KEY, - blob BLOB NOT NULL - ); - """ - ) - self._conn.execute( - """ - CREATE TABLE IF NOT EXISTS epochs_demand - ( - consumer STRING PRIMARY KEY, - l_epoch INTEGER, - r_epoch INTEGER + yield conn.cursor() + conn.commit() + conn.close() + + def migrate(self): + with self.cursor() as cur: + # Optimize SQLite for performance: WAL mode for concurrent access, + # normal sync for speed/safety balance, memory temp storage + cur.execute("PRAGMA journal_mode=WAL;") + cur.execute("PRAGMA synchronous=NORMAL;") + cur.execute("PRAGMA temp_store=MEMORY;") + cur.execute( + """ + CREATE TABLE IF NOT EXISTS duties + ( + epoch INTEGER PRIMARY KEY, + blob BLOB NOT NULL + ); + """ ) - """ - ) - self._conn.execute( - """ - CREATE TABLE IF NOT EXISTS epochs_demand_nonce - ( - value INTEGER NOT NULL + cur.execute( + """ + CREATE TABLE IF NOT EXISTS epochs_demand + ( + consumer STRING PRIMARY KEY, + l_epoch INTEGER, + r_epoch INTEGER, + updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP + ) + """ ) - """ - ) - self._conn.execute("INSERT INTO epochs_demand_nonce (value) VALUES (0);") - self._conn.commit() - - def __del__(self): - if self._conn: - self._conn.close() - self._conn = None - - @contextmanager - def connection(self): - try: - yield self._conn.cursor() - finally: - self._conn.commit() def store_demand(self, consumer: str, l_epoch: int, r_epoch: int) -> None: - with self.connection() as cur: + with self.cursor() as cur: + updated_at = int(time()) cur.execute( - "INSERT OR REPLACE INTO epochs_demand(consumer, l_epoch, r_epoch) VALUES(?, ?, ?)", - (consumer, l_epoch, r_epoch), + "INSERT OR REPLACE INTO epochs_demand(consumer, l_epoch, r_epoch, updated_at) VALUES(?, ?, ?, ?)", + (consumer, l_epoch, r_epoch, updated_at), ) - cur.execute("UPDATE epochs_demand_nonce SET value = value + 1") def store_epoch( self, @@ -82,7 +75,7 @@ def store_epoch( return blob def _store_blob(self, epoch: int, blob: bytes) -> None: - with self.connection() as cur: + with self.cursor() as cur: cur.execute( "INSERT OR REPLACE INTO duties(epoch, blob) VALUES(?, ?)", (epoch, sqlite3.Binary(blob)), @@ -94,13 +87,13 @@ def _auto_prune(self, current_epoch: int) -> None: threshold = int(current_epoch) - variables.PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS if threshold <= 0: return - with self.connection() as cur: + with self.cursor() as cur: cur.execute("DELETE FROM duties WHERE epoch < ?", (threshold,)) def is_range_available(self, l_epoch: int, r_epoch: int) -> bool: if int(l_epoch) > int(r_epoch): raise ValueError("Invalid epoch range") - with self.connection() as cur: + with self.cursor() as cur: cur.execute( "SELECT COUNT(1) FROM duties WHERE epoch BETWEEN ? 
AND ?", (int(l_epoch), int(r_epoch)), @@ -111,7 +104,7 @@ def is_range_available(self, l_epoch: int, r_epoch: int) -> bool: def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[int]: if l_epoch > r_epoch: raise ValueError("Invalid epoch range") - with self.connection() as cur: + with self.cursor() as cur: cur.execute( "SELECT epoch FROM duties WHERE epoch BETWEEN ? AND ? ORDER BY epoch", (l_epoch, r_epoch), @@ -124,7 +117,7 @@ def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[int]: return missing def _get_entry(self, epoch: int) -> Optional[bytes]: - with self.connection() as cur: + with self.cursor() as cur: cur.execute("SELECT blob FROM duties WHERE epoch=?", (int(epoch),)) row = cur.fetchone() if not row: @@ -135,34 +128,28 @@ def get_epoch_blob(self, epoch: int) -> Optional[bytes]: return self._get_entry(epoch) def has_epoch(self, epoch: int) -> bool: - with self.connection() as cur: + with self.cursor() as cur: cur.execute("SELECT 1 FROM duties WHERE epoch=? LIMIT 1", (int(epoch),)) ok = cur.fetchone() is not None return ok - def min_epoch(self) -> int: - with self.connection() as cur: + def min_epoch(self) -> int | None: + with self.cursor() as cur: cur.execute("SELECT MIN(epoch) FROM duties") - val = int(cur.fetchone()[0] or 0) - return val + val = cur.fetchone()[0] + return int(val) if val else None - def max_epoch(self) -> int: - with self.connection() as cur: + def max_epoch(self) -> int | None: + with self.cursor() as cur: cur.execute("SELECT MAX(epoch) FROM duties") - val = int(cur.fetchone()[0] or 0) - return val + val = cur.fetchone()[0] + return int(val) if val else None def epochs_demand(self) -> dict[str, tuple[int, int]]: data = {} - with self.connection() as cur: - cur.execute("SELECT consumer, l_epoch, r_epoch FROM epochs_demand") + with self.cursor() as cur: + cur.execute("SELECT consumer, l_epoch, r_epoch, updated_at FROM epochs_demand") demands = cur.fetchall() - for consumer, l_epoch, r_epoch in demands: - data[consumer] = (int(l_epoch), int(r_epoch)) + for consumer, l_epoch, r_epoch, updated_at in demands: + data[consumer] = (int(l_epoch), int(r_epoch), int(updated_at)) return data - - def epochs_demand_nonce(self) -> int: - with self.connection() as cur: - cur.execute("SELECT value FROM epochs_demand_nonce LIMIT 1") - val = int(cur.fetchone()[0] or 0) - return val diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance_collector/performance_collector.py index 20605467b..df1a2d03f 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance_collector/performance_collector.py @@ -20,7 +20,8 @@ class PerformanceCollector(BaseModule): """ Continuously collects performance data from Consensus Layer into db for the given epoch range. """ - last_epochs_demand_nonce: int = 0 + # Timestamp of the last epochs demand update + last_epochs_demand_update: int = 0 def __init__(self, w3, db_path: Optional[str] = None): super().__init__(w3) @@ -35,7 +36,7 @@ def __init__(self, w3, db_path: Optional[str] = None): except Exception as e: logger.error({'msg': 'Failed to start performance API server', 'error': repr(e)}) raise - self.last_epochs_demand_nonce = self.db.epochs_demand_nonce() + self.last_epochs_demand_update = self.get_epochs_demand_max_updated_at() def refresh_contracts(self): # No need to refresh contracts for this module. There are no contracts used. 
@@ -54,7 +55,10 @@ def _build_converter(self) -> ChainConverter:
 
     def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay:
         converter = self._build_converter()
-        # TODO: return comment about finalized_epoch
+        # NOTE: The finalized slot is the first slot of the justifying epoch, so we need to take the previous epoch.
+        # But if the first slot of the justifying epoch is empty, blockstamp.slot_number will point to the slot where
+        # the last finalized block was created. In that case, finalized_epoch will be less than the actual number of
+        # the last finalized epoch, which can delay frame finalization.
         finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1)
 
         epochs_range_demand = self.define_epochs_to_process_range(finalized_epoch)
@@ -99,46 +103,33 @@ def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[
             logger.info({"msg": "No available epochs to process yet"})
             return None
 
-        start_epoch = EpochNumber(max_available_epoch_to_check)
-        end_epoch = EpochNumber(max_available_epoch_to_check)
-
         min_epoch_in_db = self.db.min_epoch()
         max_epoch_in_db = self.db.max_epoch()
-        if not min_epoch_in_db and not max_epoch_in_db:
-            logger.info({
-                "msg": "Empty Performance Collector DB. Start with the default range calculation",
-                "start_epoch": start_epoch,
-                "end_epoch": end_epoch
-            })
-            return start_epoch, end_epoch
-        if max_available_epoch_to_check < min_epoch_in_db:
+        if min_epoch_in_db and max_available_epoch_to_check < min_epoch_in_db:
             raise ValueError(
                 "Max available epoch to check is lower than the minimum epoch in the DB. CL node is not synced"
             )
-        gap = self.db.missing_epochs_in(min_epoch_in_db, max_epoch_in_db)
-        if gap:
-            start_epoch = min(gap)
-        else:
-            # Start from the next epoch after the last epoch in the DB.
-            start_epoch = max_epoch_in_db + 1
+        start_epoch = EpochNumber(max_available_epoch_to_check)
+        end_epoch = EpochNumber(max_available_epoch_to_check)
 
         epochs_demand = self.db.epochs_demand()
         if not epochs_demand:
-            logger.info({"msg": "No epochs demand found"})
-        for consumer, (l_epoch, r_epoch) in epochs_demand.items():
-            satisfied = self.db.is_range_available(l_epoch, r_epoch)
-            if satisfied:
-                logger.info({
-                    "msg": "Satisfied epochs demand", "consumer": consumer, "l_epoch": l_epoch, "r_epoch": r_epoch
-                })
-                continue
+            logger.info({"msg": "No epoch demands found"})
+        for consumer, (l_epoch, r_epoch, updated_at) in epochs_demand.items():
             logger.info({
-                "msg": "Unsatisfied epochs demand", "consumer": consumer, "l_epoch": l_epoch, "r_epoch": r_epoch
+                "msg": "Epochs demand", "consumer": consumer, "l_epoch": l_epoch, "r_epoch": r_epoch, "updated_at": updated_at
             })
             start_epoch = min(start_epoch, l_epoch)
 
+        missing_epochs = self.db.missing_epochs_in(start_epoch, end_epoch)
+        if missing_epochs:
+            start_epoch = min(missing_epochs)
+        else:
+            # Start from the next epoch after the last epoch in the DB.
+            start_epoch = EpochNumber(max_epoch_in_db + 1)
+
         log_meta_info = {
             "start_epoch": start_epoch,
             "end_epoch": end_epoch,
@@ -146,27 +137,28 @@ def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[
             "max_available_epoch_to_check": max_available_epoch_to_check,
             "min_epoch_in_db": min_epoch_in_db,
             "max_epoch_in_db": max_epoch_in_db,
-            "gap_in_db_len": len(gap) if gap else None
+            "missing_epochs": len(missing_epochs) if missing_epochs else 0,
         }
 
         if start_epoch > max_available_epoch_to_check:
-            logger.info({
-                "msg": "No available to process epochs range demand yet",
-                **log_meta_info
-            })
+            logger.info({"msg": "No epochs range available to process yet", **log_meta_info})
             return None
 
-        logger.info({
-            "msg": "Epochs range to process is determined",
-            **log_meta_info
-        })
+        logger.info({"msg": "Epochs range to process is determined", **log_meta_info})
 
         return start_epoch, end_epoch
 
     def new_epochs_range_demand_appeared(self) -> bool:
-        db_epochs_demand_nonce = self.db.epochs_demand_nonce()
-        nonce_changed = self.last_epochs_demand_nonce != db_epochs_demand_nonce
-        if nonce_changed:
-            self.last_epochs_demand_nonce = db_epochs_demand_nonce
+        max_updated_at = self.get_epochs_demand_max_updated_at()
+        updated = self.last_epochs_demand_update != max_updated_at
+        if updated:
+            self.last_epochs_demand_update = max_updated_at
             return True
         return False
+
+    def get_epochs_demand_max_updated_at(self) -> int:
+        max_updated_at = 0
+        epochs_demand = self.db.epochs_demand()
+        for _, (_, _, updated_at) in epochs_demand.items():
+            max_updated_at = max(max_updated_at, updated_at)
+        return max_updated_at
diff --git a/src/utils/serializable_set.py b/src/utils/serializable_set.py
new file mode 100644
index 000000000..9207c553d
--- /dev/null
+++ b/src/utils/serializable_set.py
@@ -0,0 +1,164 @@
+"""
+Serializable Set Implementation
+
+A set-like data structure with adaptive serialization that automatically chooses
+the most efficient encoding strategy between run-length encoding and direct storage.
+"""
+
+
+class SerializableSet(set):
+    """
+    An adaptive set implementation with variable-length encoding.
+
+    Extends the built-in set with serialization that automatically chooses the optimal strategy:
+    - Run-length encoding for clustered data (efficient for consecutive ranges)
+    - Direct value list for sparse data (efficient when ranges are ineffective)
+    - The more compact of the two representations is selected during serialization
+    """
+
+    def _build_ranges(self, sorted_values: list[int]) -> list[tuple[int, int]]:
+        if not sorted_values:
+            return []
+
+        ranges = []
+        start = sorted_values[0]
+        end = sorted_values[0]
+
+        for val in sorted_values[1:]:
+            if val == end + 1:
+                end = val
+            else:
+                ranges.append((start, end))
+                start = end = val
+
+        ranges.append((start, end))
+        return ranges
+
+    def serialize(self) -> bytes:
+        """
+        Serialize using adaptive encoding.
+        Chooses between run-length encoding and direct value list based on efficiency.
+ Format: [encoding_type: 1 byte] + data + - Type 0: Run-length encoding (ranges) + - Type 1: Direct value list + """ + if not self: + return bytes([0]) + self._encode_varint(0) + + sorted_values = sorted(self) + ranges = self._build_ranges(sorted_values) + + # Calculate size for run-length encoding + rle_data = [self._encode_varint(len(ranges))] + for start, end in ranges: + length = end - start + 1 + rle_data.append(self._encode_varint(start)) + rle_data.append(self._encode_varint(length)) + rle_bytes = b"".join(rle_data) + rle_size = 1 + len(rle_bytes) # +1 for type byte + + # Calculate size for direct value list + direct_data = [self._encode_varint(len(self))] + for value in sorted_values: + direct_data.append(self._encode_varint(value)) + direct_bytes = b"".join(direct_data) + direct_size = 1 + len(direct_bytes) # +1 for type byte + + # Choose more efficient encoding + if rle_size <= direct_size: + return bytes([0]) + rle_bytes # Use run-length encoding + else: + return bytes([1]) + direct_bytes # Use direct value list + + @classmethod + def deserialize(cls, data: bytes) -> "SerializableSet": + _set = cls() + + if not data: + return _set + + encoding_type = data[0] + offset = 1 + + if encoding_type == 0: + if offset >= len(data): + return _set + + num_ranges, offset = cls._decode_varint(data, offset) + + for _ in range(num_ranges): + start, offset = cls._decode_varint(data, offset) + length, offset = cls._decode_varint(data, offset) + end = start + length - 1 + # Add all values in this range to our set + _set.update(range(start, end + 1)) + + return _set + + if encoding_type == 1: + # Direct value list + num_values, offset = cls._decode_varint(data, offset) + + for _ in range(num_values): + value, offset = cls._decode_varint(data, offset) + _set.add(value) + + return _set + + raise ValueError(f"Unknown encoding type: {encoding_type}") + + @staticmethod + def _encode_varint(value: int) -> bytes: + # Reference: https://protobuf.dev/programming-guides/encoding/#varints + payload_mask = 0x7F + continuation_flag = 0x80 + + result = [] + while value >= continuation_flag: # While value does not fit in 7 bits + result.append((value & payload_mask) | continuation_flag) + value >>= 7 # Shift to the next byte + result.append(value & payload_mask) + return bytes(result) + + @staticmethod + def _decode_varint(data: bytes, offset: int) -> tuple[int, int]: + # Reference: https://protobuf.dev/programming-guides/encoding/#varints + payload_mask = 0x7F + continuation_flag = 0x80 + + decoded_value = 0 + bit_shift_position = 0 + current_offset = offset + + while current_offset < len(data): + current_byte = data[current_offset] + current_offset += 1 + + # Extract data bits and place them at the correct position + data_bits = current_byte & payload_mask + decoded_value |= (data_bits << bit_shift_position) + + # Check if this is the last byte (no continuation flag) + has_continuation = (current_byte & continuation_flag) != 0 + if not has_continuation: + break + + # Move to the next 7-bit group + bit_shift_position += 7 + + # Can't be greater than uint64 + if bit_shift_position >= 64: + raise ValueError("Varint too long") + else: + raise ValueError("Incomplete varint") + + return decoded_value, current_offset + + def __repr__(self) -> str: + return f"SerializableSet({sorted(self)})" + + def __str__(self) -> str: + return f"SerializableSet({len(self)} values)" + + def copy(self): + return SerializableSet(self) diff --git a/tests/modules/performance_collector/test_codec.py 
b/tests/modules/performance_collector/test_codec.py index 46e553b13..f678e1654 100644 --- a/tests/modules/performance_collector/test_codec.py +++ b/tests/modules/performance_collector/test_codec.py @@ -229,12 +229,6 @@ def test_sync_miss_duties_codec_empty(): SyncDutiesCodec.decode(SyncDutiesCodec.encode([])) -@pytest.mark.unit -def test_sync_miss_duties_codec_out_of_range(): - with pytest.raises(ValueError): - SyncDutiesCodec.encode([SyncDuty(validator_index=1, missed_count=33)]) - - @pytest.mark.unit def test_att_duties_miss_codec_roundtrip(): src = ATT_MISSES_EXAMPLE @@ -254,15 +248,9 @@ def test_epoch_blob_codec_roundtrip(): proposals = PROPOSALS_EXAMPLE syncs = SYNCS_EXAMPLE - blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) + blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, syncs=syncs) att_decoded, proposals_decoded, syncs_decoded = EpochDataCodec.decode(blob) - # att_decoded may be a set (non-empty) or BitMap; normalize to set - from pyroaring import BitMap # type: ignore - - if isinstance(att_decoded, BitMap): - att_decoded = set(att_decoded) # type: ignore - assert set(att_decoded) == set(att_misses) assert sorted(_proposals_to_tuples(proposals_decoded)) == sorted(_proposals_to_tuples(proposals)) assert sorted(_syncs_to_tuples(syncs_decoded)) == sorted(_syncs_to_tuples(syncs)) @@ -274,7 +262,7 @@ def test_epoch_blob_codec_bad_version(): proposals = PROPOSALS_EXAMPLE syncs = SYNCS_EXAMPLE - blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) + blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, syncs=syncs) bad = bytes([255]) + blob[1:] with pytest.raises(ValueError): @@ -293,7 +281,7 @@ def test_epoch_blob_codec_truncated_payload(): proposals = PROPOSALS_EXAMPLE syncs = SYNCS_EXAMPLE - blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, sync_misses=syncs) + blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, syncs=syncs) bad_blob = blob[:-1] with pytest.raises(ValueError): diff --git a/tests/utils/test_serializable_set.py b/tests/utils/test_serializable_set.py new file mode 100644 index 000000000..8ddfe85c8 --- /dev/null +++ b/tests/utils/test_serializable_set.py @@ -0,0 +1,293 @@ +""" +Tests for SerializableSet + +Comprehensive test suite covering all functionality of the SerializableSet class +including adaptive serialization, set operations, and edge cases. 
+""" + +import pytest +from src.utils.serializable_set import SerializableSet + + +class TestSerializableSet: + """Test suite for SerializableSet class.""" + + @pytest.mark.unit + def test_initialization_empty(self): + """Test creating an empty SerializableSet.""" + ss = SerializableSet() + assert len(ss) == 0 + assert isinstance(ss, set) + assert not ss + + @pytest.mark.unit + def test_initialization_with_values(self): + """Test creating SerializableSet with initial values.""" + values = [1, 2, 3, 5, 8] + ss = SerializableSet(values) + assert len(ss) == 5 + assert 3 in ss + assert 4 not in ss + assert sorted(ss) == [1, 2, 3, 5, 8] + + @pytest.mark.unit + def test_set_operations(self): + """Test basic set operations.""" + ss = SerializableSet([1, 2, 3]) + + # Add + ss.add(4) + assert 4 in ss + assert len(ss) == 4 + + # Remove + ss.remove(2) + assert 2 not in ss + assert len(ss) == 3 + + # Discard + ss.discard(10) # Should not raise error + ss.discard(1) + assert 1 not in ss + assert len(ss) == 2 + + # Update + ss.update([5, 6, 7]) + assert sorted(ss) == [3, 4, 5, 6, 7] + + # Clear + ss.clear() + assert len(ss) == 0 + + @pytest.mark.unit + def test_set_operators(self): + """Test set operators (union, intersection, etc.).""" + ss1 = SerializableSet([1, 2, 3, 4]) + ss2 = SerializableSet([3, 4, 5, 6]) + + # Union + union = ss1 | ss2 + assert sorted(union) == [1, 2, 3, 4, 5, 6] + + # Intersection + intersection = ss1 & ss2 + assert sorted(intersection) == [3, 4] + + # Difference + diff = ss1 - ss2 + assert sorted(diff) == [1, 2] + + # Symmetric difference + sym_diff = ss1 ^ ss2 + assert sorted(sym_diff) == [1, 2, 5, 6] + + @pytest.mark.unit + def test_equality(self): + """Test equality comparisons.""" + ss1 = SerializableSet([1, 2, 3]) + ss2 = SerializableSet([3, 1, 2]) + ss3 = SerializableSet([1, 2, 4]) + regular_set = {1, 2, 3} + + assert ss1 == ss2 + assert ss1 != ss3 + assert ss1 == regular_set + assert ss1 != [1, 2, 3] # Different type + + @pytest.mark.unit + def test_build_ranges(self): + """Test the internal _build_ranges method.""" + ss = SerializableSet() + + # Empty + ranges = ss._build_ranges([]) + assert ranges == [] + + # Single value + ranges = ss._build_ranges([5]) + assert ranges == [(5, 5)] + + # Consecutive sequence + ranges = ss._build_ranges([1, 2, 3, 4, 5]) + assert ranges == [(1, 5)] + + # Multiple ranges + ranges = ss._build_ranges([1, 2, 3, 7, 8, 10]) + assert ranges == [(1, 3), (7, 8), (10, 10)] + + # Sparse values + ranges = ss._build_ranges([1, 5, 10, 20]) + assert ranges == [(1, 1), (5, 5), (10, 10), (20, 20)] + + @pytest.mark.unit + def test_varint_encoding(self): + """Test varint encoding and decoding.""" + # Test small values (1 byte) + for value in [0, 1, 127]: + encoded = SerializableSet._encode_varint(value) + decoded, offset = SerializableSet._decode_varint(encoded, 0) + assert decoded == value + assert offset == len(encoded) + + # Test medium values (2 bytes) + for value in [128, 255, 16383]: + encoded = SerializableSet._encode_varint(value) + decoded, offset = SerializableSet._decode_varint(encoded, 0) + assert decoded == value + assert offset == len(encoded) + + # Test large values + for value in [16384, 65535, 1048575]: + encoded = SerializableSet._encode_varint(value) + decoded, offset = SerializableSet._decode_varint(encoded, 0) + assert decoded == value + assert offset == len(encoded) + + @pytest.mark.unit + def test_serialization_empty(self): + """Test serialization of empty set.""" + ss = SerializableSet() + serialized = ss.serialize() + deserialized 
= SerializableSet.deserialize(serialized) + + assert ss == deserialized + assert len(deserialized) == 0 + + @pytest.mark.unit + def test_serialization_single_value(self): + """Test serialization of single value.""" + ss = SerializableSet([42]) + serialized = ss.serialize() + deserialized = SerializableSet.deserialize(serialized) + + assert ss == deserialized + assert 42 in deserialized + assert len(deserialized) == 1 + + @pytest.mark.unit + def test_serialization_consecutive_values(self): + """Test serialization with consecutive values (should prefer RLE).""" + # Large consecutive range should use run-length encoding + ss = SerializableSet(range(1, 1001)) # 1000 consecutive numbers + serialized = ss.serialize() + deserialized = SerializableSet.deserialize(serialized) + + assert ss == deserialized + assert len(deserialized) == 1000 + assert min(deserialized) == 1 + assert max(deserialized) == 1000 + + # Should be very compact (RLE encoding) + assert len(serialized) < 20 # Much smaller than 1000 * varint_size + + @pytest.mark.unit + def test_serialization_sparse_values(self): + """Test serialization with sparse values (should prefer direct list).""" + # Sparse values should use direct encoding + sparse_values = [1, 100, 1000, 10000, 100000] + ss = SerializableSet(sparse_values) + serialized = ss.serialize() + deserialized = SerializableSet.deserialize(serialized) + + assert ss == deserialized + assert sorted(deserialized) == sparse_values + + @pytest.mark.unit + def test_serialization_mixed_ranges(self): + """Test serialization with mixed consecutive and sparse values.""" + # Mix of ranges and sparse values + values = list(range(1, 11)) + list(range(50, 61)) + [100, 200, 300] + ss = SerializableSet(values) + serialized = ss.serialize() + deserialized = SerializableSet.deserialize(serialized) + + assert ss == deserialized + assert len(deserialized) == len(values) + + @pytest.mark.unit + def test_serialization_adaptive_strategy(self): + """Test that serialization chooses the most efficient strategy.""" + # Test that RLE is chosen for consecutive data + consecutive_ss = SerializableSet(range(1, 100)) + consecutive_serialized = consecutive_ss.serialize() + + # Test that direct list is chosen for sparse data + sparse_ss = SerializableSet([1, 1000, 10000, 100000, 1000000]) + sparse_serialized = sparse_ss.serialize() + + # Consecutive should be more compact + assert len(consecutive_serialized) < 50 # Very compact with RLE + + # Both should deserialize correctly + assert consecutive_ss == SerializableSet.deserialize(consecutive_serialized) + assert sparse_ss == SerializableSet.deserialize(sparse_serialized) + + @pytest.mark.unit + def test_deserialization_invalid_data(self): + """Test deserialization with invalid data.""" + # Empty data + empty_ss = SerializableSet.deserialize(b"") + assert len(empty_ss) == 0 + + # Invalid encoding type + with pytest.raises(ValueError, match="Unknown encoding type"): + SerializableSet.deserialize(bytes([99, 1, 2, 3])) + + # Incomplete varint + with pytest.raises(ValueError, match="Incomplete varint"): + SerializableSet.deserialize(bytes([1, 0xFF])) # Incomplete varint + + @pytest.mark.unit + def test_repr_and_str(self): + """Test string representations.""" + ss = SerializableSet([3, 1, 2]) + + # __repr__ should show sorted values + assert repr(ss) == "SerializableSet([1, 2, 3])" + + # __str__ should show count + assert str(ss) == "SerializableSet(3 values)" + + # Empty set + empty_ss = SerializableSet() + assert repr(empty_ss) == "SerializableSet([])" + assert 
str(empty_ss) == "SerializableSet(0 values)" + + @pytest.mark.unit + def test_copy_and_iteration(self): + """Test copy and iteration functionality.""" + original = SerializableSet([1, 2, 3, 4, 5]) + + # Copy (inherited from set) + copied = original.copy() + assert copied == original + assert copied is not original + assert isinstance(copied, SerializableSet) + + # Iteration + values = list(original) + assert sorted(values) == [1, 2, 3, 4, 5] + + # Iteration is same as set iteration + set_values = list(set([1, 2, 3, 4, 5])) + assert sorted(values) == sorted(set_values) + + @pytest.mark.unit + def test_large_dataset_performance(self): + """Test performance with larger datasets.""" + # Create a large dataset with mixed patterns + large_values = ( + list(range(1, 1000)) + # Consecutive range + list(range(10000, 10100)) + # Another consecutive range + [50000, 60000, 70000, 80000] # Sparse values + ) + + ss = SerializableSet(large_values) + serialized = ss.serialize() + deserialized = SerializableSet.deserialize(serialized) + + assert ss == deserialized + assert len(deserialized) == len(large_values) + + # Should be reasonably compact + assert len(serialized) < len(large_values) * 4 # Much better than 4 bytes per value \ No newline at end of file From cc080b363777c7c91045e623efc8212a32e97c04 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Fri, 28 Nov 2025 13:20:54 +0100 Subject: [PATCH 27/35] fix: review --- poetry.lock | 604 ++++++++++++------ pyproject.toml | 7 +- src/main.py | 17 +- src/modules/csm/csm.py | 87 ++- .../__init__.py | 0 src/modules/performance/collector/__init__.py | 0 .../collector}/checkpoint.py | 10 +- .../collector/collector.py} | 41 +- src/modules/performance/common/__init__.py | 0 src/modules/performance/common/db.py | 189 ++++++ src/modules/performance/common/types.py | 19 + src/modules/performance/web/__init__.py | 0 src/modules/performance/web/metrics.py | 32 + src/modules/performance/web/server.py | 122 ++++ src/modules/performance_collector/codec.py | 132 ---- src/modules/performance_collector/db.py | 155 ----- .../performance_collector/http_server.py | 168 ----- src/providers/http_provider.py | 108 +++- src/providers/performance/client.py | 72 +-- src/types.py | 1 + src/utils/serializable_set.py | 164 ----- src/variables.py | 29 +- 22 files changed, 1028 insertions(+), 929 deletions(-) rename src/modules/{performance_collector => performance}/__init__.py (100%) create mode 100644 src/modules/performance/collector/__init__.py rename src/modules/{performance_collector => performance/collector}/checkpoint.py (98%) rename src/modules/{performance_collector/performance_collector.py => performance/collector/collector.py} (80%) create mode 100644 src/modules/performance/common/__init__.py create mode 100644 src/modules/performance/common/db.py create mode 100644 src/modules/performance/common/types.py create mode 100644 src/modules/performance/web/__init__.py create mode 100644 src/modules/performance/web/metrics.py create mode 100644 src/modules/performance/web/server.py delete mode 100644 src/modules/performance_collector/codec.py delete mode 100644 src/modules/performance_collector/db.py delete mode 100644 src/modules/performance_collector/http_server.py delete mode 100644 src/utils/serializable_set.py diff --git a/poetry.lock b/poetry.lock index 84b6d1358..869f4f603 100644 --- a/poetry.lock +++ b/poetry.lock @@ -135,6 +135,18 @@ files = [ [package.dependencies] frozenlist = ">=1.1.0" +[[package]] +name = "annotated-doc" +version = "0.0.4" +description = "Document 
parameters, class attributes, return types, and variables inline, with Annotated." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320"}, + {file = "annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4"}, +] + [[package]] name = "annotated-types" version = "0.7.0" @@ -147,6 +159,26 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[[package]] +name = "anyio" +version = "4.11.0" +description = "High-level concurrency and networking framework on top of asyncio or Trio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc"}, + {file = "anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4"}, +] + +[package.dependencies] +idna = ">=2.8" +sniffio = ">=1.1" +typing_extensions = {version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +trio = ["trio (>=0.31.0)"] + [[package]] name = "astroid" version = "3.3.10" @@ -418,18 +450,6 @@ d = ["aiohttp (>=3.10)"] jupyter = ["ipython (>=7.8.0)", "tokenize-rt (>=3.2.0)"] uvloop = ["uvloop (>=0.15.2)"] -[[package]] -name = "blinker" -version = "1.9.0" -description = "Fast, simple object-to-object and broadcast signaling" -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "blinker-1.9.0-py3-none-any.whl", hash = "sha256:ba0efaa9080b619ff2f3459d1d500c57bddea4a6b424b60a91141db6fd2f08bc"}, - {file = "blinker-1.9.0.tar.gz", hash = "sha256:b4ce2265a7abece45e7cc896e98dbebe6cead56bcf805a3d23136d145f5445bf"}, -] - [[package]] name = "certifi" version = "2025.4.26" @@ -1219,6 +1239,29 @@ files = [ [package.dependencies] tzdata = "*" +[[package]] +name = "fastapi" +version = "0.121.3" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "fastapi-0.121.3-py3-none-any.whl", hash = "sha256:0c78fc87587fcd910ca1bbf5bc8ba37b80e119b388a7206b39f0ecc95ebf53e9"}, + {file = "fastapi-0.121.3.tar.gz", hash = "sha256:0055bc24fe53e56a40e9e0ad1ae2baa81622c406e548e501e717634e2dfbc40b"}, +] + +[package.dependencies] +annotated-doc = ">=0.0.2" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.40.0,<0.51.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] +standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] + 
[[package]] name = "filelock" version = "3.18.0" @@ -1236,30 +1279,6 @@ docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3) testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] -[[package]] -name = "flask" -version = "3.1.2" -description = "A simple framework for building complex web applications." -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "flask-3.1.2-py3-none-any.whl", hash = "sha256:ca1d8112ec8a6158cc29ea4858963350011b5c846a414cdb7a954aa9e967d03c"}, - {file = "flask-3.1.2.tar.gz", hash = "sha256:bf656c15c80190ed628ad08cdfd3aaa35beb087855e2f494910aa3774cc4fd87"}, -] - -[package.dependencies] -blinker = ">=1.9.0" -click = ">=8.1.3" -itsdangerous = ">=2.2.0" -jinja2 = ">=3.1.2" -markupsafe = ">=2.1.1" -werkzeug = ">=3.1.0" - -[package.extras] -async = ["asgiref (>=3.2)"] -dotenv = ["python-dotenv"] - [[package]] name = "frozenlist" version = "1.6.2" @@ -1374,6 +1393,99 @@ files = [ {file = "frozenlist-1.6.2.tar.gz", hash = "sha256:effc641518696471cf4962e8e32050133bc1f7b2851ae8fd0cb8797dd70dc202"}, ] +[[package]] +name = "greenlet" +version = "3.2.4" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" +files = [ + {file = "greenlet-3.2.4-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8c68325b0d0acf8d91dde4e6f930967dd52a5302cd4062932a6b2e7c2969f47c"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:94385f101946790ae13da500603491f04a76b6e4c059dab271b3ce2e283b2590"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f10fd42b5ee276335863712fa3da6608e93f70629c631bf77145021600abc23c"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c8c9e331e58180d0d83c5b7999255721b725913ff6bc6cf39fa2a45841a4fd4b"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:58b97143c9cc7b86fc458f215bd0932f1757ce649e05b640fea2e79b54cedb31"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2ca18a03a8cfb5b25bc1cbe20f3d9a4c80d8c3b13ba3df49ac3961af0b1018d"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9fe0a28a7b952a21e2c062cd5756d34354117796c6d9215a87f55e38d15402c5"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8854167e06950ca75b898b104b63cc646573aa5fef1353d4508ecdd1ee76254f"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f47617f698838ba98f4ff4189aef02e7343952df3a615f847bb575c3feb177a7"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af41be48a4f60429d5cad9d22175217805098a9ef7c40bfef44f7669fb9d74d8"}, + {file = "greenlet-3.2.4-cp310-cp310-win_amd64.whl", hash = "sha256:73f49b5368b5359d04e18d15828eecc1806033db5233397748f4ca813ff1056c"}, + {file = 
"greenlet-3.2.4-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:96378df1de302bc38e99c3a9aa311967b7dc80ced1dcc6f171e99842987882a2"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1ee8fae0519a337f2329cb78bd7a8e128ec0f881073d43f023c7b8d4831d5246"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:94abf90142c2a18151632371140b3dba4dee031633fe614cb592dbb6c9e17bc3"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:4d1378601b85e2e5171b99be8d2dc85f594c79967599328f95c1dc1a40f1c633"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0db5594dce18db94f7d1650d7489909b57afde4c580806b8d9203b6e79cdc079"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2523e5246274f54fdadbce8494458a2ebdcdbc7b802318466ac5606d3cded1f8"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1987de92fec508535687fb807a5cea1560f6196285a4cde35c100b8cd632cc52"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:55e9c5affaa6775e2c6b67659f3a71684de4c549b3dd9afca3bc773533d284fa"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9c6de1940a7d828635fbd254d69db79e54619f165ee7ce32fda763a9cb6a58c"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03c5136e7be905045160b1b9fdca93dd6727b180feeafda6818e6496434ed8c5"}, + {file = "greenlet-3.2.4-cp311-cp311-win_amd64.whl", hash = "sha256:9c40adce87eaa9ddb593ccb0fa6a07caf34015a29bf8d344811665b573138db9"}, + {file = "greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = "sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d"}, + {file = "greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02"}, + {file = "greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = 
"sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929"}, + {file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"}, + {file = "greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337"}, + {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269"}, + {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681"}, + {file = "greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01"}, + {file = "greenlet-3.2.4-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:b6a7c19cf0d2742d0809a4c05975db036fdff50cd294a93632d6a310bf9ac02c"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:27890167f55d2387576d1f41d9487ef171849ea0359ce1510ca6e06c8bece11d"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = 
"sha256:18d9260df2b5fbf41ae5139e1be4e796d99655f023a636cd0e11e6406cca7d58"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:671df96c1f23c4a0d4077a325483c1503c96a1b7d9db26592ae770daa41233d4"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:16458c245a38991aa19676900d48bd1a6f2ce3e16595051a4db9d012154e8433"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9913f1a30e4526f432991f89ae263459b1c64d1608c0d22a5c79c287b3c70df"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b90654e092f928f110e0007f572007c9727b5265f7632c2fa7415b4689351594"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:81701fd84f26330f0d5f4944d4e92e61afe6319dcd9775e39396e39d7c3e5f98"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:28a3c6b7cd72a96f61b0e4b2a36f681025b60ae4779cc73c1535eb5f29560b10"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:52206cd642670b0b320a1fd1cbfd95bca0e043179c1d8a045f2c6109dfe973be"}, + {file = "greenlet-3.2.4-cp39-cp39-win32.whl", hash = "sha256:65458b409c1ed459ea899e939f0e1cdb14f58dbc803f2f93c5eab5694d32671b"}, + {file = "greenlet-3.2.4-cp39-cp39-win_amd64.whl", hash = "sha256:d2e685ade4dafd447ede19c31277a224a239a0a1a4eca4e6390efedf20260cfb"}, + {file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil", "setuptools"] + +[[package]] +name = "h11" +version = "0.16.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, +] + [[package]] name = "hexbytes" version = "1.3.1" @@ -1546,18 +1658,6 @@ files = [ colors = ["colorama"] plugins = ["setuptools"] -[[package]] -name = "itsdangerous" -version = "2.2.0" -description = "Safely pass data to untrusted environments and back." -optional = false -python-versions = ">=3.8" -groups = ["main"] -files = [ - {file = "itsdangerous-2.2.0-py3-none-any.whl", hash = "sha256:c6242fc49e35958c8b15141343aa660db5fc54d4f13a1db01a3f5891b98700ef"}, - {file = "itsdangerous-2.2.0.tar.gz", hash = "sha256:e0050c0b7da1eea53ffaf149c0cfbb5c6e2e2b69c4bef22c81fa6eb73e5f6173"}, -] - [[package]] name = "jedi" version = "0.19.2" @@ -1578,24 +1678,6 @@ docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alab qa = ["flake8 (==5.0.4)", "mypy (==0.971)", "types-setuptools (==67.2.0.1)"] testing = ["Django", "attrs", "colorama", "docopt", "pytest (<9.0.0)"] -[[package]] -name = "jinja2" -version = "3.1.6" -description = "A very fast and expressive template engine." 
-optional = false -python-versions = ">=3.7" -groups = ["main"] -files = [ - {file = "jinja2-3.1.6-py3-none-any.whl", hash = "sha256:85ece4451f492d0c13c5dd7c13a64681a86afae63a5f347908daf103ce6d2f67"}, - {file = "jinja2-3.1.6.tar.gz", hash = "sha256:0137fb05990d35f1275a587e9aee6d56da821fc83491a0fb838183be43f66d6d"}, -] - -[package.dependencies] -MarkupSafe = ">=2.0" - -[package.extras] -i18n = ["Babel (>=2.7)"] - [[package]] name = "json-stream" version = "2.3.3" @@ -1710,105 +1792,6 @@ files = [ benchmark = ["contexttimer (>=0.3,<0.4)", "json-stream-to-standard-types (>=0.1,<0.2)", "si-prefix (>=1.2,<2)", "tqdm (>=4.64,<5)", "typer (>=0.6,<0.7)"] test = ["json-stream (==2.3.2)", "json-stream-rs-tokenizer[benchmark]", "pytest (>7.1,<8)"] -[[package]] -name = "markupsafe" -version = "3.0.3" -description = "Safely add untrusted strings to HTML/XML markup." -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "markupsafe-3.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:2f981d352f04553a7171b8e44369f2af4055f888dfb147d55e42d29e29e74559"}, - {file = "markupsafe-3.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e1c1493fb6e50ab01d20a22826e57520f1284df32f2d8601fdd90b6304601419"}, - {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1ba88449deb3de88bd40044603fafffb7bc2b055d626a330323a9ed736661695"}, - {file = "markupsafe-3.0.3-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f42d0984e947b8adf7dd6dde396e720934d12c506ce84eea8476409563607591"}, - {file = "markupsafe-3.0.3-cp310-cp310-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:c0c0b3ade1c0b13b936d7970b1d37a57acde9199dc2aecc4c336773e1d86049c"}, - {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:0303439a41979d9e74d18ff5e2dd8c43ed6c6001fd40e5bf2e43f7bd9bbc523f"}, - {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:d2ee202e79d8ed691ceebae8e0486bd9a2cd4794cec4824e1c99b6f5009502f6"}, - {file = "markupsafe-3.0.3-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:177b5253b2834fe3678cb4a5f0059808258584c559193998be2601324fdeafb1"}, - {file = "markupsafe-3.0.3-cp310-cp310-win32.whl", hash = "sha256:2a15a08b17dd94c53a1da0438822d70ebcd13f8c3a95abe3a9ef9f11a94830aa"}, - {file = "markupsafe-3.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:c4ffb7ebf07cfe8931028e3e4c85f0357459a3f9f9490886198848f4fa002ec8"}, - {file = "markupsafe-3.0.3-cp310-cp310-win_arm64.whl", hash = "sha256:e2103a929dfa2fcaf9bb4e7c091983a49c9ac3b19c9061b6d5427dd7d14d81a1"}, - {file = "markupsafe-3.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1cc7ea17a6824959616c525620e387f6dd30fec8cb44f649e31712db02123dad"}, - {file = "markupsafe-3.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4bd4cd07944443f5a265608cc6aab442e4f74dff8088b0dfc8238647b8f6ae9a"}, - {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:6b5420a1d9450023228968e7e6a9ce57f65d148ab56d2313fcd589eee96a7a50"}, - {file = "markupsafe-3.0.3-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:0bf2a864d67e76e5c9a34dc26ec616a66b9888e25e7b9460e1c76d3293bd9dbf"}, - {file = "markupsafe-3.0.3-cp311-cp311-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:bc51efed119bc9cfdf792cdeaa4d67e8f6fcccab66ed4bfdd6bde3e59bfcbb2f"}, - {file = 
"markupsafe-3.0.3-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:068f375c472b3e7acbe2d5318dea141359e6900156b5b2ba06a30b169086b91a"}, - {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:7be7b61bb172e1ed687f1754f8e7484f1c8019780f6f6b0786e76bb01c2ae115"}, - {file = "markupsafe-3.0.3-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:f9e130248f4462aaa8e2552d547f36ddadbeaa573879158d721bbd33dfe4743a"}, - {file = "markupsafe-3.0.3-cp311-cp311-win32.whl", hash = "sha256:0db14f5dafddbb6d9208827849fad01f1a2609380add406671a26386cdf15a19"}, - {file = "markupsafe-3.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:de8a88e63464af587c950061a5e6a67d3632e36df62b986892331d4620a35c01"}, - {file = "markupsafe-3.0.3-cp311-cp311-win_arm64.whl", hash = "sha256:3b562dd9e9ea93f13d53989d23a7e775fdfd1066c33494ff43f5418bc8c58a5c"}, - {file = "markupsafe-3.0.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d53197da72cc091b024dd97249dfc7794d6a56530370992a5e1a08983ad9230e"}, - {file = "markupsafe-3.0.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:1872df69a4de6aead3491198eaf13810b565bdbeec3ae2dc8780f14458ec73ce"}, - {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:3a7e8ae81ae39e62a41ec302f972ba6ae23a5c5396c8e60113e9066ef893da0d"}, - {file = "markupsafe-3.0.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:d6dd0be5b5b189d31db7cda48b91d7e0a9795f31430b7f271219ab30f1d3ac9d"}, - {file = "markupsafe-3.0.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:94c6f0bb423f739146aec64595853541634bde58b2135f27f61c1ffd1cd4d16a"}, - {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:be8813b57049a7dc738189df53d69395eba14fb99345e0a5994914a3864c8a4b"}, - {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:83891d0e9fb81a825d9a6d61e3f07550ca70a076484292a70fde82c4b807286f"}, - {file = "markupsafe-3.0.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:77f0643abe7495da77fb436f50f8dab76dbc6e5fd25d39589a0f1fe6548bfa2b"}, - {file = "markupsafe-3.0.3-cp312-cp312-win32.whl", hash = "sha256:d88b440e37a16e651bda4c7c2b930eb586fd15ca7406cb39e211fcff3bf3017d"}, - {file = "markupsafe-3.0.3-cp312-cp312-win_amd64.whl", hash = "sha256:26a5784ded40c9e318cfc2bdb30fe164bdb8665ded9cd64d500a34fb42067b1c"}, - {file = "markupsafe-3.0.3-cp312-cp312-win_arm64.whl", hash = "sha256:35add3b638a5d900e807944a078b51922212fb3dedb01633a8defc4b01a3c85f"}, - {file = "markupsafe-3.0.3-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e1cf1972137e83c5d4c136c43ced9ac51d0e124706ee1c8aa8532c1287fa8795"}, - {file = "markupsafe-3.0.3-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:116bb52f642a37c115f517494ea5feb03889e04df47eeff5b130b1808ce7c219"}, - {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:133a43e73a802c5562be9bbcd03d090aa5a1fe899db609c29e8c8d815c5f6de6"}, - {file = "markupsafe-3.0.3-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:ccfcd093f13f0f0b7fdd0f198b90053bf7b2f02a3927a30e63f3ccc9df56b676"}, - {file = "markupsafe-3.0.3-cp313-cp313-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:509fa21c6deb7a7a273d629cf5ec029bc209d1a51178615ddf718f5918992ab9"}, - {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_aarch64.whl", hash = 
"sha256:a4afe79fb3de0b7097d81da19090f4df4f8d3a2b3adaa8764138aac2e44f3af1"}, - {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:795e7751525cae078558e679d646ae45574b47ed6e7771863fcc079a6171a0fc"}, - {file = "markupsafe-3.0.3-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:8485f406a96febb5140bfeca44a73e3ce5116b2501ac54fe953e488fb1d03b12"}, - {file = "markupsafe-3.0.3-cp313-cp313-win32.whl", hash = "sha256:bdd37121970bfd8be76c5fb069c7751683bdf373db1ed6c010162b2a130248ed"}, - {file = "markupsafe-3.0.3-cp313-cp313-win_amd64.whl", hash = "sha256:9a1abfdc021a164803f4d485104931fb8f8c1efd55bc6b748d2f5774e78b62c5"}, - {file = "markupsafe-3.0.3-cp313-cp313-win_arm64.whl", hash = "sha256:7e68f88e5b8799aa49c85cd116c932a1ac15caaa3f5db09087854d218359e485"}, - {file = "markupsafe-3.0.3-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:218551f6df4868a8d527e3062d0fb968682fe92054e89978594c28e642c43a73"}, - {file = "markupsafe-3.0.3-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3524b778fe5cfb3452a09d31e7b5adefeea8c5be1d43c4f810ba09f2ceb29d37"}, - {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:4e885a3d1efa2eadc93c894a21770e4bc67899e3543680313b09f139e149ab19"}, - {file = "markupsafe-3.0.3-cp313-cp313t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:8709b08f4a89aa7586de0aadc8da56180242ee0ada3999749b183aa23df95025"}, - {file = "markupsafe-3.0.3-cp313-cp313t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:b8512a91625c9b3da6f127803b166b629725e68af71f8184ae7e7d54686a56d6"}, - {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:9b79b7a16f7fedff2495d684f2b59b0457c3b493778c9eed31111be64d58279f"}, - {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_riscv64.whl", hash = "sha256:12c63dfb4a98206f045aa9563db46507995f7ef6d83b2f68eda65c307c6829eb"}, - {file = "markupsafe-3.0.3-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:8f71bc33915be5186016f675cd83a1e08523649b0e33efdb898db577ef5bb009"}, - {file = "markupsafe-3.0.3-cp313-cp313t-win32.whl", hash = "sha256:69c0b73548bc525c8cb9a251cddf1931d1db4d2258e9599c28c07ef3580ef354"}, - {file = "markupsafe-3.0.3-cp313-cp313t-win_amd64.whl", hash = "sha256:1b4b79e8ebf6b55351f0d91fe80f893b4743f104bff22e90697db1590e47a218"}, - {file = "markupsafe-3.0.3-cp313-cp313t-win_arm64.whl", hash = "sha256:ad2cf8aa28b8c020ab2fc8287b0f823d0a7d8630784c31e9ee5edea20f406287"}, - {file = "markupsafe-3.0.3-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:eaa9599de571d72e2daf60164784109f19978b327a3910d3e9de8c97b5b70cfe"}, - {file = "markupsafe-3.0.3-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:c47a551199eb8eb2121d4f0f15ae0f923d31350ab9280078d1e5f12b249e0026"}, - {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:f34c41761022dd093b4b6896d4810782ffbabe30f2d443ff5f083e0cbbb8c737"}, - {file = "markupsafe-3.0.3-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:457a69a9577064c05a97c41f4e65148652db078a3a509039e64d3467b9e7ef97"}, - {file = "markupsafe-3.0.3-cp314-cp314-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e8afc3f2ccfa24215f8cb28dcf43f0113ac3c37c2f0f0806d8c70e4228c5cf4d"}, - {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:ec15a59cf5af7be74194f7ab02d0f59a62bdcf1a537677ce67a2537c9b87fcda"}, - {file = 
"markupsafe-3.0.3-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:0eb9ff8191e8498cca014656ae6b8d61f39da5f95b488805da4bb029cccbfbaf"}, - {file = "markupsafe-3.0.3-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:2713baf880df847f2bece4230d4d094280f4e67b1e813eec43b4c0e144a34ffe"}, - {file = "markupsafe-3.0.3-cp314-cp314-win32.whl", hash = "sha256:729586769a26dbceff69f7a7dbbf59ab6572b99d94576a5592625d5b411576b9"}, - {file = "markupsafe-3.0.3-cp314-cp314-win_amd64.whl", hash = "sha256:bdc919ead48f234740ad807933cdf545180bfbe9342c2bb451556db2ed958581"}, - {file = "markupsafe-3.0.3-cp314-cp314-win_arm64.whl", hash = "sha256:5a7d5dc5140555cf21a6fefbdbf8723f06fcd2f63ef108f2854de715e4422cb4"}, - {file = "markupsafe-3.0.3-cp314-cp314t-macosx_10_13_x86_64.whl", hash = "sha256:1353ef0c1b138e1907ae78e2f6c63ff67501122006b0f9abad68fda5f4ffc6ab"}, - {file = "markupsafe-3.0.3-cp314-cp314t-macosx_11_0_arm64.whl", hash = "sha256:1085e7fbddd3be5f89cc898938f42c0b3c711fdcb37d75221de2666af647c175"}, - {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:1b52b4fb9df4eb9ae465f8d0c228a00624de2334f216f178a995ccdcf82c4634"}, - {file = "markupsafe-3.0.3-cp314-cp314t-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:fed51ac40f757d41b7c48425901843666a6677e3e8eb0abcff09e4ba6e664f50"}, - {file = "markupsafe-3.0.3-cp314-cp314t-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:f190daf01f13c72eac4efd5c430a8de82489d9cff23c364c3ea822545032993e"}, - {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_aarch64.whl", hash = "sha256:e56b7d45a839a697b5eb268c82a71bd8c7f6c94d6fd50c3d577fa39a9f1409f5"}, - {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_riscv64.whl", hash = "sha256:f3e98bb3798ead92273dc0e5fd0f31ade220f59a266ffd8a4f6065e0a3ce0523"}, - {file = "markupsafe-3.0.3-cp314-cp314t-musllinux_1_2_x86_64.whl", hash = "sha256:5678211cb9333a6468fb8d8be0305520aa073f50d17f089b5b4b477ea6e67fdc"}, - {file = "markupsafe-3.0.3-cp314-cp314t-win32.whl", hash = "sha256:915c04ba3851909ce68ccc2b8e2cd691618c4dc4c4232fb7982bca3f41fd8c3d"}, - {file = "markupsafe-3.0.3-cp314-cp314t-win_amd64.whl", hash = "sha256:4faffd047e07c38848ce017e8725090413cd80cbc23d86e55c587bf979e579c9"}, - {file = "markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa"}, - {file = "markupsafe-3.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:15d939a21d546304880945ca1ecb8a039db6b4dc49b2c5a400387cdae6a62e26"}, - {file = "markupsafe-3.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f71a396b3bf33ecaa1626c255855702aca4d3d9fea5e051b41ac59a9c1c41edc"}, - {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:0f4b68347f8c5eab4a13419215bdfd7f8c9b19f2b25520968adfad23eb0ce60c"}, - {file = "markupsafe-3.0.3-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:e8fc20152abba6b83724d7ff268c249fa196d8259ff481f3b1476383f8f24e42"}, - {file = "markupsafe-3.0.3-cp39-cp39-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:949b8d66bc381ee8b007cd945914c721d9aba8e27f71959d750a46f7c282b20b"}, - {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:3537e01efc9d4dccdf77221fb1cb3b8e1a38d5428920e0657ce299b20324d758"}, - {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_riscv64.whl", hash = 
"sha256:591ae9f2a647529ca990bc681daebdd52c8791ff06c2bfa05b65163e28102ef2"}, - {file = "markupsafe-3.0.3-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:a320721ab5a1aba0a233739394eb907f8c8da5c98c9181d1161e77a0c8e36f2d"}, - {file = "markupsafe-3.0.3-cp39-cp39-win32.whl", hash = "sha256:df2449253ef108a379b8b5d6b43f4b1a8e81a061d6537becd5582fba5f9196d7"}, - {file = "markupsafe-3.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:7c3fb7d25180895632e5d3148dbdc29ea38ccb7fd210aa27acbd1201a1902c6e"}, - {file = "markupsafe-3.0.3-cp39-cp39-win_arm64.whl", hash = "sha256:38664109c14ffc9e7437e86b4dceb442b0096dfe3541d7864d9cbe1da4cf36c8"}, - {file = "markupsafe-3.0.3.tar.gz", hash = "sha256:722695808f4b6457b320fdc131280796bdceb04ab50fe1795cd540799ebe1698"}, -] - [[package]] name = "matplotlib-inline" version = "0.1.7" @@ -2296,6 +2279,22 @@ files = [ [package.extras] twisted = ["twisted"] +[[package]] +name = "prometheus-fastapi-instrumentator" +version = "7.1.0" +description = "Instrument your FastAPI app with Prometheus metrics" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "prometheus_fastapi_instrumentator-7.1.0-py3-none-any.whl", hash = "sha256:978130f3c0bb7b8ebcc90d35516a6fe13e02d2eb358c8f83887cdef7020c31e9"}, + {file = "prometheus_fastapi_instrumentator-7.1.0.tar.gz", hash = "sha256:be7cd61eeea4e5912aeccb4261c6631b3f227d8924542d79eaf5af3f439cbe5e"}, +] + +[package.dependencies] +prometheus-client = ">=0.8.0,<1.0.0" +starlette = ">=0.30.0,<1.0.0" + [[package]] name = "prompt-toolkit" version = "3.0.51" @@ -2438,6 +2437,83 @@ files = [ {file = "protobuf-6.31.1.tar.gz", hash = "sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a"}, ] +[[package]] +name = "psycopg2-binary" +version = "2.9.11" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "psycopg2-binary-2.9.11.tar.gz", hash = "sha256:b6aed9e096bf63f9e75edf2581aa9a7e7186d97ab5c177aa6c87797cd591236c"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6fe6b47d0b42ce1c9f1fa3e35bb365011ca22e39db37074458f27921dca40f2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c0e4262e089516603a09474ee13eabf09cb65c332277e39af68f6233911087"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c47676e5b485393f069b4d7a811267d3168ce46f988fa602658b8bb901e9e64d"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:a28d8c01a7b27a1e3265b11250ba7557e5f72b5ee9e5f3a2fa8d2949c29bf5d2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f3f2732cf504a1aa9e9609d02f79bea1067d99edf844ab92c247bbca143303b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:865f9945ed1b3950d968ec4690ce68c55019d79e4497366d36e090327ce7db14"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91537a8df2bde69b1c1db01d6d944c831ca793952e4f57892600e96cee95f2cd"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dca1f356a67ecb68c81a7bc7809f1569ad9e152ce7fd02c2f2036862ca9f66b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0da4de5c1ac69d94ed4364b6cbe7190c1a70d325f112ba783d83f8440285f152"}, + {file = 
"psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:37d8412565a7267f7d79e29ab66876e55cb5e8e7b3bbf94f8206f6795f8f7e7e"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:c665f01ec8ab273a61c62beeb8cce3014c214429ced8a308ca1fc410ecac3a39"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0e8480afd62362d0a6a27dd09e4ca2def6fa50ed3a4e7c09165266106b2ffa10"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:763c93ef1df3da6d1a90f86ea7f3f806dc06b21c198fa87c3c25504abec9404a"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e164359396576a3cc701ba8af4751ae68a07235d7a380c631184a611220d9a4"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d57c9c387660b8893093459738b6abddbb30a7eab058b77b0d0d1c7d521ddfd7"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2c226ef95eb2250974bf6fa7a842082b31f68385c4f3268370e3f3870e7859ee"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a311f1edc9967723d3511ea7d2708e2c3592e3405677bf53d5c7246753591fbb"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ebb415404821b6d1c47353ebe9c8645967a5235e6d88f914147e7fd411419e6f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f07c9c4a5093258a03b28fab9b4f151aa376989e7f35f855088234e656ee6a94"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:00ce1830d971f43b667abe4a56e42c1e2d594b32da4802e44a73bacacb25535f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cffe9d7697ae7456649617e8bb8d7a45afb71cd13f7ab22af3e5c61f04840908"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-win_amd64.whl", hash = "sha256:304fd7b7f97eef30e91b8f7e720b3db75fee010b520e434ea35ed1ff22501d03"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:be9b840ac0525a283a96b556616f5b4820e0526addb8dcf6525a0fa162730be4"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f090b7ddd13ca842ebfe301cd587a76a4cf0913b1e429eb92c1be5dbeb1a19bc"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ab8905b5dcb05bf3fb22e0cf90e10f469563486ffb6a96569e51f897c750a76a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bf940cd7e7fec19181fdbc29d76911741153d51cab52e5c21165f3262125685e"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa0f693d3c68ae925966f0b14b8edda71696608039f4ed61b1fe9ffa468d16db"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a1cf393f1cdaf6a9b57c0a719a1068ba1069f022a59b8b1fe44b006745b59757"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef7a6beb4beaa62f88592ccc65df20328029d721db309cb3250b0aae0fa146c3"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:31b32c457a6025e74d233957cc9736742ac5a6cb196c6b68499f6bb51390bd6a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_riscv64.whl", hash = 
"sha256:edcb3aeb11cb4bf13a2af3c53a15b3d612edeb6409047ea0b5d6a21a9d744b34"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b6d93d7c0b61a1dd6197d208ab613eb7dcfdcca0a49c42ceb082257991de9d"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:b33fabeb1fde21180479b2d4667e994de7bbf0eec22832ba5d9b5e4cf65b6c6d"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:b8fb3db325435d34235b044b199e56cdf9ff41223a4b9752e8576465170bb38c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:366df99e710a2acd90efed3764bb1e28df6c675d33a7fb40df9b7281694432ee"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8c55b385daa2f92cb64b12ec4536c66954ac53654c7f15a203578da4e78105c0"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c0377174bf1dd416993d16edc15357f6eb17ac998244cca19bc67cdc0e2e5766"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5c6ff3335ce08c75afaed19e08699e8aacf95d4a260b495a4a8545244fe2ceb3"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:84011ba3109e06ac412f95399b704d3d6950e386b7994475b231cf61eec2fc1f"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba34475ceb08cccbdd98f6b46916917ae6eeb92b5ae111df10b544c3a4621dc4"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b31e90fdd0f968c2de3b26ab014314fe814225b6c324f770952f7d38abf17e3c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:d526864e0f67f74937a8fce859bd56c979f5e2ec57ca7c627f5f1071ef7fee60"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04195548662fa544626c8ea0f06561eb6203f1984ba5b4562764fbeb4c3d14b1"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:efff12b432179443f54e230fdf60de1f6cc726b6c832db8701227d089310e8aa"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:92e3b669236327083a2e33ccfa0d320dd01b9803b3e14dd986a4fc54aa00f4e1"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_11_0_arm64.whl", hash = "sha256:e0deeb03da539fa3577fcb0b3f2554a97f7e5477c246098dbb18091a4a01c16f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b52a3f9bb540a3e4ec0f6ba6d31339727b2950c9772850d6545b7eae0b9d7c5"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:db4fd476874ccfdbb630a54426964959e58da4c61c9feba73e6094d51303d7d8"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47f212c1d3be608a12937cc131bd85502954398aaa1320cb4c14421a0ffccf4c"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e35b7abae2b0adab776add56111df1735ccc71406e56203515e228a8dc07089f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fcf21be3ce5f5659daefd2b3b3b6e4727b028221ddc94e6c1523425579664747"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:9bd81e64e8de111237737b29d68039b9c813bdf520156af36d26819c9a979e5f"}, + {file = 
"psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:32770a4d666fbdafab017086655bcddab791d7cb260a16679cc5a7338b64343b"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3cb3a676873d7506825221045bd70e0427c905b9c8ee8d6acd70cfcbd6e576d"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-win_amd64.whl", hash = "sha256:4012c9c954dfaccd28f94e84ab9f94e12df76b4afb22331b1f0d3154893a6316"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:20e7fb94e20b03dcc783f76c0865f9da39559dcc0c28dd1a3fce0d01902a6b9c"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4bdab48575b6f870f465b397c38f1b415520e9879fdf10a53ee4f49dcbdf8a21"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9d3a9edcfbe77a3ed4bc72836d466dfce4174beb79eda79ea155cc77237ed9e8"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:44fc5c2b8fa871ce7f0023f619f1349a0aa03a0857f2c96fbc01c657dcbbdb49"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9c55460033867b4622cda1b6872edf445809535144152e5d14941ef591980edf"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:2d11098a83cca92deaeaed3d58cfd150d49b3b06ee0d0852be466bf87596899e"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:691c807d94aecfbc76a14e1408847d59ff5b5906a04a23e12a89007672b9e819"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b81627b691f29c4c30a8f322546ad039c40c328373b11dff7490a3e1b517855"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:b637d6d941209e8d96a072d7977238eea128046effbf37d1d8b2c0764750017d"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:41360b01c140c2a03d346cec3280cf8a71aa07d94f3b1509fa0161c366af66b4"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:875039274f8a2361e5207857899706da840768e2a775bf8c65e82f60b197df02"}, +] + [[package]] name = "ptyprocess" version = "0.7.0" @@ -3049,6 +3125,18 @@ files = [ {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + [[package]] name = "sortedcontainers" version = "2.4.0" @@ -3061,6 +3149,118 @@ files = [ {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, ] +[[package]] +name = "sqlalchemy" +version = "2.0.44" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "SQLAlchemy-2.0.44-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:471733aabb2e4848d609141a9e9d56a427c0a038f4abf65dd19d7a21fd563632"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:48bf7d383a35e668b984c805470518b635d48b95a3c57cb03f37eaa3551b5f9f"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf4bb6b3d6228fcf3a71b50231199fb94d2dd2611b66d33be0578ea3e6c2726"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:e998cf7c29473bd077704cea3577d23123094311f59bdc4af551923b168332b1"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ebac3f0b5732014a126b43c2b7567f2f0e0afea7d9119a3378bde46d3dcad88e"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-win32.whl", hash = "sha256:3255d821ee91bdf824795e936642bbf43a4c7cedf5d1aed8d24524e66843aa74"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-win_amd64.whl", hash = "sha256:78e6c137ba35476adb5432103ae1534f2f5295605201d946a4198a0dea4b38e7"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c77f3080674fc529b1bd99489378c7f63fcb4ba7f8322b79732e0258f0ea3ce"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4c26ef74ba842d61635b0152763d057c8d48215d5be9bb8b7604116a059e9985"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4a172b31785e2f00780eccab00bc240ccdbfdb8345f1e6063175b3ff12ad1b0"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9480c0740aabd8cb29c329b422fb65358049840b34aba0adf63162371d2a96e"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:17835885016b9e4d0135720160db3095dc78c583e7b902b6be799fb21035e749"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cbe4f85f50c656d753890f39468fcd8190c5f08282caf19219f684225bfd5fd2"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-win32.whl", hash = "sha256:2fcc4901a86ed81dc76703f3b93ff881e08761c63263c46991081fd7f034b165"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-win_amd64.whl", hash = "sha256:9919e77403a483ab81e3423151e8ffc9dd992c20d2603bf17e4a8161111e55f5"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fe3917059c7ab2ee3f35e77757062b1bea10a0b6ca633c58391e3f3c6c488dd"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:de4387a354ff230bc979b46b2207af841dc8bf29847b6c7dbe60af186d97aefa"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3678a0fb72c8a6a29422b2732fe423db3ce119c34421b5f9955873eb9b62c1e"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cf6872a23601672d61a68f390e44703442639a12ee9dd5a88bbce52a695e46e"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:329aa42d1be9929603f406186630135be1e7a42569540577ba2c69952b7cf399"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:70e03833faca7166e6a9927fbee7c27e6ecde436774cd0b24bbcc96353bce06b"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-win32.whl", hash = "sha256:253e2f29843fb303eca6b2fc645aca91fa7aa0aa70b38b6950da92d44ff267f3"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-win_amd64.whl", hash = "sha256:7a8694107eb4308a13b425ca8c0e67112f8134c846b6e1f722698708741215d5"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:72fea91746b5890f9e5e0997f16cbf3d53550580d76355ba2d998311b17b2250"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-macosx_11_0_arm64.whl", hash = 
"sha256:585c0c852a891450edbb1eaca8648408a3cc125f18cf433941fa6babcc359e29"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b94843a102efa9ac68a7a30cd46df3ff1ed9c658100d30a725d10d9c60a2f44"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:119dc41e7a7defcefc57189cfa0e61b1bf9c228211aba432b53fb71ef367fda1"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0765e318ee9179b3718c4fd7ba35c434f4dd20332fbc6857a5e8df17719c24d7"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2e7b5b079055e02d06a4308d0481658e4f06bc7ef211567edc8f7d5dce52018d"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-win32.whl", hash = "sha256:846541e58b9a81cce7dee8329f352c318de25aa2f2bbe1e31587eb1f057448b4"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-win_amd64.whl", hash = "sha256:7cbcb47fd66ab294703e1644f78971f6f2f1126424d2b300678f419aa73c7b6e"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ff486e183d151e51b1d694c7aa1695747599bb00b9f5f604092b54b74c64a8e1"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b1af8392eb27b372ddb783b317dea0f650241cea5bd29199b22235299ca2e45"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b61188657e3a2b9ac4e8f04d6cf8e51046e28175f79464c67f2fd35bceb0976"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b87e7b91a5d5973dda5f00cd61ef72ad75a1db73a386b62877d4875a8840959c"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:15f3326f7f0b2bfe406ee562e17f43f36e16167af99c4c0df61db668de20002d"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1e77faf6ff919aa8cd63f1c4e561cac1d9a454a191bb864d5dd5e545935e5a40"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-win32.whl", hash = "sha256:ee51625c2d51f8baadf2829fae817ad0b66b140573939dd69284d2ba3553ae73"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-win_amd64.whl", hash = "sha256:c1c80faaee1a6c3428cecf40d16a2365bcf56c424c92c2b6f0f9ad204b899e9e"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2fc44e5965ea46909a416fff0af48a219faefd5773ab79e5f8a5fcd5d62b2667"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:dc8b3850d2a601ca2320d081874033684e246d28e1c5e89db0864077cfc8f5a9"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d733dec0614bb8f4bcb7c8af88172b974f685a31dc3a65cca0527e3120de5606"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22be14009339b8bc16d6b9dc8780bacaba3402aa7581658e246114abbd2236e3"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:357bade0e46064f88f2c3a99808233e67b0051cdddf82992379559322dfeb183"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4848395d932e93c1595e59a8672aa7400e8922c39bb9b0668ed99ac6fa867822"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-win32.whl", hash = "sha256:2f19644f27c76f07e10603580a47278abb2a70311136a7f8fd27dc2e096b9013"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-win_amd64.whl", hash = "sha256:1df4763760d1de0dfc8192cc96d8aa293eb1a44f8f7a5fbe74caf1b551905c5e"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-macosx_10_9_x86_64.whl", hash = 
"sha256:f7027414f2b88992877573ab780c19ecb54d3a536bef3397933573d6b5068be4"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fe166c7d00912e8c10d3a9a0ce105569a31a3d0db1a6e82c4e0f4bf16d5eca9"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3caef1ff89b1caefc28f0368b3bde21a7e3e630c2eddac16abd9e47bd27cc36a"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc2856d24afa44295735e72f3c75d6ee7fdd4336d8d3a8f3d44de7aa6b766df2"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:11bac86b0deada30b6b5f93382712ff0e911fe8d31cb9bf46e6b149ae175eff0"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d18cd0e9a0f37c9f4088e50e3839fcb69a380a0ec957408e0b57cff08ee0a26"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-win32.whl", hash = "sha256:9e9018544ab07614d591a26c1bd4293ddf40752cc435caf69196740516af7100"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-win_amd64.whl", hash = "sha256:8e0e4e66fd80f277a8c3de016a81a554e76ccf6b8d881ee0b53200305a8433f6"}, + {file = "sqlalchemy-2.0.44-py3-none-any.whl", hash = "sha256:19de7ca1246fbef9f9d1bff8f1ab25641569df226364a0e40457dc5457c54b05"}, + {file = "sqlalchemy-2.0.44.tar.gz", hash = "sha256:0ae7454e1ab1d780aee69fd2aae7d6b8670a581d8847f2d1e0f7ddfbf47e5a22"}, +] + +[package.dependencies] +greenlet = {version = ">=1", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.6.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (>=1)"] +aioodbc = ["aioodbc", "greenlet (>=1)"] +aiosqlite = ["aiosqlite", "greenlet (>=1)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (>=1)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (>=1)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (>=1)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + +[[package]] +name = "sqlmodel" +version = "0.0.27" +description = "SQLModel, SQL databases in Python, designed for simplicity, compatibility, and robustness." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "sqlmodel-0.0.27-py3-none-any.whl", hash = "sha256:667fe10aa8ff5438134668228dc7d7a08306f4c5c4c7e6ad3ad68defa0e7aa49"}, + {file = "sqlmodel-0.0.27.tar.gz", hash = "sha256:ad1227f2014a03905aef32e21428640848ac09ff793047744a73dfdd077ff620"}, +] + +[package.dependencies] +pydantic = ">=1.10.13,<3.0.0" +SQLAlchemy = ">=2.0.14,<2.1.0" + [[package]] name = "stack-data" version = "0.6.3" @@ -3081,6 +3281,25 @@ pure-eval = "*" [package.extras] tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] +[[package]] +name = "starlette" +version = "0.50.0" +description = "The little ASGI library that shines." +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca"}, + {file = "starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca"}, +] + +[package.dependencies] +anyio = ">=3.6.2,<5" +typing-extensions = {version = ">=4.10.0", markers = "python_version < \"3.13\""} + +[package.extras] +full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] + [[package]] name = "timeout-decorator" version = "0.5.0" @@ -3256,6 +3475,25 @@ h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "uvicorn" +version = "0.38.0" +description = "The lightning-fast ASGI server." +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "uvicorn-0.38.0-py3-none-any.whl", hash = "sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02"}, + {file = "uvicorn-0.38.0.tar.gz", hash = "sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" + +[package.extras] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] + [[package]] name = "varint" version = "1.0.2" @@ -3288,22 +3526,6 @@ platformdirs = ">=3.9.1,<5" docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] -[[package]] -name = "waitress" -version = "3.0.2" -description = "Waitress WSGI server" -optional = false -python-versions = ">=3.9.0" -groups = ["main"] -files = [ - {file = "waitress-3.0.2-py3-none-any.whl", hash = "sha256:c56d67fd6e87c2ee598b76abdd4e96cfad1f24cacdea5078d382b1f9d7b5ed2e"}, - {file = "waitress-3.0.2.tar.gz", hash = "sha256:682aaaf2af0c44ada4abfb70ded36393f0e307f4ab9456a215ce0020baefc31f"}, -] - -[package.extras] -docs = 
["Sphinx (>=1.8.1)", "docutils", "pylons-sphinx-themes (>=1.0.9)"] -testing = ["coverage (>=7.6.0)", "pytest", "pytest-cov"] - [[package]] name = "wcwidth" version = "0.2.13" @@ -3448,24 +3670,6 @@ files = [ {file = "websockets-15.0.1.tar.gz", hash = "sha256:82544de02076bafba038ce055ee6412d68da13ab47f0c60cab827346de828dee"}, ] -[[package]] -name = "werkzeug" -version = "3.1.3" -description = "The comprehensive WSGI web application library." -optional = false -python-versions = ">=3.9" -groups = ["main"] -files = [ - {file = "werkzeug-3.1.3-py3-none-any.whl", hash = "sha256:54b78bf3716d19a65be4fceccc0d1d7b89e608834989dfae50ea87564639213e"}, - {file = "werkzeug-3.1.3.tar.gz", hash = "sha256:60723ce945c19328679790e3282cc758aa4a6040e4bb330f53d30fa546d44746"}, -] - -[package.dependencies] -MarkupSafe = ">=2.1.1" - -[package.extras] -watchdog = ["watchdog (>=2.3)"] - [[package]] name = "yarl" version = "1.20.0" @@ -3588,4 +3792,4 @@ propcache = ">=0.2.1" [metadata] lock-version = "2.1" python-versions = "^3.12" -content-hash = "3660c739c071839b795775dd6c6a904b555d6d5b28c315385f9557849bb476ab" +content-hash = "87c1aa12c90c68ebf6c631c9c0177a675aad8e5c093d6487af1a2fe4e74a1a43" diff --git a/pyproject.toml b/pyproject.toml index 48ae9b1c8..eb5f741dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,8 +30,11 @@ oz-merkle-tree = { git = "https://github.com/lidofinance/oz-merkle-tree", rev = multiformats = "^0.3.1" protobuf="^6.31.1" dag-cbor="^0.3.3" -flask = "^3.0.0" -waitress = "^3.0.2" +fastapi = "^0.121.3" +uvicorn = "^0.38.0" +sqlmodel = "^0.0.27" +psycopg2-binary = "^2.9.11" +prometheus-fastapi-instrumentator = "^7.1.0" [tool.poetry.group.dev.dependencies] base58 = "^2.1.1" diff --git a/src/main.py b/src/main.py index fd43ad295..df9fe4866 100644 --- a/src/main.py +++ b/src/main.py @@ -15,7 +15,7 @@ from src.modules.csm.csm import CSOracle from src.modules.ejector.ejector import Ejector from src.providers.ipfs import IPFSProvider, Kubo, LidoIPFS, Pinata, Storacha -from src.modules.performance_collector.performance_collector import PerformanceCollector +from src.modules.performance.collector.collector import PerformanceCollector from src.types import OracleModule from src.utils.build import get_build_info from src.utils.exception import IncompatibleException @@ -111,6 +111,8 @@ def main(module_name: OracleModule): logger.info({'msg': 'Initialize CSM performance oracle module.'}) instance = CSOracle(web3) elif module_name == OracleModule.PERFORMANCE_COLLECTOR: + logger.info({'msg': 'Initialize Performance Collector module.'}) + # FIXME: web3 object is overkill. only CONSENSUS_CLIENT_URI needed here. 
instance = PerformanceCollector(web3) else: raise ValueError(f'Unexpected arg: {module_name=}.') @@ -183,7 +185,6 @@ def ipfs_providers() -> Iterator[IPFSProvider]: ) - if __name__ == '__main__': module_name_arg = sys.argv[-1] if module_name_arg not in OracleModule: @@ -192,12 +193,20 @@ def ipfs_providers() -> Iterator[IPFSProvider]: raise ValueError(msg) module = OracleModule(module_name_arg) + if module is OracleModule.CHECK: errors = variables.check_uri_required_variables() variables.raise_from_errors(errors) - sys.exit(check()) - errors = variables.check_all_required_variables(module) + if module is OracleModule.PERFORMANCE_WEB_SERVER: + from src.modules.performance.web.server import serve + logger.info({'msg': f'Starting Performance Web Server on port {variables.PERFORMANCE_WEB_SERVER_API_PORT}'}) + sys.exit(serve()) + + if module is OracleModule.PERFORMANCE_COLLECTOR: + errors = variables.check_perf_collector_required_variables() + else: + errors = variables.check_all_required_variables(module) variables.raise_from_errors(errors) main(module) diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index 1adcbb748..f93dbadfb 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -1,3 +1,4 @@ +import atexit import logging from hexbytes import HexBytes @@ -59,14 +60,37 @@ class CSOracle(BaseModule, ConsensusModule): report_contract: CSFeeOracleContract def __init__(self, w3: Web3): + self.consumer = self.__class__.__name__ self.report_contract = w3.csm.oracle self.state = State.load() super().__init__(w3) + atexit.register(self._on_shutdown) def refresh_contracts(self): self.report_contract = self.w3.csm.oracle # type: ignore self.state.clear() + def _on_shutdown(self): + performance_client = getattr(self.w3, "performance", None) + if performance_client is None: + logger.debug({ + "msg": "Performance client is not attached, skipping demand cleanup", + "consumer": self.consumer, + }) + return + try: + performance_client.delete_epochs_demand(self.consumer) + logger.info({ + "msg": "Cleared Performance Collector demand on shutdown", + "consumer": self.consumer, + }) + except Exception as error: + logger.warning({ + "msg": "Unexpected error during Performance Collector demand cleanup", + "consumer": self.consumer, + "error": str(error), + }) + def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay: if not self._check_compatability(last_finalized_blockstamp): return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH @@ -86,22 +110,30 @@ def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecute @duration_meter() def set_epochs_range_to_collect(self, blockstamp: BlockStamp): - consumer = self.__class__.__name__ converter = self.converter(blockstamp) l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) self.state.migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame) self.state.log_progress() - current_demands = self.w3.performance.get_epochs_demand() - current_demand = current_demands.get(consumer) - if current_demand != (l_epoch, r_epoch): + is_range_available = self.w3.performance.is_range_available(l_epoch, r_epoch) + if is_range_available: + logger.info({ + "msg": "Performance data range is already available", + "start_epoch": l_epoch, + "end_epoch": r_epoch + }) + return + + current_demand = self.w3.performance.get_epochs_demand(self.consumer) + current_epochs_range = (current_demand.l_epoch, current_demand.r_epoch) if current_demand else None + if current_epochs_range != (l_epoch, r_epoch): logger.info({ 
- "msg": f"Updating {consumer} epochs demand for Performance Collector", - "old": current_demand, + "msg": f"Updating {self.consumer} epochs demand for Performance Collector", + "old": current_epochs_range, "new": (l_epoch, r_epoch) }) - self.w3.performance.post_epochs_demand(consumer, l_epoch, r_epoch) + self.w3.performance.post_epochs_demand(self.consumer, l_epoch, r_epoch) @duration_meter() def collect_data(self) -> bool: @@ -226,45 +258,58 @@ def fulfill_state(self): "msg": "Requesting performance data from collector", "epoch": epoch }) - epoch_data = self.w3.performance.get_epoch(epoch) + epoch_data = self.w3.performance.get_epoch_data(epoch) if epoch_data is None: raise ValueError(f"Epoch {epoch} is missing in Performance Collector") - misses, props, syncs = epoch_data + misses, props_vids, props_flags, syncs_vids, syncs_misses = ( + epoch_data.attestations, + epoch_data.proposals_vids, + epoch_data.proposals_flags, + epoch_data.syncs_vids, + epoch_data.syncs_misses + ) + + if len(props_vids) != len(props_flags) or len(syncs_vids) != len(syncs_misses): + raise ValueError(f"Epoch {epoch} data is corrupted: {len(props_vids)=}, {len(props_flags)=}, {len(syncs_vids)=}, {len(syncs_misses)=}") + logger.info({ "msg": "Performance data received", "epoch": epoch, "misses_count": len(misses), - "proposals_count": len(props), - "sync_duties_count": len(syncs) + "proposals_count": len(props_vids), + "sync_duties_count": len(syncs_vids) }) + misses = set(misses) for validator in validators: missed_att = validator.index in misses included_att = validator.index not in misses - is_active = is_active_validator(validator, EpochNumber(epoch)) + is_active = is_active_validator(validator, epoch) if not is_active and missed_att: raise ValueError(f"Validator {validator.index} missed attestation in epoch {epoch}, but was not active") self.state.save_att_duty(EpochNumber(epoch), validator.index, included=included_att) blocks_in_epoch = 0 - for p in props: - vid = ValidatorIndex(p.validator_index) - self.state.save_prop_duty(EpochNumber(epoch), vid, included=bool(p.is_proposed)) - blocks_in_epoch += p.is_proposed + for i, vid in enumerate(props_vids): + proposed = props_flags[i] + self.state.save_prop_duty(EpochNumber(epoch), ValidatorIndex(vid), included=bool(proposed)) + blocks_in_epoch += proposed if blocks_in_epoch: - for rec in syncs: - vid = ValidatorIndex(rec.validator_index) - fulfilled = max(0, blocks_in_epoch - rec.missed_count) - for _ in range(fulfilled): + for i, vid in enumerate(syncs_vids): + vid = ValidatorIndex(vid) + s_misses = syncs_misses[i] + s_fulfilled = max(0, blocks_in_epoch - s_misses) + for _ in range(s_fulfilled): self.state.save_sync_duty(EpochNumber(epoch), vid, included=True) - for _ in range(rec.missed_count): + for _ in range(s_misses): self.state.save_sync_duty(EpochNumber(epoch), vid, included=False) self.state.add_processed_epoch(EpochNumber(epoch)) self.state.log_progress() + self.state.commit() def make_rewards_tree(self, shares: dict[NodeOperatorId, RewardsShares]) -> RewardsTree: if not shares: diff --git a/src/modules/performance_collector/__init__.py b/src/modules/performance/__init__.py similarity index 100% rename from src/modules/performance_collector/__init__.py rename to src/modules/performance/__init__.py diff --git a/src/modules/performance/collector/__init__.py b/src/modules/performance/collector/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/modules/performance_collector/checkpoint.py 
b/src/modules/performance/collector/checkpoint.py similarity index 98% rename from src/modules/performance_collector/checkpoint.py rename to src/modules/performance/collector/checkpoint.py index 44a53adbd..2c5bb9156 100644 --- a/src/modules/performance_collector/checkpoint.py +++ b/src/modules/performance/collector/checkpoint.py @@ -10,8 +10,8 @@ from src import variables from src.constants import SLOTS_PER_HISTORICAL_ROOT, EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SYNC_COMMITTEE_SIZE -from src.modules.performance_collector.codec import ProposalDuty, SyncDuty, AttDutyMisses -from src.modules.performance_collector.db import DutiesDB +from src.modules.performance.common.types import ProposalDuty, SyncDuty, AttDutyMisses +from src.modules.performance.common.db import DutiesDB from src.modules.submodules.types import ZERO_HASH from src.providers.consensus.client import ConsensusClient from src.providers.consensus.types import SyncCommittee, SyncAggregate @@ -133,7 +133,7 @@ def exec(self, checkpoint: FrameCheckpoint) -> int: logger.info( {"msg": f"Processing checkpoint for slot {checkpoint.slot} with {len(checkpoint.duty_epochs)} epochs"} ) - unprocessed_epochs = [e for e in checkpoint.duty_epochs if not self.db.has_epoch(int(e))] + unprocessed_epochs = [e for e in checkpoint.duty_epochs if not self.db.has_epoch(e)] if not unprocessed_epochs: logger.info({"msg": "Nothing to process in the checkpoint"}) return 0 @@ -310,7 +310,7 @@ def _prepare_sync_committee_duties(self, epoch: EpochNumber) -> SyncDuties: duties: SyncDuties = [] for vid in sync_committee.validators: - duties.append(SyncDuty(vid, missed_count=0)) + duties.append(SyncDuty(validator_index=vid, missed_count=0)) return duties @@ -354,7 +354,7 @@ def _prepare_propose_duties( dependent_root = self._get_dependent_root_for_proposer_duties(epoch, checkpoint_block_roots, checkpoint_slot) proposer_duties = self.cc.get_proposer_duties(epoch, dependent_root) for duty in proposer_duties: - duties[duty.slot] = ProposalDuty(duty.validator_index, is_proposed=False) + duties[duty.slot] = ProposalDuty(validator_index=duty.validator_index, is_proposed=False) return duties def _get_dependent_root_for_proposer_duties( diff --git a/src/modules/performance_collector/performance_collector.py b/src/modules/performance/collector/collector.py similarity index 80% rename from src/modules/performance_collector/performance_collector.py rename to src/modules/performance/collector/collector.py index df1a2d03f..974764551 100644 --- a/src/modules/performance_collector/performance_collector.py +++ b/src/modules/performance/collector/collector.py @@ -1,17 +1,14 @@ import logging -from typing import Optional, Final -from src.modules.performance_collector.checkpoint import ( +from src.modules.performance.collector.checkpoint import ( FrameCheckpointsIterator, FrameCheckpointProcessor, ) -from src.modules.performance_collector.db import DutiesDB -from src.modules.performance_collector.http_server import start_performance_api_server +from src.modules.performance.common.db import DutiesDB from src.modules.submodules.oracle_module import BaseModule, ModuleExecuteDelay from src.modules.submodules.types import ChainConfig from src.types import BlockStamp, EpochNumber from src.utils.web3converter import ChainConverter -from src import variables logger = logging.getLogger(__name__) @@ -23,19 +20,9 @@ class PerformanceCollector(BaseModule): # Timestamp of the last epochs demand update last_epochs_demand_update: int = 0 - def __init__(self, w3, db_path: Optional[str] = None): + def 
__init__(self, w3): super().__init__(w3) - logger.info({'msg': 'Initialize Performance Collector module.'}) - db_path = db_path or str((variables.CACHE_PATH / "eth_duties.sqlite").absolute()) - self.db = DutiesDB(db_path) - try: - logger.info( - {'msg': f'Start performance API server on port {variables.PERFORMANCE_COLLECTOR_SERVER_API_PORT}'} - ) - start_performance_api_server(db_path) - except Exception as e: - logger.error({'msg': 'Failed to start performance API server', 'error': repr(e)}) - raise + self.db = DutiesDB() self.last_epochs_demand_update = self.get_epochs_demand_max_updated_at() def refresh_contracts(self): @@ -114,14 +101,21 @@ def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[ start_epoch = EpochNumber(max_available_epoch_to_check) end_epoch = EpochNumber(max_available_epoch_to_check) - epochs_demand = self.db.epochs_demand() + epochs_demand = self.db.get_epochs_demands() if not epochs_demand: logger.info({"msg": "No epoch demands found"}) - for consumer, (l_epoch, r_epoch, updated_at) in epochs_demand.items(): + for demand in epochs_demand: logger.info({ - "msg": "Epochs demand", "consumer": consumer, "l_epoch": l_epoch, "r_epoch": r_epoch, "updated_at": updated_at + "msg": "Epochs demand", **demand.model_dump() }) - start_epoch = min(start_epoch, l_epoch) + is_range_available = self.db.is_range_available(EpochNumber(demand.l_epoch), EpochNumber(demand.r_epoch)) + if is_range_available: + logger.info({ + "msg": f"Epochs demand for {demand.consumer} is already satisfied", + }) + # Remove from the DB just in case + self.db.delete_demand(demand.consumer) + start_epoch = min(start_epoch, demand.l_epoch) missing_epochs = self.db.missing_epochs_in(start_epoch, end_epoch) if missing_epochs: @@ -158,7 +152,6 @@ def new_epochs_range_demand_appeared(self) -> bool: def get_epochs_demand_max_updated_at(self) -> int: max_updated_at = 0 - epochs_demand = self.db.epochs_demand() - for _, (_, _, updated_at) in epochs_demand.items(): - max_updated_at = max(max_updated_at, updated_at) + for demand in self.db.get_epochs_demands(): + max_updated_at = max(max_updated_at, demand.updated_at) return max_updated_at diff --git a/src/modules/performance/common/__init__.py b/src/modules/performance/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/modules/performance/common/db.py b/src/modules/performance/common/db.py new file mode 100644 index 000000000..a3c435788 --- /dev/null +++ b/src/modules/performance/common/db.py @@ -0,0 +1,189 @@ +from typing import Sequence +from time import time + +from sqlalchemy import Column, Integer, Boolean, SmallInteger, ARRAY +from sqlmodel import SQLModel, Field, create_engine, Session, select + +from src import variables +from src.modules.performance.common.types import ProposalDuty, SyncDuty, AttDutyMisses +from src.types import EpochNumber +from src.utils.range import sequence + + +class Duty(SQLModel, table=True): + __tablename__ = "duties" + + epoch: int = Field(primary_key=True) + attestations: list[int] = Field(default=None, sa_column=Column(ARRAY(Integer()))) + proposals_vids: list[int] = Field(default=None, sa_column=Column(ARRAY(Integer()))) + proposals_flags: list[bool] = Field(default=None, sa_column=Column(ARRAY(Boolean()))) + syncs_vids: list[int] = Field(default=None, sa_column=Column(ARRAY(Integer()))) + syncs_misses: list[int] = Field(default=None, sa_column=Column(ARRAY(SmallInteger()))) + + +class EpochsDemand(SQLModel, table=True): + __tablename__ = "epochs_demands" + + consumer: 
str = Field(primary_key=True) + l_epoch: int + r_epoch: int + updated_at: int + + +class DutiesDB: + def __init__(self): + self.engine = create_engine( + self._get_database_url(), + echo=False, + pool_pre_ping=True, # Enable connection health checks + pool_recycle=3600, # Recycle connections every hour + pool_size=10, # Connection pool size + max_overflow=20, # Maximum overflow connections + ) + self._setup_database() + + @staticmethod + def _get_database_url() -> str: + """Get PostgreSQL database URL from environment variables""" + host = variables.PERFORMANCE_DB_HOST + port = variables.PERFORMANCE_DB_PORT + name = variables.PERFORMANCE_DB_NAME + user = variables.PERFORMANCE_DB_USER + password = variables.PERFORMANCE_DB_PASSWORD + return f"postgresql://{user}:{password}@{host}:{port}/{name}" + + def _setup_database(self): + SQLModel.metadata.create_all(self.engine) + + def get_session(self) -> Session: + return Session(self.engine) + + def store_demand(self, consumer: str, l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: + with self.get_session() as session: + demand = session.get(EpochsDemand, consumer) + if demand: + demand.l_epoch = l_epoch + demand.r_epoch = r_epoch + demand.updated_at = int(time()) + else: + demand = EpochsDemand(consumer=consumer, l_epoch=l_epoch, r_epoch=r_epoch, updated_at=int(time())) + session.add(demand) + session.commit() + + def delete_demand(self, consumer: str) -> None: + with self.get_session() as session: + demand = session.get(EpochsDemand, consumer) + if demand: + session.delete(demand) + session.commit() + + def store_epoch( + self, + epoch: EpochNumber, + att_misses: AttDutyMisses, + proposals: list[ProposalDuty], + syncs: list[SyncDuty], + ) -> None: + # TODO: test that store and get are consistent + self._store_data(epoch, att_misses, proposals, syncs) + self._auto_prune(epoch) + + def _store_data( + self, + epoch: EpochNumber, + att_misses: AttDutyMisses, + proposals: list[ProposalDuty], + syncs: list[SyncDuty], + ): + att_list: list[int] = [int(v) for v in att_misses] if att_misses else [] + prop_vids: list[int] = [int(p.validator_index) for p in proposals] if proposals else [] + prop_flags: list[bool] = [bool(p.is_proposed) for p in proposals] if proposals else [] + sync_vids: list[int] = [int(s.validator_index) for s in syncs] if syncs else [] + sync_misses: list[int] = [int(s.missed_count) for s in syncs] if syncs else [] + + with self.get_session() as session: + duty = session.get(Duty, epoch) + if duty: + duty.attestations = att_list + duty.proposals_vids = prop_vids + duty.proposals_flags = prop_flags + duty.syncs_vids = sync_vids + duty.syncs_misses = sync_misses + else: + duty = Duty( + epoch=epoch, + attestations=att_list, + proposals_vids=prop_vids, + proposals_flags=prop_flags, + syncs_vids=sync_vids, + syncs_misses=sync_misses, + ) + session.add(duty) + session.commit() + + def _auto_prune(self, current_epoch: EpochNumber) -> None: + if variables.PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS <= 0: + return + threshold = int(current_epoch) - variables.PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS + if threshold <= 0: + return + + with self.get_session() as session: + duties_to_delete = session.exec(select(Duty).where(Duty.epoch < threshold)).all() + for duty in duties_to_delete: + session.delete(duty) + session.commit() + + def is_range_available(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> bool: + if int(l_epoch) > int(r_epoch): + raise ValueError("Invalid epoch range") + + with self.get_session() as session: + count =
session.exec(select(Duty).where(Duty.epoch >= l_epoch, Duty.epoch <= r_epoch)).all() + return len(count) == (r_epoch - l_epoch + 1) + + def missing_epochs_in(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> list[EpochNumber]: + if l_epoch > r_epoch: + raise ValueError("Invalid epoch range") + + with self.get_session() as session: + present_duties = session.exec( + select(Duty.epoch).where(Duty.epoch >= l_epoch, Duty.epoch <= r_epoch).order_by(Duty.epoch) + ).all() + # A set keeps the membership checks below O(1) instead of scanning a list per epoch. + present = {int(epoch) for epoch in present_duties} + + missing = [] + for epoch in sequence(l_epoch, r_epoch): + if epoch not in present: + missing.append(epoch) + return missing + + def get_epochs_data(self, from_epoch: EpochNumber, to_epoch: EpochNumber) -> Sequence[Duty]: + with self.get_session() as session: + return session.exec(select(Duty).where(Duty.epoch >= from_epoch, Duty.epoch <= to_epoch)).all() + + def get_epoch_data(self, epoch: EpochNumber) -> Duty | None: + with self.get_session() as session: + return session.get(Duty, epoch) + + def has_epoch(self, epoch: EpochNumber) -> bool: + return self.get_epoch_data(epoch) is not None + + def min_epoch(self) -> EpochNumber | None: + with self.get_session() as session: + result = session.exec(select(Duty.epoch).order_by(Duty.epoch).limit(1)).first() + # `is not None` check: epoch 0 is a valid stored value and must not be treated as "empty". + return EpochNumber(int(result)) if result is not None else None + + def max_epoch(self) -> EpochNumber | None: + with self.get_session() as session: + # pylint: disable=no-member + result = session.exec(select(Duty.epoch).order_by(Duty.epoch.desc()).limit(1)).first() + return EpochNumber(int(result)) if result is not None else None + + def get_epochs_demand(self, consumer: str) -> EpochsDemand | None: + with self.get_session() as session: + return session.get(EpochsDemand, consumer) + + def get_epochs_demands(self) -> Sequence[EpochsDemand]: + with self.get_session() as session: + return session.exec(select(EpochsDemand)).all() diff --git a/src/modules/performance/common/types.py b/src/modules/performance/common/types.py new file mode 100644 index 000000000..343f18403 --- /dev/null +++ b/src/modules/performance/common/types.py @@ -0,0 +1,19 @@ +from typing import TypeAlias + +from pydantic import BaseModel + +from src.types import ValidatorIndex + + +class ProposalDuty(BaseModel): + validator_index: int + is_proposed: bool + + +class SyncDuty(BaseModel): + validator_index: int + missed_count: int # 0..32 + + +AttDutyMisses: TypeAlias = set[ValidatorIndex] +EpochData: TypeAlias = tuple[AttDutyMisses, list[ProposalDuty], list[SyncDuty]] diff --git a/src/modules/performance/web/__init__.py b/src/modules/performance/web/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/modules/performance/web/metrics.py b/src/modules/performance/web/metrics.py new file mode 100644 index 000000000..52577a3a9 --- /dev/null +++ b/src/modules/performance/web/metrics.py @@ -0,0 +1,34 @@ +from fastapi import FastAPI +from prometheus_client import CollectorRegistry +from prometheus_fastapi_instrumentator import Instrumentator, metrics + +from src import variables +from src.metrics.prometheus.basic import BUILD_INFO +from src.utils.build import get_build_info + +# To avoid auto-scraping metrics from `src/metrics/prometheus` and any other possible places.
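+# (Intent: only collectors explicitly registered on this dedicated registry, i.e. BUILD_INFO plus the +# instrumentator's own HTTP metrics, end up exposed on this app's /metrics endpoint.)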
+CUSTOM_REGISTRY = CollectorRegistry() +CUSTOM_REGISTRY.register(BUILD_INFO) + + +def attach_metrics(app: FastAPI): + build_info = get_build_info() + BUILD_INFO.info(build_info) + + instrumentator = Instrumentator( + excluded_handlers=["/health", "/metrics"], + registry=CUSTOM_REGISTRY, + ) + instrumentator.add( + metrics.default( + metric_namespace=variables.PERFORMANCE_WEB_SERVER_METRICS_PREFIX, + registry=CUSTOM_REGISTRY + ) + ) + instrumentator.instrument(app).expose( + app, + include_in_schema=True, + should_gzip=True, + ) diff --git a/src/modules/performance/web/server.py b/src/modules/performance/web/server.py new file mode 100644 index 000000000..ec20a5a6f --- /dev/null +++ b/src/modules/performance/web/server.py @@ -0,0 +1,121 @@ +from typing import Optional +from fastapi import FastAPI, HTTPException, Depends, Query +import uvicorn +from pydantic import BaseModel +from uvicorn.config import LOGGING_CONFIG + +from src.modules.performance.common.db import DutiesDB, Duty, EpochsDemand +from src import variables +from src.modules.performance.web.metrics import attach_metrics +from src.types import EpochNumber +from src.metrics.logging import JsonFormatter + + +class EpochsDemandRequest(BaseModel): + consumer: str + l_epoch: EpochNumber + r_epoch: EpochNumber + + +class HealthCheckResp(BaseModel): + status: str = "ok" + + +app = FastAPI(title="Performance Collector API") +attach_metrics(app) + +_db_instance: Optional[DutiesDB] = None + + +async def get_db() -> DutiesDB: + global _db_instance + if _db_instance is None: + _db_instance = DutiesDB() + return _db_instance + + +async def validate_epoch_bounds(l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: + if l_epoch > r_epoch: + raise HTTPException(status_code=400, detail="'l_epoch' must be <= 'r_epoch'") + + +@app.get("/health", response_model=HealthCheckResp) +async def health(): + return {"status": "ok"} + + +@app.get("/check-epochs", response_model=bool) +async def epochs_check( + from_epoch: EpochNumber = Query(..., alias="from"), + to_epoch: EpochNumber = Query(..., alias="to"), + db: DutiesDB = Depends(get_db), +): + await validate_epoch_bounds(from_epoch, to_epoch) + return bool(db.is_range_available(from_epoch, to_epoch)) + + +@app.get("/missing-epochs", response_model=list[EpochNumber]) +async def epochs_missing( + from_epoch: EpochNumber = Query(..., alias="from"), + to_epoch: EpochNumber = Query(..., alias="to"), + db: DutiesDB = Depends(get_db), +): + await validate_epoch_bounds(from_epoch, to_epoch) + return db.missing_epochs_in(from_epoch, to_epoch) + + +@app.get("/epochs", response_model=list[Duty]) +async def epochs_data( + from_epoch: EpochNumber = Query(..., alias="from"), + to_epoch: EpochNumber = Query(..., alias="to"), + db: DutiesDB = Depends(get_db), +): + await validate_epoch_bounds(from_epoch, to_epoch) + return db.get_epochs_data(from_epoch, to_epoch) + + +@app.get("/epochs/{epoch}", response_model=Duty | None) +async def epoch_data(epoch: EpochNumber, db: DutiesDB = Depends(get_db)): + return db.get_epoch_data(epoch) + + +@app.get("/demands", response_model=list[EpochsDemand]) +async def epochs_demands(db: DutiesDB = Depends(get_db)): + return db.get_epochs_demands() + + +@app.get("/demands/{consumer}", response_model=EpochsDemand | None) +async def one_epochs_demand(consumer: str, db: DutiesDB = Depends(get_db)): + return db.get_epochs_demand(consumer) + + +@app.post("/demands", response_model=EpochsDemand) +async def set_epochs_demand(demand_to_add: EpochsDemandRequest, db:
DutiesDB = Depends(get_db)): + await validate_epoch_bounds(demand_to_add.l_epoch, demand_to_add.r_epoch) + db.store_demand(demand_to_add.consumer, demand_to_add.l_epoch, demand_to_add.r_epoch) + return db.get_epochs_demand(demand_to_add.consumer) + + +@app.delete("/demands", response_model=EpochsDemand) +async def delete_epochs_demand(consumer: str = Query(...), db: DutiesDB = Depends(get_db)): + to_delete = db.get_epochs_demand(consumer) + if not to_delete: + raise HTTPException(status_code=404, detail=f"No demand found for consumer '{consumer}'") + db.delete_demand(consumer) + return to_delete + + +def serve(): + # Prepare logging config with the app-wise formatter + logging_config = LOGGING_CONFIG.copy() + for formatter_name in logging_config["formatters"]: + logging_config["formatters"][formatter_name] = { + "()": JsonFormatter, + } + + uvicorn.run( + app, + host="0.0.0.0", + port=variables.PERFORMANCE_WEB_SERVER_API_PORT, + log_config=logging_config, + ) diff --git a/src/modules/performance_collector/codec.py b/src/modules/performance_collector/codec.py deleted file mode 100644 index 3775693a1..000000000 --- a/src/modules/performance_collector/codec.py +++ /dev/null @@ -1,132 +0,0 @@ -import struct -from dataclasses import dataclass -from typing import TypeAlias - -from src.types import ValidatorIndex -from src.utils.serializable_set import SerializableSet - - -@dataclass -class ProposalDuty: - validator_index: int - is_proposed: bool - - -class ProposalDutiesCodec: - # little-endian | uint64 validator_index | bool is_proposed - # See: https://docs.python.org/3/library/struct.html#format-characters - PACK_FMT = " bytes: - if len(proposals) == 0: - raise ValueError("Invalid proposals count") - items = sorted(((p.validator_index, p.is_proposed) for p in proposals), key=lambda t: t[0]) - return b"".join(struct.pack(cls.PACK_FMT, vid, flag) for vid, flag in items) - - @classmethod - def decode(cls, blob: bytes) -> list[ProposalDuty]: - out: list[ProposalDuty] = [] - if not blob: - return out - if len(blob) % cls.ITEM_SIZE != 0: - raise ValueError("Invalid proposals bytes length") - for i in range(0, len(blob), cls.ITEM_SIZE): - vid, p = struct.unpack_from(cls.PACK_FMT, blob, i) - out.append(ProposalDuty(validator_index=int(vid), is_proposed=p)) - return out - - -@dataclass -class SyncDuty: - validator_index: int - missed_count: int # 0..32 - - -class SyncDutiesCodec: - # little-endian | uint64 validator_index | uint8 missed_count - # See: https://docs.python.org/3/library/struct.html#format-characters - PACK_FMT = " bytes: - if len(syncs) == 0: - raise ValueError("Invalid syncs count") - items_sorted = sorted(((m.validator_index, m.missed_count) for m in syncs), key=lambda t: t[0]) - return b"".join(struct.pack(cls.PACK_FMT, vid, cnt) for vid, cnt in items_sorted) - - @classmethod - def decode(cls, blob: bytes) -> list[SyncDuty]: - out: list[SyncDuty] = [] - if not blob: - return out - if len(blob) % cls.ITEM_SIZE != 0: - raise ValueError("invalid sync misses bytes length") - for i in range(0, len(blob), cls.ITEM_SIZE): - vid, m = struct.unpack_from(cls.PACK_FMT, blob, i) - out.append(SyncDuty(validator_index=int(vid), missed_count=int(m))) - return out - - -AttDutyMisses: TypeAlias = set[ValidatorIndex] - - -class AttDutiesMissCodec: - - @staticmethod - def encode(misses: AttDutyMisses) -> bytes: - bm = SerializableSet(misses) - return bm.serialize() - - @staticmethod - def decode(blob: bytes) -> AttDutyMisses: - if not blob: - return SerializableSet() - bm = 
SerializableSet.deserialize(blob) - return {ValidatorIndex(i) for i in bm} - - -EpochData: TypeAlias = tuple[AttDutyMisses, list[ProposalDuty], list[SyncDuty]] - - -class EpochDataCodec: - # little-endian | uint8 version | uint32 att_count | uint8 prop_count | uint16 sync_count - # See: https://docs.python.org/3/library/struct.html#format-characters - HEADER_FMT = " bytes: - att_bytes = AttDutiesMissCodec.encode(att_misses) - prop_bytes = ProposalDutiesCodec.encode(proposals) - sync_bytes = SyncDutiesCodec.encode(syncs) - header = struct.pack(cls.HEADER_FMT, cls.VERSION, len(att_bytes), len(proposals), len(syncs)) - return header + prop_bytes + sync_bytes + att_bytes - - @classmethod - def decode(cls, blob: bytes) -> EpochData: - if len(blob) < cls.HEADER_SIZE: - raise ValueError(f"Epoch blob too short to decode: header size is {cls.HEADER_SIZE} but full blob size is {len(blob)}") - ver, att_count, prop_count, sync_count = struct.unpack_from(cls.HEADER_FMT, blob, 0) - if ver != cls.VERSION: - raise ValueError(f"Unsupported epoch blob version: {ver}") - props_size = int(prop_count) * ProposalDutiesCodec.ITEM_SIZE - sync_size = int(sync_count) * SyncDutiesCodec.ITEM_SIZE - expected_blob_size = cls.HEADER_SIZE + props_size + sync_size + att_count - if len(blob) < expected_blob_size: - raise ValueError(f"Epoch blob size mismatch: expected {expected_blob_size} but got {len(blob)}") - offset = cls.HEADER_SIZE - props = ProposalDutiesCodec.decode(blob[offset:(offset + props_size)]) - offset += props_size - syncs = SyncDutiesCodec.decode(blob[offset:(offset + sync_size)]) - offset += sync_size - att = AttDutiesMissCodec.decode(bytes(blob[offset:(offset + att_count)])) if att_count else set() - return att, props, syncs diff --git a/src/modules/performance_collector/db.py b/src/modules/performance_collector/db.py deleted file mode 100644 index cc263383b..000000000 --- a/src/modules/performance_collector/db.py +++ /dev/null @@ -1,155 +0,0 @@ -import sqlite3 -from time import time -from contextlib import contextmanager -from typing import Optional - -from src import variables -from src.modules.performance_collector.codec import ProposalDuty, SyncDuty, EpochDataCodec, AttDutyMisses -from src.types import EpochNumber -from src.utils.range import sequence - - -class DutiesDB: - def __init__(self, path: str): - self._path = path - self.migrate() - # Check SQLite thread safety. 
- # Doc: https://docs.python.org/3/library/sqlite3.html#sqlite3.threadsafety - assert sqlite3.threadsafety > 0, "SQLite is not compiled with thread safety" - - @contextmanager - def cursor(self): - conn = sqlite3.connect( - self._path, check_same_thread=False, timeout=variables.PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT - ) - yield conn.cursor() - conn.commit() - conn.close() - - def migrate(self): - with self.cursor() as cur: - # Optimize SQLite for performance: WAL mode for concurrent access, - # normal sync for speed/safety balance, memory temp storage - cur.execute("PRAGMA journal_mode=WAL;") - cur.execute("PRAGMA synchronous=NORMAL;") - cur.execute("PRAGMA temp_store=MEMORY;") - cur.execute( - """ - CREATE TABLE IF NOT EXISTS duties - ( - epoch INTEGER PRIMARY KEY, - blob BLOB NOT NULL - ); - """ - ) - cur.execute( - """ - CREATE TABLE IF NOT EXISTS epochs_demand - ( - consumer STRING PRIMARY KEY, - l_epoch INTEGER, - r_epoch INTEGER, - updated_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP - ) - """ - ) - - def store_demand(self, consumer: str, l_epoch: int, r_epoch: int) -> None: - with self.cursor() as cur: - updated_at = int(time()) - cur.execute( - "INSERT OR REPLACE INTO epochs_demand(consumer, l_epoch, r_epoch, updated_at) VALUES(?, ?, ?, ?)", - (consumer, l_epoch, r_epoch, updated_at), - ) - - def store_epoch( - self, - epoch: EpochNumber, - att_misses: AttDutyMisses, - proposals: list[ProposalDuty], - syncs: list[SyncDuty], - ) -> bytes: - blob = EpochDataCodec.encode(att_misses, proposals, syncs) - self._store_blob(epoch, blob) - self._auto_prune(epoch) - return blob - - def _store_blob(self, epoch: int, blob: bytes) -> None: - with self.cursor() as cur: - cur.execute( - "INSERT OR REPLACE INTO duties(epoch, blob) VALUES(?, ?)", - (epoch, sqlite3.Binary(blob)), - ) - - def _auto_prune(self, current_epoch: int) -> None: - if variables.PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS <= 0: - return - threshold = int(current_epoch) - variables.PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS - if threshold <= 0: - return - with self.cursor() as cur: - cur.execute("DELETE FROM duties WHERE epoch < ?", (threshold,)) - - def is_range_available(self, l_epoch: int, r_epoch: int) -> bool: - if int(l_epoch) > int(r_epoch): - raise ValueError("Invalid epoch range") - with self.cursor() as cur: - cur.execute( - "SELECT COUNT(1) FROM duties WHERE epoch BETWEEN ? AND ?", - (int(l_epoch), int(r_epoch)), - ) - (cnt,) = cur.fetchone() or (0,) - return int(cnt) == (r_epoch - l_epoch + 1) - - def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[int]: - if l_epoch > r_epoch: - raise ValueError("Invalid epoch range") - with self.cursor() as cur: - cur.execute( - "SELECT epoch FROM duties WHERE epoch BETWEEN ? AND ? ORDER BY epoch", - (l_epoch, r_epoch), - ) - present = [int(row[0]) for row in cur.fetchall()] - missing = [] - for epoch in sequence(l_epoch, r_epoch): - if epoch not in present: - missing.append(epoch) - return missing - - def _get_entry(self, epoch: int) -> Optional[bytes]: - with self.cursor() as cur: - cur.execute("SELECT blob FROM duties WHERE epoch=?", (int(epoch),)) - row = cur.fetchone() - if not row: - return None - return bytes(row[0]) - - def get_epoch_blob(self, epoch: int) -> Optional[bytes]: - return self._get_entry(epoch) - - def has_epoch(self, epoch: int) -> bool: - with self.cursor() as cur: - cur.execute("SELECT 1 FROM duties WHERE epoch=? 
LIMIT 1", (int(epoch),)) - ok = cur.fetchone() is not None - return ok - - def min_epoch(self) -> int | None: - with self.cursor() as cur: - cur.execute("SELECT MIN(epoch) FROM duties") - val = cur.fetchone()[0] - return int(val) if val else None - - def max_epoch(self) -> int | None: - with self.cursor() as cur: - cur.execute("SELECT MAX(epoch) FROM duties") - val = cur.fetchone()[0] - return int(val) if val else None - - def epochs_demand(self) -> dict[str, tuple[int, int]]: - data = {} - with self.cursor() as cur: - cur.execute("SELECT consumer, l_epoch, r_epoch, updated_at FROM epochs_demand") - demands = cur.fetchall() - for consumer, l_epoch, r_epoch, updated_at in demands: - data[consumer] = (int(l_epoch), int(r_epoch), int(updated_at)) - return data diff --git a/src/modules/performance_collector/http_server.py b/src/modules/performance_collector/http_server.py deleted file mode 100644 index c24887609..000000000 --- a/src/modules/performance_collector/http_server.py +++ /dev/null @@ -1,168 +0,0 @@ -from functools import wraps -from threading import Thread -from typing import Any, Dict, Optional - -from flask import Flask, jsonify, request -from waitress import serve -import traceback - -from src.modules.performance_collector.db import DutiesDB -from src.modules.performance_collector.codec import EpochDataCodec -from src import variables - - -def _parse_from_to(args: Dict[str, Any]) -> Optional[tuple[int, int]]: - f = args.get("from") - t = args.get("to") - if f is None or t is None: - return None - fi = int(f) - ti = int(t) - if fi > ti: - return None - return fi, ti - - -def _create_app(db_path: str) -> Flask: - app = Flask(__name__) - app.config["DB_PATH"] = db_path - - _register_health_route(app) - _register_epoch_range_routes(app) - _register_epoch_blob_routes(app) - _register_debug_routes(app) - _register_demand_routes(app) - - return app - - -def _register_health_route(app: Flask) -> None: - @app.get("/health") - def health(): - return jsonify({"status": "ok"}) - - -def _register_epoch_range_routes(app: Flask) -> None: - @app.get("/epochs/check") - @_with_error_handling - def epochs_check(): - l_epoch, r_epoch = _require_epoch_range(request.args) - db = _db(app) - return jsonify({"result": bool(db.is_range_available(l_epoch, r_epoch))}) - - @app.get("/epochs/missing") - @_with_error_handling - def epochs_missing(): - l_epoch, r_epoch = _require_epoch_range(request.args) - db = _db(app) - return jsonify({"result": db.missing_epochs_in(l_epoch, r_epoch)}) - - -def _register_epoch_blob_routes(app: Flask) -> None: - @app.get("/epochs/blob") - @_with_error_handling - def epochs_blob(): - l_epoch, r_epoch = _require_epoch_range(request.args) - db = _db(app) - epochs: list[str | None] = [] - for epoch in range(l_epoch, r_epoch + 1): - blob = db.get_epoch_blob(epoch) - epochs.append(blob.hex() if blob is not None else None) - return jsonify({"result": epochs}) - - @app.get("/epochs/blob/") - @_with_error_handling - def epoch_blob(epoch: int): - db = _db(app) - blob = db.get_epoch_blob(epoch) - return jsonify({"result": blob.hex() if blob is not None else None}) - - -def _register_debug_routes(app: Flask) -> None: - @app.get("/debug/epochs/") - @_with_error_handling - def debug_epoch_details(epoch: int): - db = _db(app) - blob = db.get_epoch_blob(epoch) - if blob is None: - return jsonify({"error": "epoch not found", "epoch": epoch}), 404 - - misses, props, syncs = EpochDataCodec.decode(blob) - - proposals = [{"validator_index": int(p.validator_index), "is_proposed": 
bool(p.is_proposed)} for p in props] - sync_misses = [ - {"validator_index": int(s.validator_index), "missed_count": int(s.missed_count)} for s in syncs - ] - - return jsonify( - { - "epoch": int(epoch), - "att_misses": list(misses), - "proposals": proposals, - "sync_misses": sync_misses, - } - ) - - -def _register_demand_routes(app: Flask) -> None: - @app.post("/epochs/demand") - @_with_error_handling - def set_epochs_demand(): - data = _require_json(request.get_json(), {"consumer", "l_epoch", "r_epoch"}) - _validate_epoch_bounds(data["l_epoch"], data["r_epoch"]) - - db = _db(app) - db.store_demand(data["consumer"], data["l_epoch"], data["r_epoch"]) - - return jsonify({"status": "ok", "consumer": data["consumer"], "l_epoch": data["l_epoch"], "r_epoch": data["r_epoch"]}) - - @app.get("/epochs/demand") - @_with_error_handling - def get_epochs_demand(): - db = _db(app) - return jsonify({"result": db.epochs_demand()}) - - -def _db(app: Flask) -> DutiesDB: - return DutiesDB(app.config["DB_PATH"]) - - -def _require_epoch_range(args: Dict[str, Any]) -> tuple[int, int]: - parsed = _parse_from_to(args) - if not parsed: - raise ValueError("Invalid or missing 'from'/'to' params") - return parsed - - -def _require_json(data: Optional[Dict[str, Any]], required: set[str]) -> Dict[str, Any]: - if not data: - raise ValueError(f"Missing JSON body or required fields: {', '.join(sorted(required))}") - missing = required.difference(data) - if missing: - raise ValueError(f"Missing required fields: {', '.join(sorted(missing))}") - return data - - -def _validate_epoch_bounds(l_epoch: Any, r_epoch: Any) -> None: - if not isinstance(l_epoch, int) or not isinstance(r_epoch, int) or l_epoch > r_epoch: - raise ValueError("'l_epoch' and 'r_epoch' must be integers, and 'l_epoch' <= 'r_epoch'") - - -def _with_error_handling(func): - @wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except ValueError as exc: - return jsonify({"error": str(exc)}), 400 - except Exception as exc: # pylint: disable=broad-exception-caught - return jsonify({"error": repr(exc), "trace": traceback.format_exc()}), 500 - - return wrapper - - -def start_performance_api_server(db_path): - host = "0.0.0.0" - app = _create_app(db_path) - t = Thread(target=lambda: serve(app, host=host, port=variables.PERFORMANCE_COLLECTOR_SERVER_API_PORT), daemon=True) - t.start() diff --git a/src/providers/http_provider.py b/src/providers/http_provider.py index 72418187e..751d8c467 100644 --- a/src/providers/http_provider.py +++ b/src/providers/http_provider.py @@ -208,8 +208,8 @@ def _get_without_fallbacks( if not stream: del json_response["data"] meta = json_response - except KeyError: - # NOTE: Used by KeysAPIClient only. + except (KeyError, TypeError): + # NOTE: Used by KeysAPIClient and PerformanceClient only. 
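+            # A non-dict JSON body (e.g. the bare bool or list payloads served
+            # by the performance web server) raises TypeError on the "data"
+            # lookup, so the whole payload is passed through as data.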
data = json_response meta = {} @@ -328,6 +328,110 @@ def _post_without_fallbacks( retval_validator(data, meta, endpoint=endpoint) return data, meta + def _delete( + self, + endpoint: str, + path_params: Sequence[str | int] | None = None, + query_params: dict | None = None, + body_data: dict | None = None, + force_raise: Callable[..., Exception | None] = lambda _: None, + retval_validator: ReturnValueValidator = data_is_any, + ) -> tuple[dict, dict]: + errors: list[Exception] = [] + + for host in self.hosts: + try: + return self._delete_without_fallbacks( + host, + endpoint, + path_params, + query_params, + body_data, + retval_validator=retval_validator, + ) + except Exception as e: # pylint: disable=W0703 + errors.append(e) + + if to_force_raise := force_raise(errors): + raise to_force_raise from e + + logger.warning( + { + 'msg': f'[{self.__class__.__name__}] Host [{urlparse(host).netloc}] responded with error', + 'error': str(e), + 'provider': urlparse(host).netloc, + } + ) + + if not errors: + raise RuntimeError('No hosts available for DELETE request') + raise errors[-1] + + def _delete_without_fallbacks( + self, + host: str, + endpoint: str, + path_params: Sequence[str | int] | None = None, + query_params: dict | None = None, + body_data: dict | None = None, + retval_validator: ReturnValueValidator = data_is_any, + ) -> tuple[dict, dict]: + complete_endpoint = endpoint.format(*path_params) if path_params else endpoint + + with self.PROMETHEUS_HISTOGRAM.time() as t: + try: + response = self.session.delete( + self._urljoin(host, complete_endpoint if path_params else endpoint), + params=query_params, + json=body_data, + timeout=self.request_timeout, + ) + except Exception as error: # pylint: disable=W0703 + logger.error({'msg': str(error)}) + t.labels( + endpoint=endpoint, + code=0, + domain=urlparse(host).netloc, + ) + raise self.PROVIDER_EXCEPTION(status=0, text='Response error.') from error + + t.labels( + endpoint=endpoint, + code=response.status_code, + domain=urlparse(host).netloc, + ) + + if response.status_code != HTTPStatus.OK: + response_fail_msg = ( + f'Response from {complete_endpoint} [{response.status_code}]' + f' with text: "{str(response.text)}" returned.' 
+ ) + logger.debug({'msg': response_fail_msg}) + raise self.PROVIDER_EXCEPTION(response_fail_msg, status=response.status_code, text=response.text) + + if not response.content: + json_response: dict = {} + else: + try: + json_response = response.json() + except JSONDecodeError as error: + response_fail_msg = ( + f'Failed to decode JSON response from {complete_endpoint} with text: "{str(response.text)}"' + ) + logger.debug({'msg': response_fail_msg}) + raise self.PROVIDER_EXCEPTION(status=0, text='JSON decode error.') from error + + try: + data = json_response["data"] + del json_response["data"] + meta = json_response + except KeyError: + data = json_response + meta = {} + + retval_validator(data, meta, endpoint=endpoint) + return data, meta + def get_all_providers(self) -> list[str]: return self.hosts diff --git a/src/providers/performance/client.py b/src/providers/performance/client.py index e0c5975ac..d49740363 100644 --- a/src/providers/performance/client.py +++ b/src/providers/performance/client.py @@ -1,8 +1,9 @@ -from eth_typing import HexStr - from src.metrics.prometheus.basic import PERFORMANCE_REQUESTS_DURATION -from src.modules.performance_collector.codec import EpochDataCodec, EpochData -from src.providers.http_provider import HTTPProvider, NotOkResponse, data_is_dict +from src.modules.performance.common.db import Duty, EpochsDemand +from src.providers.http_provider import ( + HTTPProvider, + NotOkResponse, +) from src.types import EpochNumber @@ -14,66 +15,37 @@ class PerformanceClient(HTTPProvider): PROVIDER_EXCEPTION = PerformanceClientError PROMETHEUS_HISTOGRAM = PERFORMANCE_REQUESTS_DURATION - API_EPOCHS_CHECK = 'epochs/check' - API_EPOCHS_MISSING = 'epochs/missing' - API_EPOCHS_BLOB = 'epochs/blob' - API_EPOCHS_DEMAND = 'epochs/demand' + API_EPOCHS_CHECK = 'check-epochs' + API_EPOCHS_DATA = 'epochs' + API_EPOCHS_DEMAND = 'demands' - def is_range_available(self, l_epoch: int, r_epoch: int) -> bool: + def is_range_available(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> bool: data, _ = self._get( self.API_EPOCHS_CHECK, query_params={'from': l_epoch, 'to': r_epoch}, - retval_validator=data_is_dict, - ) - return data['result'] - - def missing_epochs_in(self, l_epoch: int, r_epoch: int) -> list[EpochNumber]: - data, _ = self._get( - self.API_EPOCHS_MISSING, - query_params={'from': l_epoch, 'to': r_epoch}, - retval_validator=data_is_dict, ) - return data['result'] + return bool(data) - def get_epoch_blobs(self, l_epoch: int, r_epoch: int) -> list[HexStr | None]: + def get_epoch_data(self, epoch: EpochNumber) -> Duty | None: data, _ = self._get( - self.API_EPOCHS_BLOB, - query_params={'from': l_epoch, 'to': r_epoch}, - retval_validator=data_is_dict, + self.API_EPOCHS_DATA + f"/{epoch}", ) - return data['result'] + return Duty.model_validate(data) if data else None - def get_epoch_blob(self, epoch: int) -> HexStr | None: + def get_epochs_demand(self, consumer: str) -> EpochsDemand | None: data, _ = self._get( - self.API_EPOCHS_BLOB + f"/{epoch}", - retval_validator=data_is_dict, + self.API_EPOCHS_DEMAND + f"/{consumer}", ) - return data['result'] - - def get_epochs(self, l_epoch: int, r_epoch: int) -> list[EpochData]: - epochs_data = self.get_epoch_blobs(l_epoch, r_epoch) - return [ - EpochDataCodec.decode(bytes.fromhex(blob)) - if (blob := epoch_data['blob']) else None - for epoch_data in epochs_data - ] - - def get_epoch(self, epoch: int) -> EpochData | None: - blob = self.get_epoch_blob(epoch) - return EpochDataCodec.decode(bytes.fromhex(blob)) if blob else None - - def 
get_epochs_demand(self) -> dict[str, tuple[EpochNumber, EpochNumber]]: - data, _ = self._get( - self.API_EPOCHS_DEMAND, - retval_validator=data_is_dict, - ) - return { - consumer: (EpochNumber(demand[0]), EpochNumber(demand[1])) - for consumer, demand in data['result'].items() - } + return EpochsDemand.model_validate(data) if data else None def post_epochs_demand(self, consumer: str, l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: self._post( self.API_EPOCHS_DEMAND, body_data={'consumer': consumer, 'l_epoch': l_epoch, 'r_epoch': r_epoch}, ) + + def delete_epochs_demand(self, consumer: str) -> None: + self._delete( + self.API_EPOCHS_DEMAND, + query_params={'consumer': consumer}, + ) diff --git a/src/types.py b/src/types.py index dc2b4168f..dfb2e8f74 100644 --- a/src/types.py +++ b/src/types.py @@ -11,6 +11,7 @@ class OracleModule(StrEnum): EJECTOR = 'ejector' CHECK = 'check' CSM = 'csm' + PERFORMANCE_WEB_SERVER = 'performance_web_server' PERFORMANCE_COLLECTOR = 'performance_collector' diff --git a/src/utils/serializable_set.py b/src/utils/serializable_set.py deleted file mode 100644 index 9207c553d..000000000 --- a/src/utils/serializable_set.py +++ /dev/null @@ -1,164 +0,0 @@ -""" -Serializable Set Implementation - -A set-like data structure with adaptive serialization that automatically chooses -the most efficient encoding strategy between run-length encoding and direct storage. -""" - - -class SerializableSet(set): - """ - An adaptive implementation with variable-length encoding. - - Extends built-in set with serialization that automatically chooses optimal strategy: - - Run-length encoding for clustered data (efficient for consecutive ranges) - - Direct value list for sparse data (efficient when ranges are ineffective) - - Automatically chooses the most compact representation during serialization - """ - - def _build_ranges(self, sorted_values: list[int]) -> list[tuple[int, int]]: - if not sorted_values: - return [] - - ranges = [] - start = sorted_values[0] - end = sorted_values[0] - - for val in sorted_values[1:]: - if val == end + 1: - end = val - else: - ranges.append((start, end)) - start = end = val - - ranges.append((start, end)) - return ranges - - def serialize(self) -> bytes: - """ - Serialize using adaptive encoding. - Chooses between run-length encoding and direct value list based on efficiency. 
- Format: [encoding_type: 1 byte] + data - - Type 0: Run-length encoding (ranges) - - Type 1: Direct value list - """ - if not self: - return bytes([0]) + self._encode_varint(0) - - sorted_values = sorted(self) - ranges = self._build_ranges(sorted_values) - - # Calculate size for run-length encoding - rle_data = [self._encode_varint(len(ranges))] - for start, end in ranges: - length = end - start + 1 - rle_data.append(self._encode_varint(start)) - rle_data.append(self._encode_varint(length)) - rle_bytes = b"".join(rle_data) - rle_size = 1 + len(rle_bytes) # +1 for type byte - - # Calculate size for direct value list - direct_data = [self._encode_varint(len(self))] - for value in sorted_values: - direct_data.append(self._encode_varint(value)) - direct_bytes = b"".join(direct_data) - direct_size = 1 + len(direct_bytes) # +1 for type byte - - # Choose more efficient encoding - if rle_size <= direct_size: - return bytes([0]) + rle_bytes # Use run-length encoding - else: - return bytes([1]) + direct_bytes # Use direct value list - - @classmethod - def deserialize(cls, data: bytes) -> "SerializableSet": - _set = cls() - - if not data: - return _set - - encoding_type = data[0] - offset = 1 - - if encoding_type == 0: - if offset >= len(data): - return _set - - num_ranges, offset = cls._decode_varint(data, offset) - - for _ in range(num_ranges): - start, offset = cls._decode_varint(data, offset) - length, offset = cls._decode_varint(data, offset) - end = start + length - 1 - # Add all values in this range to our set - _set.update(range(start, end + 1)) - - return _set - - if encoding_type == 1: - # Direct value list - num_values, offset = cls._decode_varint(data, offset) - - for _ in range(num_values): - value, offset = cls._decode_varint(data, offset) - _set.add(value) - - return _set - - raise ValueError(f"Unknown encoding type: {encoding_type}") - - @staticmethod - def _encode_varint(value: int) -> bytes: - # Reference: https://protobuf.dev/programming-guides/encoding/#varints - payload_mask = 0x7F - continuation_flag = 0x80 - - result = [] - while value >= continuation_flag: # While value does not fit in 7 bits - result.append((value & payload_mask) | continuation_flag) - value >>= 7 # Shift to the next byte - result.append(value & payload_mask) - return bytes(result) - - @staticmethod - def _decode_varint(data: bytes, offset: int) -> tuple[int, int]: - # Reference: https://protobuf.dev/programming-guides/encoding/#varints - payload_mask = 0x7F - continuation_flag = 0x80 - - decoded_value = 0 - bit_shift_position = 0 - current_offset = offset - - while current_offset < len(data): - current_byte = data[current_offset] - current_offset += 1 - - # Extract data bits and place them at the correct position - data_bits = current_byte & payload_mask - decoded_value |= (data_bits << bit_shift_position) - - # Check if this is the last byte (no continuation flag) - has_continuation = (current_byte & continuation_flag) != 0 - if not has_continuation: - break - - # Move to the next 7-bit group - bit_shift_position += 7 - - # Can't be greater than uint64 - if bit_shift_position >= 64: - raise ValueError("Varint too long") - else: - raise ValueError("Incomplete varint") - - return decoded_value, current_offset - - def __repr__(self) -> str: - return f"SerializableSet({sorted(self)})" - - def __str__(self) -> str: - return f"SerializableSet({len(self)} values)" - - def copy(self): - return SerializableSet(self) diff --git a/src/variables.py b/src/variables.py index 3b1f6fbcb..54c98433f 100644 --- 
a/src/variables.py +++ b/src/variables.py @@ -103,10 +103,21 @@ OPSGENIE_API_URL: Final[str] = os.getenv('OPSGENIE_API_URL', '') HEALTHCHECK_SERVER_PORT: Final = int(os.getenv('HEALTHCHECK_SERVER_PORT', 9010)) -PERFORMANCE_COLLECTOR_SERVER_API_PORT: Final = int(os.getenv('PERFORMANCE_COLLECTOR_SERVER_API_PORT', 9020)) + +# - Performance Web-server and Collector +PERFORMANCE_WEB_SERVER_API_PORT: Final = int(os.getenv('PERFORMANCE_WEB_SERVER_API_PORT', 9020)) +PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT: Final = int(os.getenv('PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT', 30)) +PERFORMANCE_WEB_SERVER_METRICS_PREFIX: Final = os.getenv("PERFORMANCE_WEB_SERVER_METRICS_PREFIX", "lido_performance_web") + PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS', 28 * 225 * 6)) PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT', 30)) +PERFORMANCE_DB_HOST: Final = os.getenv('PERFORMANCE_DB_HOST', 'localhost') +PERFORMANCE_DB_PORT: Final = int(os.getenv('PERFORMANCE_DB_PORT', 5432)) +PERFORMANCE_DB_NAME: Final = os.getenv('PERFORMANCE_DB_NAME', 'performance') +PERFORMANCE_DB_USER: Final = os.getenv('PERFORMANCE_DB_USER', 'performance') +PERFORMANCE_DB_PASSWORD: Final = os.getenv('PERFORMANCE_DB_PASSWORD', 'performance') + MAX_CYCLE_LIFETIME_IN_SECONDS: Final = int(os.getenv("MAX_CYCLE_LIFETIME_IN_SECONDS", 3000)) CACHE_PATH: Final = Path(os.getenv("CACHE_PATH", ".")) @@ -114,6 +125,7 @@ VAULT_PAGINATION_LIMIT: Final = int(os.getenv("VAULT_PAGINATION_LIMIT", 1_000)) VAULT_VALIDATOR_STAGES_BATCH_SIZE: Final = int(os.getenv("VAULT_VALIDATOR_STAGES_BATCH_SIZE", 1_00)) + def check_all_required_variables(module: OracleModule): errors = check_uri_required_variables() if not LIDO_LOCATOR_ADDRESS: @@ -134,6 +146,13 @@ def check_uri_required_variables(): return [name for name, uri in required_uris.items() if '' in uri] +def check_perf_collector_required_variables(): + required_uris = { + 'CONSENSUS_CLIENT_URI': CONSENSUS_CLIENT_URI, + } + return [name for name, uri in required_uris.items() if '' in uri] + + def raise_from_errors(errors): if errors: raise ValueError("The following variables are required: " + ", ".join(errors)) @@ -171,9 +190,15 @@ def raise_from_errors(errors): 'PROMETHEUS_PORT': PROMETHEUS_PORT, 'PROMETHEUS_PREFIX': PROMETHEUS_PREFIX, 'HEALTHCHECK_SERVER_PORT': HEALTHCHECK_SERVER_PORT, - 'PERFORMANCE_COLLECTOR_SERVER_API_PORT': PERFORMANCE_COLLECTOR_SERVER_API_PORT, + 'PERFORMANCE_WEB_SERVER_API_PORT': PERFORMANCE_WEB_SERVER_API_PORT, + 'PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT': PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT, + 'PERFORMANCE_WEB_SERVER_METRICS_PREFIX': PERFORMANCE_WEB_SERVER_METRICS_PREFIX, 'PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS': PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS, 'PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT': PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT, + 'PERFORMANCE_DB_HOST': PERFORMANCE_DB_HOST, + 'PERFORMANCE_DB_PORT': PERFORMANCE_DB_PORT, + 'PERFORMANCE_DB_NAME': PERFORMANCE_DB_NAME, + 'PERFORMANCE_DB_USER': PERFORMANCE_DB_USER, 'HTTP_REQUEST_TIMEOUT_PERFORMANCE': HTTP_REQUEST_TIMEOUT_PERFORMANCE, 'HTTP_REQUEST_RETRY_COUNT_PERFORMANCE': HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, 'HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE': HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE, From 12034487f35d7a2d268cae90711482e62f1e2814 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Fri, 28 Nov 2025 13:21:22 +0100 Subject: [PATCH 28/35] fix: 
remove old tests --- .../performance_collector/test_codec.py | 288 ----------------- tests/utils/test_serializable_set.py | 293 ------------------ 2 files changed, 581 deletions(-) delete mode 100644 tests/modules/performance_collector/test_codec.py delete mode 100644 tests/utils/test_serializable_set.py diff --git a/tests/modules/performance_collector/test_codec.py b/tests/modules/performance_collector/test_codec.py deleted file mode 100644 index f678e1654..000000000 --- a/tests/modules/performance_collector/test_codec.py +++ /dev/null @@ -1,288 +0,0 @@ -import pytest - -from src.modules.performance_collector.codec import ( - ProposalDuty, - ProposalDutiesCodec, - SyncDuty, - SyncDutiesCodec, - AttDutiesMissCodec, - EpochDataCodec, -) - - -def _proposals_to_tuples(items: list[ProposalDuty]) -> list[tuple[int, int]]: - return [(int(i.validator_index), int(bool(i.is_proposed))) for i in items] - - -def _syncs_to_tuples(items: list[SyncDuty]) -> list[tuple[int, int]]: - return [(int(i.validator_index), int(i.missed_count)) for i in items] - - -PROPOSALS_EXAMPLE: list[ProposalDuty] = [ - ProposalDuty(validator_index=1001, is_proposed=True), - ProposalDuty(validator_index=1023, is_proposed=False), - ProposalDuty(validator_index=1098, is_proposed=True), - ProposalDuty(validator_index=1110, is_proposed=True), - ProposalDuty(validator_index=1177, is_proposed=False), - ProposalDuty(validator_index=1205, is_proposed=True), - ProposalDuty(validator_index=1266, is_proposed=False), - ProposalDuty(validator_index=1314, is_proposed=True), - ProposalDuty(validator_index=1333, is_proposed=False), - ProposalDuty(validator_index=1402, is_proposed=True), - ProposalDuty(validator_index=1444, is_proposed=True), - ProposalDuty(validator_index=1509, is_proposed=False), - ProposalDuty(validator_index=1531, is_proposed=True), - ProposalDuty(validator_index=1600, is_proposed=False), - ProposalDuty(validator_index=1625, is_proposed=True), - ProposalDuty(validator_index=1702, is_proposed=True), - ProposalDuty(validator_index=1737, is_proposed=False), - ProposalDuty(validator_index=1801, is_proposed=True), - ProposalDuty(validator_index=1822, is_proposed=False), - ProposalDuty(validator_index=1905, is_proposed=True), - ProposalDuty(validator_index=1950, is_proposed=False), - ProposalDuty(validator_index=2007, is_proposed=True), - ProposalDuty(validator_index=2059, is_proposed=True), - ProposalDuty(validator_index=2103, is_proposed=False), - ProposalDuty(validator_index=2166, is_proposed=True), - ProposalDuty(validator_index=2201, is_proposed=False), - ProposalDuty(validator_index=2255, is_proposed=True), - ProposalDuty(validator_index=2311, is_proposed=False), - ProposalDuty(validator_index=2399, is_proposed=True), - ProposalDuty(validator_index=2420, is_proposed=False), - ProposalDuty(validator_index=2504, is_proposed=True), - ProposalDuty(validator_index=2570, is_proposed=False), -] - - -SYNCS_EXAMPLE: list[SyncDuty] = [ - SyncDuty(validator_index=8000, missed_count=0), - SyncDuty(validator_index=8001, missed_count=1), - SyncDuty(validator_index=8002, missed_count=2), - SyncDuty(validator_index=8003, missed_count=3), - SyncDuty(validator_index=8004, missed_count=4), - SyncDuty(validator_index=8005, missed_count=5), - SyncDuty(validator_index=8006, missed_count=6), - SyncDuty(validator_index=8007, missed_count=7), - SyncDuty(validator_index=8008, missed_count=8), - SyncDuty(validator_index=8009, missed_count=9), - SyncDuty(validator_index=8010, missed_count=10), - SyncDuty(validator_index=8011, missed_count=11), - 
SyncDuty(validator_index=8012, missed_count=12), - SyncDuty(validator_index=8013, missed_count=13), - SyncDuty(validator_index=8014, missed_count=14), - SyncDuty(validator_index=8015, missed_count=15), - SyncDuty(validator_index=8016, missed_count=16), - SyncDuty(validator_index=8017, missed_count=17), - SyncDuty(validator_index=8018, missed_count=18), - SyncDuty(validator_index=8019, missed_count=19), - SyncDuty(validator_index=8020, missed_count=20), - SyncDuty(validator_index=8021, missed_count=21), - SyncDuty(validator_index=8022, missed_count=22), - SyncDuty(validator_index=8023, missed_count=23), - SyncDuty(validator_index=8024, missed_count=24), - SyncDuty(validator_index=8025, missed_count=25), - SyncDuty(validator_index=8026, missed_count=26), - SyncDuty(validator_index=8027, missed_count=27), - SyncDuty(validator_index=8028, missed_count=28), - SyncDuty(validator_index=8029, missed_count=29), - SyncDuty(validator_index=8030, missed_count=30), - SyncDuty(validator_index=8031, missed_count=31), - SyncDuty(validator_index=8032, missed_count=32), - SyncDuty(validator_index=8033, missed_count=0), - SyncDuty(validator_index=8034, missed_count=2), - SyncDuty(validator_index=8035, missed_count=4), - SyncDuty(validator_index=8036, missed_count=6), - SyncDuty(validator_index=8037, missed_count=8), - SyncDuty(validator_index=8038, missed_count=10), - SyncDuty(validator_index=8039, missed_count=12), - SyncDuty(validator_index=8040, missed_count=14), - SyncDuty(validator_index=8041, missed_count=16), - SyncDuty(validator_index=8042, missed_count=18), - SyncDuty(validator_index=8043, missed_count=20), - SyncDuty(validator_index=8044, missed_count=22), - SyncDuty(validator_index=8045, missed_count=24), - SyncDuty(validator_index=8046, missed_count=26), - SyncDuty(validator_index=8047, missed_count=28), - SyncDuty(validator_index=8048, missed_count=30), - SyncDuty(validator_index=8049, missed_count=32), - SyncDuty(validator_index=8050, missed_count=1), - SyncDuty(validator_index=8051, missed_count=3), - SyncDuty(validator_index=8052, missed_count=5), - SyncDuty(validator_index=8053, missed_count=7), - SyncDuty(validator_index=8054, missed_count=9), - SyncDuty(validator_index=8055, missed_count=11), - SyncDuty(validator_index=8056, missed_count=13), - SyncDuty(validator_index=8057, missed_count=15), - SyncDuty(validator_index=8058, missed_count=17), - SyncDuty(validator_index=8059, missed_count=19), - SyncDuty(validator_index=8060, missed_count=21), - SyncDuty(validator_index=8061, missed_count=23), - SyncDuty(validator_index=8062, missed_count=25), - SyncDuty(validator_index=8063, missed_count=27), -] - - -ATT_MISSES_EXAMPLE: set[int] = { - 10, - 17, - 21, - 28, - 35, - 41, - 43, - 49, - 57, - 60, - 66, - 72, - 75, - 81, - 86, - 90, - 97, - 101, - 108, - 112, - 119, - 123, - 127, - 130, - 137, - 141, - 149, - 152, - 159, - 162, - 170, - 173, - 177, - 182, - 189, - 193, - 197, - 201, - 206, - 210, - 215, - 219, - 223, - 228, - 234, - 239, - 241, - 246, - 251, - 257, - 260, - 266, - 270, - 274, - 279, - 283, - 288, - 292, - 297, - 301, - 305, - 309, - 314, - 318, - 323, - 327, - 330, - 336, - 340, - 345, -} - - -@pytest.mark.unit -def test_proposal_duties_codec_roundtrip(): - src = PROPOSALS_EXAMPLE - - blob = ProposalDutiesCodec.encode(src) - dst = ProposalDutiesCodec.decode(blob) - - # The codec sorts on encode; compare as sorted tuples - assert sorted(_proposals_to_tuples(dst)) == sorted(_proposals_to_tuples(src)) - - -@pytest.mark.unit -def test_proposal_duties_codec_empty(): - with 
pytest.raises(ValueError): - ProposalDutiesCodec.decode(ProposalDutiesCodec.encode([])) - - -@pytest.mark.unit -def test_sync_miss_duties_codec_roundtrip(): - src = SYNCS_EXAMPLE - - blob = SyncDutiesCodec.encode(src) - dst = SyncDutiesCodec.decode(blob) - - assert sorted(_syncs_to_tuples(dst)) == sorted(_syncs_to_tuples(src)) - - -@pytest.mark.unit -def test_sync_miss_duties_codec_empty(): - with pytest.raises(ValueError): - SyncDutiesCodec.decode(SyncDutiesCodec.encode([])) - - -@pytest.mark.unit -def test_att_duties_miss_codec_roundtrip(): - src = ATT_MISSES_EXAMPLE - blob = AttDutiesMissCodec.encode(src) - dst = AttDutiesMissCodec.decode(blob) - assert set(dst) == set(src) - - -@pytest.mark.unit -def test_att_duties_miss_codec_empty(): - AttDutiesMissCodec.decode(AttDutiesMissCodec.encode(set())) - - -@pytest.mark.unit -def test_epoch_blob_codec_roundtrip(): - att_misses = ATT_MISSES_EXAMPLE - proposals = PROPOSALS_EXAMPLE - syncs = SYNCS_EXAMPLE - - blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, syncs=syncs) - att_decoded, proposals_decoded, syncs_decoded = EpochDataCodec.decode(blob) - - assert set(att_decoded) == set(att_misses) - assert sorted(_proposals_to_tuples(proposals_decoded)) == sorted(_proposals_to_tuples(proposals)) - assert sorted(_syncs_to_tuples(syncs_decoded)) == sorted(_syncs_to_tuples(syncs)) - - -@pytest.mark.unit -def test_epoch_blob_codec_bad_version(): - att_misses = set() - proposals = PROPOSALS_EXAMPLE - syncs = SYNCS_EXAMPLE - - blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, syncs=syncs) - - bad = bytes([255]) + blob[1:] - with pytest.raises(ValueError): - EpochDataCodec.decode(bad) - - -@pytest.mark.unit -def test_epoch_blob_codec_short_header(): - with pytest.raises(ValueError): - EpochDataCodec.decode(b"\x01\x00") - - -@pytest.mark.unit -def test_epoch_blob_codec_truncated_payload(): - att_misses = set() - proposals = PROPOSALS_EXAMPLE - syncs = SYNCS_EXAMPLE - - blob = EpochDataCodec.encode(att_misses=att_misses, proposals=proposals, syncs=syncs) - bad_blob = blob[:-1] - - with pytest.raises(ValueError): - EpochDataCodec.decode(bad_blob) diff --git a/tests/utils/test_serializable_set.py b/tests/utils/test_serializable_set.py deleted file mode 100644 index 8ddfe85c8..000000000 --- a/tests/utils/test_serializable_set.py +++ /dev/null @@ -1,293 +0,0 @@ -""" -Tests for SerializableSet - -Comprehensive test suite covering all functionality of the SerializableSet class -including adaptive serialization, set operations, and edge cases. 
-""" - -import pytest -from src.utils.serializable_set import SerializableSet - - -class TestSerializableSet: - """Test suite for SerializableSet class.""" - - @pytest.mark.unit - def test_initialization_empty(self): - """Test creating an empty SerializableSet.""" - ss = SerializableSet() - assert len(ss) == 0 - assert isinstance(ss, set) - assert not ss - - @pytest.mark.unit - def test_initialization_with_values(self): - """Test creating SerializableSet with initial values.""" - values = [1, 2, 3, 5, 8] - ss = SerializableSet(values) - assert len(ss) == 5 - assert 3 in ss - assert 4 not in ss - assert sorted(ss) == [1, 2, 3, 5, 8] - - @pytest.mark.unit - def test_set_operations(self): - """Test basic set operations.""" - ss = SerializableSet([1, 2, 3]) - - # Add - ss.add(4) - assert 4 in ss - assert len(ss) == 4 - - # Remove - ss.remove(2) - assert 2 not in ss - assert len(ss) == 3 - - # Discard - ss.discard(10) # Should not raise error - ss.discard(1) - assert 1 not in ss - assert len(ss) == 2 - - # Update - ss.update([5, 6, 7]) - assert sorted(ss) == [3, 4, 5, 6, 7] - - # Clear - ss.clear() - assert len(ss) == 0 - - @pytest.mark.unit - def test_set_operators(self): - """Test set operators (union, intersection, etc.).""" - ss1 = SerializableSet([1, 2, 3, 4]) - ss2 = SerializableSet([3, 4, 5, 6]) - - # Union - union = ss1 | ss2 - assert sorted(union) == [1, 2, 3, 4, 5, 6] - - # Intersection - intersection = ss1 & ss2 - assert sorted(intersection) == [3, 4] - - # Difference - diff = ss1 - ss2 - assert sorted(diff) == [1, 2] - - # Symmetric difference - sym_diff = ss1 ^ ss2 - assert sorted(sym_diff) == [1, 2, 5, 6] - - @pytest.mark.unit - def test_equality(self): - """Test equality comparisons.""" - ss1 = SerializableSet([1, 2, 3]) - ss2 = SerializableSet([3, 1, 2]) - ss3 = SerializableSet([1, 2, 4]) - regular_set = {1, 2, 3} - - assert ss1 == ss2 - assert ss1 != ss3 - assert ss1 == regular_set - assert ss1 != [1, 2, 3] # Different type - - @pytest.mark.unit - def test_build_ranges(self): - """Test the internal _build_ranges method.""" - ss = SerializableSet() - - # Empty - ranges = ss._build_ranges([]) - assert ranges == [] - - # Single value - ranges = ss._build_ranges([5]) - assert ranges == [(5, 5)] - - # Consecutive sequence - ranges = ss._build_ranges([1, 2, 3, 4, 5]) - assert ranges == [(1, 5)] - - # Multiple ranges - ranges = ss._build_ranges([1, 2, 3, 7, 8, 10]) - assert ranges == [(1, 3), (7, 8), (10, 10)] - - # Sparse values - ranges = ss._build_ranges([1, 5, 10, 20]) - assert ranges == [(1, 1), (5, 5), (10, 10), (20, 20)] - - @pytest.mark.unit - def test_varint_encoding(self): - """Test varint encoding and decoding.""" - # Test small values (1 byte) - for value in [0, 1, 127]: - encoded = SerializableSet._encode_varint(value) - decoded, offset = SerializableSet._decode_varint(encoded, 0) - assert decoded == value - assert offset == len(encoded) - - # Test medium values (2 bytes) - for value in [128, 255, 16383]: - encoded = SerializableSet._encode_varint(value) - decoded, offset = SerializableSet._decode_varint(encoded, 0) - assert decoded == value - assert offset == len(encoded) - - # Test large values - for value in [16384, 65535, 1048575]: - encoded = SerializableSet._encode_varint(value) - decoded, offset = SerializableSet._decode_varint(encoded, 0) - assert decoded == value - assert offset == len(encoded) - - @pytest.mark.unit - def test_serialization_empty(self): - """Test serialization of empty set.""" - ss = SerializableSet() - serialized = ss.serialize() - deserialized 
= SerializableSet.deserialize(serialized) - - assert ss == deserialized - assert len(deserialized) == 0 - - @pytest.mark.unit - def test_serialization_single_value(self): - """Test serialization of single value.""" - ss = SerializableSet([42]) - serialized = ss.serialize() - deserialized = SerializableSet.deserialize(serialized) - - assert ss == deserialized - assert 42 in deserialized - assert len(deserialized) == 1 - - @pytest.mark.unit - def test_serialization_consecutive_values(self): - """Test serialization with consecutive values (should prefer RLE).""" - # Large consecutive range should use run-length encoding - ss = SerializableSet(range(1, 1001)) # 1000 consecutive numbers - serialized = ss.serialize() - deserialized = SerializableSet.deserialize(serialized) - - assert ss == deserialized - assert len(deserialized) == 1000 - assert min(deserialized) == 1 - assert max(deserialized) == 1000 - - # Should be very compact (RLE encoding) - assert len(serialized) < 20 # Much smaller than 1000 * varint_size - - @pytest.mark.unit - def test_serialization_sparse_values(self): - """Test serialization with sparse values (should prefer direct list).""" - # Sparse values should use direct encoding - sparse_values = [1, 100, 1000, 10000, 100000] - ss = SerializableSet(sparse_values) - serialized = ss.serialize() - deserialized = SerializableSet.deserialize(serialized) - - assert ss == deserialized - assert sorted(deserialized) == sparse_values - - @pytest.mark.unit - def test_serialization_mixed_ranges(self): - """Test serialization with mixed consecutive and sparse values.""" - # Mix of ranges and sparse values - values = list(range(1, 11)) + list(range(50, 61)) + [100, 200, 300] - ss = SerializableSet(values) - serialized = ss.serialize() - deserialized = SerializableSet.deserialize(serialized) - - assert ss == deserialized - assert len(deserialized) == len(values) - - @pytest.mark.unit - def test_serialization_adaptive_strategy(self): - """Test that serialization chooses the most efficient strategy.""" - # Test that RLE is chosen for consecutive data - consecutive_ss = SerializableSet(range(1, 100)) - consecutive_serialized = consecutive_ss.serialize() - - # Test that direct list is chosen for sparse data - sparse_ss = SerializableSet([1, 1000, 10000, 100000, 1000000]) - sparse_serialized = sparse_ss.serialize() - - # Consecutive should be more compact - assert len(consecutive_serialized) < 50 # Very compact with RLE - - # Both should deserialize correctly - assert consecutive_ss == SerializableSet.deserialize(consecutive_serialized) - assert sparse_ss == SerializableSet.deserialize(sparse_serialized) - - @pytest.mark.unit - def test_deserialization_invalid_data(self): - """Test deserialization with invalid data.""" - # Empty data - empty_ss = SerializableSet.deserialize(b"") - assert len(empty_ss) == 0 - - # Invalid encoding type - with pytest.raises(ValueError, match="Unknown encoding type"): - SerializableSet.deserialize(bytes([99, 1, 2, 3])) - - # Incomplete varint - with pytest.raises(ValueError, match="Incomplete varint"): - SerializableSet.deserialize(bytes([1, 0xFF])) # Incomplete varint - - @pytest.mark.unit - def test_repr_and_str(self): - """Test string representations.""" - ss = SerializableSet([3, 1, 2]) - - # __repr__ should show sorted values - assert repr(ss) == "SerializableSet([1, 2, 3])" - - # __str__ should show count - assert str(ss) == "SerializableSet(3 values)" - - # Empty set - empty_ss = SerializableSet() - assert repr(empty_ss) == "SerializableSet([])" - assert 
str(empty_ss) == "SerializableSet(0 values)" - - @pytest.mark.unit - def test_copy_and_iteration(self): - """Test copy and iteration functionality.""" - original = SerializableSet([1, 2, 3, 4, 5]) - - # Copy (inherited from set) - copied = original.copy() - assert copied == original - assert copied is not original - assert isinstance(copied, SerializableSet) - - # Iteration - values = list(original) - assert sorted(values) == [1, 2, 3, 4, 5] - - # Iteration is same as set iteration - set_values = list(set([1, 2, 3, 4, 5])) - assert sorted(values) == sorted(set_values) - - @pytest.mark.unit - def test_large_dataset_performance(self): - """Test performance with larger datasets.""" - # Create a large dataset with mixed patterns - large_values = ( - list(range(1, 1000)) + # Consecutive range - list(range(10000, 10100)) + # Another consecutive range - [50000, 60000, 70000, 80000] # Sparse values - ) - - ss = SerializableSet(large_values) - serialized = ss.serialize() - deserialized = SerializableSet.deserialize(serialized) - - assert ss == deserialized - assert len(deserialized) == len(large_values) - - # Should be reasonably compact - assert len(serialized) < len(large_values) * 4 # Much better than 4 bytes per value \ No newline at end of file From 3efa8b9469f1f2d953ee945244df87f10f9564dd Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Fri, 28 Nov 2025 13:22:03 +0100 Subject: [PATCH 29/35] fix: imports in test --- tests/modules/csm/test_csm_module.py | 20 +++++++++---------- .../performance_collector/test_checkpoint.py | 6 +++--- .../test_performance_collector.py | 4 ++-- .../test_processing_attestation.py | 2 +- 4 files changed, 16 insertions(+), 16 deletions(-) diff --git a/tests/modules/csm/test_csm_module.py b/tests/modules/csm/test_csm_module.py index c29c03ccf..629f4355e 100644 --- a/tests/modules/csm/test_csm_module.py +++ b/tests/modules/csm/test_csm_module.py @@ -14,7 +14,7 @@ from src.modules.csm.state import State from src.modules.csm.tree import RewardsTree, StrikesTree from src.modules.csm.types import StrikesList -from src.modules.performance_collector.codec import ProposalDuty, SyncDuty +from src.modules.performance.common.types import ProposalDuty, SyncDuty from src.modules.submodules.oracle_module import ModuleExecuteDelay from src.modules.submodules.types import ZERO_HASH, CurrentFrame from src.providers.consensus.types import Validator, ValidatorState @@ -254,14 +254,14 @@ def test_set_epochs_range_to_collect_posts_new_demand(module: CSOracle, mock_cha module.converter = Mock(return_value=converter) module.get_epochs_range_to_process = Mock(return_value=(10, 20)) module.w3 = Mock() - module.w3.performance.get_epochs_demand = Mock(return_value={}) + module.w3.performance.get_epochs_demands = Mock(return_value={}) module.w3.performance.post_epochs_demand = Mock() module.set_epochs_range_to_collect(blockstamp) module.state.migrate.assert_called_once_with(10, 20, 4) module.state.log_progress.assert_called_once() - module.w3.performance.get_epochs_demand.assert_called_once() + module.w3.performance.get_epochs_demands.assert_called_once() module.w3.performance.post_epochs_demand.assert_called_once_with("CSOracle", 10, 20) @@ -274,7 +274,7 @@ def test_set_epochs_range_to_collect_skips_post_when_demand_same(module: CSOracl module.converter = Mock(return_value=converter) module.get_epochs_range_to_process = Mock(return_value=(10, 20)) module.w3 = Mock() - module.w3.performance.get_epochs_demand = Mock(return_value={"CSOracle": (10, 20)}) + module.w3.performance.get_epochs_demands = 
Mock(return_value={"CSOracle": (10, 20)}) module.w3.performance.post_epochs_demand = Mock() module.set_epochs_range_to_collect(blockstamp) @@ -384,11 +384,11 @@ def test_fulfill_state_handles_epoch_data(module: CSOracle, epoch_data_missing: module.w3.cc.get_validators = Mock(return_value=[validator_a, validator_b]) if epoch_data_missing: - module.w3.performance.get_epoch = Mock(return_value=None) + module.w3.performance.get_epoch_data = Mock(return_value=None) frames = [(0, 0)] unprocessed = {0} else: - module.w3.performance.get_epoch = Mock( + module.w3.performance.get_epoch_data = Mock( side_effect=[ ( {validator_a.index}, @@ -432,14 +432,14 @@ def test_fulfill_state_handles_epoch_data(module: CSOracle, epoch_data_missing: module.w3.cc.get_validators.assert_called_once_with("finalized") if epoch_data_missing: - module.w3.performance.get_epoch.assert_called_once_with(0) + module.w3.performance.get_epoch_data.assert_called_once_with(0) state.save_att_duty.assert_not_called() state.save_prop_duty.assert_not_called() state.save_sync_duty.assert_not_called() state.add_processed_epoch.assert_not_called() state.log_progress.assert_not_called() else: - module.w3.performance.get_epoch.assert_has_calls([call(0), call(1)]) + module.w3.performance.get_epoch_data.assert_has_calls([call(0), call(1)]) assert state.save_att_duty.call_args_list == [ call(EpochNumber(0), validator_a.index, included=False), call(EpochNumber(0), validator_b.index, included=True), @@ -473,7 +473,7 @@ def test_fulfill_state_raises_on_inactive_missed_attestation(module: CSOracle): module._receive_last_finalized_slot = Mock(return_value="finalized") module.w3 = Mock() module.w3.cc.get_validators = Mock(return_value=[inactive_validator]) - module.w3.performance.get_epoch = Mock(return_value=({inactive_validator.index}, [], [])) + module.w3.performance.get_epoch_data = Mock(return_value=({inactive_validator.index}, [], [])) state = Mock() state.frames = [(0, 0)] state.unprocessed_epochs = {0} @@ -487,7 +487,7 @@ def test_fulfill_state_raises_on_inactive_missed_attestation(module: CSOracle): with pytest.raises(ValueError, match="not active"): module.fulfill_state() - module.w3.performance.get_epoch.assert_called_once_with(0) + module.w3.performance.get_epoch_data.assert_called_once_with(0) state.save_att_duty.assert_not_called() state.add_processed_epoch.assert_not_called() diff --git a/tests/modules/performance_collector/test_checkpoint.py b/tests/modules/performance_collector/test_checkpoint.py index 94f02ffe2..7c165701f 100644 --- a/tests/modules/performance_collector/test_checkpoint.py +++ b/tests/modules/performance_collector/test_checkpoint.py @@ -4,9 +4,9 @@ import pytest -import src.modules.performance_collector.checkpoint as checkpoint_module +import src.modules.performance.collector.checkpoint as checkpoint_module from src.constants import EPOCHS_PER_SYNC_COMMITTEE_PERIOD -from src.modules.performance_collector.checkpoint import ( +from src.modules.performance.collector.checkpoint import ( FrameCheckpoint, FrameCheckpointProcessor, FrameCheckpointsIterator, @@ -17,7 +17,7 @@ ValidatorDuty, process_attestations, ) -from src.modules.performance_collector.db import DutiesDB +from src.modules.performance.common.db import DutiesDB from src.modules.submodules.types import ChainConfig, FrameConfig from src.providers.consensus.client import ConsensusClient from src.providers.consensus.types import BeaconSpecResponse, BlockAttestation, SlotAttestationCommittee, SyncCommittee diff --git 
a/tests/modules/performance_collector/test_performance_collector.py b/tests/modules/performance_collector/test_performance_collector.py index 0c583ef59..b5455f31c 100644 --- a/tests/modules/performance_collector/test_performance_collector.py +++ b/tests/modules/performance_collector/test_performance_collector.py @@ -1,8 +1,8 @@ import pytest from unittest.mock import Mock, patch -from src.modules.performance_collector.performance_collector import PerformanceCollector -from src.modules.performance_collector.db import DutiesDB +from src.modules.performance.collector.collector import PerformanceCollector +from src.modules.performance.common.db import DutiesDB from src.types import EpochNumber diff --git a/tests/modules/performance_collector/test_processing_attestation.py b/tests/modules/performance_collector/test_processing_attestation.py index 9e29c2d1a..79ad30873 100644 --- a/tests/modules/performance_collector/test_processing_attestation.py +++ b/tests/modules/performance_collector/test_processing_attestation.py @@ -3,7 +3,7 @@ import pytest -from src.modules.performance_collector.checkpoint import ( +from src.modules.performance.collector.checkpoint import ( get_committee_indices, hex_bitlist_to_list, hex_bitvector_to_list, From 90c790ec9d8cc552a8e8fc419a1263af0be5c92c Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Fri, 28 Nov 2025 15:46:38 +0100 Subject: [PATCH 30/35] fix: test_csm_module --- tests/modules/csm/test_csm_module.py | 131 ++++++++++++--------------- 1 file changed, 56 insertions(+), 75 deletions(-) diff --git a/tests/modules/csm/test_csm_module.py b/tests/modules/csm/test_csm_module.py index 629f4355e..ffd1b8975 100644 --- a/tests/modules/csm/test_csm_module.py +++ b/tests/modules/csm/test_csm_module.py @@ -2,7 +2,7 @@ from collections import defaultdict from dataclasses import dataclass from typing import Literal, NoReturn, Type -from unittest.mock import Mock, PropertyMock, call, patch +from unittest.mock import Mock, PropertyMock, call import pytest from hexbytes import HexBytes @@ -14,7 +14,7 @@ from src.modules.csm.state import State from src.modules.csm.tree import RewardsTree, StrikesTree from src.modules.csm.types import StrikesList -from src.modules.performance.common.types import ProposalDuty, SyncDuty +from src.modules.performance.common.db import Duty from src.modules.submodules.oracle_module import ModuleExecuteDelay from src.modules.submodules.types import ZERO_HASH, CurrentFrame from src.providers.consensus.types import Validator, ValidatorState @@ -254,14 +254,16 @@ def test_set_epochs_range_to_collect_posts_new_demand(module: CSOracle, mock_cha module.converter = Mock(return_value=converter) module.get_epochs_range_to_process = Mock(return_value=(10, 20)) module.w3 = Mock() - module.w3.performance.get_epochs_demands = Mock(return_value={}) + module.w3.performance.is_range_available = Mock(return_value=False) + module.w3.performance.get_epochs_demand = Mock(return_value={}) module.w3.performance.post_epochs_demand = Mock() module.set_epochs_range_to_collect(blockstamp) module.state.migrate.assert_called_once_with(10, 20, 4) module.state.log_progress.assert_called_once() - module.w3.performance.get_epochs_demands.assert_called_once() + module.w3.performance.is_range_available.assert_called_once_with(10, 20) + module.w3.performance.get_epochs_demand.assert_called_once() module.w3.performance.post_epochs_demand.assert_called_once_with("CSOracle", 10, 20) @@ -372,49 +374,36 @@ def test_collect_data_handles_range_availability( assert "All epochs are already 
processed. Nothing to collect" not in caplog.messages   -@pytest.mark.parametrize( -    "epoch_data_missing", [pytest.param(False, id="duties_recorded"), pytest.param(True, id="epoch_missing")] -) @pytest.mark.unit -def test_fulfill_state_handles_epoch_data(module: CSOracle, epoch_data_missing: bool): +def test_fulfill_state_handles_epoch_data(module: CSOracle): module._receive_last_finalized_slot = Mock(return_value="finalized") validator_a = make_validator(0, activation_epoch=0, exit_epoch=10) validator_b = make_validator(1, activation_epoch=0, exit_epoch=10) module.w3 = Mock() module.w3.cc.get_validators = Mock(return_value=[validator_a, validator_b]) - if epoch_data_missing: - module.w3.performance.get_epoch_data = Mock(return_value=None) - frames = [(0, 0)] - unprocessed = {0} - else: - module.w3.performance.get_epoch_data = Mock( - side_effect=[ - ( - {validator_a.index}, - [ - ProposalDuty(validator_index=int(validator_a.index), is_proposed=True), - ProposalDuty(validator_index=int(validator_b.index), is_proposed=False), - ], - [ - SyncDuty(validator_index=int(validator_a.index), missed_count=0), - SyncDuty(validator_index=int(validator_b.index), missed_count=1), - ], - ), - ( - set(), - [ - ProposalDuty(validator_index=int(validator_b.index), is_proposed=True), - ], - [ - SyncDuty(validator_index=int(validator_a.index), missed_count=2), - SyncDuty(validator_index=int(validator_b.index), missed_count=3), - ], - ), - ] - ) - frames = [(0, 1)] - unprocessed = {0, 1} + module.w3.performance.get_epoch_data = Mock( + side_effect=[ + Duty( + epoch=EpochNumber(0), + attestations=[validator_a.index], + proposals_vids=[int(validator_a.index), int(validator_b.index)], + proposals_flags=[True, False], + syncs_vids=[int(validator_a.index), int(validator_b.index)], + syncs_misses=[0, 1], + ), + Duty( + epoch=EpochNumber(1), + attestations=[], + proposals_vids=[int(validator_b.index)], + proposals_flags=[True], + syncs_vids=[int(validator_a.index), int(validator_b.index)], + syncs_misses=[2, 3], + ), + ] + ) + frames = [(0, 1)] + unprocessed = {0, 1} state = Mock() state.frames = frames @@ -431,40 +420,32 @@ def test_fulfill_state_handles_epoch_data(module: CSOracle, epoch_data_missing: module._receive_last_finalized_slot.assert_called_once() module.w3.cc.get_validators.assert_called_once_with("finalized") - if epoch_data_missing: - module.w3.performance.get_epoch_data.assert_called_once_with(0) - state.save_att_duty.assert_not_called() - state.save_prop_duty.assert_not_called() - state.save_sync_duty.assert_not_called() - state.add_processed_epoch.assert_not_called() - state.log_progress.assert_not_called() - else: - module.w3.performance.get_epoch_data.assert_has_calls([call(0), call(1)]) - assert state.save_att_duty.call_args_list == [ - call(EpochNumber(0), validator_a.index, included=False), - call(EpochNumber(0), validator_b.index, included=True), - call(EpochNumber(1), validator_a.index, included=True), - call(EpochNumber(1), validator_b.index, included=True), - ] - assert state.save_prop_duty.call_args_list == [ - call(EpochNumber(0), ValidatorIndex(int(validator_a.index)), included=True), - call(EpochNumber(0), ValidatorIndex(int(validator_b.index)), included=False), - call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=True), - ] - assert state.save_sync_duty.call_args_list == [ - call(EpochNumber(0), ValidatorIndex(int(validator_a.index)), included=True), - call(EpochNumber(0), ValidatorIndex(int(validator_b.index)), included=False), - call(EpochNumber(1), 
ValidatorIndex(int(validator_a.index)), included=False), - call(EpochNumber(1), ValidatorIndex(int(validator_a.index)), included=False), - call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), - call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), - call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), - ] - assert state.add_processed_epoch.call_args_list == [ - call(EpochNumber(0)), - call(EpochNumber(1)), - ] - assert state.log_progress.call_count == 2 + module.w3.performance.get_epoch_data.assert_has_calls([call(0), call(1)]) + assert state.save_att_duty.call_args_list == [ + call(EpochNumber(0), validator_a.index, included=False), + call(EpochNumber(0), validator_b.index, included=True), + call(EpochNumber(1), validator_a.index, included=True), + call(EpochNumber(1), validator_b.index, included=True), + ] + assert state.save_prop_duty.call_args_list == [ + call(EpochNumber(0), ValidatorIndex(int(validator_a.index)), included=True), + call(EpochNumber(0), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=True), + ] + assert state.save_sync_duty.call_args_list == [ + call(EpochNumber(0), ValidatorIndex(int(validator_a.index)), included=True), + call(EpochNumber(0), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_a.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_a.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), + ] + assert state.add_processed_epoch.call_args_list == [ + call(EpochNumber(0)), + call(EpochNumber(1)), + ] + assert state.log_progress.call_count == 2 @@ -473,7 +454,7 @@ def test_fulfill_state_raises_on_inactive_missed_attestation(module: CSOracle): module._receive_last_finalized_slot = Mock(return_value="finalized") module.w3 = Mock() module.w3.cc.get_validators = Mock(return_value=[inactive_validator]) - module.w3.performance.get_epoch_data = Mock(return_value=({inactive_validator.index}, [], [])) + module.w3.performance.get_epoch_data = Mock(return_value=Duty(epoch=EpochNumber(0), attestations=[inactive_validator.index], proposals_vids=[], proposals_flags=[], syncs_vids=[], syncs_misses=[])) state = Mock() state.frames = [(0, 0)] state.unprocessed_epochs = {0} From d95584fb0ce3e31dba26448a3cf6ecd09c7d01f3 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 2 Dec 2025 14:54:29 +0100 Subject: [PATCH 31/35] fix: linter --- src/modules/performance/collector/checkpoint.py | 6 ++---- src/modules/performance/common/types.py | 1 - src/modules/performance/web/server.py | 4 ++-- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/src/modules/performance/collector/checkpoint.py b/src/modules/performance/collector/checkpoint.py index 2c5bb9156..6d03ee1a8 100644 --- a/src/modules/performance/collector/checkpoint.py +++ b/src/modules/performance/collector/checkpoint.py @@ -33,8 +33,6 @@ type AttestationCommittees = dict[tuple[SlotNumber, CommitteeIndex], list[ValidatorIndex]] -type SyncDuties = list[SyncDuty] - class SlotOutOfRootsRange(Exception): ... 
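A note on the demand flow exercised by patch 30 above: a consumer first asks the collector whether an epoch range is already available and only registers a demand when it is not. Below is a minimal sketch of that handshake, assuming a client object that exposes the same method names as the mocked w3.performance attributes (is_range_available, get_epochs_demand, post_epochs_demand); the ensure_epochs_collected helper and the consumer-to-range mapping returned by get_epochs_demand are illustrative assumptions, not the project's API.

    def ensure_epochs_collected(performance, consumer: str, l_epoch: int, r_epoch: int) -> bool:
        # Fast path: every epoch in [l_epoch, r_epoch] is already in the duties DB.
        if performance.is_range_available(l_epoch, r_epoch):
            return True
        # Register (or refresh) a demand so the collector backfills the range,
        # skipping the POST when an identical demand is already on record.
        demands = performance.get_epochs_demand()  # assumed shape: {consumer: (l_epoch, r_epoch)}
        if demands.get(consumer) != (l_epoch, r_epoch):
            performance.post_epochs_demand(consumer, l_epoch, r_epoch)
        return False

On the collector side, define_epochs_to_process_range (patch 34 below) closes the loop: demands whose range is already in the DB are deleted, while unsatisfied demands widen the range scheduled for backfill.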
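The Duty record these tests return replaces the old (misses, proposals, syncs) tuple with one flat row per epoch: proposals and sync duties travel as parallel arrays, which is why csm.py (patch 34 below) checks that the vids and flags/misses lists stay the same length. The following sketch shows how a consumer can rebuild per-validator views from such a row; the standalone dataclass mirrors the field names used in the fixtures but is only an illustration, and unpack_duty is a hypothetical helper, not project code.

    from dataclasses import dataclass

    @dataclass
    class DutyRow:
        epoch: int
        attestations: list[int]      # validator indices that MISSED the attestation duty
        proposals_vids: list[int]    # proposer indices, parallel to proposals_flags
        proposals_flags: list[bool]  # proposed-or-not status, one flag per proposer
        syncs_vids: list[int]        # sync-committee member indices, parallel to syncs_misses
        syncs_misses: list[int]      # missed-slot count per sync-committee member

    def unpack_duty(row: DutyRow) -> tuple[set[int], dict[int, bool], dict[int, int]]:
        # Parallel arrays must stay aligned, mirroring the alignment check in csm.py.
        if len(row.proposals_vids) != len(row.proposals_flags) or len(row.syncs_vids) != len(row.syncs_misses):
            raise ValueError(f"Performance data for epoch {row.epoch} is corrupted")
        att_misses = set(row.attestations)  # set for O(1) membership checks per validator
        proposals = dict(zip(row.proposals_vids, row.proposals_flags))
        sync_misses = dict(zip(row.syncs_vids, row.syncs_misses))
        return att_misses, proposals, sync_misses

Flat integer and boolean arrays map naturally onto the ARRAY(Integer), ARRAY(Boolean) and ARRAY(SmallInteger) columns suggested by the imports in db.py later in this series, keeping storage at one row per epoch rather than one row per validator duty.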
@@ -304,11 +302,11 @@ def _prepare_attestation_duties(self, epoch: EpochNumber) -> tuple[AttestationCo {"msg": f"Sync Committee for epoch {args.epoch} prepared in {duration:.2f} seconds"} ) ) - def _prepare_sync_committee_duties(self, epoch: EpochNumber) -> SyncDuties: + def _prepare_sync_committee_duties(self, epoch: EpochNumber) -> list[SyncDuty]: with lock: sync_committee = self._get_sync_committee(epoch) - duties: SyncDuties = [] + duties: list[SyncDuty] = [] for vid in sync_committee.validators: duties.append(SyncDuty(validator_index=vid, missed_count=0)) diff --git a/src/modules/performance/common/types.py b/src/modules/performance/common/types.py index 343f18403..3941f71d8 100644 --- a/src/modules/performance/common/types.py +++ b/src/modules/performance/common/types.py @@ -16,4 +16,3 @@ class SyncDuty(BaseModel): AttDutyMisses: TypeAlias = set[ValidatorIndex] -EpochData: TypeAlias = tuple[AttDutyMisses, list[ProposalDuty], list[SyncDuty]] diff --git a/src/modules/performance/web/server.py b/src/modules/performance/web/server.py index ec20a5a6f..0c4246540 100644 --- a/src/modules/performance/web/server.py +++ b/src/modules/performance/web/server.py @@ -6,7 +6,7 @@ from uvicorn.config import LOGGING_CONFIG from src.modules.performance.common.db import DutiesDB, Duty, EpochsDemand -from src import variables +from src.variables import PERFORMANCE_WEB_SERVER_API_PORT from src.modules.performance.web.metrics import attach_metrics from src.types import EpochNumber from src.metrics.logging import JsonFormatter, handler @@ -117,6 +117,6 @@ def serve(): uvicorn.run( app, host="0.0.0.0", - port=variables.PERFORMANCE_WEB_SERVER_API_PORT, + port=PERFORMANCE_WEB_SERVER_API_PORT, log_config=logging_config, ) From f676fa0eca40884f0e6d5a01f446d8871c591b80 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 2 Dec 2025 14:54:36 +0100 Subject: [PATCH 32/35] fix: some tests --- .../performance_collector/test_checkpoint.py | 73 ++++++++----------- .../test_performance_collector.py | 37 ++++------ 2 files changed, 45 insertions(+), 65 deletions(-) diff --git a/tests/modules/performance_collector/test_checkpoint.py b/tests/modules/performance_collector/test_checkpoint.py index 7c165701f..4be093e60 100644 --- a/tests/modules/performance_collector/test_checkpoint.py +++ b/tests/modules/performance_collector/test_checkpoint.py @@ -10,14 +10,13 @@ FrameCheckpoint, FrameCheckpointProcessor, FrameCheckpointsIterator, - MinStepIsNotReached, SlotNumber, SlotOutOfRootsRange, SyncCommitteesCache, - ValidatorDuty, process_attestations, ) from src.modules.performance.common.db import DutiesDB +from src.modules.performance.common.types import AttDutyMisses, ProposalDuty, SyncDuty from src.modules.submodules.types import ChainConfig, FrameConfig from src.providers.consensus.client import ConsensusClient from src.providers.consensus.types import BeaconSpecResponse, BlockAttestation, SlotAttestationCommittee, SyncCommittee @@ -65,12 +64,6 @@ def sync_committees_cache(): yield cache -@pytest.mark.unit -def test_checkpoints_iterator_min_epoch_is_not_reached(converter): - with pytest.raises(MinStepIsNotReached): - FrameCheckpointsIterator(converter, 100, 600, 109) - - @pytest.mark.unit @pytest.mark.parametrize( "l_epoch,r_epoch,finalized_epoch,expected_checkpoints", @@ -335,10 +328,10 @@ def test_checkpoints_processor_process_attestations_undefined_committee( @pytest.fixture def frame_checkpoint_processor(): cc = Mock() - state = Mock() + db = Mock() converter = Mock() finalized_blockstamp = 
Mock(slot_number=SlotNumber(0)) - return FrameCheckpointProcessor(cc, state, converter, finalized_blockstamp) + return FrameCheckpointProcessor(cc, db, converter, finalized_blockstamp) @pytest.mark.unit @@ -348,18 +341,18 @@ def test_check_duties_processes_epoch_with_attestations_and_sync_committee(frame duty_epoch = EpochNumber(10) duty_epoch_roots = [(SlotNumber(100), Mock(spec=BlockRoot)), (SlotNumber(101), Mock(spec=BlockRoot))] next_epoch_roots = [(SlotNumber(102), Mock(spec=BlockRoot)), (SlotNumber(103), Mock(spec=BlockRoot))] - frame_checkpoint_processor._prepare_attestation_duties = Mock( - return_value={SlotNumber(100): [ValidatorDuty(1, False)]} - ) + frame_checkpoint_processor._prepare_attestation_duties = Mock(return_value={SlotNumber(100): AttDutyMisses([1])}) frame_checkpoint_processor._prepare_propose_duties = Mock( - return_value={SlotNumber(100): ValidatorDuty(1, False), SlotNumber(101): ValidatorDuty(1, False)} - ) - frame_checkpoint_processor._prepare_sync_committee_duties = Mock( return_value={ - 100: [ValidatorDuty(1, False) for _ in range(32)], - 101: [ValidatorDuty(1, False) for _ in range(32)], + SlotNumber(100): ProposalDuty(validator_index=1, is_proposed=False), + SlotNumber(101): ProposalDuty(validator_index=1, is_proposed=False), } ) + frame_checkpoint_processor._prepare_sync_committee_duties = Mock( + return_value=[ + SyncDuty(validator_index=1, missed_count=2), + ] + ) attestation = Mock() attestation.data.slot = SlotNumber(100) @@ -389,10 +382,15 @@ def test_check_duties_processes_epoch_with_no_attestations(frame_checkpoint_proc next_epoch_roots = [(SlotNumber(102), Mock(spec=BlockRoot)), (SlotNumber(103), Mock(spec=BlockRoot))] frame_checkpoint_processor._prepare_attestation_duties = Mock(return_value={}) frame_checkpoint_processor._prepare_propose_duties = Mock( - return_value={SlotNumber(100): ValidatorDuty(1, False), SlotNumber(101): ValidatorDuty(1, False)} + return_value={ + SlotNumber(100): ProposalDuty(validator_index=1, is_proposed=False), + SlotNumber(101): ProposalDuty(validator_index=1, is_proposed=False), + } ) frame_checkpoint_processor._prepare_sync_committee_duties = Mock( - return_value={100: [ValidatorDuty(1, False)], 101: [ValidatorDuty(1, False)]} + return_value=[ + SyncDuty(validator_index=1, missed_count=2), + ] ) sync_aggregate = Mock() @@ -405,7 +403,7 @@ def test_check_duties_processes_epoch_with_no_attestations(frame_checkpoint_proc checkpoint_block_roots, checkpoint_slot, duty_epoch, duty_epoch_roots, next_epoch_roots ) - frame_checkpoint_processor.db.store_epoch_from_duties.assert_called() + frame_checkpoint_processor.db.store_epoch.assert_called() @pytest.mark.unit @@ -418,18 +416,11 @@ def test_prepare_sync_committee_returns_duties_for_valid_sync_committee(frame_ch duties = frame_checkpoint_processor._prepare_sync_committee_duties(epoch, duty_block_roots) - expected_duties = { - SlotNumber(100): [ - ValidatorDuty(validator_index=1, included=False), - ValidatorDuty(validator_index=2, included=False), - ValidatorDuty(validator_index=3, included=False), - ], - SlotNumber(101): [ - ValidatorDuty(validator_index=1, included=False), - ValidatorDuty(validator_index=2, included=False), - ValidatorDuty(validator_index=3, included=False), - ], - } + expected_duties = [ + SyncDuty(validator_index=1, missed_count=2), + SyncDuty(validator_index=2, missed_count=2), + SyncDuty(validator_index=3, missed_count=2), + ] assert duties == expected_duties @@ -443,13 +434,11 @@ def 
test_prepare_sync_committee_skips_duties_for_missed_slots(frame_checkpoint_p duties = frame_checkpoint_processor._prepare_sync_committee_duties(epoch, duty_block_roots) - expected_duties = { - SlotNumber(101): [ - ValidatorDuty(validator_index=1, included=False), - ValidatorDuty(validator_index=2, included=False), - ValidatorDuty(validator_index=3, included=False), - ] - } + expected_duties = [ + SyncDuty(validator_index=1, missed_count=1), + SyncDuty(validator_index=2, missed_count=1), + SyncDuty(validator_index=3, missed_count=1), + ] assert duties == expected_duties @@ -530,8 +519,8 @@ def test_prepare_propose_duties(frame_checkpoint_processor): duties = frame_checkpoint_processor._prepare_propose_duties(epoch, checkpoint_block_roots, checkpoint_slot) expected_duties = { - SlotNumber(101): ValidatorDuty(validator_index=1, included=False), - SlotNumber(102): ValidatorDuty(validator_index=2, included=False), + SlotNumber(101): ProposalDuty(validator_index=1, is_proposed=False), + SlotNumber(102): ProposalDuty(validator_index=2, is_proposed=False), } assert duties == expected_duties diff --git a/tests/modules/performance_collector/test_performance_collector.py b/tests/modules/performance_collector/test_performance_collector.py index b5455f31c..93ac50783 100644 --- a/tests/modules/performance_collector/test_performance_collector.py +++ b/tests/modules/performance_collector/test_performance_collector.py @@ -21,13 +21,11 @@ def mock_db(): @pytest.fixture def performance_collector(mock_w3, mock_db): """Create PerformanceCollector instance with mocked dependencies""" - from pathlib import Path - mock_cache_path = Path('/tmp') - - with patch('src.modules.performance_collector.performance_collector.DutiesDB', return_value=mock_db), \ - patch('src.modules.performance_collector.performance_collector.start_performance_api_server'), \ - patch('src.modules.performance_collector.performance_collector.variables.CACHE_PATH', mock_cache_path), \ - patch('src.modules.performance_collector.performance_collector.variables.PERFORMANCE_COLLECTOR_SERVER_API_PORT', 8080): + with patch('src.modules.performance.common.db.DutiesDB', return_value=mock_db), patch( + 'src.modules.performance.web.server.serve' + ), patch( + 'src.modules.performance.web.server.PERFORMANCE_WEB_SERVER_API_PORT', 8080 + ): collector = PerformanceCollector(mock_w3) collector.db = mock_db return collector @@ -120,9 +118,7 @@ def test_unsatisfied_epochs_demand_before_db_range(self, performance_collector, mock_db.missing_epochs_in.return_value = [] # Setup epochs demand before DB range - mock_db.epochs_demand.return_value = { - 'consumer1': (20, 30) # Demand before min_epoch_in_db - } + mock_db.epochs_demand.return_value = {'consumer1': (20, 30)} # Demand before min_epoch_in_db mock_db.is_range_available.return_value = False # Unsatisfied demand result = performance_collector.define_epochs_to_process_range(finalized_epoch) @@ -143,9 +139,7 @@ def test_unsatisfied_epochs_demand_after_db_range(self, performance_collector, m mock_db.missing_epochs_in.return_value = [] # Setup epochs demand after DB range - mock_db.epochs_demand.return_value = { - 'consumer1': (95, 105) # Demand after max_epoch_in_db - } + mock_db.epochs_demand.return_value = {'consumer1': (95, 105)} # Demand after max_epoch_in_db mock_db.is_range_available.return_value = False # Unsatisfied demand result = performance_collector.define_epochs_to_process_range(finalized_epoch) @@ -165,9 +159,7 @@ def test_satisfied_epochs_demand_ignored(self, performance_collector, mock_db): 
mock_db.missing_epochs_in.return_value = [] # Setup satisfied epochs demand - mock_db.epochs_demand.return_value = { - 'consumer1': (60, 70) # Demand within DB range - } + mock_db.epochs_demand.return_value = {'consumer1': (60, 70)} # Demand within DB range mock_db.is_range_available.return_value = True # Satisfied demand result = performance_collector.define_epochs_to_process_range(finalized_epoch) @@ -189,9 +181,9 @@ def test_multiple_unsatisfied_demands(self, performance_collector, mock_db): # Setup multiple unsatisfied demands mock_db.epochs_demand.return_value = { - 'consumer1': (20, 30), # Before DB range + 'consumer1': (20, 30), # Before DB range 'consumer2': (95, 105), # After DB range - 'consumer3': (60, 70), # Within DB range (satisfied) + 'consumer3': (60, 70), # Within DB range (satisfied) } def mock_is_range_available(l_epoch, r_epoch): @@ -252,7 +244,7 @@ def test_complex_scenario_with_gap_and_demand(self, performance_collector, mock_ # Setup unsatisfied demand mock_db.epochs_demand.return_value = { - 'consumer1': (10, 20), # Before DB range + 'consumer1': (10, 20), # Before DB range } mock_db.is_range_available.return_value = False # Unsatisfied @@ -328,9 +320,9 @@ def test_overlapping_epochs_demands(self, performance_collector, mock_db): # Setup overlapping demands mock_db.epochs_demand.return_value = { - 'consumer1': (40, 60), # Before DB range - 'consumer2': (50, 70), # Overlapping with consumer1 - 'consumer3': (140, 160), # After DB range + 'consumer1': (40, 60), # Before DB range + 'consumer2': (50, 70), # Overlapping with consumer1 + 'consumer3': (140, 160), # After DB range } mock_db.is_range_available.return_value = False # All unsatisfied @@ -392,4 +384,3 @@ def test_gap_at_end_of_db_range(self, performance_collector, mock_db): assert result[0] == EpochNumber(88) # End epoch should be max_available = max(0, 100 - 2) = 98 assert result[1] == EpochNumber(98) - From 9c1c902c96a7219c0ddc16e7370d5f68b9dccc4f Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Tue, 2 Dec 2025 14:54:58 +0100 Subject: [PATCH 33/35] fix: fork tests --- tests/fork/test_csm_oracle_cycle.py | 65 ++++++++++++++++++++++++----- 1 file changed, 54 insertions(+), 11 deletions(-) diff --git a/tests/fork/test_csm_oracle_cycle.py b/tests/fork/test_csm_oracle_cycle.py index 2732772d8..aab43efd4 100644 --- a/tests/fork/test_csm_oracle_cycle.py +++ b/tests/fork/test_csm_oracle_cycle.py @@ -1,8 +1,9 @@ +from threading import Thread + import pytest -from src import variables from src.modules.csm.csm import CSOracle -from src.modules.performance_collector.performance_collector import PerformanceCollector +from src.modules.performance.collector.collector import PerformanceCollector from src.modules.submodules.types import FrameConfig from src.utils.range import sequence from src.web3py.types import Web3 @@ -21,28 +22,69 @@ def csm_module(web3: Web3): @pytest.fixture() -def performance_collector(web3: Web3, frame_config: FrameConfig): +def performance_local_db(testrun_path): + from unittest.mock import patch + from pathlib import Path + from sqlmodel import create_engine + from sqlalchemy import JSON + from src.modules.performance.common.db import Duty + + def mock_get_database_url(self): + db_path = Path(testrun_path) / "test_duties.db" + return f"sqlite:///{db_path}" + + def mock_init(self): + self.engine = create_engine( + self._get_database_url(), + echo=False + ) + self._setup_database() + + table = Duty.__table__ + for col_name in ("attestations", "proposals_vids", "proposals_flags", "syncs_vids", 
"syncs_misses"): + if col_name in table.c: + table.c[col_name].type = JSON() + + with patch('src.modules.performance.common.db.DutiesDB._get_database_url', mock_get_database_url): + with patch('src.modules.performance.common.db.DutiesDB.__init__', mock_init): + yield + + +@pytest.fixture() +def performance_collector(performance_local_db, web3: Web3, frame_config: FrameConfig): yield PerformanceCollector(web3) +@pytest.fixture() +def performance_web_server(performance_local_db): + from src.modules.performance.web.server import serve + Thread(target=serve, daemon=True).start() + yield + + +@pytest.fixture +def cycle_iterations(): + return 4 + + @pytest.fixture -def start_before_initial_epoch(frame_config: FrameConfig): +def start_before_initial_epoch(frame_config: FrameConfig, cycle_iterations): _from = frame_config.initial_epoch - 1 - _to = frame_config.initial_epoch + 4 + _to = frame_config.initial_epoch + cycle_iterations return [first_slot_of_epoch(i) for i in sequence(_from, _to)] @pytest.fixture -def start_after_initial_epoch(frame_config: FrameConfig): +def start_after_initial_epoch(frame_config: FrameConfig, cycle_iterations): _from = frame_config.initial_epoch + 1 - _to = frame_config.initial_epoch + 4 + _to = frame_config.initial_epoch + cycle_iterations return [first_slot_of_epoch(i) for i in sequence(_from, _to)] @pytest.fixture -def missed_initial_frame(frame_config: FrameConfig): +def missed_initial_frame(frame_config: FrameConfig, cycle_iterations): _from = frame_config.initial_epoch + frame_config.epochs_per_frame + 1 - _to = _from + 4 + _to = _from + cycle_iterations return [first_slot_of_epoch(i) for i in sequence(_from, _to)] @@ -57,7 +99,9 @@ def missed_initial_frame(frame_config: FrameConfig): [start_before_initial_epoch, start_after_initial_epoch, missed_initial_frame], indirect=True, ) -def test_csm_module_report(performance_collector, module, set_oracle_members, running_finalized_slots, account_from): +def test_csm_module_report( + performance_web_server, performance_collector, module, set_oracle_members, running_finalized_slots, account_from +): assert module.report_contract.get_last_processing_ref_slot() == 0, "Last processing ref slot should be 0" members = set_oracle_members(count=2) @@ -75,7 +119,6 @@ def test_csm_module_report(performance_collector, module, set_oracle_members, ru report_frame = module.get_initial_or_current_frame( module._receive_last_finalized_slot() # pylint: disable=protected-access ) - # NOTE: Patch the var to bypass `FrameCheckpointsIterator.MIN_CHECKPOINT_STEP` last_processing_after_report = module.w3.csm.oracle.get_last_processing_ref_slot() assert ( From 53aac869061694de8a45f39adddf682c66826eb8 Mon Sep 17 00:00:00 2001 From: vgorkavenko Date: Thu, 11 Dec 2025 15:24:51 +0100 Subject: [PATCH 34/35] fix: review --- src/main.py | 2 + src/modules/csm/csm.py | 26 +++-- src/modules/csm/state.py | 6 +- .../performance/collector/checkpoint.py | 19 ++-- .../performance/collector/collector.py | 23 +++-- src/modules/performance/common/db.py | 49 ++++++---- src/modules/performance/web/middleware.py | 22 +++++ src/modules/performance/web/server.py | 97 ++++++++++++------- src/variables.py | 36 ++++++- 9 files changed, 196 insertions(+), 84 deletions(-) create mode 100644 src/modules/performance/web/middleware.py diff --git a/src/main.py b/src/main.py index df9fe4866..0a059f88f 100644 --- a/src/main.py +++ b/src/main.py @@ -201,6 +201,8 @@ def ipfs_providers() -> Iterator[IPFSProvider]: if module is OracleModule.PERFORMANCE_WEB_SERVER: from 
src.modules.performance.web.server import serve + errors = variables.check_perf_web_server_required_variables() + variables.raise_from_errors(errors) logger.info({'msg': f'Starting Performance Web Server on port {variables.PERFORMANCE_WEB_SERVER_API_PORT}'}) sys.exit(serve()) diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py index f93dbadfb..301996545 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -139,6 +139,8 @@ def set_epochs_range_to_collect(self, blockstamp: BlockStamp): def collect_data(self) -> bool: logger.info({"msg": "Collecting data for the report from Performance Collector"}) + self.state.ensure_initialized() + if not self.state.is_fulfilled: for l_epoch, r_epoch in self.state.frames: is_data_range_available = self.w3.performance.is_range_available( @@ -235,6 +237,8 @@ def fulfill_state(self): finalized_blockstamp = self._receive_last_finalized_slot() validators = self.w3.cc.get_validators(finalized_blockstamp) + self.state.ensure_initialized() + logger.info({ "msg": "Starting state fulfillment", "total_frames": len(self.state.frames), @@ -262,12 +266,18 @@ def fulfill_state(self): if epoch_data is None: raise ValueError(f"Epoch {epoch} is missing in Performance Collector") - misses, props_vids, props_flags, syncs_vids, syncs_misses = ( - epoch_data.attestations, - epoch_data.proposals_vids, - epoch_data.proposals_flags, - epoch_data.syncs_vids, - epoch_data.syncs_misses + ( + misses_raw, + props_vids, + props_flags, + syncs_vids, + syncs_misses, + ) = ( + [ValidatorIndex(vid) for vid in epoch_data.attestations], + [ValidatorIndex(vid) for vid in epoch_data.proposals_vids], + epoch_data.proposals_flags, # proposed or not status + [ValidatorIndex(vid) for vid in epoch_data.syncs_vids], + epoch_data.syncs_misses, # count of missed blocks in sync duties ) if len(props_vids) != len(props_flags) or len(syncs_vids) != len(syncs_misses): @@ -276,12 +286,12 @@ def fulfill_state(self): logger.info({ "msg": "Performance data received", "epoch": epoch, - "misses_count": len(misses), + "misses_count": len(misses_raw), "proposals_count": len(props_vids), "sync_duties_count": len(syncs_vids) }) - misses = set(misses) + misses = set(misses_raw) for validator in validators: missed_att = validator.index in misses included_att = validator.index not in misses diff --git a/src/modules/csm/state.py b/src/modules/csm/state.py index 65cd2e9e1..6f183f4b2 100644 --- a/src/modules/csm/state.py +++ b/src/modules/csm/state.py @@ -135,6 +135,10 @@ def buffer(self) -> Path: def is_empty(self) -> bool: return not self.data and not self._epochs_to_process and not self._processed_epochs + def ensure_initialized(self) -> None: + if self.is_empty or not self._epochs_to_process or not self.frames: + raise InvalidState("State is not initialized; call migrate() before processing") + @property def frames(self) -> list[Frame]: return list(self.data.keys()) @@ -142,7 +146,7 @@ def frames(self) -> list[Frame]: @property def unprocessed_epochs(self) -> set[EpochNumber]: if not self._epochs_to_process: - raise ValueError("Epochs to process are not set") + raise InvalidState("Epochs to process are not set; call migrate() before processing") diff = set(self._epochs_to_process) - self._processed_epochs return diff diff --git a/src/modules/performance/collector/checkpoint.py b/src/modules/performance/collector/checkpoint.py index 6d03ee1a8..007eb2a2c 100644 --- a/src/modules/performance/collector/checkpoint.py +++ b/src/modules/performance/collector/checkpoint.py @@ -179,7 +179,7 @@ def 
_get_block_roots(self, checkpoint_slot: SlotNumber): if is_pivot_missing: br[pivot_index] = None - logger.debug({ + logger.info({ 'msg': 'Block roots analysis', 'total_roots': len(br), 'missing_roots_count': br.count(None), @@ -269,8 +269,8 @@ def _check_duties( attestations, sync_aggregate = self.cc.get_block_attestations_and_sync(root) if (slot, root) in duty_epoch_roots: propose_duties[slot].is_proposed = True - process_sync(sync_aggregate, sync_duties) - process_attestations(attestations, att_committees, att_misses) + sync_duties = process_sync(sync_aggregate, sync_duties) + att_misses = process_attestations(attestations, att_committees, att_misses) propose_duties = list(propose_duties.values()) if len(propose_duties) > self.converter.chain_config.slots_per_epoch: @@ -391,19 +391,20 @@ def _get_dependent_root_for_proposer_duties( def process_sync( sync_aggregate: SyncAggregate, sync_duties: list[SyncDuty] -) -> None: +) -> list[SyncDuty]: # Spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#syncaggregate sync_bits = hex_bitvector_to_list(sync_aggregate.sync_committee_bits) - # Go through only UNSET indexes to get misses + # No need to process set bits because they mean that validator has participated successfully. for index_in_committee in get_unset_indices(sync_bits): sync_duties[index_in_committee].missed_count += 1 + return sync_duties def process_attestations( attestations: Iterable[BlockAttestation], committees: AttestationCommittees, misses: AttDutyMisses, -) -> None: +) -> AttDutyMisses: for attestation in attestations: committee_offset = 0 att_bits = hex_bitlist_to_list(attestation.aggregation_bits) @@ -415,13 +416,15 @@ def process_attestations( # We already checked that before or check in next epoch processing. continue att_committee_bits = att_bits[committee_offset:][: len(committee)] - # We can't use unset indices because the committee can attest partially in different blocks. - # If some part of the committee attested block X, their bits in block Y might be unset. + # Treat only set bits as reliable because committees can attest in multiple blocks. + # Unset bits do not necessarily mean a miss: when a committee was partially aggregated in + # an earlier block, the later block may legitimately keep those positions unset. 
for index_in_committee in get_set_indices(att_committee_bits): vid = committee[index_in_committee] if vid in misses: misses.remove(vid) committee_offset += len(committee) + return misses def get_committee_indices(attestation: BlockAttestation) -> list[CommitteeIndex]: diff --git a/src/modules/performance/collector/collector.py b/src/modules/performance/collector/collector.py index 974764551..0c49fb892 100644 --- a/src/modules/performance/collector/collector.py +++ b/src/modules/performance/collector/collector.py @@ -1,5 +1,6 @@ import logging +from src import variables from src.modules.performance.collector.checkpoint import ( FrameCheckpointsIterator, FrameCheckpointProcessor, @@ -22,7 +23,10 @@ class PerformanceCollector(BaseModule): def __init__(self, w3): super().__init__(w3) - self.db = DutiesDB() + self.db = DutiesDB( + connect_timeout=variables.PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT, + statement_timeout_ms=variables.PERFORMANCE_COLLECTOR_DB_STATEMENT_TIMEOUT_MS, + ) self.last_epochs_demand_update = self.get_epochs_demand_max_updated_at() def refresh_contracts(self): @@ -115,14 +119,17 @@ def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[ }) # Remove from the DB just in case self.db.delete_demand(demand.consumer) + # There is no sense to lower start_epoch because the demand is already satisfied (data is in the DB) + continue start_epoch = min(start_epoch, demand.l_epoch) missing_epochs = self.db.missing_epochs_in(start_epoch, end_epoch) - if missing_epochs: - start_epoch = min(missing_epochs) - else: - # Start from the next epoch after the last epoch in the DB. + if not missing_epochs: + if max_epoch_in_db is None: + raise ValueError("No missing epochs found but the DB is empty. Probably a logic error or corrupted DB.") start_epoch = EpochNumber(max_epoch_in_db + 1) + else: + start_epoch = min(missing_epochs) log_meta_info = { "start_epoch": start_epoch, @@ -151,7 +158,5 @@ def new_epochs_range_demand_appeared(self) -> bool: return False def get_epochs_demand_max_updated_at(self) -> int: - max_updated_at = 0 - for demand in self.db.get_epochs_demands(): - max_updated_at = max(max_updated_at, demand.updated_at) - return max_updated_at + max_updated_at = self.db.get_epochs_demands_max_updated_at() + return int(max_updated_at) if max_updated_at is not None else 0 diff --git a/src/modules/performance/common/db.py b/src/modules/performance/common/db.py index a3c435788..222bb12de 100644 --- a/src/modules/performance/common/db.py +++ b/src/modules/performance/common/db.py @@ -1,8 +1,8 @@ -from typing import Sequence from time import time +from typing import Sequence -from sqlalchemy import Column, Integer, Boolean, SmallInteger, ARRAY -from sqlmodel import SQLModel, Field, create_engine, Session, select +from sqlalchemy import ARRAY, Boolean, Column, Integer, SmallInteger, delete, func, text +from sqlmodel import SQLModel, Field, Session, create_engine, select from src import variables from src.modules.performance.common.types import ProposalDuty, SyncDuty, AttDutyMisses @@ -31,16 +31,21 @@ class EpochsDemand(SQLModel, table=True): class DutiesDB: - def __init__(self): - self.engine = create_engine( + def __init__(self, *, connect_timeout: int | None = None, statement_timeout_ms: int | None = None): + self._statement_timeout_ms = statement_timeout_ms + self.engine = self._build_engine(connect_timeout) + self._setup_database() + + def _build_engine(self, connect_timeout: int | None): + return create_engine( self._get_database_url(), echo=False, - 
pool_pre_ping=True, # Enable connection health checks~ + pool_pre_ping=True, # Enable connection health checks pool_recycle=3600, # Recycle connections every hour - pool_size=10, # Connection pool size - max_overflow=20, # Maximum overflow connections + pool_size=10, + max_overflow=20, + connect_args={"connect_timeout": connect_timeout} if connect_timeout else {}, ) - self._setup_database() @staticmethod def _get_database_url() -> str: @@ -56,7 +61,10 @@ def _setup_database(self): SQLModel.metadata.create_all(self.engine) def get_session(self) -> Session: - return Session(self.engine) + session = Session(self.engine) + if self._statement_timeout_ms: + session.exec(text("SET LOCAL statement_timeout = :timeout"), {"timeout": self._statement_timeout_ms}) + return session def store_demand(self, consumer: str, l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: with self.get_session() as session: @@ -129,9 +137,7 @@ def _auto_prune(self, current_epoch: EpochNumber) -> None: return with self.get_session() as session: - duties_to_delete = session.exec(select(Duty).where(Duty.epoch < threshold)).all() - for duty in duties_to_delete: - session.delete(duty) + session.exec(delete(Duty).where(Duty.epoch < threshold)) session.commit() def is_range_available(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> bool: @@ -139,8 +145,9 @@ def is_range_available(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> bool raise ValueError("Invalid epoch range") with self.get_session() as session: - count = session.exec(select(Duty).where(Duty.epoch >= l_epoch, Duty.epoch <= r_epoch)).all() - return len(count) == (r_epoch - l_epoch + 1) + stmt = select(func.count()).select_from(Duty).where(Duty.epoch >= l_epoch, Duty.epoch <= r_epoch) + count = session.exec(stmt).scalar_one() + return count == (r_epoch - l_epoch + 1) def missing_epochs_in(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> list[EpochNumber]: if l_epoch > r_epoch: @@ -150,13 +157,9 @@ def missing_epochs_in(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> list[ present_duties = session.exec( select(Duty.epoch).where(Duty.epoch >= l_epoch, Duty.epoch <= r_epoch).order_by(Duty.epoch) ).all() - present = [int(epoch) for epoch in present_duties] + present = {EpochNumber(int(epoch)) for epoch in present_duties} - missing = [] - for epoch in sequence(l_epoch, r_epoch): - if epoch not in present: - missing.append(epoch) - return missing + return [epoch for epoch in sequence(l_epoch, r_epoch) if epoch not in present] def get_epochs_data(self, from_epoch: EpochNumber, to_epoch: EpochNumber) -> Sequence[Duty]: with self.get_session() as session: @@ -187,3 +190,7 @@ def get_epochs_demand(self, consumer: str) -> EpochsDemand | None: def get_epochs_demands(self) -> Sequence[EpochsDemand]: with self.get_session() as session: return session.exec(select(EpochsDemand)).all() + + def get_epochs_demands_max_updated_at(self) -> int | None: + with self.get_session() as session: + return session.exec(select(func.max(EpochsDemand.updated_at))).scalar_one() diff --git a/src/modules/performance/web/middleware.py b/src/modules/performance/web/middleware.py new file mode 100644 index 000000000..aeae7a072 --- /dev/null +++ b/src/modules/performance/web/middleware.py @@ -0,0 +1,22 @@ +from anyio import fail_after, TooSlowError +from fastapi import FastAPI +from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint +from starlette.responses import JSONResponse + + +class RequestTimeoutMiddleware(BaseHTTPMiddleware): + """Bounds total request 
processing time.""" + + def __init__(self, app: FastAPI, timeout: float): + super().__init__(app) + self.timeout = timeout + + async def dispatch(self, request, call_next: RequestResponseEndpoint): # type: ignore[override] + try: + async with fail_after(self.timeout): + return await call_next(request) + except TooSlowError: + return JSONResponse( + {"detail": f"Request timed out after {self.timeout} seconds"}, + status_code=504, + ) diff --git a/src/modules/performance/web/server.py b/src/modules/performance/web/server.py index 0c4246540..c582737e2 100644 --- a/src/modules/performance/web/server.py +++ b/src/modules/performance/web/server.py @@ -1,15 +1,24 @@ -from typing import Optional -import logging +from typing import cast +from contextlib import asynccontextmanager + from fastapi import FastAPI, HTTPException, Depends, Query import uvicorn from pydantic import BaseModel from uvicorn.config import LOGGING_CONFIG from src.modules.performance.common.db import DutiesDB, Duty, EpochsDemand -from src.variables import PERFORMANCE_WEB_SERVER_API_PORT +from src.modules.performance.web.middleware import RequestTimeoutMiddleware +from src.variables import ( + PERFORMANCE_WEB_SERVER_API_HOST, + PERFORMANCE_WEB_SERVER_API_PORT, + PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT, + PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS, + PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE, + PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT, +) from src.modules.performance.web.metrics import attach_metrics from src.types import EpochNumber -from src.metrics.logging import JsonFormatter, handler +from src.metrics.logging import JsonFormatter class EpochsDemandRequest(BaseModel): @@ -22,83 +31,99 @@ class HealthCheckResp(BaseModel): status: str = "ok" -app = FastAPI(title="Performance Collector API") -attach_metrics(app) +@asynccontextmanager +async def lifespan(app: FastAPI): + app.state.db = DutiesDB( + connect_timeout=PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT, + statement_timeout_ms=PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS, + ) + yield + -_db_instance: Optional[DutiesDB] = None +app = FastAPI(title="Performance Collector API", lifespan=lifespan) +attach_metrics(app) +app.add_middleware(RequestTimeoutMiddleware, timeout=PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT) -async def get_db() -> DutiesDB: - global _db_instance - if _db_instance is None: - _db_instance = DutiesDB() - return _db_instance +def get_db() -> DutiesDB: + return cast(DutiesDB, app.state.db) -async def validate_epoch_bounds(l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: +def validate_epoch_bounds(l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: if l_epoch > r_epoch: raise HTTPException(status_code=400, detail="'l_epoch' must be <= 'r_epoch'") + range_size = int(r_epoch) - int(l_epoch) + 1 + if range_size > PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE: + raise HTTPException( + status_code=400, + detail=f"Requested epoch range is too large; maximum allowed size is {PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE} epochs", + ) + + +def query_epoch_range( + from_epoch: EpochNumber = Query(..., alias="from"), + to_epoch: EpochNumber = Query(..., alias="to"), +) -> tuple[EpochNumber, EpochNumber]: + validate_epoch_bounds(from_epoch, to_epoch) + return from_epoch, to_epoch @app.get("/health", response_model=HealthCheckResp) -async def health(): +def health(): return {"status": "ok"} @app.get("/check-epochs", response_model=bool) -async def epochs_check( - from_epoch: EpochNumber = Query(..., alias="from"), - to_epoch: EpochNumber = Query(..., alias="to"), +def epochs_check( 
+ epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), db: DutiesDB = Depends(get_db), ): - await validate_epoch_bounds(from_epoch, to_epoch) - return bool(db.is_range_available(from_epoch, to_epoch)) + l_epoch, r_epoch = epoch_range + return db.is_range_available(l_epoch, r_epoch) @app.get("/missing-epochs", response_model=list[EpochNumber]) -async def epochs_missing( - from_epoch: EpochNumber = Query(..., alias="from"), - to_epoch: EpochNumber = Query(..., alias="to"), +def epochs_missing( + epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), db: DutiesDB = Depends(get_db), ): - await validate_epoch_bounds(from_epoch, to_epoch) - return db.missing_epochs_in(from_epoch, to_epoch) + l_epoch, r_epoch = epoch_range + return db.missing_epochs_in(l_epoch, r_epoch) @app.get("/epochs", response_model=list[Duty]) -async def epochs_data( - from_epoch: EpochNumber = Query(..., alias="from"), - to_epoch: EpochNumber = Query(..., alias="to"), +def epochs_data( + epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), db: DutiesDB = Depends(get_db), ): - await validate_epoch_bounds(from_epoch, to_epoch) - return db.get_epochs_data(from_epoch, to_epoch) + l_epoch, r_epoch = epoch_range + return db.get_epochs_data(l_epoch, r_epoch) @app.get("/epochs/{epoch}", response_model=Duty | None) -async def epoch_data(epoch: EpochNumber, db: DutiesDB = Depends(get_db)): +def epoch_data(epoch: EpochNumber, db: DutiesDB = Depends(get_db)): return db.get_epoch_data(epoch) @app.get("/demands", response_model=list[EpochsDemand]) -async def epochs_demands(db: DutiesDB = Depends(get_db)): +def epochs_demands(db: DutiesDB = Depends(get_db)): return db.get_epochs_demands() @app.get("/demands/{consumer}", response_model=EpochsDemand | None) -async def one_epochs_demand(consumer: str, db: DutiesDB = Depends(get_db)): +def one_epochs_demand(consumer: str, db: DutiesDB = Depends(get_db)): return db.get_epochs_demand(consumer) @app.post("/demands", response_model=EpochsDemand) -async def set_epochs_demand(demand_to_add: EpochsDemandRequest, db: DutiesDB = Depends(get_db)): - await validate_epoch_bounds(demand_to_add.l_epoch, demand_to_add.r_epoch) +def set_epochs_demand(demand_to_add: EpochsDemandRequest, db: DutiesDB = Depends(get_db)): + validate_epoch_bounds(demand_to_add.l_epoch, demand_to_add.r_epoch) db.store_demand(demand_to_add.consumer, demand_to_add.l_epoch, demand_to_add.r_epoch) return db.get_epochs_demand(demand_to_add.consumer) @app.delete("/demands", response_model=EpochsDemand) -async def delete_epochs_demand(consumer: str = Query(...), db: DutiesDB = Depends(get_db)): +def delete_epochs_demand(consumer: str = Query(...), db: DutiesDB = Depends(get_db)): to_delete = db.get_epochs_demand(consumer) if not to_delete: raise HTTPException(status_code=404, detail=f"No demand found for consumer '{consumer}'") @@ -116,7 +141,7 @@ def serve(): uvicorn.run( app, - host="0.0.0.0", + host=PERFORMANCE_WEB_SERVER_API_HOST, port=PERFORMANCE_WEB_SERVER_API_PORT, log_config=logging_config, ) diff --git a/src/variables.py b/src/variables.py index 54c98433f..76d50b6d1 100644 --- a/src/variables.py +++ b/src/variables.py @@ -106,11 +106,18 @@ # - Performance Web-server and Collector PERFORMANCE_WEB_SERVER_API_PORT: Final = int(os.getenv('PERFORMANCE_WEB_SERVER_API_PORT', 9020)) +PERFORMANCE_WEB_SERVER_API_HOST: Final = os.getenv('PERFORMANCE_WEB_SERVER_API_HOST', '0.0.0.0') PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT: Final = 
int(os.getenv('PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT', 30)) PERFORMANCE_WEB_SERVER_METRICS_PREFIX: Final = os.getenv("PERFORMANCE_WEB_SERVER_METRICS_PREFIX", "lido_performance_web") +PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS: Final = int( + os.getenv('PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS', 10_000) +) +PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE: Final = int(os.getenv('PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE', 225)) +PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT: Final = int(os.getenv('PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT', 60)) PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS', 28 * 225 * 6)) PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT', 30)) +PERFORMANCE_COLLECTOR_DB_STATEMENT_TIMEOUT_MS: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_STATEMENT_TIMEOUT_MS', 10_000)) PERFORMANCE_DB_HOST: Final = os.getenv('PERFORMANCE_DB_HOST', 'localhost') PERFORMANCE_DB_PORT: Final = int(os.getenv('PERFORMANCE_DB_PORT', 5432)) @@ -146,11 +153,33 @@ def check_uri_required_variables(): return [name for name, uri in required_uris.items() if '' in uri] +def check_performance_db_required_variables() -> list[str]: + errors = [] + if not PERFORMANCE_DB_HOST: + errors.append("PERFORMANCE_DB_HOST is empty") + if PERFORMANCE_DB_PORT <= 0: + errors.append("PERFORMANCE_DB_PORT must be positive") + if not PERFORMANCE_DB_NAME: + errors.append("PERFORMANCE_DB_NAME is empty") + if not PERFORMANCE_DB_USER: + errors.append("PERFORMANCE_DB_USER is empty") + if not PERFORMANCE_DB_PASSWORD: + errors.append("PERFORMANCE_DB_PASSWORD is empty") + return errors + + def check_perf_collector_required_variables(): + errors = check_performance_db_required_variables() required_uris = { 'CONSENSUS_CLIENT_URI': CONSENSUS_CLIENT_URI, } - return [name for name, uri in required_uris.items() if '' in uri] + errors.extend([name for name, uri in required_uris.items() if '' in uri]) + return errors + + +def check_perf_web_server_required_variables(): + errors = check_performance_db_required_variables() + return errors def raise_from_errors(errors): @@ -191,8 +220,12 @@ def raise_from_errors(errors): 'PROMETHEUS_PREFIX': PROMETHEUS_PREFIX, 'HEALTHCHECK_SERVER_PORT': HEALTHCHECK_SERVER_PORT, 'PERFORMANCE_WEB_SERVER_API_PORT': PERFORMANCE_WEB_SERVER_API_PORT, + 'PERFORMANCE_WEB_SERVER_API_HOST': PERFORMANCE_WEB_SERVER_API_HOST, 'PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT': PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT, 'PERFORMANCE_WEB_SERVER_METRICS_PREFIX': PERFORMANCE_WEB_SERVER_METRICS_PREFIX, + 'PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS': PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS, + 'PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE': PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE, + 'PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT': PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT, 'PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS': PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS, 'PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT': PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT, 'PERFORMANCE_DB_HOST': PERFORMANCE_DB_HOST, @@ -220,6 +253,7 @@ def raise_from_errors(errors): 'STORACHA_SPACE_DID': STORACHA_SPACE_DID, 'LIDO_IPFS_HOST': LIDO_IPFS_HOST, 'LIDO_IPFS_TOKEN': LIDO_IPFS_TOKEN, + 'PERFORMANCE_DB_PASSWORD': PERFORMANCE_DB_PASSWORD, 'PINATA_DEDICATED_GATEWAY_TOKEN': PINATA_DEDICATED_GATEWAY_TOKEN, 'MEMBER_PRIV_KEY': MEMBER_PRIV_KEY, 'OPSGENIE_API_KEY': OPSGENIE_API_KEY, From a8732ebbed0b879f0540e36e1b7039e81423d4dc Mon Sep 17 00:00:00 2001 From: 
vgorkavenko Date: Fri, 12 Dec 2025 16:21:39 +0100 Subject: [PATCH 35/35] fix: errors --- src/modules/performance/common/db.py | 16 ++++++++++------ src/modules/performance/web/middleware.py | 6 +++--- src/modules/performance/web/server.py | 22 +++++++++++++--------- 3 files changed, 26 insertions(+), 18 deletions(-) diff --git a/src/modules/performance/common/db.py b/src/modules/performance/common/db.py index 222bb12de..ac8933ac4 100644 --- a/src/modules/performance/common/db.py +++ b/src/modules/performance/common/db.py @@ -1,7 +1,7 @@ from time import time from typing import Sequence -from sqlalchemy import ARRAY, Boolean, Column, Integer, SmallInteger, delete, func, text +from sqlalchemy import ARRAY, Boolean, Column, Integer, SmallInteger, delete, func from sqlmodel import SQLModel, Field, Session, create_engine, select from src import variables @@ -37,6 +37,12 @@ def __init__(self, *, connect_timeout: int | None = None, statement_timeout_ms: self._setup_database() def _build_engine(self, connect_timeout: int | None): + connect_args = {} + if connect_timeout: + connect_args["connect_timeout"] = connect_timeout + if self._statement_timeout_ms: + connect_args["options"] = f"-c statement_timeout={self._statement_timeout_ms}" + return create_engine( self._get_database_url(), echo=False, @@ -44,7 +50,7 @@ def _build_engine(self, connect_timeout: int | None): pool_recycle=3600, # Recycle connections every hour pool_size=10, max_overflow=20, - connect_args={"connect_timeout": connect_timeout} if connect_timeout else {}, + connect_args=connect_args, ) @staticmethod @@ -62,8 +68,6 @@ def _setup_database(self): def get_session(self) -> Session: session = Session(self.engine) - if self._statement_timeout_ms: - session.exec(text("SET LOCAL statement_timeout = :timeout"), {"timeout": self._statement_timeout_ms}) return session def store_demand(self, consumer: str, l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: @@ -146,7 +150,7 @@ def is_range_available(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> bool with self.get_session() as session: stmt = select(func.count()).select_from(Duty).where(Duty.epoch >= l_epoch, Duty.epoch <= r_epoch) - count = session.exec(stmt).scalar_one() + count = session.exec(stmt).one() return count == (r_epoch - l_epoch + 1) def missing_epochs_in(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> list[EpochNumber]: @@ -193,4 +197,4 @@ def get_epochs_demands(self) -> Sequence[EpochsDemand]: def get_epochs_demands_max_updated_at(self) -> int | None: with self.get_session() as session: - return session.exec(select(func.max(EpochsDemand.updated_at))).scalar_one() + return session.exec(select(func.max(EpochsDemand.updated_at))).one() diff --git a/src/modules/performance/web/middleware.py b/src/modules/performance/web/middleware.py index aeae7a072..aba6ad159 100644 --- a/src/modules/performance/web/middleware.py +++ b/src/modules/performance/web/middleware.py @@ -1,4 +1,4 @@ -from anyio import fail_after, TooSlowError +from anyio import fail_after from fastapi import FastAPI from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint from starlette.responses import JSONResponse @@ -13,9 +13,9 @@ def __init__(self, app: FastAPI, timeout: float): async def dispatch(self, request, call_next: RequestResponseEndpoint): # type: ignore[override] try: - async with fail_after(self.timeout): + with fail_after(self.timeout): return await call_next(request) - except TooSlowError: + except TimeoutError: return JSONResponse( {"detail": f"Request timed out 
after {self.timeout} seconds"}, status_code=504, diff --git a/src/modules/performance/web/server.py b/src/modules/performance/web/server.py index c582737e2..308a9a1f8 100644 --- a/src/modules/performance/web/server.py +++ b/src/modules/performance/web/server.py @@ -52,6 +52,9 @@ def get_db() -> DutiesDB: def validate_epoch_bounds(l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: if l_epoch > r_epoch: raise HTTPException(status_code=400, detail="'l_epoch' must be <= 'r_epoch'") + + +def validate_range_size(l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: range_size = int(r_epoch) - int(l_epoch) + 1 if range_size > PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE: raise HTTPException( @@ -61,8 +64,8 @@ def validate_epoch_bounds(l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: def query_epoch_range( - from_epoch: EpochNumber = Query(..., alias="from"), - to_epoch: EpochNumber = Query(..., alias="to"), + from_epoch: EpochNumber = Query(..., alias="from"), + to_epoch: EpochNumber = Query(..., alias="to"), ) -> tuple[EpochNumber, EpochNumber]: validate_epoch_bounds(from_epoch, to_epoch) return from_epoch, to_epoch @@ -75,8 +78,8 @@ def health(): @app.get("/check-epochs", response_model=bool) def epochs_check( - epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), - db: DutiesDB = Depends(get_db), + epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), + db: DutiesDB = Depends(get_db), ): l_epoch, r_epoch = epoch_range return db.is_range_available(l_epoch, r_epoch) @@ -84,8 +87,8 @@ def epochs_check( @app.get("/missing-epochs", response_model=list[EpochNumber]) def epochs_missing( - epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), - db: DutiesDB = Depends(get_db), + epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), + db: DutiesDB = Depends(get_db), ): l_epoch, r_epoch = epoch_range return db.missing_epochs_in(l_epoch, r_epoch) @@ -93,10 +96,11 @@ def epochs_missing( @app.get("/epochs", response_model=list[Duty]) def epochs_data( - epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), - db: DutiesDB = Depends(get_db), + epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), + db: DutiesDB = Depends(get_db), ): l_epoch, r_epoch = epoch_range + validate_range_size(l_epoch, r_epoch) return db.get_epochs_data(l_epoch, r_epoch) @@ -138,7 +142,7 @@ def serve(): logging_config["formatters"][formatter_name] = { "()": JsonFormatter, } - + uvicorn.run( app, host=PERFORMANCE_WEB_SERVER_API_HOST,