diff --git a/poetry.lock b/poetry.lock index d9496569f..869f4f603 100644 --- a/poetry.lock +++ b/poetry.lock @@ -1,4 +1,4 @@ -# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand. +# This file is automatically @generated by Poetry 2.2.1 and should not be changed by hand. [[package]] name = "aiohappyeyeballs" @@ -118,7 +118,7 @@ propcache = ">=0.2.0" yarl = ">=1.17.0,<2.0" [package.extras] -speedups = ["Brotli", "aiodns (>=3.3.0)", "brotlicffi"] +speedups = ["Brotli ; platform_python_implementation == \"CPython\"", "aiodns (>=3.3.0)", "brotlicffi ; platform_python_implementation != \"CPython\""] [[package]] name = "aiosignal" @@ -135,6 +135,18 @@ files = [ [package.dependencies] frozenlist = ">=1.1.0" +[[package]] +name = "annotated-doc" +version = "0.0.4" +description = "Document parameters, class attributes, return types, and variables inline, with Annotated." +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "annotated_doc-0.0.4-py3-none-any.whl", hash = "sha256:571ac1dc6991c450b25a9c2d84a3705e2ae7a53467b5d111c24fa8baabbed320"}, + {file = "annotated_doc-0.0.4.tar.gz", hash = "sha256:fbcda96e87e9c92ad167c2e53839e57503ecfda18804ea28102353485033faa4"}, +] + [[package]] name = "annotated-types" version = "0.7.0" @@ -147,6 +159,26 @@ files = [ {file = "annotated_types-0.7.0.tar.gz", hash = "sha256:aff07c09a53a08bc8cfccb9c85b05f1aa9a2a6f23728d790723543408344ce89"}, ] +[[package]] +name = "anyio" +version = "4.11.0" +description = "High-level concurrency and networking framework on top of asyncio or Trio" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "anyio-4.11.0-py3-none-any.whl", hash = "sha256:0287e96f4d26d4149305414d4e3bc32f0dcd0862365a4bddea19d7a1ec38c4fc"}, + {file = "anyio-4.11.0.tar.gz", hash = "sha256:82a8d0b81e318cc5ce71a5f1f8b5c4e63619620b63141ef8c995fa0db95a57c4"}, +] + +[package.dependencies] +idna = ">=2.8" +sniffio = ">=1.1" +typing_extensions = 
{version = ">=4.5", markers = "python_version < \"3.13\""} + +[package.extras] +trio = ["trio (>=0.31.0)"] + [[package]] name = "astroid" version = "3.3.10" @@ -188,12 +220,12 @@ files = [ ] [package.extras] -benchmark = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -cov = ["cloudpickle", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] -dev = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] +benchmark = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-codspeed", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +cov = ["cloudpickle ; platform_python_implementation == \"CPython\"", "coverage[toml] (>=5.3)", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +dev = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pre-commit-uv", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] docs = ["cogapp", "furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier"] -tests = ["cloudpickle", "hypothesis", "mypy (>=1.11.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", 
"pytest-xdist[psutil]"] -tests-mypy = ["mypy (>=1.11.1)", "pytest-mypy-plugins"] +tests = ["cloudpickle ; platform_python_implementation == \"CPython\"", "hypothesis", "mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-xdist[psutil]"] +tests-mypy = ["mypy (>=1.11.1) ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\"", "pytest-mypy-plugins ; platform_python_implementation == \"CPython\" and python_version >= \"3.10\""] [[package]] name = "base58" @@ -660,7 +692,7 @@ version = "8.2.1" description = "Composable command line interface toolkit" optional = false python-versions = ">=3.10" -groups = ["dev"] +groups = ["main", "dev"] files = [ {file = "click-8.2.1-py3-none-any.whl", hash = "sha256:61a3265b914e850b85317d0b3109c7f8cd35a670f963866005d6ef1d5175a12b"}, {file = "click-8.2.1.tar.gz", hash = "sha256:27c491cc05d968d271d5a1db13e3b5a184636d9d930f148c50b038f0d0646202"}, @@ -676,11 +708,11 @@ description = "Cross-platform colored terminal text." 
optional = false python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" groups = ["main", "dev"] +markers = "sys_platform == \"win32\" or platform_system == \"Windows\"" files = [ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, ] -markers = {main = "sys_platform == \"win32\"", dev = "sys_platform == \"win32\" or platform_system == \"Windows\""} [[package]] name = "conventional-pre-commit" @@ -775,7 +807,7 @@ files = [ ] [package.extras] -toml = ["tomli"] +toml = ["tomli ; python_full_version <= \"3.11.0a6\""] [[package]] name = "cytoolz" @@ -1022,7 +1054,7 @@ pycryptodome = {version = ">=3.6.6,<4", optional = true, markers = "extra == \"p dev = ["build (>=0.9.0)", "bump_my_version (>=0.19.0)", "ipython", "mypy (==1.10.0)", "pre-commit (>=3.4.0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.4.0)", "sphinx (>=6.0.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx_rtd_theme (>=1.0.0)", "towncrier (>=24,<25)", "tox (>=4.0.0)", "twine", "wheel"] docs = ["sphinx (>=6.0.0)", "sphinx-autobuild (>=2021.3.14)", "sphinx_rtd_theme (>=1.0.0)", "towncrier (>=24,<25)"] pycryptodome = ["pycryptodome (>=3.6.6,<4)"] -pysha3 = ["pysha3 (>=1.0.0,<2.0.0)", "safe-pysha3 (>=1.0.0)"] +pysha3 = ["pysha3 (>=1.0.0,<2.0.0) ; python_version < \"3.9\"", "safe-pysha3 (>=1.0.0) ; python_version >= \"3.9\""] test = ["pytest (>=7.0.0)", "pytest-xdist (>=2.4.0)"] [[package]] @@ -1112,10 +1144,10 @@ rlp = ">=3.0.0" semantic_version = ">=2.6.0" [package.extras] -dev = ["build (>=0.9.0)", "bump_my_version (>=0.19.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pysha3] (>=0.1.4,<1.0.0)", "ipython", "pre-commit (>=3.4.0)", "py-evm (>=0.10.0b0,<0.11.0b0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.0.0,<3)", "towncrier (>=24,<25)", "tox 
(>=4.0.0)", "twine", "wheel"] +dev = ["build (>=0.9.0)", "bump_my_version (>=0.19.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pycryptodome] (>=0.1.4,<1.0.0) ; implementation_name == \"pypy\"", "eth-hash[pysha3] (>=0.1.4,<1.0.0) ; implementation_name == \"cpython\"", "ipython", "pre-commit (>=3.4.0)", "py-evm (>=0.10.0b0,<0.11.0b0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.0.0,<3)", "towncrier (>=24,<25)", "tox (>=4.0.0)", "twine", "wheel"] docs = ["towncrier (>=24,<25)"] -py-evm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pysha3] (>=0.1.4,<1.0.0)", "py-evm (>=0.10.0b0,<0.11.0b0)"] -pyevm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "eth-hash[pysha3] (>=0.1.4,<1.0.0)", "py-evm (>=0.10.0b0,<0.11.0b0)"] +py-evm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0) ; implementation_name == \"pypy\"", "eth-hash[pysha3] (>=0.1.4,<1.0.0) ; implementation_name == \"cpython\"", "py-evm (>=0.10.0b0,<0.11.0b0)"] +pyevm = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0) ; implementation_name == \"pypy\"", "eth-hash[pysha3] (>=0.1.4,<1.0.0) ; implementation_name == \"cpython\"", "py-evm (>=0.10.0b0,<0.11.0b0)"] test = ["eth-hash[pycryptodome] (>=0.1.4,<1.0.0)", "pytest (>=7.0.0)", "pytest-xdist (>=2.0.0,<3)"] [[package]] @@ -1190,7 +1222,7 @@ files = [ ] [package.extras] -tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich"] +tests = ["asttokens (>=2.1.0)", "coverage", "coverage-enable-subprocess", "ipython", "littleutils", "pytest", "rich ; python_version >= \"3.11\""] [[package]] name = "faker" @@ -1207,6 +1239,29 @@ files = [ [package.dependencies] tzdata = "*" +[[package]] +name = "fastapi" +version = "0.121.3" +description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "fastapi-0.121.3-py3-none-any.whl", hash = 
"sha256:0c78fc87587fcd910ca1bbf5bc8ba37b80e119b388a7206b39f0ecc95ebf53e9"}, + {file = "fastapi-0.121.3.tar.gz", hash = "sha256:0055bc24fe53e56a40e9e0ad1ae2baa81622c406e548e501e717634e2dfbc40b"}, +] + +[package.dependencies] +annotated-doc = ">=0.0.2" +pydantic = ">=1.7.4,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0 || >2.0.0,<2.0.1 || >2.0.1,<2.1.0 || >2.1.0,<3.0.0" +starlette = ">=0.40.0,<0.51.0" +typing-extensions = ">=4.8.0" + +[package.extras] +all = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=3.1.5)", "orjson (>=3.2.1)", "pydantic-extra-types (>=2.0.0)", "pydantic-settings (>=2.0.0)", "python-multipart (>=0.0.18)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"] +standard = ["email-validator (>=2.0.0)", "fastapi-cli[standard] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] +standard-no-fastapi-cloud-cli = ["email-validator (>=2.0.0)", "fastapi-cli[standard-no-fastapi-cloud-cli] (>=0.0.8)", "httpx (>=0.23.0,<1.0.0)", "jinja2 (>=3.1.5)", "python-multipart (>=0.0.18)", "uvicorn[standard] (>=0.12.0)"] + [[package]] name = "filelock" version = "3.18.0" @@ -1222,7 +1277,7 @@ files = [ [package.extras] docs = ["furo (>=2024.8.6)", "sphinx (>=8.1.3)", "sphinx-autodoc-typehints (>=3)"] testing = ["covdefaults (>=2.3)", "coverage (>=7.6.10)", "diff-cover (>=9.2.1)", "pytest (>=8.3.4)", "pytest-asyncio (>=0.25.2)", "pytest-cov (>=6)", "pytest-mock (>=3.14)", "pytest-timeout (>=2.3.1)", "virtualenv (>=20.28.1)"] -typing = ["typing-extensions (>=4.12.2)"] +typing = ["typing-extensions (>=4.12.2) ; python_version < \"3.11\""] [[package]] name = "frozenlist" @@ -1338,6 +1393,99 @@ files = [ {file = "frozenlist-1.6.2.tar.gz", hash = "sha256:effc641518696471cf4962e8e32050133bc1f7b2851ae8fd0cb8797dd70dc202"}, ] +[[package]] +name = "greenlet" +version = 
"3.2.4" +description = "Lightweight in-process concurrent programming" +optional = false +python-versions = ">=3.9" +groups = ["main"] +markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\"" +files = [ + {file = "greenlet-3.2.4-cp310-cp310-macosx_11_0_universal2.whl", hash = "sha256:8c68325b0d0acf8d91dde4e6f930967dd52a5302cd4062932a6b2e7c2969f47c"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:94385f101946790ae13da500603491f04a76b6e4c059dab271b3ce2e283b2590"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f10fd42b5ee276335863712fa3da6608e93f70629c631bf77145021600abc23c"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c8c9e331e58180d0d83c5b7999255721b725913ff6bc6cf39fa2a45841a4fd4b"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:58b97143c9cc7b86fc458f215bd0932f1757ce649e05b640fea2e79b54cedb31"}, + {file = "greenlet-3.2.4-cp310-cp310-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c2ca18a03a8cfb5b25bc1cbe20f3d9a4c80d8c3b13ba3df49ac3961af0b1018d"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:9fe0a28a7b952a21e2c062cd5756d34354117796c6d9215a87f55e38d15402c5"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:8854167e06950ca75b898b104b63cc646573aa5fef1353d4508ecdd1ee76254f"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f47617f698838ba98f4ff4189aef02e7343952df3a615f847bb575c3feb177a7"}, + {file = "greenlet-3.2.4-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:af41be48a4f60429d5cad9d22175217805098a9ef7c40bfef44f7669fb9d74d8"}, + {file = 
"greenlet-3.2.4-cp310-cp310-win_amd64.whl", hash = "sha256:73f49b5368b5359d04e18d15828eecc1806033db5233397748f4ca813ff1056c"}, + {file = "greenlet-3.2.4-cp311-cp311-macosx_11_0_universal2.whl", hash = "sha256:96378df1de302bc38e99c3a9aa311967b7dc80ced1dcc6f171e99842987882a2"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:1ee8fae0519a337f2329cb78bd7a8e128ec0f881073d43f023c7b8d4831d5246"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:94abf90142c2a18151632371140b3dba4dee031633fe614cb592dbb6c9e17bc3"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:4d1378601b85e2e5171b99be8d2dc85f594c79967599328f95c1dc1a40f1c633"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0db5594dce18db94f7d1650d7489909b57afde4c580806b8d9203b6e79cdc079"}, + {file = "greenlet-3.2.4-cp311-cp311-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2523e5246274f54fdadbce8494458a2ebdcdbc7b802318466ac5606d3cded1f8"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:1987de92fec508535687fb807a5cea1560f6196285a4cde35c100b8cd632cc52"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:55e9c5affaa6775e2c6b67659f3a71684de4c549b3dd9afca3bc773533d284fa"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c9c6de1940a7d828635fbd254d69db79e54619f165ee7ce32fda763a9cb6a58c"}, + {file = "greenlet-3.2.4-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:03c5136e7be905045160b1b9fdca93dd6727b180feeafda6818e6496434ed8c5"}, + {file = "greenlet-3.2.4-cp311-cp311-win_amd64.whl", hash = "sha256:9c40adce87eaa9ddb593ccb0fa6a07caf34015a29bf8d344811665b573138db9"}, + {file = "greenlet-3.2.4-cp312-cp312-macosx_11_0_universal2.whl", hash = 
"sha256:3b67ca49f54cede0186854a008109d6ee71f66bd57bb36abd6d0a0267b540cdd"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ddf9164e7a5b08e9d22511526865780a576f19ddd00d62f8a665949327fde8bb"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:f28588772bb5fb869a8eb331374ec06f24a83a9c25bfa1f38b6993afe9c1e968"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:5c9320971821a7cb77cfab8d956fa8e39cd07ca44b6070db358ceb7f8797c8c9"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:c60a6d84229b271d44b70fb6e5fa23781abb5d742af7b808ae3f6efd7c9c60f6"}, + {file = "greenlet-3.2.4-cp312-cp312-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:3b3812d8d0c9579967815af437d96623f45c0f2ae5f04e366de62a12d83a8fb0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:abbf57b5a870d30c4675928c37278493044d7c14378350b3aa5d484fa65575f0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:20fb936b4652b6e307b8f347665e2c615540d4b42b3b4c8a321d8286da7e520f"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ee7a6ec486883397d70eec05059353b8e83eca9168b9f3f9a361971e77e0bcd0"}, + {file = "greenlet-3.2.4-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:326d234cbf337c9c3def0676412eb7040a35a768efc92504b947b3e9cfc7543d"}, + {file = "greenlet-3.2.4-cp312-cp312-win_amd64.whl", hash = "sha256:a7d4e128405eea3814a12cc2605e0e6aedb4035bf32697f72deca74de4105e02"}, + {file = "greenlet-3.2.4-cp313-cp313-macosx_11_0_universal2.whl", hash = "sha256:1a921e542453fe531144e91e1feedf12e07351b1cf6c9e8a3325ea600a715a31"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:cd3c8e693bff0fff6ba55f140bf390fa92c994083f838fece0f63be121334945"}, + {file = 
"greenlet-3.2.4-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:710638eb93b1fa52823aa91bf75326f9ecdfd5e0466f00789246a5280f4ba0fc"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:c5111ccdc9c88f423426df3fd1811bfc40ed66264d35aa373420a34377efc98a"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:d76383238584e9711e20ebe14db6c88ddcedc1829a9ad31a584389463b5aa504"}, + {file = "greenlet-3.2.4-cp313-cp313-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:23768528f2911bcd7e475210822ffb5254ed10d71f4028387e5a99b4c6699671"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_aarch64.whl", hash = "sha256:00fadb3fedccc447f517ee0d3fd8fe49eae949e1cd0f6a611818f4f6fb7dc83b"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:d25c5091190f2dc0eaa3f950252122edbbadbb682aa7b1ef2f8af0f8c0afefae"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:6e343822feb58ac4d0a1211bd9399de2b3a04963ddeec21530fc426cc121f19b"}, + {file = "greenlet-3.2.4-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ca7f6f1f2649b89ce02f6f229d7c19f680a6238af656f61e0115b24857917929"}, + {file = "greenlet-3.2.4-cp313-cp313-win_amd64.whl", hash = "sha256:554b03b6e73aaabec3745364d6239e9e012d64c68ccd0b8430c64ccc14939a8b"}, + {file = "greenlet-3.2.4-cp314-cp314-macosx_11_0_universal2.whl", hash = "sha256:49a30d5fda2507ae77be16479bdb62a660fa51b1eb4928b524975b3bde77b3c0"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:299fd615cd8fc86267b47597123e3f43ad79c9d8a22bebdce535e53550763e2f"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c17b6b34111ea72fc5a4e4beec9711d2226285f0386ea83477cbb97c30a3f3a5"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = 
"sha256:b4a1870c51720687af7fa3e7cda6d08d801dae660f75a76f3845b642b4da6ee1"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:061dc4cf2c34852b052a8620d40f36324554bc192be474b9e9770e8c042fd735"}, + {file = "greenlet-3.2.4-cp314-cp314-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:44358b9bf66c8576a9f57a590d5f5d6e72fa4228b763d0e43fee6d3b06d3a337"}, + {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:2917bdf657f5859fbf3386b12d68ede4cf1f04c90c3a6bc1f013dd68a22e2269"}, + {file = "greenlet-3.2.4-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:015d48959d4add5d6c9f6c5210ee3803a830dce46356e3bc326d6776bde54681"}, + {file = "greenlet-3.2.4-cp314-cp314-win_amd64.whl", hash = "sha256:e37ab26028f12dbb0ff65f29a8d3d44a765c61e729647bf2ddfbbed621726f01"}, + {file = "greenlet-3.2.4-cp39-cp39-macosx_11_0_universal2.whl", hash = "sha256:b6a7c19cf0d2742d0809a4c05975db036fdff50cd294a93632d6a310bf9ac02c"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:27890167f55d2387576d1f41d9487ef171849ea0359ce1510ca6e06c8bece11d"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:18d9260df2b5fbf41ae5139e1be4e796d99655f023a636cd0e11e6406cca7d58"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_s390x.manylinux_2_17_s390x.whl", hash = "sha256:671df96c1f23c4a0d4077a325483c1503c96a1b7d9db26592ae770daa41233d4"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:16458c245a38991aa19676900d48bd1a6f2ce3e16595051a4db9d012154e8433"}, + {file = "greenlet-3.2.4-cp39-cp39-manylinux_2_24_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:c9913f1a30e4526f432991f89ae263459b1c64d1608c0d22a5c79c287b3c70df"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:b90654e092f928f110e0007f572007c9727b5265f7632c2fa7415b4689351594"}, + {file = 
"greenlet-3.2.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:81701fd84f26330f0d5f4944d4e92e61afe6319dcd9775e39396e39d7c3e5f98"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:28a3c6b7cd72a96f61b0e4b2a36f681025b60ae4779cc73c1535eb5f29560b10"}, + {file = "greenlet-3.2.4-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:52206cd642670b0b320a1fd1cbfd95bca0e043179c1d8a045f2c6109dfe973be"}, + {file = "greenlet-3.2.4-cp39-cp39-win32.whl", hash = "sha256:65458b409c1ed459ea899e939f0e1cdb14f58dbc803f2f93c5eab5694d32671b"}, + {file = "greenlet-3.2.4-cp39-cp39-win_amd64.whl", hash = "sha256:d2e685ade4dafd447ede19c31277a224a239a0a1a4eca4e6390efedf20260cfb"}, + {file = "greenlet-3.2.4.tar.gz", hash = "sha256:0dca0d95ff849f9a364385f36ab49f50065d76964944638be9691e1832e9f86d"}, +] + +[package.extras] +docs = ["Sphinx", "furo"] +test = ["objgraph", "psutil", "setuptools"] + +[[package]] +name = "h11" +version = "0.16.0" +description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "h11-0.16.0-py3-none-any.whl", hash = "sha256:63cf8bbe7522de3bf65932fda1d9c2772064ffb3dae62d55932da54b31cb6c86"}, + {file = "h11-0.16.0.tar.gz", hash = "sha256:4e35b956cf45792e4caa5885e69fba00bdbc6ffafbfa020300e549b208ee5ff1"}, +] + [[package]] name = "hexbytes" version = "1.3.1" @@ -1372,7 +1520,7 @@ attrs = ">=22.2.0" sortedcontainers = ">=2.1.0,<3.0.0" [package.extras] -all = ["black (>=19.10b0)", "click (>=7.0)", "crosshair-tool (>=0.0.88)", "django (>=4.2)", "dpcontracts (>=0.4)", "hypothesis-crosshair (>=0.0.23)", "lark (>=0.10.1)", "libcst (>=0.3.16)", "numpy (>=1.19.3)", "pandas (>=1.1)", "pytest (>=4.6)", "python-dateutil (>=1.4)", "pytz (>=2014.1)", "redis (>=3.0.0)", "rich (>=9.0.0)", "tzdata (>=2025.2)", "watchdog (>=4.0.0)"] +all = ["black (>=19.10b0)", "click (>=7.0)", "crosshair-tool (>=0.0.88)", "django (>=4.2)", "dpcontracts (>=0.4)", 
"hypothesis-crosshair (>=0.0.23)", "lark (>=0.10.1)", "libcst (>=0.3.16)", "numpy (>=1.19.3)", "pandas (>=1.1)", "pytest (>=4.6)", "python-dateutil (>=1.4)", "pytz (>=2014.1)", "redis (>=3.0.0)", "rich (>=9.0.0)", "tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == \"emscripten\"", "watchdog (>=4.0.0)"] cli = ["black (>=19.10b0)", "click (>=7.0)", "rich (>=9.0.0)"] codemods = ["libcst (>=0.3.16)"] crosshair = ["crosshair-tool (>=0.0.88)", "hypothesis-crosshair (>=0.0.23)"] @@ -1387,7 +1535,7 @@ pytest = ["pytest (>=4.6)"] pytz = ["pytz (>=2014.1)"] redis = ["redis (>=3.0.0)"] watchdog = ["watchdog (>=4.0.0)"] -zoneinfo = ["tzdata (>=2025.2)"] +zoneinfo = ["tzdata (>=2025.2) ; sys_platform == \"win32\" or sys_platform == \"emscripten\""] [[package]] name = "identify" @@ -1965,7 +2113,7 @@ eth-hash = {version = "^0.7.0", extras = ["pycryptodome"]} [package.source] type = "git" url = "https://github.com/lidofinance/oz-merkle-tree" -reference = "HEAD" +reference = "f4ad6e006b8daf05ce2ce255e123eb9f923d8ef8" resolved_reference = "f4ad6e006b8daf05ce2ce255e123eb9f923d8ef8" [[package]] @@ -2131,6 +2279,22 @@ files = [ [package.extras] twisted = ["twisted"] +[[package]] +name = "prometheus-fastapi-instrumentator" +version = "7.1.0" +description = "Instrument your FastAPI app with Prometheus metrics" +optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "prometheus_fastapi_instrumentator-7.1.0-py3-none-any.whl", hash = "sha256:978130f3c0bb7b8ebcc90d35516a6fe13e02d2eb358c8f83887cdef7020c31e9"}, + {file = "prometheus_fastapi_instrumentator-7.1.0.tar.gz", hash = "sha256:be7cd61eeea4e5912aeccb4261c6631b3f227d8924542d79eaf5af3f439cbe5e"}, +] + +[package.dependencies] +prometheus-client = ">=0.8.0,<1.0.0" +starlette = ">=0.30.0,<1.0.0" + [[package]] name = "prompt-toolkit" version = "3.0.51" @@ -2273,6 +2437,83 @@ files = [ {file = "protobuf-6.31.1.tar.gz", hash = 
"sha256:d8cac4c982f0b957a4dc73a80e2ea24fab08e679c0de9deb835f4a12d69aca9a"}, ] +[[package]] +name = "psycopg2-binary" +version = "2.9.11" +description = "psycopg2 - Python-PostgreSQL Database Adapter" +optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "psycopg2-binary-2.9.11.tar.gz", hash = "sha256:b6aed9e096bf63f9e75edf2581aa9a7e7186d97ab5c177aa6c87797cd591236c"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6fe6b47d0b42ce1c9f1fa3e35bb365011ca22e39db37074458f27921dca40f2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a6c0e4262e089516603a09474ee13eabf09cb65c332277e39af68f6233911087"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:c47676e5b485393f069b4d7a811267d3168ce46f988fa602658b8bb901e9e64d"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:a28d8c01a7b27a1e3265b11250ba7557e5f72b5ee9e5f3a2fa8d2949c29bf5d2"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5f3f2732cf504a1aa9e9609d02f79bea1067d99edf844ab92c247bbca143303b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:865f9945ed1b3950d968ec4690ce68c55019d79e4497366d36e090327ce7db14"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:91537a8df2bde69b1c1db01d6d944c831ca793952e4f57892600e96cee95f2cd"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:4dca1f356a67ecb68c81a7bc7809f1569ad9e152ce7fd02c2f2036862ca9f66b"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_riscv64.whl", hash = "sha256:0da4de5c1ac69d94ed4364b6cbe7190c1a70d325f112ba783d83f8440285f152"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-musllinux_1_2_x86_64.whl", hash = 
"sha256:37d8412565a7267f7d79e29ab66876e55cb5e8e7b3bbf94f8206f6795f8f7e7e"}, + {file = "psycopg2_binary-2.9.11-cp310-cp310-win_amd64.whl", hash = "sha256:c665f01ec8ab273a61c62beeb8cce3014c214429ced8a308ca1fc410ecac3a39"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0e8480afd62362d0a6a27dd09e4ca2def6fa50ed3a4e7c09165266106b2ffa10"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:763c93ef1df3da6d1a90f86ea7f3f806dc06b21c198fa87c3c25504abec9404a"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:2e164359396576a3cc701ba8af4751ae68a07235d7a380c631184a611220d9a4"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:d57c9c387660b8893093459738b6abddbb30a7eab058b77b0d0d1c7d521ddfd7"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:2c226ef95eb2250974bf6fa7a842082b31f68385c4f3268370e3f3870e7859ee"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a311f1edc9967723d3511ea7d2708e2c3592e3405677bf53d5c7246753591fbb"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:ebb415404821b6d1c47353ebe9c8645967a5235e6d88f914147e7fd411419e6f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f07c9c4a5093258a03b28fab9b4f151aa376989e7f35f855088234e656ee6a94"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_riscv64.whl", hash = "sha256:00ce1830d971f43b667abe4a56e42c1e2d594b32da4802e44a73bacacb25535f"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:cffe9d7697ae7456649617e8bb8d7a45afb71cd13f7ab22af3e5c61f04840908"}, + {file = "psycopg2_binary-2.9.11-cp311-cp311-win_amd64.whl", hash = 
"sha256:304fd7b7f97eef30e91b8f7e720b3db75fee010b520e434ea35ed1ff22501d03"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:be9b840ac0525a283a96b556616f5b4820e0526addb8dcf6525a0fa162730be4"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:f090b7ddd13ca842ebfe301cd587a76a4cf0913b1e429eb92c1be5dbeb1a19bc"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:ab8905b5dcb05bf3fb22e0cf90e10f469563486ffb6a96569e51f897c750a76a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:bf940cd7e7fec19181fdbc29d76911741153d51cab52e5c21165f3262125685e"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:fa0f693d3c68ae925966f0b14b8edda71696608039f4ed61b1fe9ffa468d16db"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:a1cf393f1cdaf6a9b57c0a719a1068ba1069f022a59b8b1fe44b006745b59757"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:ef7a6beb4beaa62f88592ccc65df20328029d721db309cb3250b0aae0fa146c3"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:31b32c457a6025e74d233957cc9736742ac5a6cb196c6b68499f6bb51390bd6a"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:edcb3aeb11cb4bf13a2af3c53a15b3d612edeb6409047ea0b5d6a21a9d744b34"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:62b6d93d7c0b61a1dd6197d208ab613eb7dcfdcca0a49c42ceb082257991de9d"}, + {file = "psycopg2_binary-2.9.11-cp312-cp312-win_amd64.whl", hash = "sha256:b33fabeb1fde21180479b2d4667e994de7bbf0eec22832ba5d9b5e4cf65b6c6d"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_10_13_x86_64.whl", hash = 
"sha256:b8fb3db325435d34235b044b199e56cdf9ff41223a4b9752e8576465170bb38c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:366df99e710a2acd90efed3764bb1e28df6c675d33a7fb40df9b7281694432ee"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:8c55b385daa2f92cb64b12ec4536c66954ac53654c7f15a203578da4e78105c0"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:c0377174bf1dd416993d16edc15357f6eb17ac998244cca19bc67cdc0e2e5766"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:5c6ff3335ce08c75afaed19e08699e8aacf95d4a260b495a4a8545244fe2ceb3"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:84011ba3109e06ac412f95399b704d3d6950e386b7994475b231cf61eec2fc1f"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:ba34475ceb08cccbdd98f6b46916917ae6eeb92b5ae111df10b544c3a4621dc4"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:b31e90fdd0f968c2de3b26ab014314fe814225b6c324f770952f7d38abf17e3c"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_riscv64.whl", hash = "sha256:d526864e0f67f74937a8fce859bd56c979f5e2ec57ca7c627f5f1071ef7fee60"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:04195548662fa544626c8ea0f06561eb6203f1984ba5b4562764fbeb4c3d14b1"}, + {file = "psycopg2_binary-2.9.11-cp313-cp313-win_amd64.whl", hash = "sha256:efff12b432179443f54e230fdf60de1f6cc726b6c832db8701227d089310e8aa"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_10_13_x86_64.whl", hash = "sha256:92e3b669236327083a2e33ccfa0d320dd01b9803b3e14dd986a4fc54aa00f4e1"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-macosx_11_0_arm64.whl", hash = 
"sha256:e0deeb03da539fa3577fcb0b3f2554a97f7e5477c246098dbb18091a4a01c16f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:9b52a3f9bb540a3e4ec0f6ba6d31339727b2950c9772850d6545b7eae0b9d7c5"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:db4fd476874ccfdbb630a54426964959e58da4c61c9feba73e6094d51303d7d8"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:47f212c1d3be608a12937cc131bd85502954398aaa1320cb4c14421a0ffccf4c"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:e35b7abae2b0adab776add56111df1735ccc71406e56203515e228a8dc07089f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_aarch64.whl", hash = "sha256:fcf21be3ce5f5659daefd2b3b3b6e4727b028221ddc94e6c1523425579664747"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_ppc64le.whl", hash = "sha256:9bd81e64e8de111237737b29d68039b9c813bdf520156af36d26819c9a979e5f"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_riscv64.whl", hash = "sha256:32770a4d666fbdafab017086655bcddab791d7cb260a16679cc5a7338b64343b"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-musllinux_1_2_x86_64.whl", hash = "sha256:c3cb3a676873d7506825221045bd70e0427c905b9c8ee8d6acd70cfcbd6e576d"}, + {file = "psycopg2_binary-2.9.11-cp314-cp314-win_amd64.whl", hash = "sha256:4012c9c954dfaccd28f94e84ab9f94e12df76b4afb22331b1f0d3154893a6316"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:20e7fb94e20b03dcc783f76c0865f9da39559dcc0c28dd1a3fce0d01902a6b9c"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4bdab48575b6f870f465b397c38f1b415520e9879fdf10a53ee4f49dcbdf8a21"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = 
"sha256:9d3a9edcfbe77a3ed4bc72836d466dfce4174beb79eda79ea155cc77237ed9e8"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_ppc64le.manylinux_2_17_ppc64le.whl", hash = "sha256:44fc5c2b8fa871ce7f0023f619f1349a0aa03a0857f2c96fbc01c657dcbbdb49"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:9c55460033867b4622cda1b6872edf445809535144152e5d14941ef591980edf"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-manylinux_2_38_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:2d11098a83cca92deaeaed3d58cfd150d49b3b06ee0d0852be466bf87596899e"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:691c807d94aecfbc76a14e1408847d59ff5b5906a04a23e12a89007672b9e819"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:8b81627b691f29c4c30a8f322546ad039c40c328373b11dff7490a3e1b517855"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_riscv64.whl", hash = "sha256:b637d6d941209e8d96a072d7977238eea128046effbf37d1d8b2c0764750017d"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:41360b01c140c2a03d346cec3280cf8a71aa07d94f3b1509fa0161c366af66b4"}, + {file = "psycopg2_binary-2.9.11-cp39-cp39-win_amd64.whl", hash = "sha256:875039274f8a2361e5207857899706da840768e2a775bf8c65e82f60b197df02"}, +] + [[package]] name = "ptyprocess" version = "0.7.0" @@ -2372,7 +2613,7 @@ typing-inspection = ">=0.4.0" [package.extras] email = ["email-validator (>=2.0.0)"] -timezone = ["tzdata"] +timezone = ["tzdata ; python_version >= \"3.9\" and platform_system == \"Windows\""] [[package]] name = "pydantic-core" @@ -2833,7 +3074,7 @@ requests = ">=2.30.0,<3.0" urllib3 = ">=1.25.10,<3.0" [package.extras] -tests = ["coverage (>=6.0.0)", "flake8", "mypy", "pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli", "tomli-w", "types-PyYAML", "types-requests"] +tests = ["coverage (>=6.0.0)", "flake8", "mypy", 
"pytest (>=7.0.0)", "pytest-asyncio", "pytest-cov", "pytest-httpserver", "tomli ; python_version < \"3.11\"", "tomli-w", "types-PyYAML", "types-requests"] [[package]] name = "rlp" @@ -2869,7 +3110,7 @@ files = [ ] [package.extras] -dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1)", "coverage", "flake8", "nose2", "readme-renderer (<25.0)", "tox", "wheel", "zest.releaser[recommended]"] +dev = ["Django (>=1.11)", "check-manifest", "colorama (<=0.4.1) ; python_version == \"3.4\"", "coverage", "flake8", "nose2", "readme-renderer (<25.0) ; python_version == \"3.4\"", "tox", "wheel", "zest.releaser[recommended]"] doc = ["Sphinx", "sphinx-rtd-theme"] [[package]] @@ -2884,6 +3125,18 @@ files = [ {file = "six-1.17.0.tar.gz", hash = "sha256:ff70335d468e7eb6ec65b95b99d3a2836546063f63acc5171de367e834932a81"}, ] +[[package]] +name = "sniffio" +version = "1.3.1" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, + {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, +] + [[package]] name = "sortedcontainers" version = "2.4.0" @@ -2896,6 +3149,118 @@ files = [ {file = "sortedcontainers-2.4.0.tar.gz", hash = "sha256:25caa5a06cc30b6b83d11423433f65d1f9d76c4c6a0c90e3379eaa43b9bfdb88"}, ] +[[package]] +name = "sqlalchemy" +version = "2.0.44" +description = "Database Abstraction Library" +optional = false +python-versions = ">=3.7" +groups = ["main"] +files = [ + {file = "SQLAlchemy-2.0.44-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:471733aabb2e4848d609141a9e9d56a427c0a038f4abf65dd19d7a21fd563632"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:48bf7d383a35e668b984c805470518b635d48b95a3c57cb03f37eaa3551b5f9f"}, + {file = 
"SQLAlchemy-2.0.44-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2bf4bb6b3d6228fcf3a71b50231199fb94d2dd2611b66d33be0578ea3e6c2726"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:e998cf7c29473bd077704cea3577d23123094311f59bdc4af551923b168332b1"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:ebac3f0b5732014a126b43c2b7567f2f0e0afea7d9119a3378bde46d3dcad88e"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-win32.whl", hash = "sha256:3255d821ee91bdf824795e936642bbf43a4c7cedf5d1aed8d24524e66843aa74"}, + {file = "SQLAlchemy-2.0.44-cp37-cp37m-win_amd64.whl", hash = "sha256:78e6c137ba35476adb5432103ae1534f2f5295605201d946a4198a0dea4b38e7"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c77f3080674fc529b1bd99489378c7f63fcb4ba7f8322b79732e0258f0ea3ce"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:4c26ef74ba842d61635b0152763d057c8d48215d5be9bb8b7604116a059e9985"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f4a172b31785e2f00780eccab00bc240ccdbfdb8345f1e6063175b3ff12ad1b0"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9480c0740aabd8cb29c329b422fb65358049840b34aba0adf63162371d2a96e"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:17835885016b9e4d0135720160db3095dc78c583e7b902b6be799fb21035e749"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:cbe4f85f50c656d753890f39468fcd8190c5f08282caf19219f684225bfd5fd2"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-win32.whl", hash = "sha256:2fcc4901a86ed81dc76703f3b93ff881e08761c63263c46991081fd7f034b165"}, + {file = "sqlalchemy-2.0.44-cp310-cp310-win_amd64.whl", hash = "sha256:9919e77403a483ab81e3423151e8ffc9dd992c20d2603bf17e4a8161111e55f5"}, + {file = 
"sqlalchemy-2.0.44-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fe3917059c7ab2ee3f35e77757062b1bea10a0b6ca633c58391e3f3c6c488dd"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:de4387a354ff230bc979b46b2207af841dc8bf29847b6c7dbe60af186d97aefa"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c3678a0fb72c8a6a29422b2732fe423db3ce119c34421b5f9955873eb9b62c1e"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3cf6872a23601672d61a68f390e44703442639a12ee9dd5a88bbce52a695e46e"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:329aa42d1be9929603f406186630135be1e7a42569540577ba2c69952b7cf399"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:70e03833faca7166e6a9927fbee7c27e6ecde436774cd0b24bbcc96353bce06b"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-win32.whl", hash = "sha256:253e2f29843fb303eca6b2fc645aca91fa7aa0aa70b38b6950da92d44ff267f3"}, + {file = "sqlalchemy-2.0.44-cp311-cp311-win_amd64.whl", hash = "sha256:7a8694107eb4308a13b425ca8c0e67112f8134c846b6e1f722698708741215d5"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:72fea91746b5890f9e5e0997f16cbf3d53550580d76355ba2d998311b17b2250"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:585c0c852a891450edbb1eaca8648408a3cc125f18cf433941fa6babcc359e29"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9b94843a102efa9ac68a7a30cd46df3ff1ed9c658100d30a725d10d9c60a2f44"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:119dc41e7a7defcefc57189cfa0e61b1bf9c228211aba432b53fb71ef367fda1"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-musllinux_1_2_aarch64.whl", hash = 
"sha256:0765e318ee9179b3718c4fd7ba35c434f4dd20332fbc6857a5e8df17719c24d7"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:2e7b5b079055e02d06a4308d0481658e4f06bc7ef211567edc8f7d5dce52018d"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-win32.whl", hash = "sha256:846541e58b9a81cce7dee8329f352c318de25aa2f2bbe1e31587eb1f057448b4"}, + {file = "sqlalchemy-2.0.44-cp312-cp312-win_amd64.whl", hash = "sha256:7cbcb47fd66ab294703e1644f78971f6f2f1126424d2b300678f419aa73c7b6e"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:ff486e183d151e51b1d694c7aa1695747599bb00b9f5f604092b54b74c64a8e1"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:0b1af8392eb27b372ddb783b317dea0f650241cea5bd29199b22235299ca2e45"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2b61188657e3a2b9ac4e8f04d6cf8e51046e28175f79464c67f2fd35bceb0976"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b87e7b91a5d5973dda5f00cd61ef72ad75a1db73a386b62877d4875a8840959c"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:15f3326f7f0b2bfe406ee562e17f43f36e16167af99c4c0df61db668de20002d"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:1e77faf6ff919aa8cd63f1c4e561cac1d9a454a191bb864d5dd5e545935e5a40"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-win32.whl", hash = "sha256:ee51625c2d51f8baadf2829fae817ad0b66b140573939dd69284d2ba3553ae73"}, + {file = "sqlalchemy-2.0.44-cp313-cp313-win_amd64.whl", hash = "sha256:c1c80faaee1a6c3428cecf40d16a2365bcf56c424c92c2b6f0f9ad204b899e9e"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2fc44e5965ea46909a416fff0af48a219faefd5773ab79e5f8a5fcd5d62b2667"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-macosx_11_0_arm64.whl", hash = 
"sha256:dc8b3850d2a601ca2320d081874033684e246d28e1c5e89db0864077cfc8f5a9"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d733dec0614bb8f4bcb7c8af88172b974f685a31dc3a65cca0527e3120de5606"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22be14009339b8bc16d6b9dc8780bacaba3402aa7581658e246114abbd2236e3"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:357bade0e46064f88f2c3a99808233e67b0051cdddf82992379559322dfeb183"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:4848395d932e93c1595e59a8672aa7400e8922c39bb9b0668ed99ac6fa867822"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-win32.whl", hash = "sha256:2f19644f27c76f07e10603580a47278abb2a70311136a7f8fd27dc2e096b9013"}, + {file = "sqlalchemy-2.0.44-cp38-cp38-win_amd64.whl", hash = "sha256:1df4763760d1de0dfc8192cc96d8aa293eb1a44f8f7a5fbe74caf1b551905c5e"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f7027414f2b88992877573ab780c19ecb54d3a536bef3397933573d6b5068be4"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fe166c7d00912e8c10d3a9a0ce105569a31a3d0db1a6e82c4e0f4bf16d5eca9"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3caef1ff89b1caefc28f0368b3bde21a7e3e630c2eddac16abd9e47bd27cc36a"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc2856d24afa44295735e72f3c75d6ee7fdd4336d8d3a8f3d44de7aa6b766df2"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:11bac86b0deada30b6b5f93382712ff0e911fe8d31cb9bf46e6b149ae175eff0"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:4d18cd0e9a0f37c9f4088e50e3839fcb69a380a0ec957408e0b57cff08ee0a26"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-win32.whl", hash = 
"sha256:9e9018544ab07614d591a26c1bd4293ddf40752cc435caf69196740516af7100"}, + {file = "sqlalchemy-2.0.44-cp39-cp39-win_amd64.whl", hash = "sha256:8e0e4e66fd80f277a8c3de016a81a554e76ccf6b8d881ee0b53200305a8433f6"}, + {file = "sqlalchemy-2.0.44-py3-none-any.whl", hash = "sha256:19de7ca1246fbef9f9d1bff8f1ab25641569df226364a0e40457dc5457c54b05"}, + {file = "sqlalchemy-2.0.44.tar.gz", hash = "sha256:0ae7454e1ab1d780aee69fd2aae7d6b8670a581d8847f2d1e0f7ddfbf47e5a22"}, +] + +[package.dependencies] +greenlet = {version = ">=1", markers = "platform_machine == \"aarch64\" or platform_machine == \"ppc64le\" or platform_machine == \"x86_64\" or platform_machine == \"amd64\" or platform_machine == \"AMD64\" or platform_machine == \"win32\" or platform_machine == \"WIN32\""} +typing-extensions = ">=4.6.0" + +[package.extras] +aiomysql = ["aiomysql (>=0.2.0)", "greenlet (>=1)"] +aioodbc = ["aioodbc", "greenlet (>=1)"] +aiosqlite = ["aiosqlite", "greenlet (>=1)", "typing_extensions (!=3.10.0.1)"] +asyncio = ["greenlet (>=1)"] +asyncmy = ["asyncmy (>=0.2.3,!=0.2.4,!=0.2.6)", "greenlet (>=1)"] +mariadb-connector = ["mariadb (>=1.0.1,!=1.1.2,!=1.1.5,!=1.1.10)"] +mssql = ["pyodbc"] +mssql-pymssql = ["pymssql"] +mssql-pyodbc = ["pyodbc"] +mypy = ["mypy (>=0.910)"] +mysql = ["mysqlclient (>=1.4.0)"] +mysql-connector = ["mysql-connector-python"] +oracle = ["cx_oracle (>=8)"] +oracle-oracledb = ["oracledb (>=1.0.1)"] +postgresql = ["psycopg2 (>=2.7)"] +postgresql-asyncpg = ["asyncpg", "greenlet (>=1)"] +postgresql-pg8000 = ["pg8000 (>=1.29.1)"] +postgresql-psycopg = ["psycopg (>=3.0.7)"] +postgresql-psycopg2binary = ["psycopg2-binary"] +postgresql-psycopg2cffi = ["psycopg2cffi"] +postgresql-psycopgbinary = ["psycopg[binary] (>=3.0.7)"] +pymysql = ["pymysql"] +sqlcipher = ["sqlcipher3_binary"] + +[[package]] +name = "sqlmodel" +version = "0.0.27" +description = "SQLModel, SQL databases in Python, designed for simplicity, compatibility, and robustness." 
+optional = false +python-versions = ">=3.8" +groups = ["main"] +files = [ + {file = "sqlmodel-0.0.27-py3-none-any.whl", hash = "sha256:667fe10aa8ff5438134668228dc7d7a08306f4c5c4c7e6ad3ad68defa0e7aa49"}, + {file = "sqlmodel-0.0.27.tar.gz", hash = "sha256:ad1227f2014a03905aef32e21428640848ac09ff793047744a73dfdd077ff620"}, +] + +[package.dependencies] +pydantic = ">=1.10.13,<3.0.0" +SQLAlchemy = ">=2.0.14,<2.1.0" + [[package]] name = "stack-data" version = "0.6.3" @@ -2916,6 +3281,25 @@ pure-eval = "*" [package.extras] tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] +[[package]] +name = "starlette" +version = "0.50.0" +description = "The little ASGI library that shines." +optional = false +python-versions = ">=3.10" +groups = ["main"] +files = [ + {file = "starlette-0.50.0-py3-none-any.whl", hash = "sha256:9e5391843ec9b6e472eed1365a78c8098cfceb7a74bfd4d6b1c0c0095efb3bca"}, + {file = "starlette-0.50.0.tar.gz", hash = "sha256:a2a17b22203254bcbc2e1f926d2d55f3f9497f769416b3190768befe598fa3ca"}, +] + +[package.dependencies] +anyio = ">=3.6.2,<5" +typing-extensions = {version = ">=4.10.0", markers = "python_version < \"3.13\""} + +[package.extras] +full = ["httpx (>=0.27.0,<0.29.0)", "itsdangerous", "jinja2", "python-multipart (>=0.0.18)", "pyyaml"] + [[package]] name = "timeout-decorator" version = "0.5.0" @@ -3086,11 +3470,30 @@ files = [ ] [package.extras] -brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +brotli = ["brotli (>=1.0.9) ; platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; platform_python_implementation != \"CPython\""] h2 = ["h2 (>=4,<5)"] socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] zstd = ["zstandard (>=0.18.0)"] +[[package]] +name = "uvicorn" +version = "0.38.0" +description = "The lightning-fast ASGI server." 
+optional = false +python-versions = ">=3.9" +groups = ["main"] +files = [ + {file = "uvicorn-0.38.0-py3-none-any.whl", hash = "sha256:48c0afd214ceb59340075b4a052ea1ee91c16fbc2a9b1469cca0e54566977b02"}, + {file = "uvicorn-0.38.0.tar.gz", hash = "sha256:fd97093bdd120a2609fc0d3afe931d4d4ad688b6e75f0f929fde1bc36fe0e91d"}, +] + +[package.dependencies] +click = ">=7.0" +h11 = ">=0.8" + +[package.extras] +standard = ["colorama (>=0.4) ; sys_platform == \"win32\"", "httptools (>=0.6.3)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.15.1) ; sys_platform != \"win32\" and sys_platform != \"cygwin\" and platform_python_implementation != \"PyPy\"", "watchfiles (>=0.13)", "websockets (>=10.4)"] + [[package]] name = "varint" version = "1.0.2" @@ -3121,7 +3524,7 @@ platformdirs = ">=3.9.1,<5" [package.extras] docs = ["furo (>=2023.7.26)", "proselint (>=0.13)", "sphinx (>=7.1.2,!=7.3)", "sphinx-argparse (>=0.4)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=23.6)"] -test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8)", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10)"] +test = ["covdefaults (>=2.3)", "coverage (>=7.2.7)", "coverage-enable-subprocess (>=1)", "flaky (>=3.7)", "packaging (>=23.1)", "pytest (>=7.4)", "pytest-env (>=0.8.2)", "pytest-freezer (>=0.4.8) ; platform_python_implementation == \"PyPy\" or platform_python_implementation == \"GraalVM\" or platform_python_implementation == \"CPython\" and sys_platform == \"win32\" and python_version >= \"3.13\"", "pytest-mock (>=3.11.1)", "pytest-randomly (>=3.12)", "pytest-timeout (>=2.1)", "setuptools (>=68)", "time-machine (>=2.10) ; platform_python_implementation == \"CPython\""] [[package]] name = "wcwidth" @@ -3389,4 +3792,4 @@ propcache = ">=0.2.1" [metadata] lock-version = "2.1" 
python-versions = "^3.12" -content-hash = "096751c6157c4a461a4864096214eaea0b974fec29d241da155d53adf0ad8c8b" +content-hash = "87c1aa12c90c68ebf6c631c9c0177a675aad8e5c093d6487af1a2fe4e74a1a43" diff --git a/pyproject.toml b/pyproject.toml index 955ced0f3..eb5f741dc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -30,6 +30,11 @@ oz-merkle-tree = { git = "https://github.com/lidofinance/oz-merkle-tree", rev = multiformats = "^0.3.1" protobuf="^6.31.1" dag-cbor="^0.3.3" +fastapi = "^0.121.3" +uvicorn = "^0.38.0" +sqlmodel = "^0.0.27" +psycopg2-binary = "^2.9.11" +prometheus-fastapi-instrumentator = "^7.1.0" [tool.poetry.group.dev.dependencies] base58 = "^2.1.1" diff --git a/src/constants.py b/src/constants.py index f385b4b0b..dddb2c0ec 100644 --- a/src/constants.py +++ b/src/constants.py @@ -33,6 +33,7 @@ # https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#time-parameters SLOTS_PER_HISTORICAL_ROOT = 2**13 # 8192 # https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#sync-committee +SYNC_COMMITTEE_SIZE = 512 EPOCHS_PER_SYNC_COMMITTEE_PERIOD = 256 # https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#domain-types DOMAIN_DEPOSIT_TYPE = bytes.fromhex("03000000") # 0x03000000 diff --git a/src/main.py b/src/main.py index 21b6c5b08..0a059f88f 100644 --- a/src/main.py +++ b/src/main.py @@ -15,6 +15,7 @@ from src.modules.csm.csm import CSOracle from src.modules.ejector.ejector import Ejector from src.providers.ipfs import IPFSProvider, Kubo, LidoIPFS, Pinata, Storacha +from src.modules.performance.collector.collector import PerformanceCollector from src.types import OracleModule from src.utils.build import get_build_info from src.utils.exception import IncompatibleException @@ -29,6 +30,7 @@ LidoValidatorsProvider, TransactionUtils, ) +from src.web3py.extensions.performance import PerformanceClientModule from src.web3py.types import Web3 from decimal import getcontext @@ -75,6 +77,9 @@ 
def main(module_name: OracleModule): logger.info({'msg': 'Initialize IPFS providers.'}) ipfs = IPFS(web3, ipfs_providers(), retries=variables.HTTP_REQUEST_RETRY_COUNT_IPFS) + logger.info({'msg': 'Initialize Performance Collector client.'}) + performance = PerformanceClientModule(variables.PERFORMANCE_COLLECTOR_URI) + logger.info({'msg': 'Check configured providers.'}) if Version(kac.get_status().appVersion) < constants.ALLOWED_KAPI_VERSION: raise IncompatibleException(f'Incompatible KAPI version. Required >= {constants.ALLOWED_KAPI_VERSION}.') @@ -89,12 +94,13 @@ def main(module_name: OracleModule): 'cc': lambda: cc, # type: ignore[dict-item] 'kac': lambda: kac, # type: ignore[dict-item] 'ipfs': lambda: ipfs, # type: ignore[dict-item] + 'performance': lambda: performance, # type: ignore[dict-item] }) logger.info({'msg': 'Initialize prometheus metrics.'}) init_metrics() - instance: Accounting | Ejector | CSOracle + instance: Accounting | Ejector | CSOracle | PerformanceCollector if module_name == OracleModule.ACCOUNTING: logger.info({'msg': 'Initialize Accounting module.'}) instance = Accounting(web3) @@ -104,10 +110,15 @@ def main(module_name: OracleModule): elif module_name == OracleModule.CSM: logger.info({'msg': 'Initialize CSM performance oracle module.'}) instance = CSOracle(web3) + elif module_name == OracleModule.PERFORMANCE_COLLECTOR: + logger.info({'msg': 'Initialize Performance Collector module.'}) + # FIXME: web3 object is overkill. only CONSENSUS_CLIENT_URI needed here. 
+ instance = PerformanceCollector(web3) else: raise ValueError(f'Unexpected arg: {module_name=}.') - instance.check_contract_configs() + if module_name != OracleModule.PERFORMANCE_COLLECTOR: + instance.check_contract_configs() if variables.DAEMON: instance.run_as_daemon() @@ -174,7 +185,6 @@ def ipfs_providers() -> Iterator[IPFSProvider]: ) - if __name__ == '__main__': module_name_arg = sys.argv[-1] if module_name_arg not in OracleModule: @@ -183,12 +193,22 @@ def ipfs_providers() -> Iterator[IPFSProvider]: raise ValueError(msg) module = OracleModule(module_name_arg) + if module is OracleModule.CHECK: errors = variables.check_uri_required_variables() variables.raise_from_errors(errors) - sys.exit(check()) - errors = variables.check_all_required_variables(module) + if module is OracleModule.PERFORMANCE_WEB_SERVER: + from src.modules.performance.web.server import serve + errors = variables.check_perf_web_server_required_variables() + variables.raise_from_errors(errors) + logger.info({'msg': f'Starting Performance Web Server on port {variables.PERFORMANCE_WEB_SERVER_API_PORT}'}) + sys.exit(serve()) + + if module is OracleModule.PERFORMANCE_COLLECTOR: + errors = variables.check_perf_collector_required_variables() + else: + errors = variables.check_all_required_variables(module) variables.raise_from_errors(errors) main(module) diff --git a/src/metrics/prometheus/basic.py b/src/metrics/prometheus/basic.py index f09ba1ebd..644e86cef 100644 --- a/src/metrics/prometheus/basic.py +++ b/src/metrics/prometheus/basic.py @@ -68,6 +68,14 @@ class Status(Enum): buckets=requests_buckets, ) +PERFORMANCE_REQUESTS_DURATION = Histogram( + 'performance_requests_duration', + 'Duration of requests to Performance Collector API', + ['endpoint', 'code', 'domain'], + namespace=PROMETHEUS_PREFIX, + buckets=requests_buckets, +) + KEYS_API_REQUESTS_DURATION = Histogram( 'keys_api_requests_duration', 'Duration of requests to Keys API', diff --git a/src/modules/csm/csm.py b/src/modules/csm/csm.py 
index d2a22ddde..301996545 100644 --- a/src/modules/csm/csm.py +++ b/src/modules/csm/csm.py @@ -1,3 +1,4 @@ +import atexit import logging from hexbytes import HexBytes @@ -9,16 +10,7 @@ CSM_CURRENT_FRAME_RANGE_R_EPOCH, ) from src.metrics.prometheus.duration_meter import duration_meter -from src.modules.csm.checkpoint import ( - FrameCheckpointProcessor, - FrameCheckpointsIterator, - MinStepIsNotReached, -) -from src.modules.csm.distribution import ( - Distribution, - DistributionResult, - StrikesValidator, -) +from src.modules.csm.distribution import Distribution, DistributionResult, StrikesValidator from src.modules.csm.helpers.last_report import LastReport from src.modules.csm.log import FramePerfLog from src.modules.csm.state import State @@ -35,8 +27,11 @@ EpochNumber, ReferenceBlockStamp, SlotNumber, + ValidatorIndex, ) from src.utils.cache import global_lru_cache as lru_cache +from src.utils.range import sequence +from src.utils.validator_state import is_active_validator from src.utils.web3converter import Web3Converter from src.web3py.extensions.lido_validators import NodeOperatorId from src.web3py.types import Web3 @@ -65,32 +60,108 @@ class CSOracle(BaseModule, ConsensusModule): report_contract: CSFeeOracleContract def __init__(self, w3: Web3): + self.consumer = self.__class__.__name__ self.report_contract = w3.csm.oracle self.state = State.load() super().__init__(w3) + atexit.register(self._on_shutdown) def refresh_contracts(self): self.report_contract = self.w3.csm.oracle # type: ignore self.state.clear() + def _on_shutdown(self): + performance_client = getattr(self.w3, "performance", None) + if performance_client is None: + logger.debug({ + "msg": "Performance client is not attached, skipping demand cleanup", + "consumer": self.consumer, + }) + return + try: + performance_client.delete_epochs_demand(self.consumer) + logger.info({ + "msg": "Cleared Performance Collector demand on shutdown", + "consumer": self.consumer, + }) + except Exception as error: + 
logger.warning({ + "msg": "Unexpected error during Performance Collector demand cleanup", + "consumer": self.consumer, + "error": str(error), + }) + def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay: if not self._check_compatability(last_finalized_blockstamp): return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH - collected = self.collect_data(last_finalized_blockstamp) - if not collected: - logger.info( - {"msg": "Data required for the report is not fully collected yet. Waiting for the next finalized epoch"} - ) - return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH + self.set_epochs_range_to_collect(last_finalized_blockstamp) report_blockstamp = self.get_blockstamp_for_report(last_finalized_blockstamp) if not report_blockstamp: return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH + collected = self.collect_data() + if not collected: + return ModuleExecuteDelay.NEXT_FINALIZED_EPOCH + self.process_report(report_blockstamp) return ModuleExecuteDelay.NEXT_SLOT + @duration_meter() + def set_epochs_range_to_collect(self, blockstamp: BlockStamp): + converter = self.converter(blockstamp) + + l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) + self.state.migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame) + self.state.log_progress() + + is_range_available = self.w3.performance.is_range_available(l_epoch, r_epoch) + if is_range_available: + logger.info({ + "msg": "Performance data range is already available", + "start_epoch": l_epoch, + "end_epoch": r_epoch + }) + return + + current_demand = self.w3.performance.get_epochs_demand(self.consumer) + current_epochs_range = (current_demand.l_epoch, current_demand.r_epoch) if current_demand else None + if current_epochs_range != (l_epoch, r_epoch): + logger.info({ + "msg": f"Updating {self.consumer} epochs demand for Performance Collector", + "old": current_epochs_range, + "new": (l_epoch, r_epoch) + }) + self.w3.performance.post_epochs_demand(self.consumer, l_epoch, r_epoch) + + 
@duration_meter() + def collect_data(self) -> bool: + logger.info({"msg": "Collecting data for the report from Performance Collector"}) + + self.state.ensure_initialized() + + if not self.state.is_fulfilled: + for l_epoch, r_epoch in self.state.frames: + is_data_range_available = self.w3.performance.is_range_available( + l_epoch, r_epoch + ) + if not is_data_range_available: + logger.warning({ + "msg": "Performance data range is not available yet", + "start_epoch": l_epoch, + "end_epoch": r_epoch + }) + return False + logger.info({ + "msg": "Performance data range is available", + "start_epoch": l_epoch, + "end_epoch": r_epoch + }) + self.fulfill_state() + + return self.state.is_fulfilled + @lru_cache(maxsize=1) @duration_meter() def build_report(self, blockstamp: ReferenceBlockStamp) -> tuple: @@ -162,66 +233,93 @@ def validate_state(self, blockstamp: ReferenceBlockStamp) -> None: self.state.validate(l_epoch, r_epoch) - def collect_data(self, blockstamp: BlockStamp) -> bool: - """Ongoing report data collection for the estimated reference slot""" - - logger.info({"msg": "Collecting data for the report"}) - - converter = self.converter(blockstamp) - - l_epoch, r_epoch = self.get_epochs_range_to_process(blockstamp) - logger.info({"msg": f"Epochs range for performance data collect: [{l_epoch};{r_epoch}]"}) - - # NOTE: Finalized slot is the first slot of justifying epoch, so we need to take the previous. But if the first - # slot of the justifying epoch is empty, blockstamp.slot_number will point to the slot where the last finalized - # block was created. As a result, finalized_epoch in this case will be less than the actual number of the last - # finalized epoch. As a result we can have a delay in frame finalization. 
- finalized_epoch = EpochNumber(converter.get_epoch_by_slot(blockstamp.slot_number) - 1) - - report_blockstamp = self.get_blockstamp_for_report(blockstamp) - - if not report_blockstamp: - logger.info({"msg": "No report blockstamp available, using pre-computed one for collecting data"}) - - if report_blockstamp and report_blockstamp.ref_epoch != r_epoch: - logger.warning( - { - "msg": f"Epochs range has been changed, but the change is not yet observed on finalized epoch {finalized_epoch}" - } - ) - return False - - if l_epoch > finalized_epoch: - logger.info({"msg": "The starting epoch of the epochs range is not finalized yet"}) - return False - - self.state.migrate(l_epoch, r_epoch, converter.frame_config.epochs_per_frame) - self.state.log_progress() - - if self.state.is_fulfilled: - logger.info({"msg": "All epochs are already processed. Nothing to collect"}) - return True - - try: - checkpoints = FrameCheckpointsIterator( - converter, - min(self.state.unprocessed_epochs), - r_epoch, - finalized_epoch, - ) - except MinStepIsNotReached: - return False - - processor = FrameCheckpointProcessor(self.w3.cc, self.state, converter, blockstamp) - - for checkpoint in checkpoints: - if self.get_epochs_range_to_process(self._receive_last_finalized_slot()) != (l_epoch, r_epoch): - logger.info({"msg": "Checkpoints were prepared for an outdated epochs range, stop processing"}) - raise ValueError("Outdated checkpoint") - processor.exec(checkpoint) - # Reset BaseOracle cycle timeout to avoid timeout errors during long checkpoints processing - self._reset_cycle_timeout() - return self.state.is_fulfilled + def fulfill_state(self): + finalized_blockstamp = self._receive_last_finalized_slot() + validators = self.w3.cc.get_validators(finalized_blockstamp) + + self.state.ensure_initialized() + + logger.info({ + "msg": "Starting state fulfillment", + "total_frames": len(self.state.frames), + "total_validators": len(validators) + }) + + for l_epoch, r_epoch in self.state.frames: + 
logger.info({ + "msg": "Processing frame", + "start_epoch": l_epoch, + "end_epoch": r_epoch, + "total_epochs": r_epoch - l_epoch + 1 + }) + + for epoch in sequence(l_epoch, r_epoch): + if epoch not in self.state.unprocessed_epochs: + logger.debug({"msg": f"Epoch {epoch} is already processed"}) + continue + + logger.info({ + "msg": "Requesting performance data from collector", + "epoch": epoch + }) + epoch_data = self.w3.performance.get_epoch_data(epoch) + if epoch_data is None: + raise ValueError(f"Epoch {epoch} is missing in Performance Collector") + + ( + misses_raw, + props_vids, + props_flags, + syncs_vids, + syncs_misses, + ) = ( + [ValidatorIndex(vid) for vid in epoch_data.attestations], + [ValidatorIndex(vid) for vid in epoch_data.proposals_vids], + epoch_data.proposals_flags, # proposed or not status + [ValidatorIndex(vid) for vid in epoch_data.syncs_vids], + epoch_data.syncs_misses, # count of missed blocks in sync duties + ) + + if len(props_vids) != len(props_flags) or len(syncs_vids) != len(syncs_misses): + raise ValueError(f"Epoch {epoch} data is corrupted: {len(props_vids)=}, {len(props_flags)=}, {len(syncs_vids)=}, {len(syncs_misses)=}") + + logger.info({ + "msg": "Performance data received", + "epoch": epoch, + "misses_count": len(misses_raw), + "proposals_count": len(props_vids), + "sync_duties_count": len(syncs_vids) + }) + + misses = set(misses_raw) + for validator in validators: + missed_att = validator.index in misses + included_att = validator.index not in misses + is_active = is_active_validator(validator, epoch) + if not is_active and missed_att: + raise ValueError(f"Validator {validator.index} missed attestation in epoch {epoch}, but was not active") + self.state.save_att_duty(EpochNumber(epoch), validator.index, included=included_att) + + blocks_in_epoch = 0 + + for i, vid in enumerate(props_vids): + proposed = props_flags[i] + self.state.save_prop_duty(EpochNumber(epoch), ValidatorIndex(vid), included=bool(proposed)) + blocks_in_epoch += 
proposed + + if blocks_in_epoch: + for i, vid in enumerate(syncs_vids): + vid = ValidatorIndex(vid) + s_misses = syncs_misses[i] + s_fulfilled = max(0, blocks_in_epoch - s_misses) + for _ in range(s_fulfilled): + self.state.save_sync_duty(EpochNumber(epoch), vid, included=True) + for _ in range(s_misses): + self.state.save_sync_duty(EpochNumber(epoch), vid, included=False) + + self.state.add_processed_epoch(EpochNumber(epoch)) + self.state.log_progress() + self.state.commit() def make_rewards_tree(self, shares: dict[NodeOperatorId, RewardsShares]) -> RewardsTree: if not shares: @@ -301,6 +399,12 @@ def get_epochs_range_to_process(self, blockstamp: BlockStamp) -> tuple[EpochNumb CSM_CURRENT_FRAME_RANGE_L_EPOCH.set(l_epoch) CSM_CURRENT_FRAME_RANGE_R_EPOCH.set(r_epoch) + logger.info({ + "msg": "Epochs range for the report", + "l_epoch": l_epoch, + "r_epoch": r_epoch + }) + return l_epoch, r_epoch def converter(self, blockstamp: BlockStamp) -> Web3Converter: diff --git a/src/modules/csm/state.py b/src/modules/csm/state.py index 65cd2e9e1..6f183f4b2 100644 --- a/src/modules/csm/state.py +++ b/src/modules/csm/state.py @@ -135,6 +135,10 @@ def buffer(self) -> Path: def is_empty(self) -> bool: return not self.data and not self._epochs_to_process and not self._processed_epochs + def ensure_initialized(self) -> None: + if self.is_empty or not self._epochs_to_process or not self.frames: + raise InvalidState("State is not initialized; call migrate() before processing") + @property def frames(self) -> list[Frame]: return list(self.data.keys()) @@ -142,7 +146,7 @@ def frames(self) -> list[Frame]: @property def unprocessed_epochs(self) -> set[EpochNumber]: if not self._epochs_to_process: - raise ValueError("Epochs to process are not set") + raise InvalidState("Epochs to process are not set; call migrate() before processing") diff = set(self._epochs_to_process) - self._processed_epochs return diff diff --git a/src/modules/performance/__init__.py 
b/src/modules/performance/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/modules/performance/collector/__init__.py b/src/modules/performance/collector/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/modules/csm/checkpoint.py b/src/modules/performance/collector/checkpoint.py similarity index 70% rename from src/modules/csm/checkpoint.py rename to src/modules/performance/collector/checkpoint.py index 0fb6573c1..007eb2a2c 100644 --- a/src/modules/csm/checkpoint.py +++ b/src/modules/performance/collector/checkpoint.py @@ -9,9 +9,9 @@ from hexbytes import HexBytes from src import variables -from src.constants import SLOTS_PER_HISTORICAL_ROOT, EPOCHS_PER_SYNC_COMMITTEE_PERIOD -from src.metrics.prometheus.csm import CSM_UNPROCESSED_EPOCHS_COUNT, CSM_MIN_UNPROCESSED_EPOCH -from src.modules.csm.state import State +from src.constants import SLOTS_PER_HISTORICAL_ROOT, EPOCHS_PER_SYNC_COMMITTEE_PERIOD, SYNC_COMMITTEE_SIZE +from src.modules.performance.common.types import ProposalDuty, SyncDuty, AttDutyMisses +from src.modules.performance.common.db import DutiesDB from src.modules.submodules.types import ZERO_HASH from src.providers.consensus.client import ConsensusClient from src.providers.consensus.types import SyncCommittee, SyncAggregate @@ -22,15 +22,18 @@ from src.utils.slot import get_prev_non_missed_slot from src.utils.timeit import timeit from src.utils.types import hex_str_to_bytes -from src.utils.web3converter import Web3Converter +from src.utils.web3converter import ChainConverter ZERO_BLOCK_ROOT = HexBytes(ZERO_HASH).to_0x_hex() logger = logging.getLogger(__name__) lock = Lock() +type SlotBlockRoot = tuple[SlotNumber, BlockRoot | None] + +type AttestationCommittees = dict[tuple[SlotNumber, CommitteeIndex], list[ValidatorIndex]] + -class MinStepIsNotReached(Exception): ... class SlotOutOfRootsRange(Exception): ... 
@@ -40,14 +43,8 @@ class FrameCheckpoint: duty_epochs: Sequence[EpochNumber] # NOTE: max 255 elements. -@dataclass -class ValidatorDuty: - validator_index: ValidatorIndex - included: bool - - class FrameCheckpointsIterator: - converter: Web3Converter + converter: ChainConverter l_epoch: EpochNumber r_epoch: EpochNumber @@ -55,8 +52,6 @@ class FrameCheckpointsIterator: # Max available epoch to process according to the finalized epoch max_available_epoch_to_check: EpochNumber - # Min checkpoint step is 10 because it's a reasonable number of epochs to process at once (~1 hour) - MIN_CHECKPOINT_STEP = 10 # Max checkpoint step is 255 epochs because block_roots size from state is 8192 slots (256 epochs) # to check duty of every epoch, we need to check 64 slots (32 slots of duty epoch + 32 slots of next epoch). # In the end we got 255 committees and 8192 block_roots to check them for every checkpoint. @@ -68,10 +63,10 @@ class FrameCheckpointsIterator: CHECKPOINT_SLOT_DELAY_EPOCHS = 2 def __init__( - self, converter: Web3Converter, l_epoch: EpochNumber, r_epoch: EpochNumber, finalized_epoch: EpochNumber + self, converter: ChainConverter, l_epoch: EpochNumber, r_epoch: EpochNumber, finalized_epoch: EpochNumber ): if l_epoch > r_epoch: - raise ValueError("Left border epoch should be less or equal right border epoch") + raise ValueError(f"Left border epoch should be less or equal right border epoch: {l_epoch=} > {r_epoch=}") self.converter = converter self.l_epoch = l_epoch self.r_epoch = r_epoch @@ -80,8 +75,11 @@ def __init__( self.r_epoch, EpochNumber(finalized_epoch - self.CHECKPOINT_SLOT_DELAY_EPOCHS) ) - if self.r_epoch > self.max_available_epoch_to_check and not self._is_min_step_reached(): - raise MinStepIsNotReached() + if self.l_epoch > self.max_available_epoch_to_check: + raise ValueError(f"Left border epoch is greater than max available epoch to check: {l_epoch=} > {self.max_available_epoch_to_check=}") + + if self.r_epoch > self.max_available_epoch_to_check: + 
raise ValueError(f"Right border epoch is greater than max available epoch to check: {r_epoch=} > {self.max_available_epoch_to_check=}") def __iter__(self): for checkpoint_epochs in batched( @@ -96,27 +94,6 @@ def __iter__(self): ) yield FrameCheckpoint(checkpoint_slot, checkpoint_epochs) - def _is_min_step_reached(self): - # NOTE: processing delay can be negative - # if the finalized epoch is less than next epoch to check (l_epoch) - processing_delay = self.max_available_epoch_to_check - self.l_epoch - if processing_delay >= self.MIN_CHECKPOINT_STEP: - return True - logger.info( - { - "msg": f"Minimum checkpoint step is not reached, current delay is {processing_delay} epochs", - "max_available_epoch_to_check": self.max_available_epoch_to_check, - "l_epoch": self.l_epoch, - "r_epoch": self.r_epoch, - } - ) - return False - - -type SlotBlockRoot = tuple[SlotNumber, BlockRoot | None] -type SyncCommittees = dict[SlotNumber, list[ValidatorDuty]] -type AttestationCommittees = dict[tuple[SlotNumber, CommitteeIndex], list[ValidatorDuty]] - class SyncCommitteesCache(UserDict): @@ -133,38 +110,51 @@ def __setitem__(self, sync_committee_period: int, value: SyncCommittee): class FrameCheckpointProcessor: cc: ConsensusClient - converter: Web3Converter + converter: ChainConverter - state: State + db: DutiesDB finalized_blockstamp: BlockStamp def __init__( self, cc: ConsensusClient, - state: State, - converter: Web3Converter, + db: DutiesDB, + converter: ChainConverter, finalized_blockstamp: BlockStamp, ): self.cc = cc self.converter = converter - self.state = state + self.db = db self.finalized_blockstamp = finalized_blockstamp def exec(self, checkpoint: FrameCheckpoint) -> int: logger.info( {"msg": f"Processing checkpoint for slot {checkpoint.slot} with {len(checkpoint.duty_epochs)} epochs"} ) - unprocessed_epochs = [e for e in checkpoint.duty_epochs if e in self.state.unprocessed_epochs] + unprocessed_epochs = [e for e in checkpoint.duty_epochs if not self.db.has_epoch(e)] if 
not unprocessed_epochs: logger.info({"msg": "Nothing to process in the checkpoint"}) return 0 + + logger.info({ + 'msg': 'Starting epochs batch processing', + 'unprocessed_epochs_count': len(unprocessed_epochs), + 'checkpoint_slot': checkpoint.slot + }) + block_roots = self._get_block_roots(checkpoint.slot) duty_epochs_roots = { duty_epoch: self._select_block_roots(block_roots, duty_epoch, checkpoint.slot) for duty_epoch in unprocessed_epochs } self._process(block_roots, checkpoint.slot, unprocessed_epochs, duty_epochs_roots) - self.state.commit() + + logger.info({ + 'msg': 'All epochs processing completed', + 'processed_epochs': len(unprocessed_epochs), + 'checkpoint_slot': checkpoint.slot + }) + return len(unprocessed_epochs) def _get_block_roots(self, checkpoint_slot: SlotNumber): @@ -183,13 +173,20 @@ def _get_block_roots(self, checkpoint_slot: SlotNumber): # Replace duplicated roots with `None` to mark missing slots br = [ - br[i] if br[i] != ZERO_BLOCK_ROOT and (i == pivot_index or br[i] != br[i - 1]) - else None + br[i] if br[i] != ZERO_BLOCK_ROOT and (i == pivot_index or br[i] != br[i - 1]) else None for i in range(len(br)) ] if is_pivot_missing: br[pivot_index] = None + logger.info({ + 'msg': 'Block roots analysis', + 'total_roots': len(br), + 'missing_roots_count': br.count(None), + 'pivot_index': pivot_index, + 'is_pivot_missing': is_pivot_missing + }) + return br def _select_block_roots( @@ -212,7 +209,9 @@ def _select_block_roots( return duty_epoch_roots, next_epoch_roots @staticmethod - def _select_block_root_by_slot(block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber, root_slot: SlotNumber) -> BlockRoot | None: + def _select_block_root_by_slot( + block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber, root_slot: SlotNumber + ) -> BlockRoot | None: # From spec # https://github.com/ethereum/consensus-specs/blob/dev/specs/phase0/beacon-chain.md#get_block_root_at_slot if not root_slot < checkpoint_slot <= root_slot + 
SLOTS_PER_HISTORICAL_ROOT: @@ -224,7 +223,7 @@ def _process( checkpoint_block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber, unprocessed_epochs: list[EpochNumber], - epochs_roots_to_check: dict[EpochNumber, tuple[list[SlotBlockRoot], list[SlotBlockRoot]]] + epochs_roots_to_check: dict[EpochNumber, tuple[list[SlotBlockRoot], list[SlotBlockRoot]]], ): executor = ThreadPoolExecutor(max_workers=variables.CSM_ORACLE_MAX_CONCURRENCY) try: @@ -234,14 +233,14 @@ def _process( checkpoint_block_roots, checkpoint_slot, duty_epoch, - *epochs_roots_to_check[duty_epoch] + *epochs_roots_to_check[duty_epoch], ) for duty_epoch in unprocessed_epochs } for future in as_completed(futures): future.result() except Exception as e: - logger.error({"msg": "Error processing epochs in threads", "error": repr(e)}) + logger.error({"msg": "Error processing epochs in threads", "error": str(e)}) raise SystemExit(1) from e finally: logger.info({"msg": "Shutting down the executor"}) @@ -259,9 +258,9 @@ def _check_duties( ): logger.info({"msg": f"Processing epoch {duty_epoch}"}) - att_committees = self._prepare_attestation_duties(duty_epoch) propose_duties = self._prepare_propose_duties(duty_epoch, checkpoint_block_roots, checkpoint_slot) - sync_committees = self._prepare_sync_committee_duties(duty_epoch, duty_epoch_roots) + att_committees, att_misses = self._prepare_attestation_duties(duty_epoch) + sync_duties = self._prepare_sync_committee_duties(duty_epoch) for slot, root in [*duty_epoch_roots, *next_epoch_roots]: missed_slot = root is None @@ -269,90 +268,72 @@ def _check_duties( continue attestations, sync_aggregate = self.cc.get_block_attestations_and_sync(root) if (slot, root) in duty_epoch_roots: - propose_duties[slot].included = True - process_sync(slot, sync_aggregate, sync_committees) - process_attestations(attestations, att_committees) - - with lock: - if duty_epoch not in self.state.unprocessed_epochs: - raise ValueError(f"Epoch {duty_epoch} is not in epochs that should be 
processed") - for att_committee in att_committees.values(): - for att_duty in att_committee: - self.state.save_att_duty( - duty_epoch, - att_duty.validator_index, - included=att_duty.included, - ) - for sync_committee in sync_committees.values(): - for sync_duty in sync_committee: - self.state.save_sync_duty( - duty_epoch, - sync_duty.validator_index, - included=sync_duty.included, - ) - for proposer_duty in propose_duties.values(): - self.state.save_prop_duty( - duty_epoch, - proposer_duty.validator_index, - included=proposer_duty.included - ) - self.state.add_processed_epoch(duty_epoch) - self.state.log_progress() - unprocessed_epochs = self.state.unprocessed_epochs - CSM_UNPROCESSED_EPOCHS_COUNT.set(len(unprocessed_epochs)) - CSM_MIN_UNPROCESSED_EPOCH.set(min(unprocessed_epochs or {EpochNumber(-1)})) + propose_duties[slot].is_proposed = True + sync_duties = process_sync(sync_aggregate, sync_duties) + att_misses = process_attestations(attestations, att_committees, att_misses) + + propose_duties = list(propose_duties.values()) + if len(propose_duties) > self.converter.chain_config.slots_per_epoch: + raise ValueError(f"Invalid number of propose duties prepared in epoch {duty_epoch}") + if len(sync_duties) > SYNC_COMMITTEE_SIZE: + raise ValueError(f"Invalid number of sync duties prepared in epoch {duty_epoch}") + self.db.store_epoch( + duty_epoch, + att_misses=att_misses, + proposals=propose_duties, + syncs=sync_duties, + ) @timeit( lambda args, duration: logger.info( {"msg": f"Attestation Committees for epoch {args.epoch} prepared in {duration:.2f} seconds"} ) ) - def _prepare_attestation_duties(self, epoch: EpochNumber) -> AttestationCommittees: - committees = {} + def _prepare_attestation_duties(self, epoch: EpochNumber) -> tuple[AttestationCommittees, AttDutyMisses]: + committees: AttestationCommittees = {} + att_misses: AttDutyMisses = set() for committee in self.cc.get_attestation_committees(self.finalized_blockstamp, epoch): - validators = [] - # Order of 
insertion is used to track the positions in the committees. - for validator_index in committee.validators: - validators.append(ValidatorDuty(validator_index, included=False)) - committees[(committee.slot, committee.index)] = validators - return committees + committees[(committee.slot, committee.index)] = committee.validators + att_misses.update(committee.validators) + return committees, att_misses @timeit( lambda args, duration: logger.info( {"msg": f"Sync Committee for epoch {args.epoch} prepared in {duration:.2f} seconds"} ) ) - def _prepare_sync_committee_duties( - self, epoch: EpochNumber, epoch_block_roots: list[SlotBlockRoot] - ) -> dict[SlotNumber, list[ValidatorDuty]]: - + def _prepare_sync_committee_duties(self, epoch: EpochNumber) -> list[SyncDuty]: with lock: sync_committee = self._get_sync_committee(epoch) - duties = {} - for slot, root in epoch_block_roots: - missed_slot = root is None - if missed_slot: - continue - duties[slot] = [ - ValidatorDuty(validator_index=validator_index, included=False) - for validator_index in sync_committee.validators - ] + duties: list[SyncDuty] = [] + for vid in sync_committee.validators: + duties.append(SyncDuty(validator_index=vid, missed_count=0)) return duties def _get_sync_committee(self, epoch: EpochNumber) -> SyncCommittee: sync_committee_period = epoch // EPOCHS_PER_SYNC_COMMITTEE_PERIOD if cached_sync_committee := SYNC_COMMITTEES_CACHE.get(sync_committee_period): + logger.debug({ + 'msg': 'Sync committee cache hit', + 'period': sync_committee_period, + 'cache_size': len(SYNC_COMMITTEES_CACHE) + }) return cached_sync_committee + + logger.debug({ + 'msg': 'Sync committee cache miss', + 'period': sync_committee_period, + 'cache_size': len(SYNC_COMMITTEES_CACHE) + }) + from_epoch = EpochNumber(epoch - epoch % EPOCHS_PER_SYNC_COMMITTEE_PERIOD) to_epoch = EpochNumber(from_epoch + EPOCHS_PER_SYNC_COMMITTEE_PERIOD - 1) logger.info({"msg": f"Preparing cached Sync Committee for [{from_epoch};{to_epoch}] chain epochs"}) 
state_blockstamp = build_blockstamp( get_prev_non_missed_slot( - self.cc, - self.converter.get_epoch_first_slot(epoch), - self.finalized_blockstamp.slot_number + self.cc, self.converter.get_epoch_first_slot(epoch), self.finalized_blockstamp.slot_number ) ) sync_committee = self.cc.get_sync_committee(state_blockstamp, epoch) @@ -365,23 +346,17 @@ def _get_sync_committee(self, epoch: EpochNumber) -> SyncCommittee: ) ) def _prepare_propose_duties( - self, - epoch: EpochNumber, - checkpoint_block_roots: list[BlockRoot | None], - checkpoint_slot: SlotNumber - ) -> dict[SlotNumber, ValidatorDuty]: + self, epoch: EpochNumber, checkpoint_block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber + ) -> dict[SlotNumber, ProposalDuty]: duties = {} dependent_root = self._get_dependent_root_for_proposer_duties(epoch, checkpoint_block_roots, checkpoint_slot) proposer_duties = self.cc.get_proposer_duties(epoch, dependent_root) for duty in proposer_duties: - duties[duty.slot] = ValidatorDuty(validator_index=duty.validator_index, included=False) + duties[duty.slot] = ProposalDuty(validator_index=duty.validator_index, is_proposed=False) return duties def _get_dependent_root_for_proposer_duties( - self, - epoch: EpochNumber, - checkpoint_block_roots: list[BlockRoot | None], - checkpoint_slot: SlotNumber + self, epoch: EpochNumber, checkpoint_block_roots: list[BlockRoot | None], checkpoint_slot: SlotNumber ) -> BlockRoot: dependent_root = None dependent_slot = self.converter.get_epoch_last_slot(EpochNumber(epoch - 1)) @@ -394,48 +369,62 @@ def _get_dependent_root_for_proposer_duties( logger.debug( { "msg": f"Got dependent root from state block roots for epoch {epoch}. 
" - f"{dependent_slot=} {dependent_root=}" + f"{dependent_slot=} {dependent_root=}" } ) break dependent_slot = SlotNumber(int(dependent_slot - 1)) except SlotOutOfRootsRange: dependent_non_missed_slot = get_prev_non_missed_slot( - self.cc, - dependent_slot, - self.finalized_blockstamp.slot_number + self.cc, dependent_slot, self.finalized_blockstamp.slot_number ).message.slot dependent_root = self.cc.get_block_root(dependent_non_missed_slot).root logger.debug( { "msg": f"Got dependent root from CL for epoch {epoch}. " - f"{dependent_non_missed_slot=} {dependent_root=}" + f"{dependent_non_missed_slot=} {dependent_root=}" } ) return dependent_root -def process_sync(slot: SlotNumber, sync_aggregate: SyncAggregate, committees: SyncCommittees) -> None: - committee = committees[slot] +def process_sync( + sync_aggregate: SyncAggregate, + sync_duties: list[SyncDuty] +) -> list[SyncDuty]: # Spec: https://github.com/ethereum/consensus-specs/blob/dev/specs/altair/beacon-chain.md#syncaggregate sync_bits = hex_bitvector_to_list(sync_aggregate.sync_committee_bits) - for index_in_committee in get_set_indices(sync_bits): - committee[index_in_committee].included = True + # No need to process set bits because they mean that validator has participated successfully. + for index_in_committee in get_unset_indices(sync_bits): + sync_duties[index_in_committee].missed_count += 1 + return sync_duties def process_attestations( attestations: Iterable[BlockAttestation], committees: AttestationCommittees, -) -> None: + misses: AttDutyMisses, +) -> AttDutyMisses: for attestation in attestations: committee_offset = 0 att_bits = hex_bitlist_to_list(attestation.aggregation_bits) + att_slot = attestation.data.slot for committee_idx in get_committee_indices(attestation): - committee = committees.get((attestation.data.slot, committee_idx), []) + committee = committees.get((att_slot, committee_idx)) + if not committee: + # It is attestation from prev or future epoch. 
+ # We already checked that before or check in next epoch processing. + continue att_committee_bits = att_bits[committee_offset:][: len(committee)] + # Treat only set bits as reliable because committees can attest in multiple blocks. + # Unset bits do not necessarily mean a miss: when a committee was partially aggregated in + # an earlier block, the later block may legitimately keep those positions unset. for index_in_committee in get_set_indices(att_committee_bits): - committee[index_in_committee].included = True + vid = committee[index_in_committee] + if vid in misses: + misses.remove(vid) committee_offset += len(committee) + return misses def get_committee_indices(attestation: BlockAttestation) -> list[CommitteeIndex]: @@ -447,6 +436,11 @@ def get_set_indices(bits: Sequence[bool]) -> list[int]: return [i for i, bit in enumerate(bits) if bit] +def get_unset_indices(bits: Sequence[bool]) -> list[int]: + """Returns indices of false values in the supplied sequence""" + return [i for i, bit in enumerate(bits) if not bit] + + def hex_bitvector_to_list(bitvector: str) -> list[bool]: bytes_ = hex_str_to_bytes(bitvector) return _bytes_to_bool_list(bytes_) diff --git a/src/modules/performance/collector/collector.py b/src/modules/performance/collector/collector.py new file mode 100644 index 000000000..0c49fb892 --- /dev/null +++ b/src/modules/performance/collector/collector.py @@ -0,0 +1,162 @@ +import logging + +from src import variables +from src.modules.performance.collector.checkpoint import ( + FrameCheckpointsIterator, + FrameCheckpointProcessor, +) +from src.modules.performance.common.db import DutiesDB +from src.modules.submodules.oracle_module import BaseModule, ModuleExecuteDelay +from src.modules.submodules.types import ChainConfig +from src.types import BlockStamp, EpochNumber +from src.utils.web3converter import ChainConverter + +logger = logging.getLogger(__name__) + + +class PerformanceCollector(BaseModule): + """ + Continuously collects performance data 
from Consensus Layer into db for the given epoch range. + """ + # Timestamp of the last epochs demand update + last_epochs_demand_update: int = 0 + + def __init__(self, w3): + super().__init__(w3) + self.db = DutiesDB( + connect_timeout=variables.PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT, + statement_timeout_ms=variables.PERFORMANCE_COLLECTOR_DB_STATEMENT_TIMEOUT_MS, + ) + self.last_epochs_demand_update = self.get_epochs_demand_max_updated_at() + + def refresh_contracts(self): + # No need to refresh contracts for this module. There are no contracts used. + return None + + def _build_converter(self) -> ChainConverter: + cc_spec = self.w3.cc.get_config_spec() + genesis = self.w3.cc.get_genesis() + chain_cfg = ChainConfig( + slots_per_epoch=cc_spec.SLOTS_PER_EPOCH, + seconds_per_slot=cc_spec.SECONDS_PER_SLOT, + genesis_time=genesis.genesis_time, + ) + return ChainConverter(chain_cfg) + + def execute_module(self, last_finalized_blockstamp: BlockStamp) -> ModuleExecuteDelay: + converter = self._build_converter() + + # NOTE: Finalized slot is the first slot of justifying epoch, so we need to take the previous. But if the first + # slot of the justifying epoch is empty, blockstamp.slot_number will point to the slot where the last finalized + # block was created. As a result, finalized_epoch in this case will be less than the actual number of the last + # finalized epoch. As a result we can have a delay in frame finalization. 
+ finalized_epoch = EpochNumber(converter.get_epoch_by_slot(last_finalized_blockstamp.slot_number) - 1) + + epochs_range_demand = self.define_epochs_to_process_range(finalized_epoch) + if not epochs_range_demand: + return ModuleExecuteDelay.NEXT_SLOT + start_epoch, end_epoch = epochs_range_demand + + checkpoints = FrameCheckpointsIterator( + converter, + start_epoch, + end_epoch, + finalized_epoch, + ) + processor = FrameCheckpointProcessor(self.w3.cc, self.db, converter, last_finalized_blockstamp) + + checkpoint_count = 0 + for checkpoint in checkpoints: + processed_epochs = processor.exec(checkpoint) + checkpoint_count += 1 + logger.info({ + 'msg': 'Checkpoint processing completed', + 'checkpoint_slot': checkpoint.slot, + 'processed_epochs': processed_epochs + }) + # Reset BaseOracle cycle timeout to avoid timeout errors during long checkpoints processing + self._reset_cycle_timeout() + + if self.new_epochs_range_demand_appeared(): + logger.info({"msg": "New epochs demand is found during processing"}) + return ModuleExecuteDelay.NEXT_SLOT + + logger.info({ + 'msg': 'All checkpoints processing completed', + 'total_checkpoints_processed': checkpoint_count + }) + + return ModuleExecuteDelay.NEXT_SLOT + + def define_epochs_to_process_range(self, finalized_epoch: EpochNumber) -> tuple[EpochNumber, EpochNumber] | None: + max_available_epoch_to_check = finalized_epoch - FrameCheckpointsIterator.CHECKPOINT_SLOT_DELAY_EPOCHS + if max_available_epoch_to_check < 0: + logger.info({"msg": "No available epochs to process yet"}) + return None + + min_epoch_in_db = self.db.min_epoch() + max_epoch_in_db = self.db.max_epoch() + + if min_epoch_in_db and max_available_epoch_to_check < min_epoch_in_db: + raise ValueError( + "Max available epoch to check is lower than the minimum epoch in the DB. 
CL node is not synced" + ) + + start_epoch = EpochNumber(max_available_epoch_to_check) + end_epoch = EpochNumber(max_available_epoch_to_check) + + epochs_demand = self.db.get_epochs_demands() + if not epochs_demand: + logger.info({"msg": "No epoch demands found"}) + for demand in epochs_demand: + logger.info({ + "msg": "Epochs demand", **demand.model_dump() + }) + is_range_available = self.db.is_range_available(EpochNumber(demand.l_epoch), EpochNumber(demand.r_epoch)) + if is_range_available: + logger.info({ + "msg": f"Epochs demand for {demand.consumer} is already satisfied", + }) + # Remove from the DB just in case + self.db.delete_demand(demand.consumer) + # There is no sense to lower start_epoch because the demand is already satisfied (data is in the DB) + continue + start_epoch = min(start_epoch, demand.l_epoch) + + missing_epochs = self.db.missing_epochs_in(start_epoch, end_epoch) + if not missing_epochs: + if max_epoch_in_db is None: + raise ValueError("No missing epochs found but the DB is empty. 
Probably a logic error or corrupted DB.") + start_epoch = EpochNumber(max_epoch_in_db + 1) + else: + start_epoch = min(missing_epochs) + + log_meta_info = { + "start_epoch": start_epoch, + "end_epoch": end_epoch, + "finalized_epoch": finalized_epoch, + "max_available_epoch_to_check": max_available_epoch_to_check, + "min_epoch_in_db": min_epoch_in_db, + "max_epoch_in_db": max_epoch_in_db, + "missing_epochs": len(missing_epochs) if missing_epochs else 0, + } + + if start_epoch > max_available_epoch_to_check: + logger.info({"msg": "No available to process epochs range demand yet", **log_meta_info}) + return None + + logger.info({"msg": "Epochs range to process is determined", **log_meta_info}) + + return start_epoch, end_epoch + + def new_epochs_range_demand_appeared(self) -> bool: + max_updated_at = self.get_epochs_demand_max_updated_at() + updated = self.last_epochs_demand_update != max_updated_at + if updated: + self.last_epochs_demand_update = max_updated_at + return True + return False + + def get_epochs_demand_max_updated_at(self) -> int: + max_updated_at = self.db.get_epochs_demands_max_updated_at() + return int(max_updated_at) if max_updated_at is not None else 0 diff --git a/src/modules/performance/common/__init__.py b/src/modules/performance/common/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/modules/performance/common/db.py b/src/modules/performance/common/db.py new file mode 100644 index 000000000..ac8933ac4 --- /dev/null +++ b/src/modules/performance/common/db.py @@ -0,0 +1,200 @@ +from time import time +from typing import Sequence + +from sqlalchemy import ARRAY, Boolean, Column, Integer, SmallInteger, delete, func +from sqlmodel import SQLModel, Field, Session, create_engine, select + +from src import variables +from src.modules.performance.common.types import ProposalDuty, SyncDuty, AttDutyMisses +from src.types import EpochNumber +from src.utils.range import sequence + + +class Duty(SQLModel, table=True): + 
__tablename__ = "duties" + + epoch: int = Field(primary_key=True) + attestations: list[int] = Field(default=None, sa_column=Column(ARRAY(Integer()))) + proposals_vids: list[int] = Field(default=None, sa_column=Column(ARRAY(Integer()))) + proposals_flags: list[bool] = Field(default=None, sa_column=Column(ARRAY(Boolean()))) + syncs_vids: list[int] = Field(default=None, sa_column=Column(ARRAY(Integer()))) + syncs_misses: list[int] = Field(default=None, sa_column=Column(ARRAY(SmallInteger()))) + + +class EpochsDemand(SQLModel, table=True): + __tablename__ = "epochs_demands" + + consumer: str = Field(primary_key=True) + l_epoch: int + r_epoch: int + updated_at: int + + +class DutiesDB: + def __init__(self, *, connect_timeout: int | None = None, statement_timeout_ms: int | None = None): + self._statement_timeout_ms = statement_timeout_ms + self.engine = self._build_engine(connect_timeout) + self._setup_database() + + def _build_engine(self, connect_timeout: int | None): + connect_args = {} + if connect_timeout: + connect_args["connect_timeout"] = connect_timeout + if self._statement_timeout_ms: + connect_args["options"] = f"-c statement_timeout={self._statement_timeout_ms}" + + return create_engine( + self._get_database_url(), + echo=False, + pool_pre_ping=True, # Enable connection health checks + pool_recycle=3600, # Recycle connections every hour + pool_size=10, + max_overflow=20, + connect_args=connect_args, + ) + + @staticmethod + def _get_database_url() -> str: + """Get PostgreSQL database URL from environment variables""" + host = variables.PERFORMANCE_DB_HOST + port = variables.PERFORMANCE_DB_PORT + name = variables.PERFORMANCE_DB_NAME + user = variables.PERFORMANCE_DB_USER + password = variables.PERFORMANCE_DB_PASSWORD + return f"postgresql://{user}:{password}@{host}:{port}/{name}" + + def _setup_database(self): + SQLModel.metadata.create_all(self.engine) + + def get_session(self) -> Session: + session = Session(self.engine) + return session + + def 
store_demand(self, consumer: str, l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: + with self.get_session() as session: + demand = session.get(EpochsDemand, consumer) + if demand: + demand.l_epoch = l_epoch + demand.r_epoch = r_epoch + demand.updated_at = int(time()) + else: + demand = EpochsDemand(consumer=consumer, l_epoch=l_epoch, r_epoch=r_epoch, updated_at=int(time())) + session.add(demand) + session.commit() + + def delete_demand(self, consumer: str) -> None: + with self.get_session() as session: + demand = session.get(EpochsDemand, consumer) + if demand: + session.delete(demand) + session.commit() + + def store_epoch( + self, + epoch: EpochNumber, + att_misses: AttDutyMisses, + proposals: list[ProposalDuty], + syncs: list[SyncDuty], + ) -> None: + # TODO: test that store and get are consistent + self._store_data(epoch, att_misses, proposals, syncs) + self._auto_prune(epoch) + + def _store_data( + self, + epoch: EpochNumber, + att_misses: AttDutyMisses, + proposals: list[ProposalDuty], + syncs: list[SyncDuty], + ): + att_list: list[int] = [int(v) for v in att_misses] if att_misses else [] + prop_vids: list[int] = [int(p.validator_index) for p in proposals] if proposals else [] + prop_flags: list[bool] = [bool(p.is_proposed) for p in proposals] if proposals else [] + sync_vids: list[int] = [int(s.validator_index) for s in syncs] if syncs else [] + sync_misses: list[int] = [int(s.missed_count) for s in syncs] if syncs else [] + + with self.get_session() as session: + duty = session.get(Duty, epoch) + if duty: + duty.attestations = att_list + duty.proposals_vids = prop_vids + duty.proposals_flags = prop_flags + duty.syncs_vids = sync_vids + duty.syncs_misses = sync_misses + else: + duty = Duty( + epoch=epoch, + attestations=att_list, + proposals_vids=prop_vids, + proposals_flags=prop_flags, + syncs_vids=sync_vids, + syncs_misses=sync_misses, + ) + session.add(duty) + session.commit() + + def _auto_prune(self, current_epoch: EpochNumber) -> None: + if 
variables.PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS <= 0: + return + threshold = int(current_epoch) - variables.PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS + if threshold <= 0: + return + + with self.get_session() as session: + session.exec(delete(Duty).where(Duty.epoch < threshold)) + session.commit() + + def is_range_available(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> bool: + if int(l_epoch) > int(r_epoch): + raise ValueError("Invalid epoch range") + + with self.get_session() as session: + stmt = select(func.count()).select_from(Duty).where(Duty.epoch >= l_epoch, Duty.epoch <= r_epoch) + count = session.exec(stmt).one() + return count == (r_epoch - l_epoch + 1) + + def missing_epochs_in(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> list[EpochNumber]: + if l_epoch > r_epoch: + raise ValueError("Invalid epoch range") + + with self.get_session() as session: + present_duties = session.exec( + select(Duty.epoch).where(Duty.epoch >= l_epoch, Duty.epoch <= r_epoch).order_by(Duty.epoch) + ).all() + present = {EpochNumber(int(epoch)) for epoch in present_duties} + + return [epoch for epoch in sequence(l_epoch, r_epoch) if epoch not in present] + + def get_epochs_data(self, from_epoch: EpochNumber, to_epoch: EpochNumber) -> Sequence[Duty]: + with self.get_session() as session: + return session.exec(select(Duty).where(Duty.epoch >= from_epoch, Duty.epoch <= to_epoch)).all() + + def get_epoch_data(self, epoch: EpochNumber) -> Duty | None: + with self.get_session() as session: + return session.get(Duty, epoch) + + def has_epoch(self, epoch: EpochNumber) -> bool: + return self.get_epoch_data(epoch) is not None + + def min_epoch(self) -> EpochNumber | None: + with self.get_session() as session: + result = session.exec(select(Duty.epoch).order_by(Duty.epoch).limit(1)).first() + return EpochNumber(int(result)) if result else None + + def max_epoch(self) -> EpochNumber | None: + with self.get_session() as session: + # pylint: disable=no-member + result = 
session.exec(select(Duty.epoch).order_by(Duty.epoch.desc()).limit(1)).first() + return EpochNumber(int(result)) if result else None + + def get_epochs_demand(self, consumer: str) -> EpochsDemand | None: + with self.get_session() as session: + return session.get(EpochsDemand, consumer) + + def get_epochs_demands(self) -> Sequence[EpochsDemand]: + with self.get_session() as session: + return session.exec(select(EpochsDemand)).all() + + def get_epochs_demands_max_updated_at(self) -> int | None: + with self.get_session() as session: + return session.exec(select(func.max(EpochsDemand.updated_at))).one() diff --git a/src/modules/performance/common/types.py b/src/modules/performance/common/types.py new file mode 100644 index 000000000..3941f71d8 --- /dev/null +++ b/src/modules/performance/common/types.py @@ -0,0 +1,18 @@ +from typing import TypeAlias + +from pydantic import BaseModel + +from src.types import ValidatorIndex + + +class ProposalDuty(BaseModel): + validator_index: int + is_proposed: bool + + +class SyncDuty(BaseModel): + validator_index: int + missed_count: int # 0..32 + + +AttDutyMisses: TypeAlias = set[ValidatorIndex] diff --git a/src/modules/performance/web/__init__.py b/src/modules/performance/web/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/modules/performance/web/metrics.py b/src/modules/performance/web/metrics.py new file mode 100644 index 000000000..52577a3a9 --- /dev/null +++ b/src/modules/performance/web/metrics.py @@ -0,0 +1,32 @@ +from fastapi import FastAPI +from prometheus_client import CollectorRegistry +from prometheus_fastapi_instrumentator import Instrumentator, metrics + +from src import variables +from src.metrics.prometheus.basic import BUILD_INFO +from src.utils.build import get_build_info + +# To avoid auto-scraping metrics from `src/metrics/prometheus` and any other possible places. 
+CUSTOM_REGISTRY = CollectorRegistry() +CUSTOM_REGISTRY.register(BUILD_INFO) + + +def attach_metrics(app: FastAPI): + build_info = get_build_info() + BUILD_INFO.info(build_info) + + instrumentator = Instrumentator( + excluded_handlers=["/health", "/metrics"], + registry=CUSTOM_REGISTRY, + ) + instrumentator.add( + metrics.default( + metric_namespace=variables.PERFORMANCE_WEB_SERVER_METRICS_PREFIX, + registry=CUSTOM_REGISTRY + ) + ) + instrumentator.instrument(app).expose( + app, + include_in_schema=True, + should_gzip=True, + ) diff --git a/src/modules/performance/web/middleware.py b/src/modules/performance/web/middleware.py new file mode 100644 index 000000000..aba6ad159 --- /dev/null +++ b/src/modules/performance/web/middleware.py @@ -0,0 +1,22 @@ +from anyio import fail_after +from fastapi import FastAPI +from starlette.middleware.base import BaseHTTPMiddleware, RequestResponseEndpoint +from starlette.responses import JSONResponse + + +class RequestTimeoutMiddleware(BaseHTTPMiddleware): + """Bounds total request processing time.""" + + def __init__(self, app: FastAPI, timeout: float): + super().__init__(app) + self.timeout = timeout + + async def dispatch(self, request, call_next: RequestResponseEndpoint): # type: ignore[override] + try: + with fail_after(self.timeout): + return await call_next(request) + except TimeoutError: + return JSONResponse( + {"detail": f"Request timed out after {self.timeout} seconds"}, + status_code=504, + ) diff --git a/src/modules/performance/web/server.py b/src/modules/performance/web/server.py new file mode 100644 index 000000000..308a9a1f8 --- /dev/null +++ b/src/modules/performance/web/server.py @@ -0,0 +1,151 @@ +from typing import cast +from contextlib import asynccontextmanager + +from fastapi import FastAPI, HTTPException, Depends, Query +import uvicorn +from pydantic import BaseModel +from uvicorn.config import LOGGING_CONFIG + +from src.modules.performance.common.db import DutiesDB, Duty, EpochsDemand +from 
src.modules.performance.web.middleware import RequestTimeoutMiddleware +from src.variables import ( + PERFORMANCE_WEB_SERVER_API_HOST, + PERFORMANCE_WEB_SERVER_API_PORT, + PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT, + PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS, + PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE, + PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT, +) +from src.modules.performance.web.metrics import attach_metrics +from src.types import EpochNumber +from src.metrics.logging import JsonFormatter + + +class EpochsDemandRequest(BaseModel): + consumer: str + l_epoch: EpochNumber + r_epoch: EpochNumber + + +class HealthCheckResp(BaseModel): + status: str = "ok" + + +@asynccontextmanager +async def lifespan(app: FastAPI): + app.state.db = DutiesDB( + connect_timeout=PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT, + statement_timeout_ms=PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS, + ) + yield + + +app = FastAPI(title="Performance Collector API", lifespan=lifespan) +attach_metrics(app) +app.add_middleware(RequestTimeoutMiddleware, timeout=PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT) + + +def get_db() -> DutiesDB: + return cast(DutiesDB, app.state.db) + + +def validate_epoch_bounds(l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: + if l_epoch > r_epoch: + raise HTTPException(status_code=400, detail="'l_epoch' must be <= 'r_epoch'") + + +def validate_range_size(l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: + range_size = int(r_epoch) - int(l_epoch) + 1 + if range_size > PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE: + raise HTTPException( + status_code=400, + detail=f"Requested epoch range is too large; maximum allowed size is {PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE} epochs", + ) + + +def query_epoch_range( + from_epoch: EpochNumber = Query(..., alias="from"), + to_epoch: EpochNumber = Query(..., alias="to"), +) -> tuple[EpochNumber, EpochNumber]: + validate_epoch_bounds(from_epoch, to_epoch) + return from_epoch, to_epoch + + +@app.get("/health", 
response_model=HealthCheckResp) +def health(): + return {"status": "ok"} + + +@app.get("/check-epochs", response_model=bool) +def epochs_check( + epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), + db: DutiesDB = Depends(get_db), +): + l_epoch, r_epoch = epoch_range + return db.is_range_available(l_epoch, r_epoch) + + +@app.get("/missing-epochs", response_model=list[EpochNumber]) +def epochs_missing( + epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), + db: DutiesDB = Depends(get_db), +): + l_epoch, r_epoch = epoch_range + return db.missing_epochs_in(l_epoch, r_epoch) + + +@app.get("/epochs", response_model=list[Duty]) +def epochs_data( + epoch_range: tuple[EpochNumber, EpochNumber] = Depends(query_epoch_range), + db: DutiesDB = Depends(get_db), +): + l_epoch, r_epoch = epoch_range + validate_range_size(l_epoch, r_epoch) + return db.get_epochs_data(l_epoch, r_epoch) + + +@app.get("/epochs/{epoch}", response_model=Duty | None) +def epoch_data(epoch: EpochNumber, db: DutiesDB = Depends(get_db)): + return db.get_epoch_data(epoch) + + +@app.get("/demands", response_model=list[EpochsDemand]) +def epochs_demands(db: DutiesDB = Depends(get_db)): + return db.get_epochs_demands() + + +@app.get("/demands/{consumer}", response_model=EpochsDemand | None) +def one_epochs_demand(consumer: str, db: DutiesDB = Depends(get_db)): + return db.get_epochs_demand(consumer) + + +@app.post("/demands", response_model=EpochsDemand) +def set_epochs_demand(demand_to_add: EpochsDemandRequest, db: DutiesDB = Depends(get_db)): + validate_epoch_bounds(demand_to_add.l_epoch, demand_to_add.r_epoch) + db.store_demand(demand_to_add.consumer, demand_to_add.l_epoch, demand_to_add.r_epoch) + return db.get_epochs_demand(demand_to_add.consumer) + + +@app.delete("/demands", response_model=EpochsDemand) +def delete_epochs_demand(consumer: str = Query(...), db: DutiesDB = Depends(get_db)): + to_delete = db.get_epochs_demand(consumer) + if not to_delete: + 
raise HTTPException(status_code=404, detail=f"No demand found for consumer '{consumer}'") + db.delete_demand(consumer) + return to_delete + + +def serve(): + # Prepare logging config with the app-wise formatter + logging_config = LOGGING_CONFIG.copy() + for formatter_name in logging_config["formatters"]: + logging_config["formatters"][formatter_name] = { + "()": JsonFormatter, + } + + uvicorn.run( + app, + host=PERFORMANCE_WEB_SERVER_API_HOST, + port=PERFORMANCE_WEB_SERVER_API_PORT, + log_config=logging_config, + ) diff --git a/src/modules/submodules/consensus.py b/src/modules/submodules/consensus.py index d75bb1bc9..276524d53 100644 --- a/src/modules/submodules/consensus.py +++ b/src/modules/submodules/consensus.py @@ -483,7 +483,7 @@ def _get_web3_converter(self, blockstamp: BlockStamp) -> Web3Converter: @lru_cache(maxsize=1) def get_frame_number_by_slot(self, blockstamp: ReferenceBlockStamp) -> FrameNumber: converter = self._get_web3_converter(blockstamp) - frame_number = converter.get_frame_by_slot(blockstamp.ref_slot) + frame_number = converter.get_frame_by_slot(SlotNumber(blockstamp.ref_slot + 1)) logger.info({ "msg": "Get current frame from blockstamp", "frame": frame_number, diff --git a/src/providers/http_provider.py b/src/providers/http_provider.py index 4af9c347f..751d8c467 100644 --- a/src/providers/http_provider.py +++ b/src/providers/http_provider.py @@ -106,7 +106,7 @@ def _get( stream: bool = False, ) -> tuple[Any, dict]: """ - Get plain or streamed request with fallbacks + Plain or streamed GET request with fallbacks Returns (data, meta) or raises exception force_raise - function that returns an Exception if it should be thrown immediately. 
@@ -152,7 +152,7 @@ def _get_without_fallbacks( retval_validator: ReturnValueValidator = data_is_any, ) -> tuple[Any, dict]: """ - Simple get request without fallbacks + Simple GET request without fallbacks Returns (data, meta) or raises an exception """ complete_endpoint = endpoint.format(*path_params) if path_params else endpoint @@ -208,8 +208,224 @@ def _get_without_fallbacks( if not stream: del json_response["data"] meta = json_response + except (KeyError, TypeError): + # NOTE: Used by KeysAPIClient and PerformanceClient only. + data = json_response + meta = {} + + retval_validator(data, meta, endpoint=endpoint) + return data, meta + + def _post( + self, + endpoint: str, + path_params: Sequence[str | int] | None = None, + query_params: dict | None = None, + body_data: dict | None = None, + force_raise: Callable[..., Exception | None] = lambda _: None, + retval_validator: ReturnValueValidator = data_is_any, + ) -> tuple[dict, dict]: + """ + Plain POST request with fallbacks + Returns (data, meta) or raises exception + + force_raise - function that returns an Exception if it should be thrown immediately. + Sometimes NotOk response from first provider is the response that we are expecting. + """ + errors: list[Exception] = [] + + for host in self.hosts: + try: + return self._post_without_fallbacks( + host, + endpoint, + path_params, + query_params, + body_data, + retval_validator=retval_validator, + ) + except Exception as e: # pylint: disable=W0703 + errors.append(e) + + # Check if exception should be raised immediately + if to_force_raise := force_raise(errors): + raise to_force_raise from e + + logger.warning( + { + 'msg': f'[{self.__class__.__name__}] Host [{urlparse(host).netloc}] responded with error', + 'error': str(e), + 'provider': urlparse(host).netloc, + } + ) + + # Raise error from last provider. 
+ raise errors[-1] + + def _post_without_fallbacks( + self, + host: str, + endpoint: str, + path_params: Sequence[str | int] | None = None, + query_params: dict | None = None, + body_data: dict | None = None, + retval_validator: ReturnValueValidator = data_is_any, + ) -> tuple[dict, dict]: + """ + Simple POST request without fallbacks + Returns (data, meta) or raises an exception + """ + complete_endpoint = endpoint.format(*path_params) if path_params else endpoint + + with self.PROMETHEUS_HISTOGRAM.time() as t: + try: + response = self.session.post( + self._urljoin(host, complete_endpoint if path_params else endpoint), + params=query_params, + json=body_data, + timeout=self.request_timeout, + ) + except Exception as error: + logger.error({'msg': str(error)}) + t.labels( + endpoint=endpoint, + code=0, + domain=urlparse(host).netloc, + ) + raise self.PROVIDER_EXCEPTION(status=0, text='Response error.') from error + + t.labels( + endpoint=endpoint, + code=response.status_code, + domain=urlparse(host).netloc, + ) + + if response.status_code != HTTPStatus.OK: + response_fail_msg = ( + f'Response from {complete_endpoint} [{response.status_code}]' + f' with text: "{str(response.text)}" returned.' 
+ ) + logger.debug({'msg': response_fail_msg}) + raise self.PROVIDER_EXCEPTION(response_fail_msg, status=response.status_code, text=response.text) + + try: + json_response = response.json() + except JSONDecodeError as error: + response_fail_msg = ( + f'Failed to decode JSON response from {complete_endpoint} with text: "{str(response.text)}"' + ) + logger.debug({'msg': response_fail_msg}) + raise self.PROVIDER_EXCEPTION(status=0, text='JSON decode error.') from error + + try: + data = json_response["data"] + del json_response["data"] + meta = json_response + except KeyError: + data = json_response + meta = {} + + retval_validator(data, meta, endpoint=endpoint) + return data, meta + + def _delete( + self, + endpoint: str, + path_params: Sequence[str | int] | None = None, + query_params: dict | None = None, + body_data: dict | None = None, + force_raise: Callable[..., Exception | None] = lambda _: None, + retval_validator: ReturnValueValidator = data_is_any, + ) -> tuple[dict, dict]: + errors: list[Exception] = [] + + for host in self.hosts: + try: + return self._delete_without_fallbacks( + host, + endpoint, + path_params, + query_params, + body_data, + retval_validator=retval_validator, + ) + except Exception as e: # pylint: disable=W0703 + errors.append(e) + + if to_force_raise := force_raise(errors): + raise to_force_raise from e + + logger.warning( + { + 'msg': f'[{self.__class__.__name__}] Host [{urlparse(host).netloc}] responded with error', + 'error': str(e), + 'provider': urlparse(host).netloc, + } + ) + + if not errors: + raise RuntimeError('No hosts available for DELETE request') + raise errors[-1] + + def _delete_without_fallbacks( + self, + host: str, + endpoint: str, + path_params: Sequence[str | int] | None = None, + query_params: dict | None = None, + body_data: dict | None = None, + retval_validator: ReturnValueValidator = data_is_any, + ) -> tuple[dict, dict]: + complete_endpoint = endpoint.format(*path_params) if path_params else endpoint + + with 
self.PROMETHEUS_HISTOGRAM.time() as t: + try: + response = self.session.delete( + self._urljoin(host, complete_endpoint if path_params else endpoint), + params=query_params, + json=body_data, + timeout=self.request_timeout, + ) + except Exception as error: # pylint: disable=W0703 + logger.error({'msg': str(error)}) + t.labels( + endpoint=endpoint, + code=0, + domain=urlparse(host).netloc, + ) + raise self.PROVIDER_EXCEPTION(status=0, text='Response error.') from error + + t.labels( + endpoint=endpoint, + code=response.status_code, + domain=urlparse(host).netloc, + ) + + if response.status_code != HTTPStatus.OK: + response_fail_msg = ( + f'Response from {complete_endpoint} [{response.status_code}]' + f' with text: "{str(response.text)}" returned.' + ) + logger.debug({'msg': response_fail_msg}) + raise self.PROVIDER_EXCEPTION(response_fail_msg, status=response.status_code, text=response.text) + + if not response.content: + json_response: dict = {} + else: + try: + json_response = response.json() + except JSONDecodeError as error: + response_fail_msg = ( + f'Failed to decode JSON response from {complete_endpoint} with text: "{str(response.text)}"' + ) + logger.debug({'msg': response_fail_msg}) + raise self.PROVIDER_EXCEPTION(status=0, text='JSON decode error.') from error + + try: + data = json_response["data"] + del json_response["data"] + meta = json_response except KeyError: - # NOTE: Used by KeysAPIClient only. 
data = json_response meta = {} diff --git a/src/providers/performance/__init__.py b/src/providers/performance/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/providers/performance/client.py b/src/providers/performance/client.py new file mode 100644 index 000000000..d49740363 --- /dev/null +++ b/src/providers/performance/client.py @@ -0,0 +1,51 @@ +from src.metrics.prometheus.basic import PERFORMANCE_REQUESTS_DURATION +from src.modules.performance.common.db import Duty, EpochsDemand +from src.providers.http_provider import ( + HTTPProvider, + NotOkResponse, +) +from src.types import EpochNumber + + +class PerformanceClientError(NotOkResponse): + pass + + +class PerformanceClient(HTTPProvider): + PROVIDER_EXCEPTION = PerformanceClientError + PROMETHEUS_HISTOGRAM = PERFORMANCE_REQUESTS_DURATION + + API_EPOCHS_CHECK = 'check-epochs' + API_EPOCHS_DATA = 'epochs' + API_EPOCHS_DEMAND = 'demands' + + def is_range_available(self, l_epoch: EpochNumber, r_epoch: EpochNumber) -> bool: + data, _ = self._get( + self.API_EPOCHS_CHECK, + query_params={'from': l_epoch, 'to': r_epoch}, + ) + return bool(data) + + def get_epoch_data(self, epoch: EpochNumber) -> Duty | None: + data, _ = self._get( + self.API_EPOCHS_DATA + f"/{epoch}", + ) + return Duty.model_validate(data) if data else None + + def get_epochs_demand(self, consumer: str) -> EpochsDemand | None: + data, _ = self._get( + self.API_EPOCHS_DEMAND + f"/{consumer}", + ) + return EpochsDemand.model_validate(data) if data else None + + def post_epochs_demand(self, consumer: str, l_epoch: EpochNumber, r_epoch: EpochNumber) -> None: + self._post( + self.API_EPOCHS_DEMAND, + body_data={'consumer': consumer, 'l_epoch': l_epoch, 'r_epoch': r_epoch}, + ) + + def delete_epochs_demand(self, consumer: str) -> None: + self._delete( + self.API_EPOCHS_DEMAND, + query_params={'consumer': consumer}, + ) diff --git a/src/services/safe_border.py b/src/services/safe_border.py index ffe668392..47ed5fcc9 100644 --- 
a/src/services/safe_border.py +++ b/src/services/safe_border.py @@ -30,10 +30,7 @@ class SafeBorder(Web3Converter): 2. Negative rebase border 3. Associated slashing border """ - chain_config: ChainConfig - frame_config: FrameConfig blockstamp: ReferenceBlockStamp - converter: Web3Converter def __init__( self, @@ -48,10 +45,7 @@ def __init__( self.lido_contracts = w3.lido_contracts self.blockstamp = blockstamp - self.chain_config = chain_config - self.frame_config = frame_config - self.converter = Web3Converter(chain_config, frame_config) self._retrieve_constants() def _retrieve_constants(self): diff --git a/src/types.py b/src/types.py index 22ed30d2d..dfb2e8f74 100644 --- a/src/types.py +++ b/src/types.py @@ -11,6 +11,8 @@ class OracleModule(StrEnum): EJECTOR = 'ejector' CHECK = 'check' CSM = 'csm' + PERFORMANCE_WEB_SERVER = 'performance_web_server' + PERFORMANCE_COLLECTOR = 'performance_collector' EpochNumber = NewType('EpochNumber', int) diff --git a/src/utils/web3converter.py b/src/utils/web3converter.py index e8706785f..58a705e97 100644 --- a/src/utils/web3converter.py +++ b/src/utils/web3converter.py @@ -9,31 +9,42 @@ def epoch_from_slot(slot: SlotNumber, slots_per_epoch: int) -> EpochNumber: return EpochNumber(slot // slots_per_epoch) -class Web3Converter: +class ChainConverter: + chain_config: ChainConfig + + def __init__(self, chain_config: ChainConfig): + self.chain_config = chain_config + + def get_epoch_first_slot(self, epoch: EpochNumber) -> SlotNumber: + return SlotNumber(epoch * self.chain_config.slots_per_epoch) + + def get_epoch_last_slot(self, epoch: EpochNumber) -> SlotNumber: + return SlotNumber((epoch + 1) * self.chain_config.slots_per_epoch - 1) + + def get_epoch_by_slot(self, slot: SlotNumber) -> EpochNumber: + return EpochNumber(slot // self.chain_config.slots_per_epoch) + + def get_slot_by_timestamp(self, timestamp: int) -> SlotNumber: + return SlotNumber((timestamp - self.chain_config.genesis_time) // self.chain_config.seconds_per_slot) + + 
+class Web3Converter(ChainConverter): """ The Web3Converter class contains methods for converting between slot, epoch, and frame numbers using chain and frame settings passed as arguments when the class instance is created. Frame is the distance between two oracle reports. """ - - chain_config: ChainConfig frame_config: FrameConfig def __init__(self, chain_config: ChainConfig, frame_config: FrameConfig): - self.chain_config = chain_config + super().__init__(chain_config) self.frame_config = frame_config @property def slots_per_frame(self) -> int: return self.frame_config.epochs_per_frame * self.chain_config.slots_per_epoch - def get_epoch_first_slot(self, epoch: EpochNumber) -> SlotNumber: - return SlotNumber(epoch * self.chain_config.slots_per_epoch) - - def get_epoch_last_slot(self, epoch: EpochNumber) -> SlotNumber: - return SlotNumber((epoch + 1) * self.chain_config.slots_per_epoch - 1) - def get_frame_last_slot(self, frame: FrameNumber) -> SlotNumber: return SlotNumber(self.get_frame_first_slot(FrameNumber(frame + 1)) - 1) @@ -42,16 +53,10 @@ def get_frame_first_slot(self, frame: FrameNumber) -> SlotNumber: (self.frame_config.initial_epoch + frame * self.frame_config.epochs_per_frame) * self.chain_config.slots_per_epoch ) - def get_epoch_by_slot(self, ref_slot: SlotNumber) -> EpochNumber: - return EpochNumber(ref_slot // self.chain_config.slots_per_epoch) - def get_epoch_by_timestamp(self, timestamp: int) -> EpochNumber: slot = self.get_slot_by_timestamp(timestamp) return self.get_epoch_by_slot(slot) - def get_slot_by_timestamp(self, timestamp: int) -> SlotNumber: - return SlotNumber((timestamp - self.chain_config.genesis_time) // self.chain_config.seconds_per_slot) - def get_frame_by_slot(self, slot: SlotNumber) -> FrameNumber: return self.get_frame_by_epoch(self.get_epoch_by_slot(slot)) diff --git a/src/variables.py b/src/variables.py index 3c1131fcc..76d50b6d1 100644 --- a/src/variables.py +++ b/src/variables.py @@ -11,6 +11,7 @@ EXECUTION_CLIENT_URI: Final 
= os.getenv('EXECUTION_CLIENT_URI', '').split(',') CONSENSUS_CLIENT_URI: Final = os.getenv('CONSENSUS_CLIENT_URI', '').split(',') KEYS_API_URI: Final = os.getenv('KEYS_API_URI', '').split(',') +PERFORMANCE_COLLECTOR_URI: Final = os.getenv('PERFORMANCE_COLLECTOR_URI', '').split(',') PINATA_JWT: Final = from_file_or_env('PINATA_JWT') PINATA_DEDICATED_GATEWAY_URL: Final = os.getenv('PINATA_DEDICATED_GATEWAY_URL') @@ -76,6 +77,13 @@ os.getenv('HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_CONSENSUS', 5) ) +# Performance Collector HTTP client variables +HTTP_REQUEST_TIMEOUT_PERFORMANCE: Final = int(os.getenv('HTTP_REQUEST_TIMEOUT_PERFORMANCE', 60)) +HTTP_REQUEST_RETRY_COUNT_PERFORMANCE: Final = int(os.getenv('HTTP_REQUEST_RETRY_COUNT_PERFORMANCE', 3)) +HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE: Final = int( + os.getenv('HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE', 2) +) + HTTP_REQUEST_TIMEOUT_KEYS_API: Final = int(os.getenv('HTTP_REQUEST_TIMEOUT_KEYS_API', 120)) HTTP_REQUEST_RETRY_COUNT_KEYS_API: Final = int(os.getenv('HTTP_REQUEST_RETRY_COUNT_KEYS_API', 5)) HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_KEYS_API: Final = int( @@ -96,6 +104,27 @@ HEALTHCHECK_SERVER_PORT: Final = int(os.getenv('HEALTHCHECK_SERVER_PORT', 9010)) +# - Performance Web-server and Collector +PERFORMANCE_WEB_SERVER_API_PORT: Final = int(os.getenv('PERFORMANCE_WEB_SERVER_API_PORT', 9020)) +PERFORMANCE_WEB_SERVER_API_HOST: Final = os.getenv('PERFORMANCE_WEB_SERVER_API_HOST', '0.0.0.0') +PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT: Final = int(os.getenv('PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT', 30)) +PERFORMANCE_WEB_SERVER_METRICS_PREFIX: Final = os.getenv("PERFORMANCE_WEB_SERVER_METRICS_PREFIX", "lido_performance_web") +PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS: Final = int( + os.getenv('PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS', 10_000) +) +PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE: Final = int(os.getenv('PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE', 225)) 
+PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT: Final = int(os.getenv('PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT', 60)) + +PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS', 28 * 225 * 6)) +PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT', 30)) +PERFORMANCE_COLLECTOR_DB_STATEMENT_TIMEOUT_MS: Final = int(os.getenv('PERFORMANCE_COLLECTOR_DB_STATEMENT_TIMEOUT_MS', 10_000)) + +PERFORMANCE_DB_HOST: Final = os.getenv('PERFORMANCE_DB_HOST', 'localhost') +PERFORMANCE_DB_PORT: Final = int(os.getenv('PERFORMANCE_DB_PORT', 5432)) +PERFORMANCE_DB_NAME: Final = os.getenv('PERFORMANCE_DB_NAME', 'performance') +PERFORMANCE_DB_USER: Final = os.getenv('PERFORMANCE_DB_USER', 'performance') +PERFORMANCE_DB_PASSWORD: Final = os.getenv('PERFORMANCE_DB_PASSWORD', 'performance') + MAX_CYCLE_LIFETIME_IN_SECONDS: Final = int(os.getenv("MAX_CYCLE_LIFETIME_IN_SECONDS", 3000)) CACHE_PATH: Final = Path(os.getenv("CACHE_PATH", ".")) @@ -103,6 +132,7 @@ VAULT_PAGINATION_LIMIT: Final = int(os.getenv("VAULT_PAGINATION_LIMIT", 1_000)) VAULT_VALIDATOR_STAGES_BATCH_SIZE: Final = int(os.getenv("VAULT_VALIDATOR_STAGES_BATCH_SIZE", 1_00)) + def check_all_required_variables(module: OracleModule): errors = check_uri_required_variables() if not LIDO_LOCATOR_ADDRESS: @@ -123,6 +153,35 @@ def check_uri_required_variables(): return [name for name, uri in required_uris.items() if '' in uri] +def check_performance_db_required_variables() -> list[str]: + errors = [] + if not PERFORMANCE_DB_HOST: + errors.append("PERFORMANCE_DB_HOST is empty") + if PERFORMANCE_DB_PORT <= 0: + errors.append("PERFORMANCE_DB_PORT must be positive") + if not PERFORMANCE_DB_NAME: + errors.append("PERFORMANCE_DB_NAME is empty") + if not PERFORMANCE_DB_USER: + errors.append("PERFORMANCE_DB_USER is empty") + if not PERFORMANCE_DB_PASSWORD: + errors.append("PERFORMANCE_DB_PASSWORD is empty") + return errors + + +def 
check_perf_collector_required_variables(): + errors = check_performance_db_required_variables() + required_uris = { + 'CONSENSUS_CLIENT_URI': CONSENSUS_CLIENT_URI, + } + errors.extend([name for name, uri in required_uris.items() if '' in uri]) + return errors + + +def check_perf_web_server_required_variables(): + errors = check_performance_db_required_variables() + return errors + + def raise_from_errors(errors): if errors: raise ValueError("The following variables are required: " + ", ".join(errors)) @@ -160,6 +219,22 @@ def raise_from_errors(errors): 'PROMETHEUS_PORT': PROMETHEUS_PORT, 'PROMETHEUS_PREFIX': PROMETHEUS_PREFIX, 'HEALTHCHECK_SERVER_PORT': HEALTHCHECK_SERVER_PORT, + 'PERFORMANCE_WEB_SERVER_API_PORT': PERFORMANCE_WEB_SERVER_API_PORT, + 'PERFORMANCE_WEB_SERVER_API_HOST': PERFORMANCE_WEB_SERVER_API_HOST, + 'PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT': PERFORMANCE_WEB_SERVER_DB_CONNECTION_TIMEOUT, + 'PERFORMANCE_WEB_SERVER_METRICS_PREFIX': PERFORMANCE_WEB_SERVER_METRICS_PREFIX, + 'PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS': PERFORMANCE_WEB_SERVER_DB_STATEMENT_TIMEOUT_MS, + 'PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE': PERFORMANCE_WEB_SERVER_MAX_EPOCH_RANGE, + 'PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT': PERFORMANCE_WEB_SERVER_REQUEST_TIMEOUT, + 'PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS': PERFORMANCE_COLLECTOR_DB_RETENTION_EPOCHS, + 'PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT': PERFORMANCE_COLLECTOR_DB_CONNECTION_TIMEOUT, + 'PERFORMANCE_DB_HOST': PERFORMANCE_DB_HOST, + 'PERFORMANCE_DB_PORT': PERFORMANCE_DB_PORT, + 'PERFORMANCE_DB_NAME': PERFORMANCE_DB_NAME, + 'PERFORMANCE_DB_USER': PERFORMANCE_DB_USER, + 'HTTP_REQUEST_TIMEOUT_PERFORMANCE': HTTP_REQUEST_TIMEOUT_PERFORMANCE, + 'HTTP_REQUEST_RETRY_COUNT_PERFORMANCE': HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, + 'HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE': HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE, 'MAX_CYCLE_LIFETIME_IN_SECONDS': MAX_CYCLE_LIFETIME_IN_SECONDS, 'CACHE_PATH': CACHE_PATH, 
'VAULT_PAGINATION_LIMIT': VAULT_PAGINATION_LIMIT, @@ -171,12 +246,14 @@ def raise_from_errors(errors): 'EXECUTION_CLIENT_URI': EXECUTION_CLIENT_URI, 'CONSENSUS_CLIENT_URI': CONSENSUS_CLIENT_URI, 'KEYS_API_URI': KEYS_API_URI, + 'PERFORMANCE_COLLECTOR_URI': PERFORMANCE_COLLECTOR_URI, 'PINATA_JWT': PINATA_JWT, 'STORACHA_AUTH_SECRET': STORACHA_AUTH_SECRET, 'STORACHA_AUTHORIZATION': STORACHA_AUTHORIZATION, 'STORACHA_SPACE_DID': STORACHA_SPACE_DID, 'LIDO_IPFS_HOST': LIDO_IPFS_HOST, 'LIDO_IPFS_TOKEN': LIDO_IPFS_TOKEN, + 'PERFORMANCE_DB_PASSWORD': PERFORMANCE_DB_PASSWORD, 'PINATA_DEDICATED_GATEWAY_TOKEN': PINATA_DEDICATED_GATEWAY_TOKEN, 'MEMBER_PRIV_KEY': MEMBER_PRIV_KEY, 'OPSGENIE_API_KEY': OPSGENIE_API_KEY, diff --git a/src/web3py/extensions/__init__.py b/src/web3py/extensions/__init__.py index 80f83141d..01ce1d1a6 100644 --- a/src/web3py/extensions/__init__.py +++ b/src/web3py/extensions/__init__.py @@ -6,3 +6,4 @@ from src.web3py.extensions.fallback import FallbackProviderModule from src.web3py.extensions.csm import CSM, LazyCSM from src.web3py.extensions.ipfs import IPFS +from src.web3py.extensions.performance import PerformanceClientModule diff --git a/src/web3py/extensions/performance.py b/src/web3py/extensions/performance.py new file mode 100644 index 000000000..708c14503 --- /dev/null +++ b/src/web3py/extensions/performance.py @@ -0,0 +1,20 @@ +from web3.module import Module + +from src.providers.performance.client import PerformanceClient +from src.variables import ( + HTTP_REQUEST_TIMEOUT_PERFORMANCE, + HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, + HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE, +) + + +class PerformanceClientModule(PerformanceClient, Module): + def __init__(self, hosts: list[str]): + + super(PerformanceClient, self).__init__( + hosts, + HTTP_REQUEST_TIMEOUT_PERFORMANCE, + HTTP_REQUEST_RETRY_COUNT_PERFORMANCE, + HTTP_REQUEST_SLEEP_BEFORE_RETRY_IN_SECONDS_PERFORMANCE, + ) + super(Module, self).__init__() diff --git a/src/web3py/types.py 
b/src/web3py/types.py index 26832abeb..713ac6207 100644 --- a/src/web3py/types.py +++ b/src/web3py/types.py @@ -1,5 +1,6 @@ from web3 import Web3 as _Web3 +from src.providers.performance.client import PerformanceClient from src.web3py.extensions import ( CSM, ConsensusClientModule, @@ -19,3 +20,4 @@ class Web3(_Web3): kac: KeysAPIClientModule csm: CSM ipfs: IPFS + performance: PerformanceClient diff --git a/tests/fork/conftest.py b/tests/fork/conftest.py index fde6f9767..a6f927a1b 100644 --- a/tests/fork/conftest.py +++ b/tests/fork/conftest.py @@ -42,6 +42,7 @@ LidoContracts, LidoValidatorsProvider, TransactionUtils, + PerformanceClientModule, ) logger = logging.getLogger('fork_tests') @@ -180,6 +181,15 @@ def real_cl_client(): ) +@pytest.fixture +def real_el_client(): + return FallbackProviderModule( + variables.EXECUTION_CLIENT_URI, + request_kwargs={'timeout': variables.HTTP_REQUEST_TIMEOUT_EXECUTION}, + cache_allowed_requests=True, + ) + + @pytest.fixture def real_finalized_slot(real_cl_client: ConsensusClient) -> SlotNumber: finalized_slot = real_cl_client.get_block_header('finalized').data.header.message.slot @@ -275,6 +285,7 @@ def forked_el_client(blockstamp_for_forking: BlockStamp, testrun_path: str, anvi @pytest.fixture() def web3(forked_el_client, patched_cl_client, mocked_ipfs_client): kac = KeysAPIClientModule(variables.KEYS_API_URI, forked_el_client) + performance = PerformanceClientModule(variables.PERFORMANCE_COLLECTOR_URI) forked_el_client.attach_modules( { 'lido_contracts': LidoContracts, @@ -284,6 +295,7 @@ def web3(forked_el_client, patched_cl_client, mocked_ipfs_client): 'cc': lambda: patched_cl_client, # type: ignore[dict-item] 'kac': lambda: kac, # type: ignore[dict-item] "ipfs": lambda: mocked_ipfs_client, + 'performance': lambda: performance, } ) yield forked_el_client diff --git a/tests/fork/test_csm_oracle_cycle.py b/tests/fork/test_csm_oracle_cycle.py index db9a4074c..aab43efd4 100644 --- a/tests/fork/test_csm_oracle_cycle.py +++ 
b/tests/fork/test_csm_oracle_cycle.py @@ -1,15 +1,13 @@ -import os -import subprocess -from pathlib import Path +from threading import Thread import pytest from src.modules.csm.csm import CSOracle +from src.modules.performance.collector.collector import PerformanceCollector from src.modules.submodules.types import FrameConfig from src.utils.range import sequence from src.web3py.types import Web3 -from tests.fork.conftest import first_slot_of_epoch, logger -from tests.fork.utils.lock import LockedDir +from tests.fork.conftest import first_slot_of_epoch @pytest.fixture() @@ -18,61 +16,75 @@ def hash_consensus_bin(): yield f.read() -@pytest.fixture(scope='session') -def csm_repo_path(testruns_folder_path): - return Path(testruns_folder_path) / "community-staking-module" +@pytest.fixture() +def csm_module(web3: Web3): + yield CSOracle(web3) -@pytest.fixture(scope='session') -def prepared_csm_repo(testruns_folder_path, csm_repo_path): +@pytest.fixture() +def performance_local_db(testrun_path): + from unittest.mock import patch + from pathlib import Path + from sqlmodel import create_engine + from sqlalchemy import JSON + from src.modules.performance.common.db import Duty + + def mock_get_database_url(self): + db_path = Path(testrun_path) / "test_duties.db" + return f"sqlite:///{db_path}" + + def mock_init(self): + self.engine = create_engine( + self._get_database_url(), + echo=False + ) + self._setup_database() - if os.environ.get("GITHUB_ACTIONS") == "true": - # CI should have the repo cloned and prepared - if os.path.exists(csm_repo_path): - return csm_repo_path - raise ValueError("No cloned community-staking-module repo found, but running in CI. 
Fix the workflow.") + table = Duty.__table__ + for col_name in ("attestations", "proposals_vids", "proposals_flags", "syncs_vids", "syncs_misses"): + if col_name in table.c: + table.c[col_name].type = JSON() - original_dir = os.getcwd() + with patch('src.modules.performance.common.db.DutiesDB._get_database_url', mock_get_database_url): + with patch('src.modules.performance.common.db.DutiesDB.__init__', mock_init): + yield - with LockedDir(testruns_folder_path): - if not os.path.exists(csm_repo_path / ".prepared"): - logger.info("TESTRUN Cloning community-staking-module repo") - subprocess.run( - ["git", "clone", "https://github.com/lidofinance/community-staking-module", csm_repo_path], check=True - ) - os.chdir(csm_repo_path) - subprocess.run(["git", "checkout", "develop"], check=True) - subprocess.run(["just", "deps"], check=True) - subprocess.run(["just", "build"], check=True) - subprocess.run(["touch", ".prepared"], check=True) - os.chdir(original_dir) - return csm_repo_path +@pytest.fixture() +def performance_collector(performance_local_db, web3: Web3, frame_config: FrameConfig): + yield PerformanceCollector(web3) @pytest.fixture() -def csm_module(web3: Web3): - yield CSOracle(web3) +def performance_web_server(performance_local_db): + from src.modules.performance.web.server import serve + Thread(target=serve, daemon=True).start() + yield + + +@pytest.fixture +def cycle_iterations(): + return 4 @pytest.fixture -def start_before_initial_epoch(frame_config: FrameConfig): +def start_before_initial_epoch(frame_config: FrameConfig, cycle_iterations): _from = frame_config.initial_epoch - 1 - _to = frame_config.initial_epoch + 4 + _to = frame_config.initial_epoch + cycle_iterations return [first_slot_of_epoch(i) for i in sequence(_from, _to)] @pytest.fixture -def start_after_initial_epoch(frame_config: FrameConfig): +def start_after_initial_epoch(frame_config: FrameConfig, cycle_iterations): _from = frame_config.initial_epoch + 1 - _to = frame_config.initial_epoch + 4 
+ _to = frame_config.initial_epoch + cycle_iterations return [first_slot_of_epoch(i) for i in sequence(_from, _to)] @pytest.fixture -def missed_initial_frame(frame_config: FrameConfig): +def missed_initial_frame(frame_config: FrameConfig, cycle_iterations): _from = frame_config.initial_epoch + frame_config.epochs_per_frame + 1 - _to = _from + 4 + _to = _from + cycle_iterations return [first_slot_of_epoch(i) for i in sequence(_from, _to)] @@ -87,7 +99,9 @@ def missed_initial_frame(frame_config: FrameConfig): [start_before_initial_epoch, start_after_initial_epoch, missed_initial_frame], indirect=True, ) -def test_csm_module_report(module, set_oracle_members, running_finalized_slots, account_from): +def test_csm_module_report( + performance_web_server, performance_collector, module, set_oracle_members, running_finalized_slots, account_from +): assert module.report_contract.get_last_processing_ref_slot() == 0, "Last processing ref slot should be 0" members = set_oracle_members(count=2) @@ -97,6 +111,7 @@ def test_csm_module_report(module, set_oracle_members, running_finalized_slots, switch_finalized, _ = running_finalized_slots # pylint:disable=duplicate-code while switch_finalized(): + performance_collector.cycle_handler() for _, private_key in members: # NOTE: reporters using the same cache with account_from(private_key): diff --git a/tests/modules/csm/test_csm_module.py b/tests/modules/csm/test_csm_module.py index 3308bafc5..ffd1b8975 100644 --- a/tests/modules/csm/test_csm_module.py +++ b/tests/modules/csm/test_csm_module.py @@ -2,21 +2,25 @@ from collections import defaultdict from dataclasses import dataclass from typing import Literal, NoReturn, Type -from unittest.mock import Mock, PropertyMock, patch +from unittest.mock import Mock, PropertyMock, call import pytest from hexbytes import HexBytes from src.constants import UINT64_MAX -from src.modules.csm.csm import CSOracle, LastReport +from src.modules.csm.csm import CSMError, CSOracle, LastReport from 
src.modules.csm.distribution import Distribution +from src.modules.csm.log import FramePerfLog from src.modules.csm.state import State from src.modules.csm.tree import RewardsTree, StrikesTree from src.modules.csm.types import StrikesList +from src.modules.performance.common.db import Duty from src.modules.submodules.oracle_module import ModuleExecuteDelay from src.modules.submodules.types import ZERO_HASH, CurrentFrame +from src.providers.consensus.types import Validator, ValidatorState +from src.providers.execution.exceptions import InconsistentData from src.providers.ipfs import CID -from src.types import NodeOperatorId, SlotNumber, FrameNumber +from src.types import EpochNumber, FrameNumber, Gwei, NodeOperatorId, SlotNumber, ValidatorIndex from src.utils.types import hex_str_to_bytes from src.web3py.types import Web3 from tests.factory.blockstamp import ReferenceBlockStampFactory @@ -49,6 +53,23 @@ def slot_to_epoch(slot: int) -> int: return slot // 32 +def make_validator(index: int, activation_epoch: int = 0, exit_epoch: int = 100) -> Validator: + return Validator( + index=ValidatorIndex(index), + balance=Gwei(0), + validator=ValidatorState( + pubkey=f"0x{index:02x}", + withdrawal_credentials="0x00", + effective_balance=Gwei(0), + slashed=False, + activation_eligibility_epoch=EpochNumber(activation_epoch), + activation_epoch=EpochNumber(activation_epoch), + exit_epoch=EpochNumber(exit_epoch), + withdrawable_epoch=EpochNumber(exit_epoch + 1), + ), + ) + + @pytest.fixture() def mock_chain_config(module: CSOracle): module.get_chain_config = Mock( @@ -73,7 +94,7 @@ class FrameTestParam: last_processing_ref_slot: int current_ref_slot: int finalized_slot: int - expected_frame: tuple[int, int] | Type[ValueError] + expected_frame: tuple[int, int] | Type[Exception] @pytest.mark.parametrize( @@ -101,17 +122,18 @@ class FrameTestParam: ), id="holesky_testnet", ), - pytest.param( - FrameTestParam( - epochs_per_frame=32, - initial_ref_slot=last_slot_of_epoch(100), - 
last_processing_ref_slot=0, - current_ref_slot=0, - finalized_slot=0, - expected_frame=(69, 100), - ), - id="not_yet_reached_initial_epoch", - ), + # NOTE: Impossible case in current processing + # pytest.param( + # FrameTestParam( + # epochs_per_frame=32, + # initial_ref_slot=last_slot_of_epoch(100), + # last_processing_ref_slot=0, + # current_ref_slot=0, + # finalized_slot=0, + # expected_frame=(69, 100), + # ), + # id="not_yet_reached_initial_epoch", + # ), pytest.param( FrameTestParam( epochs_per_frame=32, @@ -167,6 +189,28 @@ class FrameTestParam: ), id="initial_epoch_moved_forward_with_missed_frame", ), + pytest.param( + FrameTestParam( + epochs_per_frame=32, + initial_ref_slot=last_slot_of_epoch(10), + last_processing_ref_slot=last_slot_of_epoch(20), + current_ref_slot=last_slot_of_epoch(15), + finalized_slot=last_slot_of_epoch(15), + expected_frame=InconsistentData, + ), + id="last_processing_ref_slot_in_future", + ), + pytest.param( + FrameTestParam( + epochs_per_frame=4, + initial_ref_slot=last_slot_of_epoch(1), + last_processing_ref_slot=0, + current_ref_slot=last_slot_of_epoch(1), + finalized_slot=last_slot_of_epoch(1), + expected_frame=CSMError, + ), + id="negative_first_frame", + ), ], ) @pytest.mark.unit @@ -188,16 +232,60 @@ def test_current_frame_range(module: CSOracle, mock_chain_config: NoReturn, para ) module.get_initial_ref_slot = Mock(return_value=param.initial_ref_slot) - if param.expected_frame is ValueError: - with pytest.raises(ValueError): - module.get_epochs_range_to_process(ReferenceBlockStampFactory.build(slot_number=param.finalized_slot)) + ref_epoch = slot_to_epoch(param.current_ref_slot) + if isinstance(param.expected_frame, type) and issubclass(param.expected_frame, Exception): + with pytest.raises(param.expected_frame): + module.get_epochs_range_to_process( + ReferenceBlockStampFactory.build(slot_number=param.current_ref_slot, ref_epoch=ref_epoch) + ) else: - bs = ReferenceBlockStampFactory.build(slot_number=param.finalized_slot) 
+ bs = ReferenceBlockStampFactory.build(slot_number=param.current_ref_slot, ref_epoch=ref_epoch) l_epoch, r_epoch = module.get_epochs_range_to_process(bs) assert (l_epoch, r_epoch) == param.expected_frame +@pytest.mark.unit +def test_set_epochs_range_to_collect_posts_new_demand(module: CSOracle, mock_chain_config: NoReturn): + blockstamp = ReferenceBlockStampFactory.build() + module.state = Mock(migrate=Mock(), log_progress=Mock()) + converter = Mock() + converter.frame_config = Mock(epochs_per_frame=4) + module.converter = Mock(return_value=converter) + module.get_epochs_range_to_process = Mock(return_value=(10, 20)) + module.w3 = Mock() + module.w3.performance.is_range_available = Mock(return_value=False) + module.w3.performance.get_epochs_demand = Mock(return_value={}) + module.w3.performance.post_epochs_demand = Mock() + + module.set_epochs_range_to_collect(blockstamp) + + module.state.migrate.assert_called_once_with(10, 20, 4) + module.state.log_progress.assert_called_once() + module.w3.performance.is_range_available.assert_called_once_with(10, 20) + module.w3.performance.get_epochs_demand.assert_called_once() + module.w3.performance.post_epochs_demand.assert_called_once_with("CSOracle", 10, 20) + + +@pytest.mark.unit +def test_set_epochs_range_to_collect_skips_post_when_demand_same(module: CSOracle, mock_chain_config: NoReturn): + blockstamp = ReferenceBlockStampFactory.build() + module.state = Mock(migrate=Mock(), log_progress=Mock()) + converter = Mock() + converter.frame_config = Mock(epochs_per_frame=4) + module.converter = Mock(return_value=converter) + module.get_epochs_range_to_process = Mock(return_value=(10, 20)) + module.w3 = Mock() + module.w3.performance.get_epochs_demands = Mock(return_value={"CSOracle": (10, 20)}) + module.w3.performance.post_epochs_demand = Mock() + + module.set_epochs_range_to_collect(blockstamp) + + module.state.migrate.assert_called_once_with(10, 20, 4) + module.state.log_progress.assert_called_once() + 
module.w3.performance.post_epochs_demand.assert_not_called() + + @pytest.fixture() def mock_frame_config(module: CSOracle): module.get_frame_config = Mock( @@ -210,163 +298,252 @@ def mock_frame_config(module: CSOracle): @dataclass(frozen=True) -class CollectDataTestParam: - collect_blockstamp: Mock - collect_frame_range: Mock - report_blockstamp: Mock - state: Mock - expected_msg: str - expected_result: bool | Exception +class CollectDataCase: + frames: list[tuple[int, int]] + range_available: bool + is_fulfilled_side_effect: list[bool] + expected_result: bool + expect_fulfill_call: bool + expect_range_call: tuple[int, int] + check_no_completed_msg: bool @pytest.mark.parametrize( - "param", + "case", [ pytest.param( - CollectDataTestParam( - collect_blockstamp=Mock(slot_number=64), - collect_frame_range=Mock(return_value=(0, 1)), - report_blockstamp=Mock(ref_epoch=3), - state=Mock(), - expected_msg="Epochs range has been changed, but the change is not yet observed on finalized epoch 1", - expected_result=False, - ), - id="frame_changed_forward", - ), - pytest.param( - CollectDataTestParam( - collect_blockstamp=Mock(slot_number=64), - collect_frame_range=Mock(return_value=(0, 2)), - report_blockstamp=Mock(ref_epoch=1), - state=Mock(), - expected_msg="Epochs range has been changed, but the change is not yet observed on finalized epoch 1", + CollectDataCase( + frames=[(10, 12)], + range_available=False, + is_fulfilled_side_effect=[False], expected_result=False, + expect_fulfill_call=False, + expect_range_call=(10, 12), + check_no_completed_msg=False, ), - id="frame_changed_backward", + id="range_not_available", ), pytest.param( - CollectDataTestParam( - collect_blockstamp=Mock(slot_number=32), - collect_frame_range=Mock(return_value=(1, 2)), - report_blockstamp=Mock(ref_epoch=2), - state=Mock(), - expected_msg="The starting epoch of the epochs range is not finalized yet", - expected_result=False, - ), - id="starting_epoch_not_finalized", - ), - pytest.param( - 
CollectDataTestParam( - collect_blockstamp=Mock(slot_number=32), - collect_frame_range=Mock(return_value=(0, 2)), - report_blockstamp=Mock(ref_epoch=2), - state=Mock( - migrate=Mock(), - log_status=Mock(), - is_fulfilled=True, - ), - expected_msg="All epochs are already processed. Nothing to collect", + CollectDataCase( + frames=[(10, 12)], + range_available=True, + is_fulfilled_side_effect=[False, True], expected_result=True, + expect_fulfill_call=True, + expect_range_call=(10, 12), + check_no_completed_msg=False, ), - id="state_fulfilled", + id="range_available", ), pytest.param( - CollectDataTestParam( - collect_blockstamp=Mock(slot_number=320), - collect_frame_range=Mock(return_value=(0, 100)), - report_blockstamp=Mock(ref_epoch=100), - state=Mock( - migrate=Mock(), - log_status=Mock(), - unprocessed_epochs=[5], - is_fulfilled=False, - ), - expected_msg="Minimum checkpoint step is not reached, current delay is 2 epochs", - expected_result=False, + CollectDataCase( + frames=[(0, 100)], + range_available=True, + is_fulfilled_side_effect=[False, True], + expected_result=True, + expect_fulfill_call=True, + expect_range_call=(0, 100), + check_no_completed_msg=True, ), - id="min_step_not_reached", + id="fulfilled_state", ), ], ) @pytest.mark.unit -def test_collect_data( - module: CSOracle, - param: CollectDataTestParam, - mock_chain_config: NoReturn, - mock_frame_config: NoReturn, - caplog, - monkeypatch, +def test_collect_data_handles_range_availability( + module: CSOracle, mock_chain_config: NoReturn, mock_frame_config: NoReturn, caplog, case: CollectDataCase ): module.w3 = Mock() - module._receive_last_finalized_slot = Mock() - module.state = param.state - module.get_epochs_range_to_process = param.collect_frame_range - module.get_blockstamp_for_report = Mock(return_value=param.report_blockstamp) + module.w3.performance.is_range_available = Mock(return_value=case.range_available) + module.fulfill_state = Mock() + state = Mock(frames=case.frames) + 
type(state).is_fulfilled = PropertyMock(side_effect=case.is_fulfilled_side_effect) + module.state = state with caplog.at_level(logging.DEBUG): - if isinstance(param.expected_result, Exception): - with pytest.raises(type(param.expected_result)): - module.collect_data(blockstamp=param.collect_blockstamp) - else: - collected = module.collect_data(blockstamp=param.collect_blockstamp) - assert collected == param.expected_result + result = module.collect_data() - msg = list(filter(lambda log: param.expected_msg in log, caplog.messages)) - assert len(msg), f"Expected message '{param.expected_msg}' not found in logs" + assert result is case.expected_result + module.w3.performance.is_range_available.assert_called_once_with(*case.expect_range_call) + if case.expect_fulfill_call: + module.fulfill_state.assert_called_once() + else: + module.fulfill_state.assert_not_called() + + if case.check_no_completed_msg: + assert "All epochs are already processed. Nothing to collect" not in caplog.messages @pytest.mark.unit -def test_collect_data_outdated_checkpoint( - module: CSOracle, mock_chain_config: NoReturn, mock_frame_config: NoReturn, caplog -): +def test_fulfill_state_handles_epoch_data(module: CSOracle): + module._receive_last_finalized_slot = Mock(return_value="finalized") + validator_a = make_validator(0, activation_epoch=0, exit_epoch=10) + validator_b = make_validator(1, activation_epoch=0, exit_epoch=10) module.w3 = Mock() - module._receive_last_finalized_slot = Mock() - module.state = Mock( - migrate=Mock(), - log_status=Mock(), - unprocessed_epochs=list(range(0, 101)), - is_fulfilled=False, + module.w3.cc.get_validators = Mock(return_value=[validator_a, validator_b]) + + module.w3.performance.get_epoch_data = Mock( + side_effect=[ + Duty( + epoch_number=EpochNumber(0), + attestations=[validator_a.index], + proposals_vids=[int(validator_a.index), int(validator_b.index)], + proposals_flags=[True, False], + syncs_vids=[int(validator_a.index), int(validator_b.index)], + 
syncs_misses=[0, 1], + ), + Duty( + epoch_number=EpochNumber(1), + attestations=[], + proposals_vids=[int(validator_b.index)], + proposals_flags=[True], + syncs_vids=[int(validator_a.index), int(validator_b.index)], + syncs_misses=[2, 3], + ), + ] ) - module.get_epochs_range_to_process = Mock(side_effect=[(0, 100), (50, 150)]) - module.get_blockstamp_for_report = Mock(return_value=Mock(ref_epoch=100)) + frames = [(0, 1)] + unprocessed = {0, 1} + + state = Mock() + state.frames = frames + state.unprocessed_epochs = unprocessed + state.save_att_duty = Mock() + state.save_prop_duty = Mock() + state.save_sync_duty = Mock() + state.add_processed_epoch = Mock() + state.log_progress = Mock() + module.state = state + + module.fulfill_state() + + module._receive_last_finalized_slot.assert_called_once() + module.w3.cc.get_validators.assert_called_once_with("finalized") + + module.w3.performance.get_epoch_data.assert_has_calls([call(0), call(1)]) + assert state.save_att_duty.call_args_list == [ + call(EpochNumber(0), validator_a.index, included=False), + call(EpochNumber(0), validator_b.index, included=True), + call(EpochNumber(1), validator_a.index, included=True), + call(EpochNumber(1), validator_b.index, included=True), + ] + assert state.save_prop_duty.call_args_list == [ + call(EpochNumber(0), ValidatorIndex(int(validator_a.index)), included=True), + call(EpochNumber(0), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=True), + ] + assert state.save_sync_duty.call_args_list == [ + call(EpochNumber(0), ValidatorIndex(int(validator_a.index)), included=True), + call(EpochNumber(0), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_a.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_a.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), 
ValidatorIndex(int(validator_b.index)), included=False), + call(EpochNumber(1), ValidatorIndex(int(validator_b.index)), included=False), + ] + assert state.add_processed_epoch.call_args_list == [ + call(EpochNumber(0)), + call(EpochNumber(1)), + ] + assert state.log_progress.call_count == 2 - with caplog.at_level(logging.DEBUG): - with pytest.raises(ValueError): - module.collect_data(blockstamp=Mock(slot_number=640)) - msg = list( - filter( - lambda log: "Checkpoints were prepared for an outdated epochs range, stop processing" in log, - caplog.messages, +@pytest.mark.unit +def test_fulfill_state_raises_on_inactive_missed_attestation(module: CSOracle): + inactive_validator = make_validator(5, activation_epoch=10, exit_epoch=20) + module._receive_last_finalized_slot = Mock(return_value="finalized") + module.w3 = Mock() + module.w3.cc.get_validators = Mock(return_value=[inactive_validator]) + module.w3.performance.get_epoch_data = Mock(return_value=Duty(epoch=0, attestations=[inactive_validator.index], proposals_vids=[], proposals_flags= [], syncs_vids=[], syncs_misses=[])) + state = Mock() + state.frames = [(0, 0)] + state.unprocessed_epochs = {0} + state.save_att_duty = Mock() + state.save_prop_duty = Mock() + state.save_sync_duty = Mock() + state.add_processed_epoch = Mock() + state.log_progress = Mock() + module.state = state + + with pytest.raises(ValueError, match="not active"): + module.fulfill_state() + + module.w3.performance.get_epoch_data.assert_called_once_with(0) + state.save_att_duty.assert_not_called() + state.add_processed_epoch.assert_not_called() + + +@pytest.mark.unit +def test_validate_state_uses_ref_epoch(module: CSOracle): + blockstamp = ReferenceBlockStampFactory.build(ref_epoch=123) + module.get_epochs_range_to_process = Mock(return_value=(5, 10)) + module.state = Mock(validate=Mock()) + + module.validate_state(blockstamp) + + module.get_epochs_range_to_process.assert_called_once_with(blockstamp) + 
module.state.validate.assert_called_once_with(5, 123) + + +@pytest.mark.parametrize( + "last_ref_slot,current_ref_slot,expected", + [ + pytest.param(64, 64, True, id="already_submitted"), + pytest.param(32, 64, False, id="pending_submission"), + ], +) +@pytest.mark.unit +def test_is_main_data_submitted(module: CSOracle, last_ref_slot: int, current_ref_slot: int, expected: bool): + blockstamp = ReferenceBlockStampFactory.build() + module.w3 = Mock() + module.w3.csm.get_csm_last_processing_ref_slot = Mock(return_value=SlotNumber(last_ref_slot)) + module.get_initial_or_current_frame = Mock( + return_value=CurrentFrame( + ref_slot=SlotNumber(current_ref_slot), + report_processing_deadline_slot=SlotNumber(0), ) ) - assert len(msg), "Expected message not found in logs" + + assert module.is_main_data_submitted(blockstamp) is expected +@pytest.mark.parametrize("submitted", [True, False]) @pytest.mark.unit -def test_collect_data_fulfilled_state( - module: CSOracle, mock_chain_config: NoReturn, mock_frame_config: NoReturn, caplog -): +def test_is_contract_reportable_relies_on_is_main_data_submitted(module: CSOracle, submitted: bool): + module.is_main_data_submitted = Mock(return_value=submitted) + + result = module.is_contract_reportable(ReferenceBlockStampFactory.build()) + + module.is_main_data_submitted.assert_called_once() + assert result is (not submitted) + + +@pytest.mark.unit +def test_publish_tree_uploads_encoded_tree(module: CSOracle): + tree = Mock() + tree.encode.return_value = b"tree" module.w3 = Mock() - module._reset_cycle_timeout = Mock() - module._receive_last_finalized_slot = Mock() - module.state = Mock( - migrate=Mock(), - log_status=Mock(), - unprocessed_epochs=list(range(0, 101)), - ) - type(module.state).is_fulfilled = PropertyMock(side_effect=[False, True]) - module.get_epochs_range_to_process = Mock(return_value=(0, 100)) - module.get_blockstamp_for_report = Mock(return_value=Mock(ref_epoch=100)) + module.w3.ipfs.publish = 
Mock(return_value=CID("QmTree")) - with caplog.at_level(logging.DEBUG): - with patch('src.modules.csm.csm.FrameCheckpointProcessor.exec', return_value=None): - collected = module.collect_data(blockstamp=Mock(slot_number=640)) - assert collected is True + cid = module.publish_tree(tree) - # assert that it is not early return from function - msg = list(filter(lambda log: "All epochs are already processed. Nothing to collect" in log, caplog.messages)) - assert len(msg) == 0, "Unexpected message found in logs" + module.w3.ipfs.publish.assert_called_once_with(b"tree") + assert cid == CID("QmTree") + + +@pytest.mark.unit +def test_publish_log_uploads_encoded_log(module: CSOracle, monkeypatch: pytest.MonkeyPatch): + logs = [Mock(spec=FramePerfLog)] + encode_mock = Mock(return_value=b"log") + monkeypatch.setattr("src.modules.csm.csm.FramePerfLog.encode", encode_mock) + module.w3 = Mock() + module.w3.ipfs.publish = Mock(return_value=CID("QmLog")) + + cid = module.publish_log(logs) + + encode_mock.assert_called_once_with(logs) + module.w3.ipfs.publish.assert_called_once_with(b"log") + assert cid == CID("QmLog") @dataclass(frozen=True) @@ -588,6 +765,8 @@ def test_build_report(module: CSOracle, param: BuildReportTestParam): @pytest.mark.unit def test_execute_module_not_collected(module: CSOracle): module._check_compatability = Mock(return_value=True) + module.get_blockstamp_for_report = Mock(return_value=Mock(slot_number=100500)) + module.set_epochs_range_to_collect = Mock() module.collect_data = Mock(return_value=False) execute_delay = module.execute_module( @@ -611,6 +790,7 @@ def test_execute_module_skips_collecting_if_forward_compatible(module: CSOracle) @pytest.mark.unit def test_execute_module_no_report_blockstamp(module: CSOracle): module._check_compatability = Mock(return_value=True) + module.set_epochs_range_to_collect = Mock() module.collect_data = Mock(return_value=True) module.get_blockstamp_for_report = Mock(return_value=None) @@ -622,6 +802,7 @@ def 
test_execute_module_no_report_blockstamp(module: CSOracle): @pytest.mark.unit def test_execute_module_processed(module: CSOracle): + module.set_epochs_range_to_collect = Mock() module.collect_data = Mock(return_value=True) module.get_blockstamp_for_report = Mock(return_value=Mock(slot_number=100500)) module.process_report = Mock() diff --git a/tests/modules/csm/test_checkpoint.py b/tests/modules/performance_collector/test_checkpoint.py similarity index 87% rename from tests/modules/csm/test_checkpoint.py rename to tests/modules/performance_collector/test_checkpoint.py index 52b99aaf5..4be093e60 100644 --- a/tests/modules/csm/test_checkpoint.py +++ b/tests/modules/performance_collector/test_checkpoint.py @@ -4,20 +4,19 @@ import pytest -import src.modules.csm.checkpoint as checkpoint_module +import src.modules.performance.collector.checkpoint as checkpoint_module from src.constants import EPOCHS_PER_SYNC_COMMITTEE_PERIOD -from src.modules.csm.checkpoint import ( +from src.modules.performance.collector.checkpoint import ( FrameCheckpoint, FrameCheckpointProcessor, FrameCheckpointsIterator, - MinStepIsNotReached, SlotNumber, SlotOutOfRootsRange, SyncCommitteesCache, - ValidatorDuty, process_attestations, ) -from src.modules.csm.state import State +from src.modules.performance.common.db import DutiesDB +from src.modules.performance.common.types import AttDutyMisses, ProposalDuty, SyncDuty from src.modules.submodules.types import ChainConfig, FrameConfig from src.providers.consensus.client import ConsensusClient from src.providers.consensus.types import BeaconSpecResponse, BlockAttestation, SlotAttestationCommittee, SyncCommittee @@ -34,8 +33,8 @@ @pytest.fixture(autouse=True) -def no_commit(monkeypatch): - monkeypatch.setattr(State, "commit", Mock()) +def no_db_write(monkeypatch): + monkeypatch.setattr(DutiesDB, "store_epoch", Mock()) @pytest.fixture @@ -61,16 +60,10 @@ def converter(frame_config: FrameConfig, chain_config: ChainConfig) -> Web3Conve @pytest.fixture def 
sync_committees_cache(): - with patch('src.modules.csm.checkpoint.SYNC_COMMITTEES_CACHE', SyncCommitteesCache()) as cache: + with patch('src.modules.performance_collector.checkpoint.SYNC_COMMITTEES_CACHE', SyncCommitteesCache()) as cache: yield cache -@pytest.mark.unit -def test_checkpoints_iterator_min_epoch_is_not_reached(converter): - with pytest.raises(MinStepIsNotReached): - FrameCheckpointsIterator(converter, 100, 600, 109) - - @pytest.mark.unit @pytest.mark.parametrize( "l_epoch,r_epoch,finalized_epoch,expected_checkpoints", @@ -335,10 +328,10 @@ def test_checkpoints_processor_process_attestations_undefined_committee( @pytest.fixture def frame_checkpoint_processor(): cc = Mock() - state = Mock() + db = Mock() converter = Mock() finalized_blockstamp = Mock(slot_number=SlotNumber(0)) - return FrameCheckpointProcessor(cc, state, converter, finalized_blockstamp) + return FrameCheckpointProcessor(cc, db, converter, finalized_blockstamp) @pytest.mark.unit @@ -348,18 +341,18 @@ def test_check_duties_processes_epoch_with_attestations_and_sync_committee(frame duty_epoch = EpochNumber(10) duty_epoch_roots = [(SlotNumber(100), Mock(spec=BlockRoot)), (SlotNumber(101), Mock(spec=BlockRoot))] next_epoch_roots = [(SlotNumber(102), Mock(spec=BlockRoot)), (SlotNumber(103), Mock(spec=BlockRoot))] - frame_checkpoint_processor._prepare_attestation_duties = Mock( - return_value={SlotNumber(100): [ValidatorDuty(1, False)]} - ) + frame_checkpoint_processor._prepare_attestation_duties = Mock(return_value={SlotNumber(100): AttDutyMisses([1])}) frame_checkpoint_processor._prepare_propose_duties = Mock( - return_value={SlotNumber(100): ValidatorDuty(1, False), SlotNumber(101): ValidatorDuty(1, False)} - ) - frame_checkpoint_processor._prepare_sync_committee_duties = Mock( return_value={ - 100: [ValidatorDuty(1, False) for _ in range(32)], - 101: [ValidatorDuty(1, False) for _ in range(32)], + SlotNumber(100): ProposalDuty(validator_index=1, is_proposed=False), + SlotNumber(101): 
ProposalDuty(validator_index=1, is_proposed=False), } ) + frame_checkpoint_processor._prepare_sync_committee_duties = Mock( + return_value=[ + SyncDuty(validator_index=1, missed_count=2), + ] + ) attestation = Mock() attestation.data.slot = SlotNumber(100) @@ -371,15 +364,13 @@ def test_check_duties_processes_epoch_with_attestations_and_sync_committee(frame sync_aggregate.sync_committee_bits = "0xff" frame_checkpoint_processor.cc.get_block_attestations_and_sync = Mock(return_value=([attestation], sync_aggregate)) - frame_checkpoint_processor.state.unprocessed_epochs = [duty_epoch] + frame_checkpoint_processor.db.has_epoch = lambda: False frame_checkpoint_processor._check_duties( checkpoint_block_roots, checkpoint_slot, duty_epoch, duty_epoch_roots, next_epoch_roots ) - frame_checkpoint_processor.state.save_att_duty.assert_called() - frame_checkpoint_processor.state.save_sync_duty.assert_called() - frame_checkpoint_processor.state.save_prop_duty.assert_called() + frame_checkpoint_processor.db.store_epoch_from_duties.assert_called() @pytest.mark.unit @@ -391,25 +382,28 @@ def test_check_duties_processes_epoch_with_no_attestations(frame_checkpoint_proc next_epoch_roots = [(SlotNumber(102), Mock(spec=BlockRoot)), (SlotNumber(103), Mock(spec=BlockRoot))] frame_checkpoint_processor._prepare_attestation_duties = Mock(return_value={}) frame_checkpoint_processor._prepare_propose_duties = Mock( - return_value={SlotNumber(100): ValidatorDuty(1, False), SlotNumber(101): ValidatorDuty(1, False)} + return_value={ + SlotNumber(100): ProposalDuty(validator_index=1, is_proposed=False), + SlotNumber(101): ProposalDuty(validator_index=1, is_proposed=False), + } ) frame_checkpoint_processor._prepare_sync_committee_duties = Mock( - return_value={100: [ValidatorDuty(1, False)], 101: [ValidatorDuty(1, False)]} + return_value=[ + SyncDuty(validator_index=1, missed_count=2), + ] ) sync_aggregate = Mock() sync_aggregate.sync_committee_bits = "0x00" 
frame_checkpoint_processor.cc.get_block_attestations_and_sync = Mock(return_value=([], sync_aggregate)) - frame_checkpoint_processor.state.unprocessed_epochs = [duty_epoch] + frame_checkpoint_processor.db.has_epoch = lambda: False frame_checkpoint_processor._check_duties( checkpoint_block_roots, checkpoint_slot, duty_epoch, duty_epoch_roots, next_epoch_roots ) - assert frame_checkpoint_processor.state.save_att_duty.call_count == 0 - assert frame_checkpoint_processor.state.save_sync_duty.call_count == 2 - assert frame_checkpoint_processor.state.save_prop_duty.call_count == 2 + frame_checkpoint_processor.db.store_epoch.assert_called() @pytest.mark.unit @@ -422,18 +416,11 @@ def test_prepare_sync_committee_returns_duties_for_valid_sync_committee(frame_ch duties = frame_checkpoint_processor._prepare_sync_committee_duties(epoch, duty_block_roots) - expected_duties = { - SlotNumber(100): [ - ValidatorDuty(validator_index=1, included=False), - ValidatorDuty(validator_index=2, included=False), - ValidatorDuty(validator_index=3, included=False), - ], - SlotNumber(101): [ - ValidatorDuty(validator_index=1, included=False), - ValidatorDuty(validator_index=2, included=False), - ValidatorDuty(validator_index=3, included=False), - ], - } + expected_duties = [ + SyncDuty(validator_index=1, missed_count=2), + SyncDuty(validator_index=2, missed_count=2), + SyncDuty(validator_index=3, missed_count=2), + ] assert duties == expected_duties @@ -447,13 +434,11 @@ def test_prepare_sync_committee_skips_duties_for_missed_slots(frame_checkpoint_p duties = frame_checkpoint_processor._prepare_sync_committee_duties(epoch, duty_block_roots) - expected_duties = { - SlotNumber(101): [ - ValidatorDuty(validator_index=1, included=False), - ValidatorDuty(validator_index=2, included=False), - ValidatorDuty(validator_index=3, included=False), - ] - } + expected_duties = [ + SyncDuty(validator_index=1, missed_count=1), + SyncDuty(validator_index=2, missed_count=1), + SyncDuty(validator_index=3, 
missed_count=1), + ] assert duties == expected_duties @@ -484,7 +469,9 @@ def test_get_sync_committee_fetches_and_caches_when_not_cached( prev_slot_response = Mock() prev_slot_response.message.slot = SlotNumber(0) prev_slot_response.message.body.execution_payload.block_hash = "0x00" - with patch('src.modules.csm.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): + with patch( + 'src.modules.performance.collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response) + ): result = frame_checkpoint_processor._get_sync_committee(epoch) assert result.validators == sync_committee.validators @@ -508,7 +495,9 @@ def test_get_sync_committee_handles_cache_eviction( prev_slot_response = Mock() prev_slot_response.message.slot = SlotNumber(0) prev_slot_response.message.body.execution_payload.block_hash = "0x00" - with patch('src.modules.csm.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response)): + with patch( + 'src.modules.performance.collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response) + ): result = frame_checkpoint_processor._get_sync_committee(epoch) assert result == sync_committee @@ -530,8 +519,8 @@ def test_prepare_propose_duties(frame_checkpoint_processor): duties = frame_checkpoint_processor._prepare_propose_duties(epoch, checkpoint_block_roots, checkpoint_slot) expected_duties = { - SlotNumber(101): ValidatorDuty(validator_index=1, included=False), - SlotNumber(102): ValidatorDuty(validator_index=2, included=False), + SlotNumber(101): ProposalDuty(validator_index=1, is_proposed=False), + SlotNumber(102): ProposalDuty(validator_index=2, is_proposed=False), } assert duties == expected_duties @@ -564,7 +553,9 @@ def test_get_dependent_root_for_proposer_duties_from_cl_when_slot_out_of_range(f prev_slot_response = Mock() prev_slot_response.message.slot = non_missed_slot - with patch('src.modules.csm.checkpoint.get_prev_non_missed_slot', 
Mock(return_value=prev_slot_response)): + with patch( + 'src.modules.performance.collector.checkpoint.get_prev_non_missed_slot', Mock(return_value=prev_slot_response) + ): frame_checkpoint_processor.cc.get_block_root = Mock(return_value=Mock(root=checkpoint_block_roots[0])) dependent_root = frame_checkpoint_processor._get_dependent_root_for_proposer_duties( diff --git a/tests/modules/performance_collector/test_performance_collector.py b/tests/modules/performance_collector/test_performance_collector.py new file mode 100644 index 000000000..93ac50783 --- /dev/null +++ b/tests/modules/performance_collector/test_performance_collector.py @@ -0,0 +1,386 @@ +import pytest +from unittest.mock import Mock, patch + +from src.modules.performance.collector.collector import PerformanceCollector +from src.modules.performance.common.db import DutiesDB +from src.types import EpochNumber + + +@pytest.fixture +def mock_w3(): + """Mock Web3 instance""" + return Mock() + + +@pytest.fixture +def mock_db(): + """Mock DutiesDB instance""" + return Mock(spec=DutiesDB) + + +@pytest.fixture +def performance_collector(mock_w3, mock_db): + """Create PerformanceCollector instance with mocked dependencies""" + with patch('src.modules.performance.common.db.DutiesDB', return_value=mock_db), patch( + 'src.modules.performance.web.server.serve' + ), patch( + 'src.modules.performance.web.server.PERFORMANCE_WEB_SERVER_API_PORT', 8080 + ): + collector = PerformanceCollector(mock_w3) + collector.db = mock_db + return collector + + +class TestDefineEpochsToProcessRange: + """Test cases for define_epochs_to_process_range method""" + + @pytest.mark.unit + def test_empty_db_default_range(self, performance_collector, mock_db): + """Test when database is empty - should return default range""" + finalized_epoch = EpochNumber(100) + + # Setup empty DB + mock_db.min_epoch.return_value = None + mock_db.max_epoch.return_value = None + mock_db.epochs_demand.return_value = {} + + result = 
performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Expected calculations: + # max_available_epoch_to_check = max(0, 100 - 2) = 98 + # start_epoch = 98 + # end_epoch = 98 + assert result == (EpochNumber(98), EpochNumber(98)) + + @pytest.mark.unit + def test_empty_db_with_low_finalized_epoch(self, performance_collector, mock_db): + """Test when finalized epoch is low and DB is empty""" + finalized_epoch = EpochNumber(5) + + # Setup empty DB + mock_db.min_epoch.return_value = None + mock_db.max_epoch.return_value = None + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Expected calculations: + # max_available_epoch_to_check = max(0, 5 - 2) = 3 + # start_epoch = 3 + # end_epoch = 3 + assert result == (EpochNumber(3), EpochNumber(3)) + + @pytest.mark.unit + def test_db_with_gap_in_range(self, performance_collector, mock_db): + """Test when there's a gap in the database""" + finalized_epoch = EpochNumber(100) + + # Setup DB with gap + mock_db.min_epoch.return_value = 10 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [50, 51, 52] # Gap in the middle + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from the first missing epoch + assert result[0] == EpochNumber(50) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + + @pytest.mark.unit + def test_db_without_gap_continuous_collection(self, performance_collector, mock_db): + """Test when DB has no gaps - should collect next epochs""" + finalized_epoch = EpochNumber(100) + + # Setup DB without gaps + mock_db.min_epoch.return_value = 10 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] # No gaps + mock_db.epochs_demand.return_value = {} + + result = 
performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from next epoch after max + # start_epoch = 90 + 1 = 91 + assert result[0] == EpochNumber(91) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + + @pytest.mark.unit + def test_unsatisfied_epochs_demand_before_db_range(self, performance_collector, mock_db): + """Test when there's unsatisfied demand before existing DB range""" + finalized_epoch = EpochNumber(100) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup epochs demand before DB range + mock_db.epochs_demand.return_value = {'consumer1': (20, 30)} # Demand before min_epoch_in_db + mock_db.is_range_available.return_value = False # Unsatisfied demand + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from the earliest demand + assert result[0] == EpochNumber(20) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + + @pytest.mark.unit + def test_unsatisfied_epochs_demand_after_db_range(self, performance_collector, mock_db): + """Test when there's unsatisfied demand after existing DB range""" + finalized_epoch = EpochNumber(200) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup epochs demand after DB range + mock_db.epochs_demand.return_value = {'consumer1': (95, 105)} # Demand after max_epoch_in_db + mock_db.is_range_available.return_value = False # Unsatisfied demand + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from next epoch after max DB epoch + assert result[0] == EpochNumber(91) + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_satisfied_epochs_demand_ignored(self, performance_collector, mock_db): + 
"""Test that satisfied epochs demand is ignored""" + finalized_epoch = EpochNumber(100) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup satisfied epochs demand + mock_db.epochs_demand.return_value = {'consumer1': (60, 70)} # Demand within DB range + mock_db.is_range_available.return_value = True # Satisfied demand + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from next epoch after max (ignoring satisfied demand) + assert result[0] == EpochNumber(91) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + + @pytest.mark.unit + def test_multiple_unsatisfied_demands(self, performance_collector, mock_db): + """Test with multiple unsatisfied demands""" + finalized_epoch = EpochNumber(200) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup multiple unsatisfied demands + mock_db.epochs_demand.return_value = { + 'consumer1': (20, 30), # Before DB range + 'consumer2': (95, 105), # After DB range + 'consumer3': (60, 70), # Within DB range (satisfied) + } + + def mock_is_range_available(l_epoch, r_epoch): + if l_epoch == 60 and r_epoch == 70: + return True # Satisfied + return False # Unsatisfied + + mock_db.is_range_available.side_effect = mock_is_range_available + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should take the minimum of unsatisfied demands for start + # start_epoch = min(91, 20) = 20 (91 from DB continuation, 20 from demand) + assert result[0] == EpochNumber(20) + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_very_low_finalized_epoch(self, performance_collector, mock_db): + """Test with very low finalized epoch (edge case)""" + finalized_epoch = EpochNumber(1) + + # Setup empty DB + 
mock_db.min_epoch.return_value = None + mock_db.max_epoch.return_value = None + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # max_available_epoch_to_check = (1 - 2) = -1 + assert result is None + + @pytest.mark.unit + def test_no_epochs_demand_logged(self, performance_collector, mock_db, caplog): + """Test logging when no epochs demand is found""" + finalized_epoch = EpochNumber(100) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + mock_db.epochs_demand.return_value = {} # No demand + + with caplog.at_level('INFO'): + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + assert "No epochs demand found" in caplog.text + assert result is not None + + @pytest.mark.unit + def test_complex_scenario_with_gap_and_demand(self, performance_collector, mock_db): + """Test complex scenario with both gaps and unsatisfied demand""" + finalized_epoch = EpochNumber(200) + + # Setup DB with gap + mock_db.min_epoch.return_value = 30 + mock_db.max_epoch.return_value = 150 + mock_db.missing_epochs_in.return_value = [100, 101, 102] # Gap in DB + + # Setup unsatisfied demand + mock_db.epochs_demand.return_value = { + 'consumer1': (10, 20), # Before DB range + } + mock_db.is_range_available.return_value = False # Unsatisfied + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from gap (100) vs demand (10) -> min(100, 10) = 10 + assert result[0] == EpochNumber(10) + # End epoch should be max_available = max(0, 200 - 2) = 198 + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_finalized_epoch_zero(self, performance_collector, mock_db): + """Test with zero finalized epoch (edge case)""" + finalized_epoch = EpochNumber(0) + + # Setup empty DB + mock_db.min_epoch.return_value = None + mock_db.max_epoch.return_value = None + 
mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # max_available_epoch_to_check = -2 + assert result is None + + @pytest.mark.unit + def test_epochs_demand_exactly_at_db_boundaries(self, performance_collector, mock_db): + """Test epochs demand exactly at database boundaries""" + finalized_epoch = EpochNumber(200) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + + # Setup demand exactly at boundaries + mock_db.epochs_demand.return_value = { + 'consumer1': (50, 90), # Exactly the DB range + } + mock_db.is_range_available.return_value = True # Satisfied demand + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should ignore satisfied demand and continue from max + 1 + assert result[0] == EpochNumber(91) + # End epoch should be max_available = max(0, 200 - 2) = 198 + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_negative_start_epoch_calculation(self, performance_collector, mock_db): + """Test when calculation would result in negative start epoch""" + finalized_epoch = EpochNumber(5) # Very low + + # Setup DB that would lead to high start epoch + mock_db.min_epoch.return_value = 100 + mock_db.max_epoch.return_value = 200 + mock_db.missing_epochs_in.return_value = [] + mock_db.epochs_demand.return_value = {} + + with pytest.raises(ValueError): + # Finalized epoch is lower than min_epoch_in_db + performance_collector.define_epochs_to_process_range(finalized_epoch) + + @pytest.mark.unit + def test_overlapping_epochs_demands(self, performance_collector, mock_db): + """Test with overlapping epochs demands""" + finalized_epoch = EpochNumber(200) + + # Setup DB + mock_db.min_epoch.return_value = 80 + mock_db.max_epoch.return_value = 120 + mock_db.missing_epochs_in.return_value = [] + + # Setup overlapping demands + mock_db.epochs_demand.return_value = 
{ + 'consumer1': (40, 60), # Before DB range + 'consumer2': (50, 70), # Overlapping with consumer1 + 'consumer3': (140, 160), # After DB range + } + mock_db.is_range_available.return_value = False # All unsatisfied + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should take the earliest start (40) and appropriate end + assert result[0] == EpochNumber(40) + assert result[1] == EpochNumber(198) + + @pytest.mark.unit + def test_empty_epochs_demand_dict(self, performance_collector, mock_db): + """Test with explicitly empty epochs demand dictionary""" + finalized_epoch = EpochNumber(100) + + # Setup DB + mock_db.min_epoch.return_value = 50 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [] + mock_db.epochs_demand.return_value = {} # Explicitly empty + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should proceed with normal DB continuation logic + assert result[0] == EpochNumber(91) + assert result[1] == EpochNumber(98) # max_available = 98 + + @pytest.mark.unit + def test_gap_at_beginning_of_db_range(self, performance_collector, mock_db): + """Test when gap is at the very beginning of DB range""" + finalized_epoch = EpochNumber(100) + + # Setup DB with gap at the beginning + mock_db.min_epoch.return_value = 10 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [10, 11, 12] # Gap at beginning + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from the first missing epoch + assert result[0] == EpochNumber(10) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) + + @pytest.mark.unit + def test_gap_at_end_of_db_range(self, performance_collector, mock_db): + """Test when gap is at the very end of DB range""" + finalized_epoch = EpochNumber(100) + + # Setup DB with gap at the end + 
mock_db.min_epoch.return_value = 10 + mock_db.max_epoch.return_value = 90 + mock_db.missing_epochs_in.return_value = [88, 89, 90] # Gap at end + mock_db.epochs_demand.return_value = {} + + result = performance_collector.define_epochs_to_process_range(finalized_epoch) + + # Should start from the first missing epoch + assert result[0] == EpochNumber(88) + # End epoch should be max_available = max(0, 100 - 2) = 98 + assert result[1] == EpochNumber(98) diff --git a/tests/modules/csm/test_processing_attestation.py b/tests/modules/performance_collector/test_processing_attestation.py similarity index 98% rename from tests/modules/csm/test_processing_attestation.py rename to tests/modules/performance_collector/test_processing_attestation.py index 80eb036e1..79ad30873 100644 --- a/tests/modules/csm/test_processing_attestation.py +++ b/tests/modules/performance_collector/test_processing_attestation.py @@ -3,7 +3,7 @@ import pytest -from src.modules.csm.checkpoint import ( +from src.modules.performance.collector.checkpoint import ( get_committee_indices, hex_bitlist_to_list, hex_bitvector_to_list,